coding style: remove useless break after a goto or return
[openocd.git] / src / target / armv4_5.c
1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2008 by Spencer Oliver *
6 * spen@spen-soft.co.uk *
7 * *
8 * Copyright (C) 2008 by Oyvind Harboe *
9 * oyvind.harboe@zylin.com *
10 * *
11 * Copyright (C) 2018 by Liviu Ionescu *
12 * <ilg@livius.net> *
13 * *
14 * This program is free software; you can redistribute it and/or modify *
15 * it under the terms of the GNU General Public License as published by *
16 * the Free Software Foundation; either version 2 of the License, or *
17 * (at your option) any later version. *
18 * *
19 * This program is distributed in the hope that it will be useful, *
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
22 * GNU General Public License for more details. *
23 * *
24 * You should have received a copy of the GNU General Public License *
25 * along with this program. If not, see <http://www.gnu.org/licenses/>. *
26 ***************************************************************************/
27
28 #ifdef HAVE_CONFIG_H
29 #include "config.h"
30 #endif
31
32 #include "arm.h"
33 #include "armv4_5.h"
34 #include "arm_jtag.h"
35 #include "breakpoints.h"
36 #include "arm_disassembler.h"
37 #include <helper/binarybuffer.h>
38 #include "algorithm.h"
39 #include "register.h"
40 #include "semihosting_common.h"
41
42 /* offsets into armv4_5 core register cache */
43 enum {
44 /* ARMV4_5_CPSR = 31, */
45 ARMV4_5_SPSR_FIQ = 32,
46 ARMV4_5_SPSR_IRQ = 33,
47 ARMV4_5_SPSR_SVC = 34,
48 ARMV4_5_SPSR_ABT = 35,
49 ARMV4_5_SPSR_UND = 36,
50 ARM_SPSR_MON = 41,
51 ARM_SPSR_HYP = 43,
52 };
53
54 static const uint8_t arm_usr_indices[17] = {
55 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, ARMV4_5_CPSR,
56 };
57
58 static const uint8_t arm_fiq_indices[8] = {
59 16, 17, 18, 19, 20, 21, 22, ARMV4_5_SPSR_FIQ,
60 };
61
62 static const uint8_t arm_irq_indices[3] = {
63 23, 24, ARMV4_5_SPSR_IRQ,
64 };
65
66 static const uint8_t arm_svc_indices[3] = {
67 25, 26, ARMV4_5_SPSR_SVC,
68 };
69
70 static const uint8_t arm_abt_indices[3] = {
71 27, 28, ARMV4_5_SPSR_ABT,
72 };
73
74 static const uint8_t arm_und_indices[3] = {
75 29, 30, ARMV4_5_SPSR_UND,
76 };
77
78 static const uint8_t arm_mon_indices[3] = {
79 39, 40, ARM_SPSR_MON,
80 };
81
82 static const uint8_t arm_hyp_indices[2] = {
83 42, ARM_SPSR_HYP,
84 };
85
86 static const struct {
87 const char *name;
88 unsigned short psr;
89 /* For user and system modes, these list indices for all registers;
90 * otherwise they're just indices for the shadow registers and SPSR.
91 */
92 unsigned short n_indices;
93 const uint8_t *indices;
94 } arm_mode_data[] = {
95 /* Seven modes are standard from ARM7 on. "System" and "User" share
96 * the same registers; other modes shadow from 3 to 8 registers.
97 */
98 {
99 .name = "User",
100 .psr = ARM_MODE_USR,
101 .n_indices = ARRAY_SIZE(arm_usr_indices),
102 .indices = arm_usr_indices,
103 },
104 {
105 .name = "FIQ",
106 .psr = ARM_MODE_FIQ,
107 .n_indices = ARRAY_SIZE(arm_fiq_indices),
108 .indices = arm_fiq_indices,
109 },
110 {
111 .name = "Supervisor",
112 .psr = ARM_MODE_SVC,
113 .n_indices = ARRAY_SIZE(arm_svc_indices),
114 .indices = arm_svc_indices,
115 },
116 {
117 .name = "Abort",
118 .psr = ARM_MODE_ABT,
119 .n_indices = ARRAY_SIZE(arm_abt_indices),
120 .indices = arm_abt_indices,
121 },
122 {
123 .name = "IRQ",
124 .psr = ARM_MODE_IRQ,
125 .n_indices = ARRAY_SIZE(arm_irq_indices),
126 .indices = arm_irq_indices,
127 },
128 {
129 .name = "Undefined instruction",
130 .psr = ARM_MODE_UND,
131 .n_indices = ARRAY_SIZE(arm_und_indices),
132 .indices = arm_und_indices,
133 },
134 {
135 .name = "System",
136 .psr = ARM_MODE_SYS,
137 .n_indices = ARRAY_SIZE(arm_usr_indices),
138 .indices = arm_usr_indices,
139 },
140 /* TrustZone "Security Extensions" add a secure monitor mode.
141 * This is distinct from a "debug monitor" which can support
142 * non-halting debug, in conjunction with some debuggers.
143 */
144 {
145 .name = "Secure Monitor",
146 .psr = ARM_MODE_MON,
147 .n_indices = ARRAY_SIZE(arm_mon_indices),
148 .indices = arm_mon_indices,
149 },
150 {
151 .name = "Secure Monitor ARM1176JZF-S",
152 .psr = ARM_MODE_1176_MON,
153 .n_indices = ARRAY_SIZE(arm_mon_indices),
154 .indices = arm_mon_indices,
155 },
156
157 /* These special modes are currently only supported
158 * by ARMv6M and ARMv7M profiles */
159 {
160 .name = "Thread",
161 .psr = ARM_MODE_THREAD,
162 },
163 {
164 .name = "Thread (User)",
165 .psr = ARM_MODE_USER_THREAD,
166 },
167 {
168 .name = "Handler",
169 .psr = ARM_MODE_HANDLER,
170 },
171
172 /* armv7-a with virtualization extension */
173 {
174 .name = "Hypervisor",
175 .psr = ARM_MODE_HYP,
176 .n_indices = ARRAY_SIZE(arm_hyp_indices),
177 .indices = arm_hyp_indices,
178 },
179 };
180
181 /** Map PSR mode bits to the name of an ARM processor operating mode. */
182 const char *arm_mode_name(unsigned psr_mode)
183 {
184 for (unsigned i = 0; i < ARRAY_SIZE(arm_mode_data); i++) {
185 if (arm_mode_data[i].psr == psr_mode)
186 return arm_mode_data[i].name;
187 }
188 LOG_ERROR("unrecognized psr mode: %#02x", psr_mode);
189 return "UNRECOGNIZED";
190 }
191
192 /** Return true iff the parameter denotes a valid ARM processor mode. */
193 bool is_arm_mode(unsigned psr_mode)
194 {
195 for (unsigned i = 0; i < ARRAY_SIZE(arm_mode_data); i++) {
196 if (arm_mode_data[i].psr == psr_mode)
197 return true;
198 }
199 return false;
200 }
201
202 /** Map PSR mode bits to linear number indexing armv4_5_core_reg_map */
203 int arm_mode_to_number(enum arm_mode mode)
204 {
205 switch (mode) {
206 case ARM_MODE_ANY:
207 /* map MODE_ANY to user mode */
208 case ARM_MODE_USR:
209 return 0;
210 case ARM_MODE_FIQ:
211 return 1;
212 case ARM_MODE_IRQ:
213 return 2;
214 case ARM_MODE_SVC:
215 return 3;
216 case ARM_MODE_ABT:
217 return 4;
218 case ARM_MODE_UND:
219 return 5;
220 case ARM_MODE_SYS:
221 return 6;
222 case ARM_MODE_MON:
223 case ARM_MODE_1176_MON:
224 return 7;
225 case ARM_MODE_HYP:
226 return 8;
227 default:
228 LOG_ERROR("invalid mode value encountered %d", mode);
229 return -1;
230 }
231 }
232
233 /** Map linear number indexing armv4_5_core_reg_map to PSR mode bits. */
234 enum arm_mode armv4_5_number_to_mode(int number)
235 {
236 switch (number) {
237 case 0:
238 return ARM_MODE_USR;
239 case 1:
240 return ARM_MODE_FIQ;
241 case 2:
242 return ARM_MODE_IRQ;
243 case 3:
244 return ARM_MODE_SVC;
245 case 4:
246 return ARM_MODE_ABT;
247 case 5:
248 return ARM_MODE_UND;
249 case 6:
250 return ARM_MODE_SYS;
251 case 7:
252 return ARM_MODE_MON;
253 case 8:
254 return ARM_MODE_HYP;
255 default:
256 LOG_ERROR("mode index out of bounds %d", number);
257 return ARM_MODE_ANY;
258 }
259 }
260
261 static const char *arm_state_strings[] = {
262 "ARM", "Thumb", "Jazelle", "ThumbEE",
263 };
264
265 /* Templates for ARM core registers.
266 *
267 * NOTE: offsets in this table are coupled to the arm_mode_data
268 * table above, the armv4_5_core_reg_map array below, and also to
269 * the ARMV4_5_CPSR symbol (which should vanish after ARM11 updates).
270 */
271 static const struct {
272 /* The name is used for e.g. the "regs" command. */
273 const char *name;
274
275 /* The {cookie, mode} tuple uniquely identifies one register.
276 * In a given mode, cookies 0..15 map to registers R0..R15,
277 * with R13..R15 usually called SP, LR, PC.
278 *
279 * MODE_ANY is used as *input* to the mapping, and indicates
280 * various special cases (sigh) and errors.
281 *
282 * Cookie 16 is (currently) confusing, since it indicates
283 * CPSR -or- SPSR depending on whether 'mode' is MODE_ANY.
284 * (Exception modes have both CPSR and SPSR registers ...)
285 */
286 unsigned cookie;
287 unsigned gdb_index;
288 enum arm_mode mode;
289 } arm_core_regs[] = {
290 /* IMPORTANT: we guarantee that the first eight cached registers
291 * correspond to r0..r7, and the fifteenth to PC, so that callers
292 * don't need to map them.
293 */
294 [0] = { .name = "r0", .cookie = 0, .mode = ARM_MODE_ANY, .gdb_index = 0, },
295 [1] = { .name = "r1", .cookie = 1, .mode = ARM_MODE_ANY, .gdb_index = 1, },
296 [2] = { .name = "r2", .cookie = 2, .mode = ARM_MODE_ANY, .gdb_index = 2, },
297 [3] = { .name = "r3", .cookie = 3, .mode = ARM_MODE_ANY, .gdb_index = 3, },
298 [4] = { .name = "r4", .cookie = 4, .mode = ARM_MODE_ANY, .gdb_index = 4, },
299 [5] = { .name = "r5", .cookie = 5, .mode = ARM_MODE_ANY, .gdb_index = 5, },
300 [6] = { .name = "r6", .cookie = 6, .mode = ARM_MODE_ANY, .gdb_index = 6, },
301 [7] = { .name = "r7", .cookie = 7, .mode = ARM_MODE_ANY, .gdb_index = 7, },
302
303 /* NOTE: regs 8..12 might be shadowed by FIQ ... flagging
304 * them as MODE_ANY creates special cases. (ANY means
305 * "not mapped" elsewhere; here it's "everything but FIQ".)
306 */
307 [8] = { .name = "r8", .cookie = 8, .mode = ARM_MODE_ANY, .gdb_index = 8, },
308 [9] = { .name = "r9", .cookie = 9, .mode = ARM_MODE_ANY, .gdb_index = 9, },
309 [10] = { .name = "r10", .cookie = 10, .mode = ARM_MODE_ANY, .gdb_index = 10, },
310 [11] = { .name = "r11", .cookie = 11, .mode = ARM_MODE_ANY, .gdb_index = 11, },
311 [12] = { .name = "r12", .cookie = 12, .mode = ARM_MODE_ANY, .gdb_index = 12, },
312
313 /* Historical GDB mapping of indices:
314 * - 13-14 are sp and lr, but banked counterparts are used
315 * - 16-24 are left for deprecated 8 FPA + 1 FPS
316 * - 25 is the cpsr
317 */
318
319 /* NOTE all MODE_USR registers are equivalent to MODE_SYS ones */
320 [13] = { .name = "sp_usr", .cookie = 13, .mode = ARM_MODE_USR, .gdb_index = 26, },
321 [14] = { .name = "lr_usr", .cookie = 14, .mode = ARM_MODE_USR, .gdb_index = 27, },
322
323 /* guaranteed to be at index 15 */
324 [15] = { .name = "pc", .cookie = 15, .mode = ARM_MODE_ANY, .gdb_index = 15, },
325 [16] = { .name = "r8_fiq", .cookie = 8, .mode = ARM_MODE_FIQ, .gdb_index = 28, },
326 [17] = { .name = "r9_fiq", .cookie = 9, .mode = ARM_MODE_FIQ, .gdb_index = 29, },
327 [18] = { .name = "r10_fiq", .cookie = 10, .mode = ARM_MODE_FIQ, .gdb_index = 30, },
328 [19] = { .name = "r11_fiq", .cookie = 11, .mode = ARM_MODE_FIQ, .gdb_index = 31, },
329 [20] = { .name = "r12_fiq", .cookie = 12, .mode = ARM_MODE_FIQ, .gdb_index = 32, },
330
331 [21] = { .name = "sp_fiq", .cookie = 13, .mode = ARM_MODE_FIQ, .gdb_index = 33, },
332 [22] = { .name = "lr_fiq", .cookie = 14, .mode = ARM_MODE_FIQ, .gdb_index = 34, },
333
334 [23] = { .name = "sp_irq", .cookie = 13, .mode = ARM_MODE_IRQ, .gdb_index = 35, },
335 [24] = { .name = "lr_irq", .cookie = 14, .mode = ARM_MODE_IRQ, .gdb_index = 36, },
336
337 [25] = { .name = "sp_svc", .cookie = 13, .mode = ARM_MODE_SVC, .gdb_index = 37, },
338 [26] = { .name = "lr_svc", .cookie = 14, .mode = ARM_MODE_SVC, .gdb_index = 38, },
339
340 [27] = { .name = "sp_abt", .cookie = 13, .mode = ARM_MODE_ABT, .gdb_index = 39, },
341 [28] = { .name = "lr_abt", .cookie = 14, .mode = ARM_MODE_ABT, .gdb_index = 40, },
342
343 [29] = { .name = "sp_und", .cookie = 13, .mode = ARM_MODE_UND, .gdb_index = 41, },
344 [30] = { .name = "lr_und", .cookie = 14, .mode = ARM_MODE_UND, .gdb_index = 42, },
345
346 [31] = { .name = "cpsr", .cookie = 16, .mode = ARM_MODE_ANY, .gdb_index = 25, },
347 [32] = { .name = "spsr_fiq", .cookie = 16, .mode = ARM_MODE_FIQ, .gdb_index = 43, },
348 [33] = { .name = "spsr_irq", .cookie = 16, .mode = ARM_MODE_IRQ, .gdb_index = 44, },
349 [34] = { .name = "spsr_svc", .cookie = 16, .mode = ARM_MODE_SVC, .gdb_index = 45, },
350 [35] = { .name = "spsr_abt", .cookie = 16, .mode = ARM_MODE_ABT, .gdb_index = 46, },
351 [36] = { .name = "spsr_und", .cookie = 16, .mode = ARM_MODE_UND, .gdb_index = 47, },
352
353 /* These are only used for GDB target description, banked registers are accessed instead */
354 [37] = { .name = "sp", .cookie = 13, .mode = ARM_MODE_ANY, .gdb_index = 13, },
355 [38] = { .name = "lr", .cookie = 14, .mode = ARM_MODE_ANY, .gdb_index = 14, },
356
357 /* These exist only when the Security Extension (TrustZone) is present */
358 [39] = { .name = "sp_mon", .cookie = 13, .mode = ARM_MODE_MON, .gdb_index = 48, },
359 [40] = { .name = "lr_mon", .cookie = 14, .mode = ARM_MODE_MON, .gdb_index = 49, },
360 [41] = { .name = "spsr_mon", .cookie = 16, .mode = ARM_MODE_MON, .gdb_index = 50, },
361
362 /* These exist only when the Virtualization Extensions is present */
363 [42] = { .name = "sp_hyp", .cookie = 13, .mode = ARM_MODE_HYP, .gdb_index = 51, },
364 [43] = { .name = "spsr_hyp", .cookie = 16, .mode = ARM_MODE_HYP, .gdb_index = 52, },
365 };
366
367 static const struct {
368 unsigned int id;
369 const char *name;
370 uint32_t bits;
371 enum arm_mode mode;
372 enum reg_type type;
373 const char *group;
374 const char *feature;
375 } arm_vfp_v3_regs[] = {
376 { ARM_VFP_V3_D0, "d0", 64, ARM_MODE_ANY, REG_TYPE_IEEE_DOUBLE, NULL, "org.gnu.gdb.arm.vfp"},
377 { ARM_VFP_V3_D1, "d1", 64, ARM_MODE_ANY, REG_TYPE_IEEE_DOUBLE, NULL, "org.gnu.gdb.arm.vfp"},
378 { ARM_VFP_V3_D2, "d2", 64, ARM_MODE_ANY, REG_TYPE_IEEE_DOUBLE, NULL, "org.gnu.gdb.arm.vfp"},
379 { ARM_VFP_V3_D3, "d3", 64, ARM_MODE_ANY, REG_TYPE_IEEE_DOUBLE, NULL, "org.gnu.gdb.arm.vfp"},
380 { ARM_VFP_V3_D4, "d4", 64, ARM_MODE_ANY, REG_TYPE_IEEE_DOUBLE, NULL, "org.gnu.gdb.arm.vfp"},
381 { ARM_VFP_V3_D5, "d5", 64, ARM_MODE_ANY, REG_TYPE_IEEE_DOUBLE, NULL, "org.gnu.gdb.arm.vfp"},
382 { ARM_VFP_V3_D6, "d6", 64, ARM_MODE_ANY, REG_TYPE_IEEE_DOUBLE, NULL, "org.gnu.gdb.arm.vfp"},
383 { ARM_VFP_V3_D7, "d7", 64, ARM_MODE_ANY, REG_TYPE_IEEE_DOUBLE, NULL, "org.gnu.gdb.arm.vfp"},
384 { ARM_VFP_V3_D8, "d8", 64, ARM_MODE_ANY, REG_TYPE_IEEE_DOUBLE, NULL, "org.gnu.gdb.arm.vfp"},
385 { ARM_VFP_V3_D9, "d9", 64, ARM_MODE_ANY, REG_TYPE_IEEE_DOUBLE, NULL, "org.gnu.gdb.arm.vfp"},
386 { ARM_VFP_V3_D10, "d10", 64, ARM_MODE_ANY, REG_TYPE_IEEE_DOUBLE, NULL, "org.gnu.gdb.arm.vfp"},
387 { ARM_VFP_V3_D11, "d11", 64, ARM_MODE_ANY, REG_TYPE_IEEE_DOUBLE, NULL, "org.gnu.gdb.arm.vfp"},
388 { ARM_VFP_V3_D12, "d12", 64, ARM_MODE_ANY, REG_TYPE_IEEE_DOUBLE, NULL, "org.gnu.gdb.arm.vfp"},
389 { ARM_VFP_V3_D13, "d13", 64, ARM_MODE_ANY, REG_TYPE_IEEE_DOUBLE, NULL, "org.gnu.gdb.arm.vfp"},
390 { ARM_VFP_V3_D14, "d14", 64, ARM_MODE_ANY, REG_TYPE_IEEE_DOUBLE, NULL, "org.gnu.gdb.arm.vfp"},
391 { ARM_VFP_V3_D15, "d15", 64, ARM_MODE_ANY, REG_TYPE_IEEE_DOUBLE, NULL, "org.gnu.gdb.arm.vfp"},
392 { ARM_VFP_V3_D16, "d16", 64, ARM_MODE_ANY, REG_TYPE_IEEE_DOUBLE, NULL, "org.gnu.gdb.arm.vfp"},
393 { ARM_VFP_V3_D17, "d17", 64, ARM_MODE_ANY, REG_TYPE_IEEE_DOUBLE, NULL, "org.gnu.gdb.arm.vfp"},
394 { ARM_VFP_V3_D18, "d18", 64, ARM_MODE_ANY, REG_TYPE_IEEE_DOUBLE, NULL, "org.gnu.gdb.arm.vfp"},
395 { ARM_VFP_V3_D19, "d19", 64, ARM_MODE_ANY, REG_TYPE_IEEE_DOUBLE, NULL, "org.gnu.gdb.arm.vfp"},
396 { ARM_VFP_V3_D20, "d20", 64, ARM_MODE_ANY, REG_TYPE_IEEE_DOUBLE, NULL, "org.gnu.gdb.arm.vfp"},
397 { ARM_VFP_V3_D21, "d21", 64, ARM_MODE_ANY, REG_TYPE_IEEE_DOUBLE, NULL, "org.gnu.gdb.arm.vfp"},
398 { ARM_VFP_V3_D22, "d22", 64, ARM_MODE_ANY, REG_TYPE_IEEE_DOUBLE, NULL, "org.gnu.gdb.arm.vfp"},
399 { ARM_VFP_V3_D23, "d23", 64, ARM_MODE_ANY, REG_TYPE_IEEE_DOUBLE, NULL, "org.gnu.gdb.arm.vfp"},
400 { ARM_VFP_V3_D24, "d24", 64, ARM_MODE_ANY, REG_TYPE_IEEE_DOUBLE, NULL, "org.gnu.gdb.arm.vfp"},
401 { ARM_VFP_V3_D25, "d25", 64, ARM_MODE_ANY, REG_TYPE_IEEE_DOUBLE, NULL, "org.gnu.gdb.arm.vfp"},
402 { ARM_VFP_V3_D26, "d26", 64, ARM_MODE_ANY, REG_TYPE_IEEE_DOUBLE, NULL, "org.gnu.gdb.arm.vfp"},
403 { ARM_VFP_V3_D27, "d27", 64, ARM_MODE_ANY, REG_TYPE_IEEE_DOUBLE, NULL, "org.gnu.gdb.arm.vfp"},
404 { ARM_VFP_V3_D28, "d28", 64, ARM_MODE_ANY, REG_TYPE_IEEE_DOUBLE, NULL, "org.gnu.gdb.arm.vfp"},
405 { ARM_VFP_V3_D29, "d29", 64, ARM_MODE_ANY, REG_TYPE_IEEE_DOUBLE, NULL, "org.gnu.gdb.arm.vfp"},
406 { ARM_VFP_V3_D30, "d30", 64, ARM_MODE_ANY, REG_TYPE_IEEE_DOUBLE, NULL, "org.gnu.gdb.arm.vfp"},
407 { ARM_VFP_V3_D31, "d31", 64, ARM_MODE_ANY, REG_TYPE_IEEE_DOUBLE, NULL, "org.gnu.gdb.arm.vfp"},
408 { ARM_VFP_V3_FPSCR, "fpscr", 32, ARM_MODE_ANY, REG_TYPE_INT, "float", "org.gnu.gdb.arm.vfp"},
409 };
410
411 /* map core mode (USR, FIQ, ...) and register number to
412 * indices into the register cache
413 */
414 const int armv4_5_core_reg_map[9][17] = {
415 { /* USR */
416 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 31
417 },
418 { /* FIQ (8 shadows of USR, vs normal 3) */
419 0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, 15, 32
420 },
421 { /* IRQ */
422 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 23, 24, 15, 33
423 },
424 { /* SVC */
425 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 25, 26, 15, 34
426 },
427 { /* ABT */
428 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 27, 28, 15, 35
429 },
430 { /* UND */
431 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 29, 30, 15, 36
432 },
433 { /* SYS (same registers as USR) */
434 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 31
435 },
436 { /* MON */
437 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 39, 40, 15, 41,
438 },
439 { /* HYP */
440 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 42, 14, 15, 43,
441 }
442 };
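/* Worked example of the mapping above (values taken from the tables in this
 * file): in IRQ mode (row 2) the banked stack pointer is cache index
 * armv4_5_core_reg_map[2][13] == 23, i.e. the "sp_irq" entry of
 * arm_core_regs[], while r0..r7 and pc map to the same cache indices in
 * every mode; only r8..r14 and the SPSR need this indirection.
 */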
443
444 /**
445 * Configures host-side ARM records to reflect the specified CPSR.
446 * Later, code can use arm_reg_current() to map register numbers
447 * according to how they are exposed by this mode.
448 */
449 void arm_set_cpsr(struct arm *arm, uint32_t cpsr)
450 {
451 enum arm_mode mode = cpsr & 0x1f;
452 int num;
453
454 /* NOTE: this may be called very early, before the register
455 * cache is set up. We can't defend against many errors, in
456 * particular against CPSRs that aren't valid *here* ...
457 */
458 if (arm->cpsr) {
459 buf_set_u32(arm->cpsr->value, 0, 32, cpsr);
460 arm->cpsr->valid = true;
461 arm->cpsr->dirty = false;
462 }
463
464 arm->core_mode = mode;
465
466 /* mode_to_number() warned; set up a somewhat-sane mapping */
467 num = arm_mode_to_number(mode);
468 if (num < 0) {
469 mode = ARM_MODE_USR;
470 num = 0;
471 }
472
473 arm->map = &armv4_5_core_reg_map[num][0];
474 arm->spsr = (mode == ARM_MODE_USR || mode == ARM_MODE_SYS)
475 ? NULL
476 : arm->core_cache->reg_list + arm->map[16];
477
478 /* Older ARMs won't have the J bit */
479 enum arm_state state;
480
481 if (cpsr & (1 << 5)) { /* T */
482 if (cpsr & (1 << 24)) { /* J */
483 LOG_WARNING("ThumbEE -- incomplete support");
484 state = ARM_STATE_THUMB_EE;
485 } else
486 state = ARM_STATE_THUMB;
487 } else {
488 if (cpsr & (1 << 24)) { /* J */
489 LOG_ERROR("Jazelle state handling is BROKEN!");
490 state = ARM_STATE_JAZELLE;
491 } else
492 state = ARM_STATE_ARM;
493 }
494 arm->core_state = state;
495
496 LOG_DEBUG("set CPSR %#8.8x: %s mode, %s state", (unsigned) cpsr,
497 arm_mode_name(mode),
498 arm_state_strings[arm->core_state]);
499 }
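/* Illustrative CPSR values (not taken from any particular core): 0x600000d3
 * has mode bits 0x13 (Supervisor) with T=0 and J=0, so arm_set_cpsr() would
 * select the SVC row of armv4_5_core_reg_map and leave the core in ARM
 * state; 0x6000003f has mode bits 0x1f (System) with T=1, selecting the
 * System/User row and Thumb state.
 */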
500
501 /**
502 * Returns handle to the register currently mapped to a given number.
503 * Someone must have called arm_set_cpsr() before.
504 *
505 * \param arm This core's state and registers are used.
506 * \param regnum From 0..15 corresponding to R0..R14 and PC.
507 * Note that R0..R7 don't require mapping; you may access those
508 * as the first eight entries in the register cache. Likewise
509 * R15 (PC) doesn't need mapping; you may also access it directly.
510 * However, R8..R14, and SPSR (arm->spsr) *must* be mapped.
511 * CPSR (arm->cpsr) is also not mapped.
512 */
513 struct reg *arm_reg_current(struct arm *arm, unsigned regnum)
514 {
515 struct reg *r;
516
517 if (regnum > 16)
518 return NULL;
519
520 if (!arm->map) {
521 LOG_ERROR("Register map is not available yet, the target is not fully initialised");
522 r = arm->core_cache->reg_list + regnum;
523 } else
524 r = arm->core_cache->reg_list + arm->map[regnum];
525
526 /* e.g. invalid CPSR said "secure monitor" mode on a core
527 * that doesn't support it...
528 */
529 if (!r) {
530 LOG_ERROR("Invalid CPSR mode");
531 r = arm->core_cache->reg_list + regnum;
532 }
533
534 return r;
535 }
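/* Typical use (sketch): once arm_set_cpsr() has been called, the banked LR
 * of the current mode can be read as
 *
 *	struct reg *lr = arm_reg_current(arm, 14);
 *	uint32_t val = buf_get_u32(lr->value, 0, 32);
 *
 * whereas r0..r7 and the PC may be accessed directly through
 * arm->core_cache->reg_list[] without going through the map.
 */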
536
537 static const uint8_t arm_gdb_dummy_fp_value[12];
538
539 static struct reg_feature arm_gdb_dummy_fp_features = {
540 .name = "net.sourceforge.openocd.fake_fpa"
541 };
542
543 /**
544 * Dummy FPA registers are required to support GDB on ARM.
545 * Register packets require eight obsolete FPA register values.
546 * Modern ARM cores use Vector Floating Point (VFP), if they
547 * have any floating point support. VFP is not FPA-compatible.
548 */
549 struct reg arm_gdb_dummy_fp_reg = {
550 .name = "GDB dummy FPA register",
551 .value = (uint8_t *) arm_gdb_dummy_fp_value,
552 .valid = true,
553 .size = 96,
554 .exist = false,
555 .number = 16,
556 .feature = &arm_gdb_dummy_fp_features,
557 .group = "fake_fpa",
558 };
559
560 static const uint8_t arm_gdb_dummy_fps_value[4];
561
562 /**
563 * Dummy FPA status registers are required to support GDB on ARM.
564 * Register packets require an obsolete FPA status register.
565 */
566 struct reg arm_gdb_dummy_fps_reg = {
567 .name = "GDB dummy FPA status register",
568 .value = (uint8_t *) arm_gdb_dummy_fps_value,
569 .valid = true,
570 .size = 32,
571 .exist = false,
572 .number = 24,
573 .feature = &arm_gdb_dummy_fp_features,
574 .group = "fake_fpa",
575 };
576
577 static void arm_gdb_dummy_init(void) __attribute__ ((constructor));
578
579 static void arm_gdb_dummy_init(void)
580 {
581 register_init_dummy(&arm_gdb_dummy_fp_reg);
582 register_init_dummy(&arm_gdb_dummy_fps_reg);
583 }
584
585 static int armv4_5_get_core_reg(struct reg *reg)
586 {
587 int retval;
588 struct arm_reg *reg_arch_info = reg->arch_info;
589 struct target *target = reg_arch_info->target;
590
591 if (target->state != TARGET_HALTED) {
592 LOG_ERROR("Target not halted");
593 return ERROR_TARGET_NOT_HALTED;
594 }
595
596 retval = reg_arch_info->arm->read_core_reg(target, reg,
597 reg_arch_info->num, reg_arch_info->mode);
598 if (retval == ERROR_OK) {
599 reg->valid = true;
600 reg->dirty = false;
601 }
602
603 return retval;
604 }
605
606 static int armv4_5_set_core_reg(struct reg *reg, uint8_t *buf)
607 {
608 struct arm_reg *reg_arch_info = reg->arch_info;
609 struct target *target = reg_arch_info->target;
610 struct arm *armv4_5_target = target_to_arm(target);
611 uint32_t value = buf_get_u32(buf, 0, 32);
612
613 if (target->state != TARGET_HALTED) {
614 LOG_ERROR("Target not halted");
615 return ERROR_TARGET_NOT_HALTED;
616 }
617
618 /* Except for CPSR, the "reg" command exposes a writeback model
619 * for the register cache.
620 */
621 if (reg == armv4_5_target->cpsr) {
622 arm_set_cpsr(armv4_5_target, value);
623
624 /* Older cores need help to be in ARM mode during halt
625 * mode debug, so we clear the J and T bits if we flush.
626 * For newer cores (v6/v7a/v7r) we don't need that, but
627 * it won't hurt since CPSR is always flushed anyway.
628 */
629 if (armv4_5_target->core_mode !=
630 (enum arm_mode)(value & 0x1f)) {
631 LOG_DEBUG("changing ARM core mode to '%s'",
632 arm_mode_name(value & 0x1f));
633 value &= ~((1 << 24) | (1 << 5));
634 uint8_t t[4];
635 buf_set_u32(t, 0, 32, value);
636 armv4_5_target->write_core_reg(target, reg,
637 16, ARM_MODE_ANY, t);
638 }
639 } else {
640 buf_set_u32(reg->value, 0, 32, value);
641 if (reg->size == 64) {
642 value = buf_get_u32(buf + 4, 0, 32);
643 buf_set_u32(reg->value + 4, 0, 32, value);
644 }
645 reg->valid = true;
646 }
647 reg->dirty = true;
648
649 return ERROR_OK;
650 }
651
652 static const struct reg_arch_type arm_reg_type = {
653 .get = armv4_5_get_core_reg,
654 .set = armv4_5_set_core_reg,
655 };
656
657 struct reg_cache *arm_build_reg_cache(struct target *target, struct arm *arm)
658 {
659 int num_regs = ARRAY_SIZE(arm_core_regs);
660 int num_core_regs = num_regs;
661 if (arm->arm_vfp_version == ARM_VFP_V3)
662 num_regs += ARRAY_SIZE(arm_vfp_v3_regs);
663
664 struct reg_cache *cache = malloc(sizeof(struct reg_cache));
665 struct reg *reg_list = calloc(num_regs, sizeof(struct reg));
666 struct arm_reg *reg_arch_info = calloc(num_regs, sizeof(struct arm_reg));
667 int i;
668
669 if (!cache || !reg_list || !reg_arch_info) {
670 free(cache);
671 free(reg_list);
672 free(reg_arch_info);
673 return NULL;
674 }
675
676 cache->name = "ARM registers";
677 cache->next = NULL;
678 cache->reg_list = reg_list;
679 cache->num_regs = 0;
680
681 for (i = 0; i < num_core_regs; i++) {
682 /* Skip registers this core doesn't expose */
683 if (arm_core_regs[i].mode == ARM_MODE_MON
684 && arm->core_type != ARM_CORE_TYPE_SEC_EXT
685 && arm->core_type != ARM_CORE_TYPE_VIRT_EXT)
686 continue;
687 if (arm_core_regs[i].mode == ARM_MODE_HYP
688 && arm->core_type != ARM_CORE_TYPE_VIRT_EXT)
689 continue;
690
691 /* REVISIT handle Cortex-M, which only shadows R13/SP */
692
693 reg_arch_info[i].num = arm_core_regs[i].cookie;
694 reg_arch_info[i].mode = arm_core_regs[i].mode;
695 reg_arch_info[i].target = target;
696 reg_arch_info[i].arm = arm;
697
698 reg_list[i].name = arm_core_regs[i].name;
699 reg_list[i].number = arm_core_regs[i].gdb_index;
700 reg_list[i].size = 32;
701 reg_list[i].value = reg_arch_info[i].value;
702 reg_list[i].type = &arm_reg_type;
703 reg_list[i].arch_info = &reg_arch_info[i];
704 reg_list[i].exist = true;
705
706 /* This really depends on the calling convention in use */
707 reg_list[i].caller_save = false;
708
709 /* Registers data type, as used by GDB target description */
710 reg_list[i].reg_data_type = malloc(sizeof(struct reg_data_type));
711 switch (arm_core_regs[i].cookie) {
712 case 13:
713 reg_list[i].reg_data_type->type = REG_TYPE_DATA_PTR;
714 break;
715 case 14:
716 case 15:
717 reg_list[i].reg_data_type->type = REG_TYPE_CODE_PTR;
718 break;
719 default:
720 reg_list[i].reg_data_type->type = REG_TYPE_UINT32;
721 break;
722 }
723
724 /* let GDB show banked registers only in "info all-reg" */
725 reg_list[i].feature = malloc(sizeof(struct reg_feature));
726 if (reg_list[i].number <= 15 || reg_list[i].number == 25) {
727 reg_list[i].feature->name = "org.gnu.gdb.arm.core";
728 reg_list[i].group = "general";
729 } else {
730 reg_list[i].feature->name = "net.sourceforge.openocd.banked";
731 reg_list[i].group = "banked";
732 }
733
734 cache->num_regs++;
735 }
736
737 int j;
738 for (i = num_core_regs, j = 0; i < num_regs; i++, j++) {
739 reg_arch_info[i].num = arm_vfp_v3_regs[j].id;
740 reg_arch_info[i].mode = arm_vfp_v3_regs[j].mode;
741 reg_arch_info[i].target = target;
742 reg_arch_info[i].arm = arm;
743
744 reg_list[i].name = arm_vfp_v3_regs[j].name;
745 reg_list[i].number = arm_vfp_v3_regs[j].id;
746 reg_list[i].size = arm_vfp_v3_regs[j].bits;
747 reg_list[i].value = reg_arch_info[i].value;
748 reg_list[i].type = &arm_reg_type;
749 reg_list[i].arch_info = &reg_arch_info[i];
750 reg_list[i].exist = true;
751
752 reg_list[i].caller_save = false;
753
754 reg_list[i].reg_data_type = malloc(sizeof(struct reg_data_type));
755 reg_list[i].reg_data_type->type = arm_vfp_v3_regs[j].type;
756
757 reg_list[i].feature = malloc(sizeof(struct reg_feature));
758 reg_list[i].feature->name = arm_vfp_v3_regs[j].feature;
759
760 reg_list[i].group = arm_vfp_v3_regs[j].group;
761
762 cache->num_regs++;
763 }
764
765 arm->pc = reg_list + 15;
766 arm->cpsr = reg_list + ARMV4_5_CPSR;
767 arm->core_cache = cache;
768
769 return cache;
770 }
771
772 int arm_arch_state(struct target *target)
773 {
774 struct arm *arm = target_to_arm(target);
775
776 if (arm->common_magic != ARM_COMMON_MAGIC) {
777 LOG_ERROR("BUG: called for a non-ARM target");
778 return ERROR_FAIL;
779 }
780
781 /* avoid filling log waiting for fileio reply */
782 if (target->semihosting && target->semihosting->hit_fileio)
783 return ERROR_OK;
784
785 LOG_USER("target halted in %s state due to %s, current mode: %s\n"
786 "cpsr: 0x%8.8" PRIx32 " pc: 0x%8.8" PRIx32 "%s%s",
787 arm_state_strings[arm->core_state],
788 debug_reason_name(target),
789 arm_mode_name(arm->core_mode),
790 buf_get_u32(arm->cpsr->value, 0, 32),
791 buf_get_u32(arm->pc->value, 0, 32),
792 (target->semihosting && target->semihosting->is_active) ? ", semihosting" : "",
793 (target->semihosting && target->semihosting->is_fileio) ? " fileio" : "");
794
795 return ERROR_OK;
796 }
797
798 COMMAND_HANDLER(handle_armv4_5_reg_command)
799 {
800 struct target *target = get_current_target(CMD_CTX);
801 struct arm *arm = target_to_arm(target);
802 struct reg *regs;
803
804 if (!is_arm(arm)) {
805 command_print(CMD, "current target isn't an ARM");
806 return ERROR_FAIL;
807 }
808
809 if (target->state != TARGET_HALTED) {
810 command_print(CMD, "error: target must be halted for register accesses");
811 return ERROR_FAIL;
812 }
813
814 if (arm->core_type != ARM_CORE_TYPE_STD) {
815 command_print(CMD,
816 "Microcontroller Profile not supported - use standard reg cmd");
817 return ERROR_OK;
818 }
819
820 if (!is_arm_mode(arm->core_mode)) {
821 LOG_ERROR("not a valid arm core mode - communication failure?");
822 return ERROR_FAIL;
823 }
824
825 if (!arm->full_context) {
826 command_print(CMD, "error: target doesn't support %s",
827 CMD_NAME);
828 return ERROR_FAIL;
829 }
830
831 regs = arm->core_cache->reg_list;
832
833 for (unsigned mode = 0; mode < ARRAY_SIZE(arm_mode_data); mode++) {
834 const char *name;
835 char *sep = "\n";
836 char *shadow = "";
837
838 /* label this bank of registers (or shadows) */
839 switch (arm_mode_data[mode].psr) {
840 case ARM_MODE_SYS:
841 continue;
842 case ARM_MODE_USR:
843 name = "System and User";
844 sep = "";
845 break;
846 case ARM_MODE_HYP:
847 if (arm->core_type != ARM_CORE_TYPE_VIRT_EXT)
848 continue;
849 /* FALLTHROUGH */
850 case ARM_MODE_MON:
851 if (arm->core_type != ARM_CORE_TYPE_SEC_EXT
852 && arm->core_type != ARM_CORE_TYPE_VIRT_EXT)
853 continue;
854 /* FALLTHROUGH */
855 default:
856 name = arm_mode_data[mode].name;
857 shadow = "shadow ";
858 break;
859 }
860 command_print(CMD, "%s%s mode %sregisters",
861 sep, name, shadow);
862
863 /* display N rows of up to 4 registers each */
864 for (unsigned i = 0; i < arm_mode_data[mode].n_indices; ) {
865 char output[80];
866 int output_len = 0;
867
868 for (unsigned j = 0; j < 4; j++, i++) {
869 uint32_t value;
870 struct reg *reg = regs;
871
872 if (i >= arm_mode_data[mode].n_indices)
873 break;
874
875 reg += arm_mode_data[mode].indices[i];
876
877 /* REVISIT be smarter about faults... */
878 if (!reg->valid)
879 arm->full_context(target);
880
881 value = buf_get_u32(reg->value, 0, 32);
882 output_len += snprintf(output + output_len,
883 sizeof(output) - output_len,
884 "%8s: %8.8" PRIx32 " ",
885 reg->name, value);
886 }
887 command_print(CMD, "%s", output);
888 }
889 }
890
891 return ERROR_OK;
892 }
893
894 COMMAND_HANDLER(handle_armv4_5_core_state_command)
895 {
896 struct target *target = get_current_target(CMD_CTX);
897 struct arm *arm = target_to_arm(target);
898
899 if (!is_arm(arm)) {
900 command_print(CMD, "current target isn't an ARM");
901 return ERROR_FAIL;
902 }
903
904 if (arm->core_type == ARM_CORE_TYPE_M_PROFILE) {
905 /* armv7m not supported */
906 command_print(CMD, "Unsupported Command");
907 return ERROR_OK;
908 }
909
910 if (CMD_ARGC > 0) {
911 if (strcmp(CMD_ARGV[0], "arm") == 0)
912 arm->core_state = ARM_STATE_ARM;
913 if (strcmp(CMD_ARGV[0], "thumb") == 0)
914 arm->core_state = ARM_STATE_THUMB;
915 }
916
917 command_print(CMD, "core state: %s", arm_state_strings[arm->core_state]);
918
919 return ERROR_OK;
920 }
921
922 COMMAND_HANDLER(handle_arm_disassemble_command)
923 {
924 int retval = ERROR_OK;
925 struct target *target = get_current_target(CMD_CTX);
926
927 if (target == NULL) {
928 LOG_ERROR("No target selected");
929 return ERROR_FAIL;
930 }
931
932 struct arm *arm = target_to_arm(target);
933 target_addr_t address;
934 int count = 1;
935 int thumb = 0;
936
937 if (!is_arm(arm)) {
938 command_print(CMD, "current target isn't an ARM");
939 return ERROR_FAIL;
940 }
941
942 if (arm->core_type == ARM_CORE_TYPE_M_PROFILE) {
943 /* armv7m is always thumb mode */
944 thumb = 1;
945 }
946
947 switch (CMD_ARGC) {
948 case 3:
949 if (strcmp(CMD_ARGV[2], "thumb") != 0)
950 goto usage;
951 thumb = 1;
952 /* FALL THROUGH */
953 case 2:
954 COMMAND_PARSE_NUMBER(int, CMD_ARGV[1], count);
955 /* FALL THROUGH */
956 case 1:
957 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], address);
958 if (address & 0x01) {
959 if (!thumb) {
960 command_print(CMD, "Disassemble as Thumb");
961 thumb = 1;
962 }
963 address &= ~1;
964 }
965 break;
966 default:
967 usage:
968 count = 0;
969 retval = ERROR_COMMAND_SYNTAX_ERROR;
970 }
971
972 while (count-- > 0) {
973 struct arm_instruction cur_instruction;
974
975 if (thumb) {
976 /* Always use Thumb2 disassembly for best handling
977 * of 32-bit BL/BLX, and to work with newer cores
978 * (some ARMv6, all ARMv7) that use Thumb2.
979 */
980 retval = thumb2_opcode(target, address,
981 &cur_instruction);
982 if (retval != ERROR_OK)
983 break;
984 } else {
985 uint32_t opcode;
986
987 retval = target_read_u32(target, address, &opcode);
988 if (retval != ERROR_OK)
989 break;
990 retval = arm_evaluate_opcode(opcode, address,
991 &cur_instruction);
992 if (retval != ERROR_OK)
993 break;
994 }
995 command_print(CMD, "%s", cur_instruction.text);
996 address += cur_instruction.instruction_size;
997 }
998
999 return retval;
1000 }
1001
1002 static int jim_mcrmrc(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
1003 {
1004 struct command_context *context;
1005 struct target *target;
1006 struct arm *arm;
1007 int retval;
1008
1009 context = current_command_context(interp);
1010 assert(context != NULL);
1011
1012 target = get_current_target(context);
1013 if (target == NULL) {
1014 LOG_ERROR("%s: no current target", __func__);
1015 return JIM_ERR;
1016 }
1017 if (!target_was_examined(target)) {
1018 LOG_ERROR("%s: not yet examined", target_name(target));
1019 return JIM_ERR;
1020 }
1021 arm = target_to_arm(target);
1022 if (!is_arm(arm)) {
1023 LOG_ERROR("%s: not an ARM", target_name(target));
1024 return JIM_ERR;
1025 }
1026
1027 if ((argc < 6) || (argc > 7)) {
1028 /* FIXME use the command name to verify # params... */
1029 LOG_ERROR("%s: wrong number of arguments", __func__);
1030 return JIM_ERR;
1031 }
1032
1033 int cpnum;
1034 uint32_t op1;
1035 uint32_t op2;
1036 uint32_t CRn;
1037 uint32_t CRm;
1038 uint32_t value;
1039 long l;
1040
1041 /* NOTE: parameter sequence matches ARM instruction set usage:
1042 * MCR pNUM, op1, rX, CRn, CRm, op2 ; write CP from rX
1043 * MRC pNUM, op1, rX, CRn, CRm, op2 ; read CP into rX
1044 * The "rX" is necessarily omitted; it uses Tcl mechanisms.
1045 */
1046 retval = Jim_GetLong(interp, argv[1], &l);
1047 if (retval != JIM_OK)
1048 return retval;
1049 if (l & ~0xf) {
1050 LOG_ERROR("%s: %s %d out of range", __func__,
1051 "coprocessor", (int) l);
1052 return JIM_ERR;
1053 }
1054 cpnum = l;
1055
1056 retval = Jim_GetLong(interp, argv[2], &l);
1057 if (retval != JIM_OK)
1058 return retval;
1059 if (l & ~0x7) {
1060 LOG_ERROR("%s: %s %d out of range", __func__,
1061 "op1", (int) l);
1062 return JIM_ERR;
1063 }
1064 op1 = l;
1065
1066 retval = Jim_GetLong(interp, argv[3], &l);
1067 if (retval != JIM_OK)
1068 return retval;
1069 if (l & ~0xf) {
1070 LOG_ERROR("%s: %s %d out of range", __func__,
1071 "CRn", (int) l);
1072 return JIM_ERR;
1073 }
1074 CRn = l;
1075
1076 retval = Jim_GetLong(interp, argv[4], &l);
1077 if (retval != JIM_OK)
1078 return retval;
1079 if (l & ~0xf) {
1080 LOG_ERROR("%s: %s %d out of range", __func__,
1081 "CRm", (int) l);
1082 return JIM_ERR;
1083 }
1084 CRm = l;
1085
1086 retval = Jim_GetLong(interp, argv[5], &l);
1087 if (retval != JIM_OK)
1088 return retval;
1089 if (l & ~0x7) {
1090 LOG_ERROR("%s: %s %d out of range", __func__,
1091 "op2", (int) l);
1092 return JIM_ERR;
1093 }
1094 op2 = l;
1095
1096 value = 0;
1097
1098 /* FIXME don't assume "mrc" vs "mcr" from the number of params;
1099 * that could easily be a typo! Check both...
1100 *
1101 * FIXME change the call syntax here ... simplest to just pass
1102 * the MRC() or MCR() instruction to be executed. That will also
1103 * let us support the "mrc2" and "mcr2" opcodes (toggling one bit)
1104 * if that's ever needed.
1105 */
1106 if (argc == 7) {
1107 retval = Jim_GetLong(interp, argv[6], &l);
1108 if (retval != JIM_OK)
1109 return retval;
1110 value = l;
1111
1112 /* NOTE: parameters reordered! */
1113 /* ARMV4_5_MCR(cpnum, op1, 0, CRn, CRm, op2) */
1114 retval = arm->mcr(target, cpnum, op1, op2, CRn, CRm, value);
1115 if (retval != ERROR_OK)
1116 return JIM_ERR;
1117 } else {
1118 /* NOTE: parameters reordered! */
1119 /* ARMV4_5_MRC(cpnum, op1, 0, CRn, CRm, op2) */
1120 retval = arm->mrc(target, cpnum, op1, op2, CRn, CRm, &value);
1121 if (retval != ERROR_OK)
1122 return JIM_ERR;
1123
1124 Jim_SetResult(interp, Jim_NewIntObj(interp, value));
1125 }
1126
1127 return JIM_OK;
1128 }
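/* Example usage from the Tcl/telnet prompt; the argument order matches the
 * "usage" strings registered below (cpnum op1 CRn CRm op2 [value]):
 *
 *	arm mrc 15 0 0 0 0        (reads CP15 c0,c0,0, the Main ID register)
 *	arm mcr 15 0 7 5 0 0      (writes CP15 c7,c5,0, ICIALLU, on cores that have it)
 *
 * Note that the arm->mrc()/arm->mcr() callbacks take op2 before CRn/CRm,
 * hence the "parameters reordered" comments above.
 */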
1129
1130 extern const struct command_registration semihosting_common_handlers[];
1131
1132 static const struct command_registration arm_exec_command_handlers[] = {
1133 {
1134 .name = "reg",
1135 .handler = handle_armv4_5_reg_command,
1136 .mode = COMMAND_EXEC,
1137 .help = "display ARM core registers",
1138 .usage = "",
1139 },
1140 {
1141 .name = "core_state",
1142 .handler = handle_armv4_5_core_state_command,
1143 .mode = COMMAND_EXEC,
1144 .usage = "['arm'|'thumb']",
1145 .help = "display/change ARM core state",
1146 },
1147 {
1148 .name = "disassemble",
1149 .handler = handle_arm_disassemble_command,
1150 .mode = COMMAND_EXEC,
1151 .usage = "address [count ['thumb']]",
1152 .help = "disassemble instructions",
1153 },
1154 {
1155 .name = "mcr",
1156 .mode = COMMAND_EXEC,
1157 .jim_handler = &jim_mcrmrc,
1158 .help = "write coprocessor register",
1159 .usage = "cpnum op1 CRn CRm op2 value",
1160 },
1161 {
1162 .name = "mrc",
1163 .mode = COMMAND_EXEC,
1164 .jim_handler = &jim_mcrmrc,
1165 .help = "read coprocessor register",
1166 .usage = "cpnum op1 CRn CRm op2",
1167 },
1168 {
1169 .chain = semihosting_common_handlers,
1170 },
1171 COMMAND_REGISTRATION_DONE
1172 };
1173 const struct command_registration arm_command_handlers[] = {
1174 {
1175 .name = "arm",
1176 .mode = COMMAND_ANY,
1177 .help = "ARM command group",
1178 .usage = "",
1179 .chain = arm_exec_command_handlers,
1180 },
1181 COMMAND_REGISTRATION_DONE
1182 };
1183
1184 /*
1185 * gdb for ARM targets (e.g. arm-none-eabi-gdb) supports several variants
1186 * of the ARM architecture. You can list them from the gdb command prompt
1187 * by typing "set architecture " and then pressing the TAB key.
1188 * The default, selected automatically, is "arm".
1189 * Use that default here so that gdb-multiarch behaves in the same way as
1190 * a gdb built for ARM. The user can still select a specific architecture
1191 * variant with the gdb command.
1192 */
1193 const char *arm_get_gdb_arch(struct target *target)
1194 {
1195 return "arm";
1196 }
1197
1198 int arm_get_gdb_reg_list(struct target *target,
1199 struct reg **reg_list[], int *reg_list_size,
1200 enum target_register_class reg_class)
1201 {
1202 struct arm *arm = target_to_arm(target);
1203 unsigned int i;
1204
1205 if (!is_arm_mode(arm->core_mode)) {
1206 LOG_ERROR("not a valid arm core mode - communication failure?");
1207 return ERROR_FAIL;
1208 }
1209
1210 switch (reg_class) {
1211 case REG_CLASS_GENERAL:
1212 *reg_list_size = 26;
1213 *reg_list = malloc(sizeof(struct reg *) * (*reg_list_size));
1214
1215 for (i = 0; i < 16; i++)
1216 (*reg_list)[i] = arm_reg_current(arm, i);
1217
1218 /* For GDB compatibility, take the FPA registers' size into account and zero-fill them */
1219 for (i = 16; i < 24; i++)
1220 (*reg_list)[i] = &arm_gdb_dummy_fp_reg;
1221 (*reg_list)[24] = &arm_gdb_dummy_fps_reg;
1222
1223 (*reg_list)[25] = arm->cpsr;
1224
1225 return ERROR_OK;
1226
1227 case REG_CLASS_ALL:
1228 switch (arm->core_type) {
1229 case ARM_CORE_TYPE_SEC_EXT:
1230 *reg_list_size = 51;
1231 break;
1232 case ARM_CORE_TYPE_VIRT_EXT:
1233 *reg_list_size = 53;
1234 break;
1235 default:
1236 *reg_list_size = 48;
1237 }
1238 unsigned int list_size_core = *reg_list_size;
1239 if (arm->arm_vfp_version == ARM_VFP_V3)
1240 *reg_list_size += 33;
1241
1242 *reg_list = malloc(sizeof(struct reg *) * (*reg_list_size));
1243
1244 for (i = 0; i < 16; i++)
1245 (*reg_list)[i] = arm_reg_current(arm, i);
1246
1247 for (i = 13; i < ARRAY_SIZE(arm_core_regs); i++) {
1248 int reg_index = arm->core_cache->reg_list[i].number;
1249
1250 if (arm_core_regs[i].mode == ARM_MODE_MON
1251 && arm->core_type != ARM_CORE_TYPE_SEC_EXT
1252 && arm->core_type != ARM_CORE_TYPE_VIRT_EXT)
1253 continue;
1254 if (arm_core_regs[i].mode == ARM_MODE_HYP
1255 && arm->core_type != ARM_CORE_TYPE_VIRT_EXT)
1256 continue;
1257 (*reg_list)[reg_index] = &(arm->core_cache->reg_list[i]);
1258 }
1259
1260 /* When we supply the target description, there is no need for fake FPA */
1261 for (i = 16; i < 24; i++) {
1262 (*reg_list)[i] = &arm_gdb_dummy_fp_reg;
1263 (*reg_list)[i]->size = 0;
1264 }
1265 (*reg_list)[24] = &arm_gdb_dummy_fps_reg;
1266 (*reg_list)[24]->size = 0;
1267
1268 if (arm->arm_vfp_version == ARM_VFP_V3) {
1269 unsigned int num_core_regs = ARRAY_SIZE(arm_core_regs);
1270 for (i = 0; i < 33; i++)
1271 (*reg_list)[list_size_core + i] = &(arm->core_cache->reg_list[num_core_regs + i]);
1272 }
1273
1274 return ERROR_OK;
1275
1276 default:
1277 LOG_ERROR("not a valid register class type in query.");
1278 return ERROR_FAIL;
1279 }
1280 }
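/* For reference, the REG_CLASS_GENERAL list above mirrors GDB's historical
 * ARM 'g' packet layout: entries 0..15 are r0..r15, 16..23 the obsolete
 * 96-bit FPA registers, 24 the FPA status register and 25 the CPSR; the
 * dummies exist only so that GDB packets expecting FPA slots stay valid.
 */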
1281
1282 /* wait for execution to complete and check exit point */
1283 static int armv4_5_run_algorithm_completion(struct target *target,
1284 uint32_t exit_point,
1285 int timeout_ms,
1286 void *arch_info)
1287 {
1288 int retval;
1289 struct arm *arm = target_to_arm(target);
1290
1291 retval = target_wait_state(target, TARGET_HALTED, timeout_ms);
1292 if (retval != ERROR_OK)
1293 return retval;
1294 if (target->state != TARGET_HALTED) {
1295 retval = target_halt(target);
1296 if (retval != ERROR_OK)
1297 return retval;
1298 retval = target_wait_state(target, TARGET_HALTED, 500);
1299 if (retval != ERROR_OK)
1300 return retval;
1301 return ERROR_TARGET_TIMEOUT;
1302 }
1303
1304 /* fast exit: ARMv5+ code can use BKPT */
1305 if (exit_point && buf_get_u32(arm->pc->value, 0, 32) != exit_point) {
1306 LOG_WARNING(
1307 "target reentered debug state, but not at the desired exit point: 0x%4.4" PRIx32 "",
1308 buf_get_u32(arm->pc->value, 0, 32));
1309 return ERROR_TARGET_TIMEOUT;
1310 }
1311
1312 return ERROR_OK;
1313 }
1314
1315 int armv4_5_run_algorithm_inner(struct target *target,
1316 int num_mem_params, struct mem_param *mem_params,
1317 int num_reg_params, struct reg_param *reg_params,
1318 uint32_t entry_point, uint32_t exit_point,
1319 int timeout_ms, void *arch_info,
1320 int (*run_it)(struct target *target, uint32_t exit_point,
1321 int timeout_ms, void *arch_info))
1322 {
1323 struct arm *arm = target_to_arm(target);
1324 struct arm_algorithm *arm_algorithm_info = arch_info;
1325 enum arm_state core_state = arm->core_state;
1326 uint32_t context[17];
1327 uint32_t cpsr;
1328 int exit_breakpoint_size = 0;
1329 int i;
1330 int retval = ERROR_OK;
1331
1332 LOG_DEBUG("Running algorithm");
1333
1334 if (arm_algorithm_info->common_magic != ARM_COMMON_MAGIC) {
1335 LOG_ERROR("current target isn't an ARMV4/5 target");
1336 return ERROR_TARGET_INVALID;
1337 }
1338
1339 if (target->state != TARGET_HALTED) {
1340 LOG_WARNING("target not halted");
1341 return ERROR_TARGET_NOT_HALTED;
1342 }
1343
1344 if (!is_arm_mode(arm->core_mode)) {
1345 LOG_ERROR("not a valid arm core mode - communication failure?");
1346 return ERROR_FAIL;
1347 }
1348
1349 /* armv5 and later can terminate with BKPT instruction; less overhead */
1350 if (!exit_point && arm->is_armv4) {
1351 LOG_ERROR("ARMv4 target needs HW breakpoint location");
1352 return ERROR_FAIL;
1353 }
1354
1355 /* save r0..pc, cpsr-or-spsr, and then cpsr-for-sure;
1356 * they'll be restored later.
1357 */
1358 for (i = 0; i <= 16; i++) {
1359 struct reg *r;
1360
1361 r = &ARMV4_5_CORE_REG_MODE(arm->core_cache,
1362 arm_algorithm_info->core_mode, i);
1363 if (!r->valid)
1364 arm->read_core_reg(target, r, i,
1365 arm_algorithm_info->core_mode);
1366 context[i] = buf_get_u32(r->value, 0, 32);
1367 }
1368 cpsr = buf_get_u32(arm->cpsr->value, 0, 32);
1369
1370 for (i = 0; i < num_mem_params; i++) {
1371 if (mem_params[i].direction == PARAM_IN)
1372 continue;
1373 retval = target_write_buffer(target, mem_params[i].address, mem_params[i].size,
1374 mem_params[i].value);
1375 if (retval != ERROR_OK)
1376 return retval;
1377 }
1378
1379 for (i = 0; i < num_reg_params; i++) {
1380 if (reg_params[i].direction == PARAM_IN)
1381 continue;
1382
1383 struct reg *reg = register_get_by_name(arm->core_cache, reg_params[i].reg_name, 0);
1384 if (!reg) {
1385 LOG_ERROR("BUG: register '%s' not found", reg_params[i].reg_name);
1386 return ERROR_COMMAND_SYNTAX_ERROR;
1387 }
1388
1389 if (reg->size != reg_params[i].size) {
1390 LOG_ERROR("BUG: register '%s' size doesn't match reg_params[i].size",
1391 reg_params[i].reg_name);
1392 return ERROR_COMMAND_SYNTAX_ERROR;
1393 }
1394
1395 retval = armv4_5_set_core_reg(reg, reg_params[i].value);
1396 if (retval != ERROR_OK)
1397 return retval;
1398 }
1399
1400 arm->core_state = arm_algorithm_info->core_state;
1401 if (arm->core_state == ARM_STATE_ARM)
1402 exit_breakpoint_size = 4;
1403 else if (arm->core_state == ARM_STATE_THUMB)
1404 exit_breakpoint_size = 2;
1405 else {
1406 LOG_ERROR("BUG: can't execute algorithms when not in ARM or Thumb state");
1407 return ERROR_COMMAND_SYNTAX_ERROR;
1408 }
1409
1410 if (arm_algorithm_info->core_mode != ARM_MODE_ANY) {
1411 LOG_DEBUG("setting core_mode: 0x%2.2x",
1412 arm_algorithm_info->core_mode);
1413 buf_set_u32(arm->cpsr->value, 0, 5,
1414 arm_algorithm_info->core_mode);
1415 arm->cpsr->dirty = true;
1416 arm->cpsr->valid = true;
1417 }
1418
1419 /* terminate using a hardware or (ARMv5+) software breakpoint */
1420 if (exit_point) {
1421 retval = breakpoint_add(target, exit_point,
1422 exit_breakpoint_size, BKPT_HARD);
1423 if (retval != ERROR_OK) {
1424 LOG_ERROR("can't add HW breakpoint to terminate algorithm");
1425 return ERROR_TARGET_FAILURE;
1426 }
1427 }
1428
1429 retval = target_resume(target, 0, entry_point, 1, 1);
1430 if (retval != ERROR_OK)
1431 return retval;
1432 retval = run_it(target, exit_point, timeout_ms, arch_info);
1433
1434 if (exit_point)
1435 breakpoint_remove(target, exit_point);
1436
1437 if (retval != ERROR_OK)
1438 return retval;
1439
1440 for (i = 0; i < num_mem_params; i++) {
1441 if (mem_params[i].direction != PARAM_OUT) {
1442 int retvaltemp = target_read_buffer(target, mem_params[i].address,
1443 mem_params[i].size,
1444 mem_params[i].value);
1445 if (retvaltemp != ERROR_OK)
1446 retval = retvaltemp;
1447 }
1448 }
1449
1450 for (i = 0; i < num_reg_params; i++) {
1451 if (reg_params[i].direction != PARAM_OUT) {
1452
1453 struct reg *reg = register_get_by_name(arm->core_cache,
1454 reg_params[i].reg_name,
1455 0);
1456 if (!reg) {
1457 LOG_ERROR("BUG: register '%s' not found", reg_params[i].reg_name);
1458 retval = ERROR_COMMAND_SYNTAX_ERROR;
1459 continue;
1460 }
1461
1462 if (reg->size != reg_params[i].size) {
1463 LOG_ERROR(
1464 "BUG: register '%s' size doesn't match reg_params[i].size",
1465 reg_params[i].reg_name);
1466 retval = ERROR_COMMAND_SYNTAX_ERROR;
1467 continue;
1468 }
1469
1470 buf_set_u32(reg_params[i].value, 0, 32, buf_get_u32(reg->value, 0, 32));
1471 }
1472 }
1473
1474 /* restore everything we saved before (17 or 18 registers) */
1475 for (i = 0; i <= 16; i++) {
1476 uint32_t regvalue;
1477 regvalue = buf_get_u32(ARMV4_5_CORE_REG_MODE(arm->core_cache,
1478 arm_algorithm_info->core_mode, i).value, 0, 32);
1479 if (regvalue != context[i]) {
1480 LOG_DEBUG("restoring register %s with value 0x%8.8" PRIx32 "",
1481 ARMV4_5_CORE_REG_MODE(arm->core_cache,
1482 arm_algorithm_info->core_mode, i).name, context[i]);
1483 buf_set_u32(ARMV4_5_CORE_REG_MODE(arm->core_cache,
1484 arm_algorithm_info->core_mode, i).value, 0, 32, context[i]);
1485 ARMV4_5_CORE_REG_MODE(arm->core_cache, arm_algorithm_info->core_mode,
1486 i).valid = true;
1487 ARMV4_5_CORE_REG_MODE(arm->core_cache, arm_algorithm_info->core_mode,
1488 i).dirty = true;
1489 }
1490 }
1491
1492 arm_set_cpsr(arm, cpsr);
1493 arm->cpsr->dirty = true;
1494
1495 arm->core_state = core_state;
1496
1497 return retval;
1498 }
1499
1500 int armv4_5_run_algorithm(struct target *target,
1501 int num_mem_params,
1502 struct mem_param *mem_params,
1503 int num_reg_params,
1504 struct reg_param *reg_params,
1505 target_addr_t entry_point,
1506 target_addr_t exit_point,
1507 int timeout_ms,
1508 void *arch_info)
1509 {
1510 return armv4_5_run_algorithm_inner(target,
1511 num_mem_params,
1512 mem_params,
1513 num_reg_params,
1514 reg_params,
1515 (uint32_t)entry_point,
1516 (uint32_t)exit_point,
1517 timeout_ms,
1518 arch_info,
1519 armv4_5_run_algorithm_completion);
1520 }
1521
1522 /**
1523 * Runs ARM code in the target to calculate a CRC32 checksum.
1524 *
1525 */
1526 int arm_checksum_memory(struct target *target,
1527 target_addr_t address, uint32_t count, uint32_t *checksum)
1528 {
1529 struct working_area *crc_algorithm;
1530 struct arm_algorithm arm_algo;
1531 struct arm *arm = target_to_arm(target);
1532 struct reg_param reg_params[2];
1533 int retval;
1534 uint32_t i;
1535 uint32_t exit_var = 0;
1536
1537 static const uint8_t arm_crc_code_le[] = {
1538 #include "../../contrib/loaders/checksum/armv4_5_crc.inc"
1539 };
1540
1541 assert(sizeof(arm_crc_code_le) % 4 == 0);
1542
1543 retval = target_alloc_working_area(target,
1544 sizeof(arm_crc_code_le), &crc_algorithm);
1545 if (retval != ERROR_OK)
1546 return retval;
1547
1548 /* convert code into a buffer in target endianness */
1549 for (i = 0; i < ARRAY_SIZE(arm_crc_code_le) / 4; i++) {
1550 retval = target_write_u32(target,
1551 crc_algorithm->address + i * sizeof(uint32_t),
1552 le_to_h_u32(&arm_crc_code_le[i * 4]));
1553 if (retval != ERROR_OK)
1554 goto cleanup;
1555 }
1556
1557 arm_algo.common_magic = ARM_COMMON_MAGIC;
1558 arm_algo.core_mode = ARM_MODE_SVC;
1559 arm_algo.core_state = ARM_STATE_ARM;
1560
1561 init_reg_param(&reg_params[0], "r0", 32, PARAM_IN_OUT);
1562 init_reg_param(&reg_params[1], "r1", 32, PARAM_OUT);
1563
1564 buf_set_u32(reg_params[0].value, 0, 32, address);
1565 buf_set_u32(reg_params[1].value, 0, 32, count);
1566
1567 /* 20 second timeout/megabyte */
1568 int timeout = 20000 * (1 + (count / (1024 * 1024)));
1569
1570 /* armv4 must exit using a hardware breakpoint */
1571 if (arm->is_armv4)
1572 exit_var = crc_algorithm->address + sizeof(arm_crc_code_le) - 8;
1573
1574 retval = target_run_algorithm(target, 0, NULL, 2, reg_params,
1575 crc_algorithm->address,
1576 exit_var,
1577 timeout, &arm_algo);
1578
1579 if (retval == ERROR_OK)
1580 *checksum = buf_get_u32(reg_params[0].value, 0, 32);
1581 else
1582 LOG_ERROR("error executing ARM crc algorithm");
1583
1584 destroy_reg_param(&reg_params[0]);
1585 destroy_reg_param(&reg_params[1]);
1586
1587 cleanup:
1588 target_free_working_area(target, crc_algorithm);
1589
1590 return retval;
1591 }
1592
1593 /**
1594 * Runs ARM code in the target to check whether a memory block holds
1595 * all ones. NOR flash which has been erased, and thus may be written,
1596 * holds all ones.
1597 *
1598 */
1599 int arm_blank_check_memory(struct target *target,
1600 struct target_memory_check_block *blocks, int num_blocks, uint8_t erased_value)
1601 {
1602 struct working_area *check_algorithm;
1603 struct reg_param reg_params[3];
1604 struct arm_algorithm arm_algo;
1605 struct arm *arm = target_to_arm(target);
1606 int retval;
1607 uint32_t i;
1608 uint32_t exit_var = 0;
1609
1610 static const uint8_t check_code_le[] = {
1611 #include "../../contrib/loaders/erase_check/armv4_5_erase_check.inc"
1612 };
1613
1614 assert(sizeof(check_code_le) % 4 == 0);
1615
1616 if (erased_value != 0xff) {
1617 LOG_ERROR("Erase value 0x%02" PRIx8 " not yet supported for ARMv4/v5 targets",
1618 erased_value);
1619 return ERROR_FAIL;
1620 }
1621
1622 /* make sure we have a working area */
1623 retval = target_alloc_working_area(target,
1624 sizeof(check_code_le), &check_algorithm);
1625 if (retval != ERROR_OK)
1626 return retval;
1627
1628 /* convert code into a buffer in target endianness */
1629 for (i = 0; i < ARRAY_SIZE(check_code_le) / 4; i++) {
1630 retval = target_write_u32(target,
1631 check_algorithm->address
1632 + i * sizeof(uint32_t),
1633 le_to_h_u32(&check_code_le[i * 4]));
1634 if (retval != ERROR_OK)
1635 goto cleanup;
1636 }
1637
1638 arm_algo.common_magic = ARM_COMMON_MAGIC;
1639 arm_algo.core_mode = ARM_MODE_SVC;
1640 arm_algo.core_state = ARM_STATE_ARM;
1641
1642 init_reg_param(&reg_params[0], "r0", 32, PARAM_OUT);
1643 buf_set_u32(reg_params[0].value, 0, 32, blocks[0].address);
1644
1645 init_reg_param(&reg_params[1], "r1", 32, PARAM_OUT);
1646 buf_set_u32(reg_params[1].value, 0, 32, blocks[0].size);
1647
1648 init_reg_param(&reg_params[2], "r2", 32, PARAM_IN_OUT);
1649 buf_set_u32(reg_params[2].value, 0, 32, erased_value);
1650
1651 /* armv4 must exit using a hardware breakpoint */
1652 if (arm->is_armv4)
1653 exit_var = check_algorithm->address + sizeof(check_code_le) - 4;
1654
1655 retval = target_run_algorithm(target, 0, NULL, 3, reg_params,
1656 check_algorithm->address,
1657 exit_var,
1658 10000, &arm_algo);
1659
1660 if (retval == ERROR_OK)
1661 blocks[0].result = buf_get_u32(reg_params[2].value, 0, 32);
1662
1663 destroy_reg_param(&reg_params[0]);
1664 destroy_reg_param(&reg_params[1]);
1665 destroy_reg_param(&reg_params[2]);
1666
1667 cleanup:
1668 target_free_working_area(target, check_algorithm);
1669
1670 if (retval != ERROR_OK)
1671 return retval;
1672
1673 return 1; /* only one block has been checked */
1674 }
1675
1676 static int arm_full_context(struct target *target)
1677 {
1678 struct arm *arm = target_to_arm(target);
1679 unsigned num_regs = arm->core_cache->num_regs;
1680 struct reg *reg = arm->core_cache->reg_list;
1681 int retval = ERROR_OK;
1682
1683 for (; num_regs && retval == ERROR_OK; num_regs--, reg++) {
1684 if (reg->valid)
1685 continue;
1686 retval = armv4_5_get_core_reg(reg);
1687 }
1688 return retval;
1689 }
1690
1691 static int arm_default_mrc(struct target *target, int cpnum,
1692 uint32_t op1, uint32_t op2,
1693 uint32_t CRn, uint32_t CRm,
1694 uint32_t *value)
1695 {
1696 LOG_ERROR("%s doesn't implement MRC", target_type_name(target));
1697 return ERROR_FAIL;
1698 }
1699
1700 static int arm_default_mcr(struct target *target, int cpnum,
1701 uint32_t op1, uint32_t op2,
1702 uint32_t CRn, uint32_t CRm,
1703 uint32_t value)
1704 {
1705 LOG_ERROR("%s doesn't implement MCR", target_type_name(target));
1706 return ERROR_FAIL;
1707 }
1708
1709 int arm_init_arch_info(struct target *target, struct arm *arm)
1710 {
1711 target->arch_info = arm;
1712 arm->target = target;
1713
1714 arm->common_magic = ARM_COMMON_MAGIC;
1715
1716 /* core_type may be overridden by subtype logic */
1717 if (arm->core_type != ARM_CORE_TYPE_M_PROFILE) {
1718 arm->core_type = ARM_CORE_TYPE_STD;
1719 arm_set_cpsr(arm, ARM_MODE_USR);
1720 }
1721
1722 /* default full_context() has no core-specific optimizations */
1723 if (!arm->full_context && arm->read_core_reg)
1724 arm->full_context = arm_full_context;
1725
1726 if (!arm->mrc)
1727 arm->mrc = arm_default_mrc;
1728 if (!arm->mcr)
1729 arm->mcr = arm_default_mcr;
1730
1731 return ERROR_OK;
1732 }
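/* Minimal sketch of how a core back-end is expected to use these helpers;
 * the "mycore_*" names are purely illustrative, and real drivers usually
 * embed struct arm in a larger per-core structure rather than allocating
 * it on its own:
 *
 *	static int mycore_target_create(struct target *target, Jim_Interp *interp)
 *	{
 *		struct arm *arm = calloc(1, sizeof(*arm));
 *
 *		if (!arm)
 *			return ERROR_FAIL;
 *		arm->read_core_reg = mycore_read_core_reg;
 *		arm->write_core_reg = mycore_write_core_reg;
 *		return arm_init_arch_info(target, arm);
 *	}
 *
 *	static int mycore_init_target(struct command_context *cmd_ctx,
 *			struct target *target)
 *	{
 *		arm_build_reg_cache(target, target_to_arm(target));
 *		return ERROR_OK;
 *	}
 *
 * Real back-ends wire in more hooks (full_context, mrc/mcr, ...), but the
 * arm_init_arch_info()/arm_build_reg_cache() pairing is the common core.
 */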
