854e8eb586adb300819490b4734c020f30d4bc64
[openocd.git] / src / target / cortex_m.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2
3 /***************************************************************************
4 * Copyright (C) 2005 by Dominic Rath *
5 * Dominic.Rath@gmx.de *
6 * *
7 * Copyright (C) 2006 by Magnus Lundin *
8 * lundin@mlu.mine.nu *
9 * *
10 * Copyright (C) 2008 by Spencer Oliver *
11 * spen@spen-soft.co.uk *
12 * *
13 * *
14 * Cortex-M3(tm) TRM, ARM DDI 0337E (r1p1) and 0337G (r2p0) *
15 * *
16 ***************************************************************************/
17 #ifdef HAVE_CONFIG_H
18 #include "config.h"
19 #endif
20
21 #include "jtag/interface.h"
22 #include "breakpoints.h"
23 #include "cortex_m.h"
24 #include "target_request.h"
25 #include "target_type.h"
26 #include "arm_adi_v5.h"
27 #include "arm_disassembler.h"
28 #include "register.h"
29 #include "arm_opcodes.h"
30 #include "arm_semihosting.h"
31 #include "smp.h"
32 #include <helper/nvp.h>
33 #include <helper/time_support.h>
34 #include <rtt/rtt.h>
35
36 /* NOTE: most of this should work fine for the Cortex-M1 and
37 * Cortex-M0 cores too, although they're ARMv6-M not ARMv7-M.
38 * Some differences: M0/M1 doesn't have FPB remapping or the
39 * DWT tracing/profiling support. (So the cycle counter will
40 * not be usable; the other stuff isn't currently used here.)
41 *
42 * Although there are some workarounds for errata seen only in r0p0
43 * silicon, such old parts are hard to find and thus not much tested
44 * any longer.
45 */
46
47 /* Timeout for register r/w */
48 #define DHCSR_S_REGRDY_TIMEOUT (500)
49
/* Supported Cortex-M Cores
 *
 * Lookup table of known Cortex-M implementations. Each entry carries:
 *  - impl_part: implementer/part number matched against the core's ID
 *  - name:      human readable core name for log output
 *  - arch:      ARM architecture variant (v6-M / v7-M / v8-M)
 *  - flags:     per-core quirks/features; CORTEX_M_F_HAS_FPV4/FPV5 mark the
 *               optional FPU generation, CORTEX_M_F_TAR_AUTOINCR_BLOCK_4K
 *               marks cores whose MEM-AP TAR auto-increment wraps at 4K
 */
static const struct cortex_m_part_info cortex_m_parts[] = {
	{
		.impl_part = CORTEX_M0_PARTNO,
		.name = "Cortex-M0",
		.arch = ARM_ARCH_V6M,
	},
	{
		.impl_part = CORTEX_M0P_PARTNO,
		.name = "Cortex-M0+",
		.arch = ARM_ARCH_V6M,
	},
	{
		.impl_part = CORTEX_M1_PARTNO,
		.name = "Cortex-M1",
		.arch = ARM_ARCH_V6M,
	},
	{
		.impl_part = CORTEX_M3_PARTNO,
		.name = "Cortex-M3",
		.arch = ARM_ARCH_V7M,
		.flags = CORTEX_M_F_TAR_AUTOINCR_BLOCK_4K,
	},
	{
		.impl_part = CORTEX_M4_PARTNO,
		.name = "Cortex-M4",
		.arch = ARM_ARCH_V7M,
		.flags = CORTEX_M_F_HAS_FPV4 | CORTEX_M_F_TAR_AUTOINCR_BLOCK_4K,
	},
	{
		.impl_part = CORTEX_M7_PARTNO,
		.name = "Cortex-M7",
		.arch = ARM_ARCH_V7M,
		.flags = CORTEX_M_F_HAS_FPV5,
	},
	{
		.impl_part = CORTEX_M23_PARTNO,
		.name = "Cortex-M23",
		.arch = ARM_ARCH_V8M,
	},
	{
		.impl_part = CORTEX_M33_PARTNO,
		.name = "Cortex-M33",
		.arch = ARM_ARCH_V8M,
		.flags = CORTEX_M_F_HAS_FPV5,
	},
	{
		.impl_part = CORTEX_M35P_PARTNO,
		.name = "Cortex-M35P",
		.arch = ARM_ARCH_V8M,
		.flags = CORTEX_M_F_HAS_FPV5,
	},
	{
		.impl_part = CORTEX_M55_PARTNO,
		.name = "Cortex-M55",
		.arch = ARM_ARCH_V8M,
		.flags = CORTEX_M_F_HAS_FPV5,
	},
	{
		.impl_part = STAR_MC1_PARTNO,
		.name = "STAR-MC1",
		.arch = ARM_ARCH_V8M,
		.flags = CORTEX_M_F_HAS_FPV5,
	},
	{
		.impl_part = REALTEK_M200_PARTNO,
		.name = "Real-M200 (KM0)",
		.arch = ARM_ARCH_V8M,
	},
	{
		.impl_part = REALTEK_M300_PARTNO,
		.name = "Real-M300 (KM4)",
		.arch = ARM_ARCH_V8M,
		.flags = CORTEX_M_F_HAS_FPV5,
	},
};
126
127 /* forward declarations */
128 static int cortex_m_store_core_reg_u32(struct target *target,
129 uint32_t num, uint32_t value);
130 static void cortex_m_dwt_free(struct target *target);
131
132 /** DCB DHCSR register contains S_RETIRE_ST and S_RESET_ST bits cleared
133 * on a read. Call this helper function each time DHCSR is read
134 * to preserve S_RESET_ST state in case of a reset event was detected.
135 */
136 static inline void cortex_m_cumulate_dhcsr_sticky(struct cortex_m_common *cortex_m,
137 uint32_t dhcsr)
138 {
139 cortex_m->dcb_dhcsr_cumulated_sticky |= dhcsr;
140 }
141
/** Read DCB DHCSR register to cortex_m->dcb_dhcsr and cumulate
 * sticky bits in cortex_m->dcb_dhcsr_cumulated_sticky.
 *
 * Single atomic AP read; use this instead of a raw DHCSR read so the
 * clear-on-read S_RESET_ST/S_RETIRE_ST bits are never lost.
 *
 * @param target the target to read from
 * @returns ERROR_OK or the AP access error code
 */
static int cortex_m_read_dhcsr_atomic_sticky(struct target *target)
{
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = target_to_armv7m(target);

	int retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DHCSR,
			&cortex_m->dcb_dhcsr);
	if (retval != ERROR_OK)
		return retval;

	/* preserve clear-on-read bits for later inspection */
	cortex_m_cumulate_dhcsr_sticky(cortex_m, cortex_m->dcb_dhcsr);
	return ERROR_OK;
}
158
/** Read a single core register through the DCB register-transfer interface.
 *
 * Writes the selector to DCB_DCRSR, then polls DHCSR.S_REGRDY (bounded by
 * DHCSR_S_REGRDY_TIMEOUT ms) and takes the value from DCB_DCRDR. If any
 * polling iteration was needed, cortex_m->slow_register_read is set so the
 * batched "fast" read path is avoided on this target in the future.
 *
 * @param target the target
 * @param regsel DCRSR register selector
 * @param value  output: 32-bit register content
 * @returns ERROR_OK, an AP access error, or ERROR_TIMEOUT_REACHED
 */
static int cortex_m_load_core_reg_u32(struct target *target,
		uint32_t regsel, uint32_t *value)
{
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = target_to_armv7m(target);
	int retval;
	uint32_t dcrdr, tmp_value;
	int64_t then;

	/* because the DCB_DCRDR is used for the emulated dcc channel
	 * we have to save/restore the DCB_DCRDR when used */
	if (target->dbg_msg_enabled) {
		retval = mem_ap_read_u32(armv7m->debug_ap, DCB_DCRDR, &dcrdr);
		if (retval != ERROR_OK)
			return retval;
	}

	retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DCRSR, regsel);
	if (retval != ERROR_OK)
		return retval;

	/* check if value from register is ready and pre-read it */
	then = timeval_ms();
	while (1) {
		/* queue a DHCSR read... */
		retval = mem_ap_read_u32(armv7m->debug_ap, DCB_DHCSR,
				&cortex_m->dcb_dhcsr);
		if (retval != ERROR_OK)
			return retval;
		/* ...and flush it together with a speculative DCRDR read;
		 * the DCRDR value is only trusted once S_REGRDY confirms
		 * the transfer finished */
		retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DCRDR,
				&tmp_value);
		if (retval != ERROR_OK)
			return retval;
		cortex_m_cumulate_dhcsr_sticky(cortex_m, cortex_m->dcb_dhcsr);
		if (cortex_m->dcb_dhcsr & S_REGRDY)
			break;
		cortex_m->slow_register_read = true; /* Polling (still) needed. */
		if (timeval_ms() > then + DHCSR_S_REGRDY_TIMEOUT) {
			LOG_TARGET_ERROR(target, "Timeout waiting for DCRDR transfer ready");
			return ERROR_TIMEOUT_REACHED;
		}
		keep_alive();
	}

	*value = tmp_value;

	if (target->dbg_msg_enabled) {
		/* restore DCB_DCRDR - this needs to be in a separate
		 * transaction otherwise the emulated DCC channel breaks */
		if (retval == ERROR_OK)
			retval = mem_ap_write_atomic_u32(armv7m->debug_ap, DCB_DCRDR, dcrdr);
	}

	return retval;
}
213
214 static int cortex_m_slow_read_all_regs(struct target *target)
215 {
216 struct cortex_m_common *cortex_m = target_to_cm(target);
217 struct armv7m_common *armv7m = target_to_armv7m(target);
218 const unsigned int num_regs = armv7m->arm.core_cache->num_regs;
219
220 /* Opportunistically restore fast read, it'll revert to slow
221 * if any register needed polling in cortex_m_load_core_reg_u32(). */
222 cortex_m->slow_register_read = false;
223
224 for (unsigned int reg_id = 0; reg_id < num_regs; reg_id++) {
225 struct reg *r = &armv7m->arm.core_cache->reg_list[reg_id];
226 if (r->exist) {
227 int retval = armv7m->arm.read_core_reg(target, r, reg_id, ARM_MODE_ANY);
228 if (retval != ERROR_OK)
229 return retval;
230 }
231 }
232
233 if (!cortex_m->slow_register_read)
234 LOG_TARGET_DEBUG(target, "Switching back to fast register reads");
235
236 return ERROR_OK;
237 }
238
239 static int cortex_m_queue_reg_read(struct target *target, uint32_t regsel,
240 uint32_t *reg_value, uint32_t *dhcsr)
241 {
242 struct armv7m_common *armv7m = target_to_armv7m(target);
243 int retval;
244
245 retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DCRSR, regsel);
246 if (retval != ERROR_OK)
247 return retval;
248
249 retval = mem_ap_read_u32(armv7m->debug_ap, DCB_DHCSR, dhcsr);
250 if (retval != ERROR_OK)
251 return retval;
252
253 return mem_ap_read_u32(armv7m->debug_ap, DCB_DCRDR, reg_value);
254 }
255
/** Read all existing core registers in one batched DAP transaction,
 * without polling S_REGRDY between transfers.
 *
 * All DCRSR/DHCSR/DCRDR accesses are queued and flushed by a single
 * dap_run(). Afterwards every captured DHCSR snapshot is checked: if any
 * transfer was not ready (S_REGRDY clear), ERROR_TIMEOUT_REACHED is
 * returned so the caller can fall back to the slow polling path.
 * On success the register cache values are updated and marked valid.
 *
 * @param target the target
 * @returns ERROR_OK, an AP/DAP error, or ERROR_TIMEOUT_REACHED
 */
static int cortex_m_fast_read_all_regs(struct target *target)
{
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = target_to_armv7m(target);
	int retval;
	uint32_t dcrdr;

	/* because the DCB_DCRDR is used for the emulated dcc channel
	 * we have to save/restore the DCB_DCRDR when used */
	if (target->dbg_msg_enabled) {
		retval = mem_ap_read_u32(armv7m->debug_ap, DCB_DCRDR, &dcrdr);
		if (retval != ERROR_OK)
			return retval;
	}

	const unsigned int num_regs = armv7m->arm.core_cache->num_regs;
	/* upper bound on the number of 32-bit words to transfer */
	const unsigned int n_r32 = ARMV7M_LAST_REG - ARMV7M_CORE_FIRST_REG + 1
			+ ARMV7M_FPU_LAST_REG - ARMV7M_FPU_FIRST_REG + 1;
	/* we need one 32-bit word for each register except FP D0..D15, which
	 * need two words */
	uint32_t r_vals[n_r32];
	uint32_t dhcsr[n_r32];

	unsigned int wi = 0; /* write index to r_vals and dhcsr arrays */
	unsigned int reg_id; /* register index in the reg_list, ARMV7M_R0... */
	for (reg_id = 0; reg_id < num_regs; reg_id++) {
		struct reg *r = &armv7m->arm.core_cache->reg_list[reg_id];
		if (!r->exist)
			continue; /* skip non existent registers */

		if (r->size <= 8) {
			/* Any 8-bit or shorter register is unpacked from a 32-bit
			 * container register. Skip it now. */
			continue;
		}

		uint32_t regsel = armv7m_map_id_to_regsel(reg_id);
		retval = cortex_m_queue_reg_read(target, regsel, &r_vals[wi],
				&dhcsr[wi]);
		if (retval != ERROR_OK)
			return retval;
		wi++;

		assert(r->size == 32 || r->size == 64);
		if (r->size == 32)
			continue; /* done with 32-bit register */

		assert(reg_id >= ARMV7M_FPU_FIRST_REG && reg_id <= ARMV7M_FPU_LAST_REG);
		/* the odd part of FP register (S1, S3...) */
		retval = cortex_m_queue_reg_read(target, regsel + 1, &r_vals[wi],
				&dhcsr[wi]);
		if (retval != ERROR_OK)
			return retval;
		wi++;
	}

	assert(wi <= n_r32);

	/* flush all queued transfers at once */
	retval = dap_run(armv7m->debug_ap->dap);
	if (retval != ERROR_OK)
		return retval;

	if (target->dbg_msg_enabled) {
		/* restore DCB_DCRDR - this needs to be in a separate
		 * transaction otherwise the emulated DCC channel breaks */
		retval = mem_ap_write_atomic_u32(armv7m->debug_ap, DCB_DCRDR, dcrdr);
		if (retval != ERROR_OK)
			return retval;
	}

	/* verify every queued transfer had completed when its DHCSR was read */
	bool not_ready = false;
	for (unsigned int i = 0; i < wi; i++) {
		if ((dhcsr[i] & S_REGRDY) == 0) {
			not_ready = true;
			LOG_TARGET_DEBUG(target, "Register %u was not ready during fast read", i);
		}
		cortex_m_cumulate_dhcsr_sticky(cortex_m, dhcsr[i]);
	}

	if (not_ready) {
		/* Any register was not ready,
		 * fall back to slow read with S_REGRDY polling */
		return ERROR_TIMEOUT_REACHED;
	}

	LOG_TARGET_DEBUG(target, "read %u 32-bit registers", wi);

	/* second pass: distribute the raw words into the register cache */
	unsigned int ri = 0; /* read index from r_vals array */
	for (reg_id = 0; reg_id < num_regs; reg_id++) {
		struct reg *r = &armv7m->arm.core_cache->reg_list[reg_id];
		if (!r->exist)
			continue; /* skip non existent registers */

		r->dirty = false;

		unsigned int reg32_id;
		uint32_t offset;
		if (armv7m_map_reg_packing(reg_id, &reg32_id, &offset)) {
			/* Unpack a partial register from 32-bit container register */
			struct reg *r32 = &armv7m->arm.core_cache->reg_list[reg32_id];

			/* The container register ought to precede all regs unpacked
			 * from it in the reg_list. So the value should be ready
			 * to unpack */
			assert(r32->valid);
			buf_cpy(r32->value + offset, r->value, r->size);

		} else {
			assert(r->size == 32 || r->size == 64);
			buf_set_u32(r->value, 0, 32, r_vals[ri++]);

			if (r->size == 64) {
				assert(reg_id >= ARMV7M_FPU_FIRST_REG && reg_id <= ARMV7M_FPU_LAST_REG);
				/* the odd part of FP register (S1, S3...) */
				buf_set_u32(r->value + 4, 0, 32, r_vals[ri++]);
			}
		}
		r->valid = true;
	}
	/* every captured word must have been consumed */
	assert(ri == wi);

	return retval;
}
379
/** Write a single core register through the DCB register-transfer interface.
 *
 * Places the value in DCB_DCRDR, triggers the transfer by writing the
 * selector with DCRSR_WNR set, then polls DHCSR.S_REGRDY (bounded by
 * DHCSR_S_REGRDY_TIMEOUT ms) until the core has consumed the value.
 *
 * @param target the target
 * @param regsel DCRSR register selector
 * @param value  32-bit value to store
 * @returns ERROR_OK, an AP access error, or ERROR_TIMEOUT_REACHED
 */
static int cortex_m_store_core_reg_u32(struct target *target,
		uint32_t regsel, uint32_t value)
{
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = target_to_armv7m(target);
	int retval;
	uint32_t dcrdr;
	int64_t then;

	/* because the DCB_DCRDR is used for the emulated dcc channel
	 * we have to save/restore the DCB_DCRDR when used */
	if (target->dbg_msg_enabled) {
		retval = mem_ap_read_u32(armv7m->debug_ap, DCB_DCRDR, &dcrdr);
		if (retval != ERROR_OK)
			return retval;
	}

	retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DCRDR, value);
	if (retval != ERROR_OK)
		return retval;

	/* DCRSR_WNR selects the write direction of the transfer */
	retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DCRSR, regsel | DCRSR_WNR);
	if (retval != ERROR_OK)
		return retval;

	/* check if value is written into register */
	then = timeval_ms();
	while (1) {
		retval = cortex_m_read_dhcsr_atomic_sticky(target);
		if (retval != ERROR_OK)
			return retval;
		if (cortex_m->dcb_dhcsr & S_REGRDY)
			break;
		if (timeval_ms() > then + DHCSR_S_REGRDY_TIMEOUT) {
			LOG_TARGET_ERROR(target, "Timeout waiting for DCRDR transfer ready");
			return ERROR_TIMEOUT_REACHED;
		}
		keep_alive();
	}

	if (target->dbg_msg_enabled) {
		/* restore DCB_DCRDR - this needs to be in a separate
		 * transaction otherwise the emulated DCC channel breaks */
		if (retval == ERROR_OK)
			retval = mem_ap_write_atomic_u32(armv7m->debug_ap, DCB_DCRDR, dcrdr);
	}

	return retval;
}
429
430 static int cortex_m_write_debug_halt_mask(struct target *target,
431 uint32_t mask_on, uint32_t mask_off)
432 {
433 struct cortex_m_common *cortex_m = target_to_cm(target);
434 struct armv7m_common *armv7m = &cortex_m->armv7m;
435
436 /* mask off status bits */
437 cortex_m->dcb_dhcsr &= ~((0xFFFFul << 16) | mask_off);
438 /* create new register mask */
439 cortex_m->dcb_dhcsr |= DBGKEY | C_DEBUGEN | mask_on;
440
441 return mem_ap_write_atomic_u32(armv7m->debug_ap, DCB_DHCSR, cortex_m->dcb_dhcsr);
442 }
443
444 static int cortex_m_set_maskints(struct target *target, bool mask)
445 {
446 struct cortex_m_common *cortex_m = target_to_cm(target);
447 if (!!(cortex_m->dcb_dhcsr & C_MASKINTS) != mask)
448 return cortex_m_write_debug_halt_mask(target, mask ? C_MASKINTS : 0, mask ? 0 : C_MASKINTS);
449 else
450 return ERROR_OK;
451 }
452
453 static int cortex_m_set_maskints_for_halt(struct target *target)
454 {
455 struct cortex_m_common *cortex_m = target_to_cm(target);
456 switch (cortex_m->isrmasking_mode) {
457 case CORTEX_M_ISRMASK_AUTO:
458 /* interrupts taken at resume, whether for step or run -> no mask */
459 return cortex_m_set_maskints(target, false);
460
461 case CORTEX_M_ISRMASK_OFF:
462 /* interrupts never masked */
463 return cortex_m_set_maskints(target, false);
464
465 case CORTEX_M_ISRMASK_ON:
466 /* interrupts always masked */
467 return cortex_m_set_maskints(target, true);
468
469 case CORTEX_M_ISRMASK_STEPONLY:
470 /* interrupts masked for single step only -> mask now if MASKINTS
471 * erratum, otherwise only mask before stepping */
472 return cortex_m_set_maskints(target, cortex_m->maskints_erratum);
473 }
474 return ERROR_OK;
475 }
476
477 static int cortex_m_set_maskints_for_run(struct target *target)
478 {
479 switch (target_to_cm(target)->isrmasking_mode) {
480 case CORTEX_M_ISRMASK_AUTO:
481 /* interrupts taken at resume, whether for step or run -> no mask */
482 return cortex_m_set_maskints(target, false);
483
484 case CORTEX_M_ISRMASK_OFF:
485 /* interrupts never masked */
486 return cortex_m_set_maskints(target, false);
487
488 case CORTEX_M_ISRMASK_ON:
489 /* interrupts always masked */
490 return cortex_m_set_maskints(target, true);
491
492 case CORTEX_M_ISRMASK_STEPONLY:
493 /* interrupts masked for single step only -> no mask */
494 return cortex_m_set_maskints(target, false);
495 }
496 return ERROR_OK;
497 }
498
499 static int cortex_m_set_maskints_for_step(struct target *target)
500 {
501 switch (target_to_cm(target)->isrmasking_mode) {
502 case CORTEX_M_ISRMASK_AUTO:
503 /* the auto-interrupt should already be done -> mask */
504 return cortex_m_set_maskints(target, true);
505
506 case CORTEX_M_ISRMASK_OFF:
507 /* interrupts never masked */
508 return cortex_m_set_maskints(target, false);
509
510 case CORTEX_M_ISRMASK_ON:
511 /* interrupts always masked */
512 return cortex_m_set_maskints(target, true);
513
514 case CORTEX_M_ISRMASK_STEPONLY:
515 /* interrupts masked for single step only -> mask */
516 return cortex_m_set_maskints(target, true);
517 }
518 return ERROR_OK;
519 }
520
521 static int cortex_m_clear_halt(struct target *target)
522 {
523 struct cortex_m_common *cortex_m = target_to_cm(target);
524 struct armv7m_common *armv7m = &cortex_m->armv7m;
525 int retval;
526
527 /* clear step if any */
528 cortex_m_write_debug_halt_mask(target, C_HALT, C_STEP);
529
530 /* Read Debug Fault Status Register */
531 retval = mem_ap_read_atomic_u32(armv7m->debug_ap, NVIC_DFSR, &cortex_m->nvic_dfsr);
532 if (retval != ERROR_OK)
533 return retval;
534
535 /* Clear Debug Fault Status */
536 retval = mem_ap_write_atomic_u32(armv7m->debug_ap, NVIC_DFSR, cortex_m->nvic_dfsr);
537 if (retval != ERROR_OK)
538 return retval;
539 LOG_TARGET_DEBUG(target, "NVIC_DFSR 0x%" PRIx32 "", cortex_m->nvic_dfsr);
540
541 return ERROR_OK;
542 }
543
544 static int cortex_m_single_step_core(struct target *target)
545 {
546 struct cortex_m_common *cortex_m = target_to_cm(target);
547 int retval;
548
549 /* Mask interrupts before clearing halt, if not done already. This avoids
550 * Erratum 377497 (fixed in r1p0) where setting MASKINTS while clearing
551 * HALT can put the core into an unknown state.
552 */
553 if (!(cortex_m->dcb_dhcsr & C_MASKINTS)) {
554 retval = cortex_m_write_debug_halt_mask(target, C_MASKINTS, 0);
555 if (retval != ERROR_OK)
556 return retval;
557 }
558 retval = cortex_m_write_debug_halt_mask(target, C_STEP, C_HALT);
559 if (retval != ERROR_OK)
560 return retval;
561 LOG_TARGET_DEBUG(target, "single step");
562
563 /* restore dhcsr reg */
564 cortex_m_clear_halt(target);
565
566 return ERROR_OK;
567 }
568
569 static int cortex_m_enable_fpb(struct target *target)
570 {
571 int retval = target_write_u32(target, FP_CTRL, 3);
572 if (retval != ERROR_OK)
573 return retval;
574
575 /* check the fpb is actually enabled */
576 uint32_t fpctrl;
577 retval = target_read_u32(target, FP_CTRL, &fpctrl);
578 if (retval != ERROR_OK)
579 return retval;
580
581 if (fpctrl & 1)
582 return ERROR_OK;
583
584 return ERROR_FAIL;
585 }
586
/** Re-establish the debug configuration after a (detected) reset.
 *
 * Re-enables debug requests if C_DEBUGEN was lost, restores the interrupt
 * masking policy, DEMCR (TRCENA plus configured vector catches), the FPB
 * and all saved FPB/DWT comparators, then invalidates the register cache
 * and refreshes the DHCSR snapshot.
 *
 * @param target the target
 * @returns ERROR_OK or the first access error
 */
static int cortex_m_endreset_event(struct target *target)
{
	int retval;
	uint32_t dcb_demcr;
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = &cortex_m->armv7m;
	struct adiv5_dap *swjdp = cortex_m->armv7m.arm.dap;
	struct cortex_m_fp_comparator *fp_list = cortex_m->fp_comparator_list;
	struct cortex_m_dwt_comparator *dwt_list = cortex_m->dwt_comparator_list;

	/* REVISIT The four debug monitor bits are currently ignored... */
	retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DEMCR, &dcb_demcr);
	if (retval != ERROR_OK)
		return retval;
	LOG_TARGET_DEBUG(target, "DCB_DEMCR = 0x%8.8" PRIx32 "", dcb_demcr);

	/* this register is used for emulated dcc channel */
	retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DCRDR, 0);
	if (retval != ERROR_OK)
		return retval;

	retval = cortex_m_read_dhcsr_atomic_sticky(target);
	if (retval != ERROR_OK)
		return retval;

	if (!(cortex_m->dcb_dhcsr & C_DEBUGEN)) {
		/* Enable debug requests */
		retval = cortex_m_write_debug_halt_mask(target, 0, C_HALT | C_STEP | C_MASKINTS);
		if (retval != ERROR_OK)
			return retval;
	}

	/* Restore proper interrupt masking setting for running CPU. */
	cortex_m_set_maskints_for_run(target);

	/* Enable features controlled by ITM and DWT blocks, and catch only
	 * the vectors we were told to pay attention to.
	 *
	 * Target firmware is responsible for all fault handling policy
	 * choices *EXCEPT* explicitly scripted overrides like "vector_catch"
	 * or manual updates to the NVIC SHCSR and CCR registers.
	 */
	retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DEMCR, TRCENA | armv7m->demcr);
	if (retval != ERROR_OK)
		return retval;

	/* Paranoia: evidently some (early?) chips don't preserve all the
	 * debug state (including FPB, DWT, etc) across reset...
	 */

	/* Enable FPB */
	retval = cortex_m_enable_fpb(target);
	if (retval != ERROR_OK) {
		LOG_TARGET_ERROR(target, "Failed to enable the FPB");
		return retval;
	}

	cortex_m->fpb_enabled = true;

	/* Restore FPB registers */
	for (unsigned int i = 0; i < cortex_m->fp_num_code + cortex_m->fp_num_lit; i++) {
		retval = target_write_u32(target, fp_list[i].fpcr_address, fp_list[i].fpcr_value);
		if (retval != ERROR_OK)
			return retval;
	}

	/* Restore DWT registers: comparator, mask and function word per unit */
	for (unsigned int i = 0; i < cortex_m->dwt_num_comp; i++) {
		retval = target_write_u32(target, dwt_list[i].dwt_comparator_address + 0,
				dwt_list[i].comp);
		if (retval != ERROR_OK)
			return retval;
		retval = target_write_u32(target, dwt_list[i].dwt_comparator_address + 4,
				dwt_list[i].mask);
		if (retval != ERROR_OK)
			return retval;
		retval = target_write_u32(target, dwt_list[i].dwt_comparator_address + 8,
				dwt_list[i].function);
		if (retval != ERROR_OK)
			return retval;
	}
	/* flush any queued transfers */
	retval = dap_run(swjdp);
	if (retval != ERROR_OK)
		return retval;

	/* cached register values are stale after reset */
	register_cache_invalidate(armv7m->arm.core_cache);

	/* TODO: invalidate also working areas (needed in the case of detected reset).
	 * Doing so will require flash drivers to test if working area
	 * is still valid in all target algo calling loops.
	 */

	/* make sure we have latest dhcsr flags */
	retval = cortex_m_read_dhcsr_atomic_sticky(target);
	if (retval != ERROR_OK)
		return retval;

	return retval;
}
686
687 static int cortex_m_examine_debug_reason(struct target *target)
688 {
689 struct cortex_m_common *cortex_m = target_to_cm(target);
690
691 /* THIS IS NOT GOOD, TODO - better logic for detection of debug state reason
692 * only check the debug reason if we don't know it already */
693
694 if ((target->debug_reason != DBG_REASON_DBGRQ)
695 && (target->debug_reason != DBG_REASON_SINGLESTEP)) {
696 if (cortex_m->nvic_dfsr & DFSR_BKPT) {
697 target->debug_reason = DBG_REASON_BREAKPOINT;
698 if (cortex_m->nvic_dfsr & DFSR_DWTTRAP)
699 target->debug_reason = DBG_REASON_WPTANDBKPT;
700 } else if (cortex_m->nvic_dfsr & DFSR_DWTTRAP)
701 target->debug_reason = DBG_REASON_WATCHPOINT;
702 else if (cortex_m->nvic_dfsr & DFSR_VCATCH)
703 target->debug_reason = DBG_REASON_BREAKPOINT;
704 else if (cortex_m->nvic_dfsr & DFSR_EXTERNAL)
705 target->debug_reason = DBG_REASON_DBGRQ;
706 else /* HALTED */
707 target->debug_reason = DBG_REASON_UNDEFINED;
708 }
709
710 return ERROR_OK;
711 }
712
/** Collect and log the fault status/address registers relevant to the
 * exception the core is currently handling (armv7m->exception_number).
 *
 * The per-exception register reads are queued and flushed by a single
 * dap_run(); the result is emitted only as a debug log line.
 *
 * @param target the target
 * @returns result of the DAP queue flush (or an earlier access error)
 */
static int cortex_m_examine_exception_reason(struct target *target)
{
	/* cfsr/except_ar default to -1 (all ones) = "not applicable" */
	uint32_t shcsr = 0, except_sr = 0, cfsr = -1, except_ar = -1;
	struct armv7m_common *armv7m = target_to_armv7m(target);
	struct adiv5_dap *swjdp = armv7m->arm.dap;
	int retval;

	retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_SHCSR, &shcsr);
	if (retval != ERROR_OK)
		return retval;
	switch (armv7m->exception_number) {
	case 2:	/* NMI */
		break;
	case 3:	/* Hard Fault */
		retval = mem_ap_read_atomic_u32(armv7m->debug_ap, NVIC_HFSR, &except_sr);
		if (retval != ERROR_OK)
			return retval;
		/* FORCED hard fault: escalated from a configurable fault,
		 * so fetch CFSR for the underlying cause */
		if (except_sr & 0x40000000) {
			retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_CFSR, &cfsr);
			if (retval != ERROR_OK)
				return retval;
		}
		break;
	case 4:	/* Memory Management */
		retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_CFSR, &except_sr);
		if (retval != ERROR_OK)
			return retval;
		retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_MMFAR, &except_ar);
		if (retval != ERROR_OK)
			return retval;
		break;
	case 5:	/* Bus Fault */
		retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_CFSR, &except_sr);
		if (retval != ERROR_OK)
			return retval;
		retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_BFAR, &except_ar);
		if (retval != ERROR_OK)
			return retval;
		break;
	case 6:	/* Usage Fault */
		retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_CFSR, &except_sr);
		if (retval != ERROR_OK)
			return retval;
		break;
	case 7:	/* Secure Fault */
		retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_SFSR, &except_sr);
		if (retval != ERROR_OK)
			return retval;
		retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_SFAR, &except_ar);
		if (retval != ERROR_OK)
			return retval;
		break;
	case 11:	/* SVCall */
		break;
	case 12:	/* Debug Monitor */
		retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_DFSR, &except_sr);
		if (retval != ERROR_OK)
			return retval;
		break;
	case 14:	/* PendSV */
		break;
	case 15:	/* SysTick */
		break;
	default:
		except_sr = 0;
		break;
	}
	/* flush the queued (non-atomic) reads before using the values */
	retval = dap_run(swjdp);
	if (retval == ERROR_OK)
		LOG_TARGET_DEBUG(target, "%s SHCSR 0x%" PRIx32 ", SR 0x%" PRIx32
			", CFSR 0x%" PRIx32 ", AR 0x%" PRIx32,
			armv7m_exception_string(armv7m->exception_number),
			shcsr, except_sr, cfsr, except_ar);
	return retval;
}
788
/** Perform debug-entry housekeeping after the core has halted.
 *
 * Clears the halt/step state, determines the debug reason, reads the
 * v8-M security state (DSCSR) where applicable, loads all core registers
 * into the cache (fast batched path with fallback to slow polling),
 * decodes xPSR/CONTROL into arm->core_mode and the active register map,
 * and finally runs the optional post_debug_entry hook.
 *
 * @param target the target (expected halted)
 * @returns ERROR_OK or the first error encountered
 */
static int cortex_m_debug_entry(struct target *target)
{
	uint32_t xpsr;
	int retval;
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = &cortex_m->armv7m;
	struct arm *arm = &armv7m->arm;
	struct reg *r;

	LOG_TARGET_DEBUG(target, " ");

	/* Do this really early to minimize the window where the MASKINTS erratum
	 * can pile up pending interrupts. */
	cortex_m_set_maskints_for_halt(target);

	cortex_m_clear_halt(target);

	retval = cortex_m_read_dhcsr_atomic_sticky(target);
	if (retval != ERROR_OK)
		return retval;

	retval = armv7m->examine_debug_reason(target);
	if (retval != ERROR_OK)
		return retval;

	/* examine PE security state */
	uint32_t dscsr = 0;
	if (armv7m->arm.arch == ARM_ARCH_V8M) {
		retval = mem_ap_read_u32(armv7m->debug_ap, DCB_DSCSR, &dscsr);
		if (retval != ERROR_OK)
			return retval;
	}

	/* Load all registers to arm.core_cache */
	if (!cortex_m->slow_register_read) {
		retval = cortex_m_fast_read_all_regs(target);
		if (retval == ERROR_TIMEOUT_REACHED) {
			/* some transfer was not ready in time; retry below
			 * with per-register S_REGRDY polling */
			cortex_m->slow_register_read = true;
			LOG_TARGET_DEBUG(target, "Switched to slow register read");
		}
	}

	if (cortex_m->slow_register_read)
		retval = cortex_m_slow_read_all_regs(target);

	if (retval != ERROR_OK)
		return retval;

	r = arm->cpsr;
	xpsr = buf_get_u32(r->value, 0, 32);

	/* Are we in an exception handler?  xPSR[8:0] holds the exception number */
	if (xpsr & 0x1FF) {
		armv7m->exception_number = (xpsr & 0x1FF);

		arm->core_mode = ARM_MODE_HANDLER;
		arm->map = armv7m_msp_reg_map;
	} else {
		unsigned control = buf_get_u32(arm->core_cache
				->reg_list[ARMV7M_CONTROL].value, 0, 3);

		/* is this thread privileged? (CONTROL.nPRIV) */
		arm->core_mode = control & 1
			? ARM_MODE_USER_THREAD
			: ARM_MODE_THREAD;

		/* which stack is it using? (CONTROL.SPSEL) */
		if (control & 2)
			arm->map = armv7m_psp_reg_map;
		else
			arm->map = armv7m_msp_reg_map;

		armv7m->exception_number = 0;
	}

	if (armv7m->exception_number)
		cortex_m_examine_exception_reason(target);

	bool secure_state = (dscsr & DSCSR_CDS) == DSCSR_CDS;
	LOG_TARGET_DEBUG(target, "entered debug state in core mode: %s at PC 0x%" PRIx32
		", cpu in %s state, target->state: %s",
		arm_mode_name(arm->core_mode),
		buf_get_u32(arm->pc->value, 0, 32),
		secure_state ? "Secure" : "Non-Secure",
		target_state_name(target));

	if (armv7m->post_debug_entry) {
		retval = armv7m->post_debug_entry(target);
		if (retval != ERROR_OK)
			return retval;
	}

	return ERROR_OK;
}
883
/** Poll a single core: refresh DHCSR, track state transitions, and fire
 * the corresponding target events.
 *
 * Handles, in order: lockup recovery, external reset detection (via the
 * cumulated S_RESET_ST), end-of-reset processing, halt detection with
 * debug entry (postponed 'halted' event for SMP), run/sleep detection,
 * and externally triggered resume.
 *
 * @param target the target
 * @returns ERROR_OK, an access error, or ERROR_FAIL if a lockup had to
 *          be cleared (reported even though recovery succeeded)
 */
static int cortex_m_poll_one(struct target *target)
{
	int detected_failure = ERROR_OK;
	int retval = ERROR_OK;
	enum target_state prev_target_state = target->state;
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = &cortex_m->armv7m;

	/* Read from Debug Halting Control and Status Register */
	retval = cortex_m_read_dhcsr_atomic_sticky(target);
	if (retval != ERROR_OK) {
		target->state = TARGET_UNKNOWN;
		return retval;
	}

	/* Recover from lockup. See ARMv7-M architecture spec,
	 * section B1.5.15 "Unrecoverable exception cases".
	 */
	if (cortex_m->dcb_dhcsr & S_LOCKUP) {
		LOG_TARGET_ERROR(target, "clearing lockup after double fault");
		cortex_m_write_debug_halt_mask(target, C_HALT, 0);
		target->debug_reason = DBG_REASON_DBGRQ;

		/* We have to execute the rest (the "finally" equivalent, but
		 * still throw this exception again).
		 */
		detected_failure = ERROR_FAIL;

		/* refresh status bits */
		retval = cortex_m_read_dhcsr_atomic_sticky(target);
		if (retval != ERROR_OK)
			return retval;
	}

	/* consume a sticky reset indication collected since the last poll */
	if (cortex_m->dcb_dhcsr_cumulated_sticky & S_RESET_ST) {
		cortex_m->dcb_dhcsr_cumulated_sticky &= ~S_RESET_ST;
		if (target->state != TARGET_RESET) {
			target->state = TARGET_RESET;
			LOG_TARGET_INFO(target, "external reset detected");
		}
		return ERROR_OK;
	}

	if (target->state == TARGET_RESET) {
		/* Cannot switch context while running so endreset is
		 * called with target->state == TARGET_RESET
		 */
		LOG_TARGET_DEBUG(target, "Exit from reset with dcb_dhcsr 0x%" PRIx32,
			cortex_m->dcb_dhcsr);
		retval = cortex_m_endreset_event(target);
		if (retval != ERROR_OK) {
			target->state = TARGET_UNKNOWN;
			return retval;
		}
		target->state = TARGET_RUNNING;
		prev_target_state = TARGET_RUNNING;
	}

	if (cortex_m->dcb_dhcsr & S_HALT) {
		target->state = TARGET_HALTED;

		if ((prev_target_state == TARGET_RUNNING) || (prev_target_state == TARGET_RESET)) {
			retval = cortex_m_debug_entry(target);

			/* arm_semihosting needs to know registers, don't run if debug entry returned error */
			if (retval == ERROR_OK && arm_semihosting(target, &retval) != 0)
				return retval;

			if (target->smp) {
				/* in SMP the 'halted' event is fired only after
				 * all cores of the group were brought to halt */
				LOG_TARGET_DEBUG(target, "postpone target event 'halted'");
				target->smp_halt_event_postponed = true;
			} else {
				/* regardless of errors returned in previous code update state */
				target_call_event_callbacks(target, TARGET_EVENT_HALTED);
			}
		}
		if (prev_target_state == TARGET_DEBUG_RUNNING) {
			retval = cortex_m_debug_entry(target);

			target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
		}
		if (retval != ERROR_OK)
			return retval;
	}

	if (target->state == TARGET_UNKNOWN) {
		/* Check if processor is retiring instructions or sleeping.
		 * Unlike S_RESET_ST here we test if the target *is* running now,
		 * not if it has been running (possibly in the past). Instructions are
		 * typically processed much faster than OpenOCD polls DHCSR so S_RETIRE_ST
		 * is read always 1. That's the reason not to use dcb_dhcsr_cumulated_sticky.
		 */
		if (cortex_m->dcb_dhcsr & S_RETIRE_ST || cortex_m->dcb_dhcsr & S_SLEEP) {
			target->state = TARGET_RUNNING;
			retval = ERROR_OK;
		}
	}

	/* Check that target is truly halted, since the target could be resumed externally */
	if ((prev_target_state == TARGET_HALTED) && !(cortex_m->dcb_dhcsr & S_HALT)) {
		/* registers are now invalid */
		register_cache_invalidate(armv7m->arm.core_cache);

		target->state = TARGET_RUNNING;
		LOG_TARGET_WARNING(target, "external resume detected");
		target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
		retval = ERROR_OK;
	}

	/* Did we detect a failure condition that we cleared? */
	if (detected_failure != ERROR_OK)
		retval = detected_failure;
	return retval;
}
998
999 static int cortex_m_halt_one(struct target *target);
1000
1001 static int cortex_m_smp_halt_all(struct list_head *smp_targets)
1002 {
1003 int retval = ERROR_OK;
1004 struct target_list *head;
1005
1006 foreach_smp_target(head, smp_targets) {
1007 struct target *curr = head->target;
1008 if (!target_was_examined(curr))
1009 continue;
1010 if (curr->state == TARGET_HALTED)
1011 continue;
1012
1013 int ret2 = cortex_m_halt_one(curr);
1014 if (retval == ERROR_OK)
1015 retval = ret2; /* store the first error code ignore others */
1016 }
1017 return retval;
1018 }
1019
1020 static int cortex_m_smp_post_halt_poll(struct list_head *smp_targets)
1021 {
1022 int retval = ERROR_OK;
1023 struct target_list *head;
1024
1025 foreach_smp_target(head, smp_targets) {
1026 struct target *curr = head->target;
1027 if (!target_was_examined(curr))
1028 continue;
1029 /* skip targets that were already halted */
1030 if (curr->state == TARGET_HALTED)
1031 continue;
1032
1033 int ret2 = cortex_m_poll_one(curr);
1034 if (retval == ERROR_OK)
1035 retval = ret2; /* store the first error code ignore others */
1036 }
1037 return retval;
1038 }
1039
1040 static int cortex_m_poll_smp(struct list_head *smp_targets)
1041 {
1042 int retval = ERROR_OK;
1043 struct target_list *head;
1044 bool halted = false;
1045
1046 foreach_smp_target(head, smp_targets) {
1047 struct target *curr = head->target;
1048 if (curr->smp_halt_event_postponed) {
1049 halted = true;
1050 break;
1051 }
1052 }
1053
1054 if (halted) {
1055 retval = cortex_m_smp_halt_all(smp_targets);
1056
1057 int ret2 = cortex_m_smp_post_halt_poll(smp_targets);
1058 if (retval == ERROR_OK)
1059 retval = ret2; /* store the first error code ignore others */
1060
1061 foreach_smp_target(head, smp_targets) {
1062 struct target *curr = head->target;
1063 if (!curr->smp_halt_event_postponed)
1064 continue;
1065
1066 curr->smp_halt_event_postponed = false;
1067 if (curr->state == TARGET_HALTED) {
1068 LOG_TARGET_DEBUG(curr, "sending postponed target event 'halted'");
1069 target_call_event_callbacks(curr, TARGET_EVENT_HALTED);
1070 }
1071 }
1072 /* There is no need to set gdb_service->target
1073 * as hwthread_update_threads() selects an interesting thread
1074 * by its own
1075 */
1076 }
1077 return retval;
1078 }
1079
1080 static int cortex_m_poll(struct target *target)
1081 {
1082 int retval = cortex_m_poll_one(target);
1083
1084 if (target->smp) {
1085 struct target_list *last;
1086 last = list_last_entry(target->smp_targets, struct target_list, lh);
1087 if (target == last->target)
1088 /* After the last target in SMP group has been polled
1089 * check for postponed halted events and eventually halt and re-poll
1090 * other targets */
1091 cortex_m_poll_smp(target->smp_targets);
1092 }
1093 return retval;
1094 }
1095
1096 static int cortex_m_halt_one(struct target *target)
1097 {
1098 LOG_TARGET_DEBUG(target, "target->state: %s", target_state_name(target));
1099
1100 if (target->state == TARGET_HALTED) {
1101 LOG_TARGET_DEBUG(target, "target was already halted");
1102 return ERROR_OK;
1103 }
1104
1105 if (target->state == TARGET_UNKNOWN)
1106 LOG_TARGET_WARNING(target, "target was in unknown state when halt was requested");
1107
1108 if (target->state == TARGET_RESET) {
1109 if ((jtag_get_reset_config() & RESET_SRST_PULLS_TRST) && jtag_get_srst()) {
1110 LOG_TARGET_ERROR(target, "can't request a halt while in reset if nSRST pulls nTRST");
1111 return ERROR_TARGET_FAILURE;
1112 } else {
1113 /* we came here in a reset_halt or reset_init sequence
1114 * debug entry was already prepared in cortex_m3_assert_reset()
1115 */
1116 target->debug_reason = DBG_REASON_DBGRQ;
1117
1118 return ERROR_OK;
1119 }
1120 }
1121
1122 /* Write to Debug Halting Control and Status Register */
1123 cortex_m_write_debug_halt_mask(target, C_HALT, 0);
1124
1125 /* Do this really early to minimize the window where the MASKINTS erratum
1126 * can pile up pending interrupts. */
1127 cortex_m_set_maskints_for_halt(target);
1128
1129 target->debug_reason = DBG_REASON_DBGRQ;
1130
1131 return ERROR_OK;
1132 }
1133
1134 static int cortex_m_halt(struct target *target)
1135 {
1136 if (target->smp)
1137 return cortex_m_smp_halt_all(target->smp_targets);
1138 else
1139 return cortex_m_halt_one(target);
1140 }
1141
/* Core-only soft reset: arm VC_CORERESET vector catch, trigger VECTRESET
 * through AIRCR and wait (up to ~100 ms) for the core to halt on the reset
 * vector. Peripherals are NOT reset by this sequence. */
static int cortex_m_soft_reset_halt(struct target *target)
{
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = &cortex_m->armv7m;
	int retval, timeout = 0;

	/* on single cortex_m MCU soft_reset_halt should be avoided as same functionality
	 * can be obtained by using 'reset halt' and 'cortex_m reset_config vectreset'.
	 * As this reset only uses VC_CORERESET it would only ever reset the cortex_m
	 * core, not the peripherals */
	LOG_TARGET_DEBUG(target, "soft_reset_halt is discouraged, please use 'reset halt' instead.");

	/* VECTRESET is unavailable on some cores (see vectreset_supported) */
	if (!cortex_m->vectreset_supported) {
		LOG_TARGET_ERROR(target, "VECTRESET is not supported on this Cortex-M core");
		return ERROR_FAIL;
	}

	/* Set C_DEBUGEN */
	retval = cortex_m_write_debug_halt_mask(target, 0, C_STEP | C_MASKINTS);
	if (retval != ERROR_OK)
		return retval;

	/* Enter debug state on reset; restore DEMCR in endreset_event() */
	retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DEMCR,
			TRCENA | VC_HARDERR | VC_BUSERR | VC_CORERESET);
	if (retval != ERROR_OK)
		return retval;

	/* Request a core-only reset */
	retval = mem_ap_write_atomic_u32(armv7m->debug_ap, NVIC_AIRCR,
			AIRCR_VECTKEY | AIRCR_VECTRESET);
	if (retval != ERROR_OK)
		return retval;
	target->state = TARGET_RESET;

	/* registers are now invalid */
	register_cache_invalidate(cortex_m->armv7m.arm.core_cache);

	/* Poll DHCSR/DFSR until S_HALT and DFSR.VCATCH confirm the vector
	 * catch fired; DHCSR read errors are retried until the timeout. */
	while (timeout < 100) {
		retval = cortex_m_read_dhcsr_atomic_sticky(target);
		if (retval == ERROR_OK) {
			retval = mem_ap_read_atomic_u32(armv7m->debug_ap, NVIC_DFSR,
					&cortex_m->nvic_dfsr);
			if (retval != ERROR_OK)
				return retval;
			if ((cortex_m->dcb_dhcsr & S_HALT)
				&& (cortex_m->nvic_dfsr & DFSR_VCATCH)) {
				LOG_TARGET_DEBUG(target, "system reset-halted, DHCSR 0x%08" PRIx32 ", DFSR 0x%08" PRIx32,
					cortex_m->dcb_dhcsr, cortex_m->nvic_dfsr);
				cortex_m_poll(target);
				/* FIXME restore user's vector catch config */
				return ERROR_OK;
			} else {
				LOG_TARGET_DEBUG(target, "waiting for system reset-halt, "
					"DHCSR 0x%08" PRIx32 ", %d ms",
					cortex_m->dcb_dhcsr, timeout);
			}
		}
		timeout++;
		alive_sleep(1);
	}

	/* NOTE(review): timing out here still returns ERROR_OK — presumably
	 * deliberate best-effort behavior; confirm before changing. */
	return ERROR_OK;
}
1206
1207 void cortex_m_enable_breakpoints(struct target *target)
1208 {
1209 struct breakpoint *breakpoint = target->breakpoints;
1210
1211 /* set any pending breakpoints */
1212 while (breakpoint) {
1213 if (!breakpoint->is_set)
1214 cortex_m_set_breakpoint(target, breakpoint);
1215 breakpoint = breakpoint->next;
1216 }
1217 }
1218
/* Prepare a halted core for execution: optionally redirect the PC, re-arm
 * break/watchpoints (unless in debug/algorithm execution), flush dirty
 * registers to the core and, if requested, single-step over a breakpoint
 * sitting at the resume PC. Does not start the core itself — see
 * cortex_m_restart_one().
 *
 * current=true: resume at the current PC, which is written back through
 * *address; otherwise resume at *address. */
static int cortex_m_restore_one(struct target *target, bool current,
	target_addr_t *address, bool handle_breakpoints, bool debug_execution)
{
	struct armv7m_common *armv7m = target_to_armv7m(target);
	struct breakpoint *breakpoint = NULL;
	uint32_t resume_pc;
	struct reg *r;

	if (target->state != TARGET_HALTED) {
		LOG_TARGET_ERROR(target, "not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* Normal resume: release working areas and re-arm all configured
	 * break/watchpoints; algorithm execution keeps them out of the way. */
	if (!debug_execution) {
		target_free_all_working_areas(target);
		cortex_m_enable_breakpoints(target);
		cortex_m_enable_watchpoints(target);
	}

	if (debug_execution) {
		r = armv7m->arm.core_cache->reg_list + ARMV7M_PRIMASK;

		/* Disable interrupts */
		/* We disable interrupts in the PRIMASK register instead of
		 * masking with C_MASKINTS. This is probably the same issue
		 * as Cortex-M3 Erratum 377493 (fixed in r1p0): C_MASKINTS
		 * in parallel with disabled interrupts can cause local faults
		 * to not be taken.
		 *
		 * This breaks non-debug (application) execution if not
		 * called from armv7m_start_algorithm() which saves registers.
		 */
		buf_set_u32(r->value, 0, 1, 1);
		r->dirty = true;
		r->valid = true;

		/* Make sure we are in Thumb mode, set xPSR.T bit */
		/* armv7m_start_algorithm() initializes entire xPSR register.
		 * This duplicity handles the case when cortex_m_resume()
		 * is used with the debug_execution flag directly,
		 * not called through armv7m_start_algorithm().
		 */
		r = armv7m->arm.cpsr;
		buf_set_u32(r->value, 24, 1, 1);
		r->dirty = true;
		r->valid = true;
	}

	/* current = 1: continue on current pc, otherwise continue at <address> */
	r = armv7m->arm.pc;
	if (!current) {
		buf_set_u32(r->value, 0, 32, *address);
		r->dirty = true;
		r->valid = true;
	}

	/* if we halted last time due to a bkpt instruction
	 * then we have to manually step over it, otherwise
	 * the core will break again */

	if (!breakpoint_find(target, buf_get_u32(r->value, 0, 32))
		&& !debug_execution)
		armv7m_maybe_skip_bkpt_inst(target, NULL);

	resume_pc = buf_get_u32(r->value, 0, 32);
	/* report the effective resume address back to the caller */
	if (current)
		*address = resume_pc;

	/* flush all dirty registers to the core before it runs */
	int retval = armv7m_restore_context(target);
	if (retval != ERROR_OK)
		return retval;

	/* the front-end may request us not to handle breakpoints */
	if (handle_breakpoints) {
		/* Single step past breakpoint at current address */
		breakpoint = breakpoint_find(target, resume_pc);
		if (breakpoint) {
			LOG_TARGET_DEBUG(target, "unset breakpoint at " TARGET_ADDR_FMT " (ID: %" PRIu32 ")",
				breakpoint->address,
				breakpoint->unique_id);
			retval = cortex_m_unset_breakpoint(target, breakpoint);
			if (retval == ERROR_OK)
				retval = cortex_m_single_step_core(target);
			/* re-arm the breakpoint even if the step failed */
			int ret2 = cortex_m_set_breakpoint(target, breakpoint);
			if (retval != ERROR_OK)
				return retval;
			if (ret2 != ERROR_OK)
				return ret2;
		}
	}

	return ERROR_OK;
}
1312
1313 static int cortex_m_restart_one(struct target *target, bool debug_execution)
1314 {
1315 struct armv7m_common *armv7m = target_to_armv7m(target);
1316
1317 /* Restart core */
1318 cortex_m_set_maskints_for_run(target);
1319 cortex_m_write_debug_halt_mask(target, 0, C_HALT);
1320
1321 target->debug_reason = DBG_REASON_NOTHALTED;
1322 /* registers are now invalid */
1323 register_cache_invalidate(armv7m->arm.core_cache);
1324
1325 if (!debug_execution) {
1326 target->state = TARGET_RUNNING;
1327 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1328 } else {
1329 target->state = TARGET_DEBUG_RUNNING;
1330 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
1331 }
1332
1333 return ERROR_OK;
1334 }
1335
1336 static int cortex_m_restore_smp(struct target *target, bool handle_breakpoints)
1337 {
1338 struct target_list *head;
1339 target_addr_t address;
1340 foreach_smp_target(head, target->smp_targets) {
1341 struct target *curr = head->target;
1342 /* skip calling target */
1343 if (curr == target)
1344 continue;
1345 if (!target_was_examined(curr))
1346 continue;
1347 /* skip running targets */
1348 if (curr->state == TARGET_RUNNING)
1349 continue;
1350
1351 int retval = cortex_m_restore_one(curr, true, &address,
1352 handle_breakpoints, false);
1353 if (retval != ERROR_OK)
1354 return retval;
1355
1356 retval = cortex_m_restart_one(curr, false);
1357 if (retval != ERROR_OK)
1358 return retval;
1359
1360 LOG_TARGET_DEBUG(curr, "SMP resumed at " TARGET_ADDR_FMT, address);
1361 }
1362 return ERROR_OK;
1363 }
1364
1365 static int cortex_m_resume(struct target *target, int current,
1366 target_addr_t address, int handle_breakpoints, int debug_execution)
1367 {
1368 int retval = cortex_m_restore_one(target, !!current, &address, !!handle_breakpoints, !!debug_execution);
1369 if (retval != ERROR_OK) {
1370 LOG_TARGET_ERROR(target, "context restore failed, aborting resume");
1371 return retval;
1372 }
1373
1374 if (target->smp && !debug_execution) {
1375 retval = cortex_m_restore_smp(target, !!handle_breakpoints);
1376 if (retval != ERROR_OK)
1377 LOG_WARNING("resume of a SMP target failed, trying to resume current one");
1378 }
1379
1380 cortex_m_restart_one(target, !!debug_execution);
1381 if (retval != ERROR_OK) {
1382 LOG_TARGET_ERROR(target, "resume failed");
1383 return retval;
1384 }
1385
1386 LOG_TARGET_DEBUG(target, "%sresumed at " TARGET_ADDR_FMT,
1387 debug_execution ? "debug " : "", address);
1388
1389 return ERROR_OK;
1390 }
1391
1392 /* int irqstepcount = 0; */
/* Single-step one instruction on a halted core.
 *
 * current != 0: step at the current PC; otherwise set PC to 'address' first.
 * handle_breakpoints != 0: temporarily remove a breakpoint at the step PC.
 *
 * In CORTEX_M_ISRMASK_AUTO mode pending interrupts are first allowed to run
 * to completion (bounded by a 500 ms timeout) behind a temporary breakpoint
 * at the current PC, and only then is the instruction stepped with
 * interrupts masked — see the in-line commentary below. */
static int cortex_m_step(struct target *target, int current,
	target_addr_t address, int handle_breakpoints)
{
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = &cortex_m->armv7m;
	struct breakpoint *breakpoint = NULL;
	struct reg *pc = armv7m->arm.pc;
	bool bkpt_inst_found = false;
	int retval;
	bool isr_timed_out = false;

	if (target->state != TARGET_HALTED) {
		LOG_TARGET_ERROR(target, "not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* Just one of SMP cores will step. Set the gdb control
	 * target to current one or gdb miss gdb-end event */
	if (target->smp && target->gdb_service)
		target->gdb_service->target = target;

	/* current = 1: continue on current pc, otherwise continue at <address> */
	if (!current) {
		buf_set_u32(pc->value, 0, 32, address);
		pc->dirty = true;
		pc->valid = true;
	}

	uint32_t pc_value = buf_get_u32(pc->value, 0, 32);

	/* the front-end may request us not to handle breakpoints */
	if (handle_breakpoints) {
		breakpoint = breakpoint_find(target, pc_value);
		if (breakpoint)
			cortex_m_unset_breakpoint(target, breakpoint);
	}

	armv7m_maybe_skip_bkpt_inst(target, &bkpt_inst_found);

	target->debug_reason = DBG_REASON_SINGLESTEP;

	/* flush dirty registers before the core executes anything */
	armv7m_restore_context(target);

	target_call_event_callbacks(target, TARGET_EVENT_RESUMED);

	/* if no bkpt instruction is found at pc then we can perform
	 * a normal step, otherwise we have to manually step over the bkpt
	 * instruction - as such simulate a step */
	if (bkpt_inst_found == false) {
		if (cortex_m->isrmasking_mode != CORTEX_M_ISRMASK_AUTO) {
			/* Automatic ISR masking mode off: Just step over the next
			 * instruction, with interrupts on or off as appropriate. */
			cortex_m_set_maskints_for_step(target);
			cortex_m_write_debug_halt_mask(target, C_STEP, C_HALT);
		} else {
			/* Process interrupts during stepping in a way they don't interfere
			 * debugging.
			 *
			 * Principle:
			 *
			 * Set a temporary break point at the current pc and let the core run
			 * with interrupts enabled. Pending interrupts get served and we run
			 * into the breakpoint again afterwards. Then we step over the next
			 * instruction with interrupts disabled.
			 *
			 * If the pending interrupts don't complete within time, we leave the
			 * core running. This may happen if the interrupts trigger faster
			 * than the core can process them or the handler doesn't return.
			 *
			 * If no more breakpoints are available we simply do a step with
			 * interrupts enabled.
			 *
			 */

			/* 2012-09-29 ph
			 *
			 * If a break point is already set on the lower half word then a break point on
			 * the upper half word will not break again when the core is restarted. So we
			 * just step over the instruction with interrupts disabled.
			 *
			 * The documentation has no information about this, it was found by observation
			 * on STM32F1 and STM32F2. Proper explanation welcome. STM32F0 doesn't seem to
			 * suffer from this problem.
			 *
			 * To add some confusion: pc_value has bit 0 always set, while the breakpoint
			 * address has it always cleared. The former is done to indicate thumb mode
			 * to gdb.
			 *
			 */
			if ((pc_value & 0x02) && breakpoint_find(target, pc_value & ~0x03)) {
				LOG_TARGET_DEBUG(target, "Stepping over next instruction with interrupts disabled");
				cortex_m_write_debug_halt_mask(target, C_HALT | C_MASKINTS, 0);
				cortex_m_write_debug_halt_mask(target, C_STEP, C_HALT);
				/* Re-enable interrupts if appropriate */
				cortex_m_write_debug_halt_mask(target, C_HALT, 0);
				cortex_m_set_maskints_for_halt(target);
			} else {

				/* Set a temporary break point */
				if (breakpoint) {
					retval = cortex_m_set_breakpoint(target, breakpoint);
				} else {
					enum breakpoint_type type = BKPT_HARD;
					if (cortex_m->fp_rev == 0 && pc_value > 0x1FFFFFFF) {
						/* FPB rev.1 cannot handle such addr, try BKPT instr */
						type = BKPT_SOFT;
					}
					retval = breakpoint_add(target, pc_value, 2, type);
				}

				bool tmp_bp_set = (retval == ERROR_OK);

				/* No more breakpoints left, just do a step */
				if (!tmp_bp_set) {
					cortex_m_set_maskints_for_step(target);
					cortex_m_write_debug_halt_mask(target, C_STEP, C_HALT);
					/* Re-enable interrupts if appropriate */
					cortex_m_write_debug_halt_mask(target, C_HALT, 0);
					cortex_m_set_maskints_for_halt(target);
				} else {
					/* Start the core */
					LOG_TARGET_DEBUG(target, "Starting core to serve pending interrupts");
					int64_t t_start = timeval_ms();
					cortex_m_set_maskints_for_run(target);
					cortex_m_write_debug_halt_mask(target, 0, C_HALT | C_STEP);

					/* Wait for pending handlers to complete or timeout */
					do {
						retval = cortex_m_read_dhcsr_atomic_sticky(target);
						if (retval != ERROR_OK) {
							target->state = TARGET_UNKNOWN;
							return retval;
						}
						isr_timed_out = ((timeval_ms() - t_start) > 500);
					} while (!((cortex_m->dcb_dhcsr & S_HALT) || isr_timed_out));

					/* only remove breakpoint if we created it */
					if (breakpoint)
						cortex_m_unset_breakpoint(target, breakpoint);
					else {
						/* Remove the temporary breakpoint */
						breakpoint_remove(target, pc_value);
					}

					if (isr_timed_out) {
						LOG_TARGET_DEBUG(target, "Interrupt handlers didn't complete within time, "
							"leaving target running");
					} else {
						/* Step over next instruction with interrupts disabled */
						cortex_m_set_maskints_for_step(target);
						cortex_m_write_debug_halt_mask(target,
							C_HALT | C_MASKINTS,
							0);
						cortex_m_write_debug_halt_mask(target, C_STEP, C_HALT);
						/* Re-enable interrupts if appropriate */
						cortex_m_write_debug_halt_mask(target, C_HALT, 0);
						cortex_m_set_maskints_for_halt(target);
					}
				}
			}
		}
	}

	retval = cortex_m_read_dhcsr_atomic_sticky(target);
	if (retval != ERROR_OK)
		return retval;

	/* registers are now invalid */
	register_cache_invalidate(armv7m->arm.core_cache);

	/* re-arm the breakpoint we removed at the step PC, if any */
	if (breakpoint)
		cortex_m_set_breakpoint(target, breakpoint);

	if (isr_timed_out) {
		/* Leave the core running. The user has to stop execution manually. */
		target->debug_reason = DBG_REASON_NOTHALTED;
		target->state = TARGET_RUNNING;
		return ERROR_OK;
	}

	LOG_TARGET_DEBUG(target, "target stepped dcb_dhcsr = 0x%" PRIx32
		" nvic_icsr = 0x%" PRIx32,
		cortex_m->dcb_dhcsr, cortex_m->nvic_icsr);

	retval = cortex_m_debug_entry(target);
	if (retval != ERROR_OK)
		return retval;
	target_call_event_callbacks(target, TARGET_EVENT_HALTED);

	LOG_TARGET_DEBUG(target, "target stepped dcb_dhcsr = 0x%" PRIx32
		" nvic_icsr = 0x%" PRIx32,
		cortex_m->dcb_dhcsr, cortex_m->nvic_icsr);

	return ERROR_OK;
}
1588
/* Assert reset on the target. Depending on configuration this uses SRST,
 * SYSRESETREQ or VECTRESET; with reset_halt the core is set up (via DEMCR
 * vector catch) to stop in debug state right out of reset. AP access errors
 * are deliberately stored in 'retval' and reported at the end instead of
 * aborting mid-sequence, so the reset itself still happens. */
static int cortex_m_assert_reset(struct target *target)
{
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = &cortex_m->armv7m;
	enum cortex_m_soft_reset_config reset_config = cortex_m->soft_reset_config;

	LOG_TARGET_DEBUG(target, "target->state: %s,%s examined",
		target_state_name(target),
		target_was_examined(target) ? "" : " not");

	enum reset_types jtag_reset_config = jtag_get_reset_config();

	if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT)) {
		/* allow scripts to override the reset event */

		target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
		register_cache_invalidate(cortex_m->armv7m.arm.core_cache);
		target->state = TARGET_RESET;

		return ERROR_OK;
	}

	/* some cores support connecting while srst is asserted
	 * use that mode is it has been configured */

	bool srst_asserted = false;

	if ((jtag_reset_config & RESET_HAS_SRST) &&
	    ((jtag_reset_config & RESET_SRST_NO_GATING) || !armv7m->debug_ap)) {
		/* If we have no debug_ap, asserting SRST is the only thing
		 * we can do now */
		adapter_assert_reset();
		srst_asserted = true;
	}

	/* TODO: replace the hack calling target_examine_one()
	 * as soon as a better reset framework is available */
	if (!target_was_examined(target) && !target->defer_examine
		&& srst_asserted && (jtag_reset_config & RESET_SRST_NO_GATING)) {
		LOG_TARGET_DEBUG(target, "Trying to re-examine under reset");
		target_examine_one(target);
	}

	/* We need at least debug_ap to go further.
	 * Inform user and bail out if we don't have one. */
	if (!armv7m->debug_ap) {
		if (srst_asserted) {
			if (target->reset_halt)
				LOG_TARGET_ERROR(target, "Debug AP not available, will not halt after reset!");

			/* Do not propagate error: reset was asserted, proceed to deassert! */
			target->state = TARGET_RESET;
			register_cache_invalidate(cortex_m->armv7m.arm.core_cache);
			return ERROR_OK;

		} else {
			LOG_TARGET_ERROR(target, "Debug AP not available, reset NOT asserted!");
			return ERROR_FAIL;
		}
	}

	/* Enable debug requests */
	int retval = cortex_m_read_dhcsr_atomic_sticky(target);

	/* Store important errors instead of failing and proceed to reset assert */

	if (retval != ERROR_OK || !(cortex_m->dcb_dhcsr & C_DEBUGEN))
		retval = cortex_m_write_debug_halt_mask(target, 0, C_HALT | C_STEP | C_MASKINTS);

	/* If the processor is sleeping in a WFI or WFE instruction, the
	 * C_HALT bit must be asserted to regain control */
	if (retval == ERROR_OK && (cortex_m->dcb_dhcsr & S_SLEEP))
		retval = cortex_m_write_debug_halt_mask(target, C_HALT, 0);

	mem_ap_write_u32(armv7m->debug_ap, DCB_DCRDR, 0);
	/* Ignore less important errors */

	if (!target->reset_halt) {
		/* Set/Clear C_MASKINTS in a separate operation */
		cortex_m_set_maskints_for_run(target);

		/* clear any debug flags before resuming */
		cortex_m_clear_halt(target);

		/* clear C_HALT in dhcsr reg */
		cortex_m_write_debug_halt_mask(target, 0, C_HALT);
	} else {
		/* Halt in debug on reset; endreset_event() restores DEMCR.
		 *
		 * REVISIT catching BUSERR presumably helps to defend against
		 * bad vector table entries. Should this include MMERR or
		 * other flags too?
		 */
		int retval2;
		retval2 = mem_ap_write_atomic_u32(armv7m->debug_ap, DCB_DEMCR,
				TRCENA | VC_HARDERR | VC_BUSERR | VC_CORERESET);
		if (retval != ERROR_OK || retval2 != ERROR_OK)
			LOG_TARGET_INFO(target, "AP write error, reset will not halt");
	}

	if (jtag_reset_config & RESET_HAS_SRST) {
		/* default to asserting srst */
		if (!srst_asserted)
			adapter_assert_reset();

		/* srst is asserted, ignore AP access errors */
		retval = ERROR_OK;
	} else {
		/* Use a standard Cortex-M3 software reset mechanism.
		 * We default to using VECTRESET as it is supported on all current cores
		 * (except Cortex-M0, M0+ and M1 which support SYSRESETREQ only!)
		 * This has the disadvantage of not resetting the peripherals, so a
		 * reset-init event handler is needed to perform any peripheral resets.
		 */
		if (!cortex_m->vectreset_supported
				&& reset_config == CORTEX_M_RESET_VECTRESET) {
			reset_config = CORTEX_M_RESET_SYSRESETREQ;
			LOG_TARGET_WARNING(target, "VECTRESET is not supported on this Cortex-M core, using SYSRESETREQ instead.");
			LOG_TARGET_WARNING(target, "Set 'cortex_m reset_config sysresetreq'.");
		}

		LOG_TARGET_DEBUG(target, "Using Cortex-M %s", (reset_config == CORTEX_M_RESET_SYSRESETREQ)
			? "SYSRESETREQ" : "VECTRESET");

		if (reset_config == CORTEX_M_RESET_VECTRESET) {
			LOG_TARGET_WARNING(target, "Only resetting the Cortex-M core, use a reset-init event "
				"handler to reset any peripherals or configure hardware srst support.");
		}

		int retval3;
		retval3 = mem_ap_write_atomic_u32(armv7m->debug_ap, NVIC_AIRCR,
				AIRCR_VECTKEY | ((reset_config == CORTEX_M_RESET_SYSRESETREQ)
				? AIRCR_SYSRESETREQ : AIRCR_VECTRESET));
		if (retval3 != ERROR_OK)
			LOG_TARGET_DEBUG(target, "Ignoring AP write error right after reset");

		retval3 = dap_dp_init_or_reconnect(armv7m->debug_ap->dap);
		if (retval3 != ERROR_OK) {
			LOG_TARGET_ERROR(target, "DP initialisation failed");
			/* The error return value must not be propagated in this case.
			 * SYSRESETREQ or VECTRESET have been possibly triggered
			 * so reset processing should continue */
		} else {
			/* I do not know why this is necessary, but it
			 * fixes strange effects (step/resume cause NMI
			 * after reset) on LM3S6918 -- Michael Schwingen
			 */
			uint32_t tmp;
			mem_ap_read_atomic_u32(armv7m->debug_ap, NVIC_AIRCR, &tmp);
		}
	}

	target->state = TARGET_RESET;
	/* give the reset time to take effect before touching the target again */
	jtag_sleep(50000);

	register_cache_invalidate(cortex_m->armv7m.arm.core_cache);

	/* now return stored error code if any */
	if (retval != ERROR_OK)
		return retval;

	if (target->reset_halt && target_was_examined(target)) {
		retval = target_halt(target);
		if (retval != ERROR_OK)
			return retval;
	}

	return ERROR_OK;
}
1758
1759 static int cortex_m_deassert_reset(struct target *target)
1760 {
1761 struct armv7m_common *armv7m = &target_to_cm(target)->armv7m;
1762
1763 LOG_TARGET_DEBUG(target, "target->state: %s,%s examined",
1764 target_state_name(target),
1765 target_was_examined(target) ? "" : " not");
1766
1767 /* deassert reset lines */
1768 adapter_deassert_reset();
1769
1770 enum reset_types jtag_reset_config = jtag_get_reset_config();
1771
1772 if ((jtag_reset_config & RESET_HAS_SRST) &&
1773 !(jtag_reset_config & RESET_SRST_NO_GATING) &&
1774 armv7m->debug_ap) {
1775
1776 int retval = dap_dp_init_or_reconnect(armv7m->debug_ap->dap);
1777 if (retval != ERROR_OK) {
1778 LOG_TARGET_ERROR(target, "DP initialisation failed");
1779 return retval;
1780 }
1781 }
1782
1783 return ERROR_OK;
1784 }
1785
1786 int cortex_m_set_breakpoint(struct target *target, struct breakpoint *breakpoint)
1787 {
1788 int retval;
1789 unsigned int fp_num = 0;
1790 struct cortex_m_common *cortex_m = target_to_cm(target);
1791 struct cortex_m_fp_comparator *comparator_list = cortex_m->fp_comparator_list;
1792
1793 if (breakpoint->is_set) {
1794 LOG_TARGET_WARNING(target, "breakpoint (BPID: %" PRIu32 ") already set", breakpoint->unique_id);
1795 return ERROR_OK;
1796 }
1797
1798 if (breakpoint->type == BKPT_HARD) {
1799 uint32_t fpcr_value;
1800 while (comparator_list[fp_num].used && (fp_num < cortex_m->fp_num_code))
1801 fp_num++;
1802 if (fp_num >= cortex_m->fp_num_code) {
1803 LOG_TARGET_ERROR(target, "Can not find free FPB Comparator!");
1804 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1805 }
1806 breakpoint_hw_set(breakpoint, fp_num);
1807 fpcr_value = breakpoint->address | 1;
1808 if (cortex_m->fp_rev == 0) {
1809 if (breakpoint->address > 0x1FFFFFFF) {
1810 LOG_TARGET_ERROR(target, "Cortex-M Flash Patch Breakpoint rev.1 "
1811 "cannot handle HW breakpoint above address 0x1FFFFFFE");
1812 return ERROR_FAIL;
1813 }
1814 uint32_t hilo;
1815 hilo = (breakpoint->address & 0x2) ? FPCR_REPLACE_BKPT_HIGH : FPCR_REPLACE_BKPT_LOW;
1816 fpcr_value = (fpcr_value & 0x1FFFFFFC) | hilo | 1;
1817 } else if (cortex_m->fp_rev > 1) {
1818 LOG_TARGET_ERROR(target, "Unhandled Cortex-M Flash Patch Breakpoint architecture revision");
1819 return ERROR_FAIL;
1820 }
1821 comparator_list[fp_num].used = true;
1822 comparator_list[fp_num].fpcr_value = fpcr_value;
1823 target_write_u32(target, comparator_list[fp_num].fpcr_address,
1824 comparator_list[fp_num].fpcr_value);
1825 LOG_TARGET_DEBUG(target, "fpc_num %i fpcr_value 0x%" PRIx32 "",
1826 fp_num,
1827 comparator_list[fp_num].fpcr_value);
1828 if (!cortex_m->fpb_enabled) {
1829 LOG_TARGET_DEBUG(target, "FPB wasn't enabled, do it now");
1830 retval = cortex_m_enable_fpb(target);
1831 if (retval != ERROR_OK) {
1832 LOG_TARGET_ERROR(target, "Failed to enable the FPB");
1833 return retval;
1834 }
1835
1836 cortex_m->fpb_enabled = true;
1837 }
1838 } else if (breakpoint->type == BKPT_SOFT) {
1839 uint8_t code[4];
1840
1841 /* NOTE: on ARMv6-M and ARMv7-M, BKPT(0xab) is used for
1842 * semihosting; don't use that. Otherwise the BKPT
1843 * parameter is arbitrary.
1844 */
1845 buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1846 retval = target_read_memory(target,
1847 breakpoint->address & 0xFFFFFFFE,
1848 breakpoint->length, 1,
1849 breakpoint->orig_instr);
1850 if (retval != ERROR_OK)
1851 return retval;
1852 retval = target_write_memory(target,
1853 breakpoint->address & 0xFFFFFFFE,
1854 breakpoint->length, 1,
1855 code);
1856 if (retval != ERROR_OK)
1857 return retval;
1858 breakpoint->is_set = true;
1859 }
1860
1861 LOG_TARGET_DEBUG(target, "BPID: %" PRIu32 ", Type: %d, Address: " TARGET_ADDR_FMT " Length: %d (n=%u)",
1862 breakpoint->unique_id,
1863 (int)(breakpoint->type),
1864 breakpoint->address,
1865 breakpoint->length,
1866 (breakpoint->type == BKPT_SOFT) ? 0 : breakpoint->number);
1867
1868 return ERROR_OK;
1869 }
1870
1871 int cortex_m_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
1872 {
1873 int retval;
1874 struct cortex_m_common *cortex_m = target_to_cm(target);
1875 struct cortex_m_fp_comparator *comparator_list = cortex_m->fp_comparator_list;
1876
1877 if (!breakpoint->is_set) {
1878 LOG_TARGET_WARNING(target, "breakpoint not set");
1879 return ERROR_OK;
1880 }
1881
1882 LOG_TARGET_DEBUG(target, "BPID: %" PRIu32 ", Type: %d, Address: " TARGET_ADDR_FMT " Length: %d (n=%u)",
1883 breakpoint->unique_id,
1884 (int)(breakpoint->type),
1885 breakpoint->address,
1886 breakpoint->length,
1887 (breakpoint->type == BKPT_SOFT) ? 0 : breakpoint->number);
1888
1889 if (breakpoint->type == BKPT_HARD) {
1890 unsigned int fp_num = breakpoint->number;
1891 if (fp_num >= cortex_m->fp_num_code) {
1892 LOG_TARGET_DEBUG(target, "Invalid FP Comparator number in breakpoint");
1893 return ERROR_OK;
1894 }
1895 comparator_list[fp_num].used = false;
1896 comparator_list[fp_num].fpcr_value = 0;
1897 target_write_u32(target, comparator_list[fp_num].fpcr_address,
1898 comparator_list[fp_num].fpcr_value);
1899 } else {
1900 /* restore original instruction (kept in target endianness) */
1901 retval = target_write_memory(target, breakpoint->address & 0xFFFFFFFE,
1902 breakpoint->length, 1,
1903 breakpoint->orig_instr);
1904 if (retval != ERROR_OK)
1905 return retval;
1906 }
1907 breakpoint->is_set = false;
1908
1909 return ERROR_OK;
1910 }
1911
1912 int cortex_m_add_breakpoint(struct target *target, struct breakpoint *breakpoint)
1913 {
1914 if (breakpoint->length == 3) {
1915 LOG_TARGET_DEBUG(target, "Using a two byte breakpoint for 32bit Thumb-2 request");
1916 breakpoint->length = 2;
1917 }
1918
1919 if ((breakpoint->length != 2)) {
1920 LOG_TARGET_INFO(target, "only breakpoints of two bytes length supported");
1921 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1922 }
1923
1924 return cortex_m_set_breakpoint(target, breakpoint);
1925 }
1926
1927 int cortex_m_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1928 {
1929 if (!breakpoint->is_set)
1930 return ERROR_OK;
1931
1932 return cortex_m_unset_breakpoint(target, breakpoint);
1933 }
1934
/* Program a free DWT comparator to implement the given watchpoint.
 *
 * Picks the first comparator whose "used" flag is clear, then writes its
 * COMP, MASK and FUNCTION registers.  Two encodings exist, selected by
 * DWT_DEVARCH: the legacy one (mask = log2 of length, function 5/6/7 for
 * read/write/access) and the ARMv8-M v2.x one (function 4/5/6 for
 * access/write/read, with extra bits set — presumably ACTION and data-size
 * fields; TODO confirm against the v8-M ARM).
 *
 * Returns ERROR_OK on success, ERROR_FAIL when no comparator is free.
 */
static int cortex_m_set_watchpoint(struct target *target, struct watchpoint *watchpoint)
{
	unsigned int dwt_num = 0;
	struct cortex_m_common *cortex_m = target_to_cm(target);

	/* REVISIT Don't fully trust these "not used" records ... users
	 * may set up breakpoints by hand, e.g. dual-address data value
	 * watchpoint using comparator #1; comparator #0 matching cycle
	 * count; send data trace info through ITM and TPIU; etc
	 */
	struct cortex_m_dwt_comparator *comparator;

	/* Linear scan for the first unused comparator. */
	for (comparator = cortex_m->dwt_comparator_list;
		comparator->used && dwt_num < cortex_m->dwt_num_comp;
		comparator++, dwt_num++)
		continue;
	if (dwt_num >= cortex_m->dwt_num_comp) {
		LOG_TARGET_ERROR(target, "Can not find free DWT Comparator");
		return ERROR_FAIL;
	}
	comparator->used = true;
	watchpoint_set(watchpoint, dwt_num);

	/* Program the address to match. */
	comparator->comp = watchpoint->address;
	target_write_u32(target, comparator->dwt_comparator_address + 0,
		comparator->comp);

	if ((cortex_m->dwt_devarch & 0x1FFFFF) != DWT_DEVARCH_ARMV8M_V2_0
			&& (cortex_m->dwt_devarch & 0x1FFFFF) != DWT_DEVARCH_ARMV8M_V2_1) {
		/* Legacy DWT: MASK holds log2(length) so the comparator
		 * ignores the low address bits. */
		uint32_t mask = 0, temp;

		/* watchpoint params were validated earlier */
		temp = watchpoint->length;
		while (temp) {
			temp >>= 1;
			mask++;
		}
		mask--;

		comparator->mask = mask;
		target_write_u32(target, comparator->dwt_comparator_address + 4,
			comparator->mask);

		switch (watchpoint->rw) {
			case WPT_READ:
				comparator->function = 5;
				break;
			case WPT_WRITE:
				comparator->function = 6;
				break;
			case WPT_ACCESS:
				comparator->function = 7;
				break;
		}
	} else {
		/* ARMv8-M v2.0/v2.1 DWT: different FUNCTION encoding. */
		uint32_t data_size = watchpoint->length >> 1;
		comparator->mask = (watchpoint->length >> 1) | 1;

		switch (watchpoint->rw) {
			case WPT_ACCESS:
				comparator->function = 4;
				break;
			case WPT_WRITE:
				comparator->function = 5;
				break;
			case WPT_READ:
				comparator->function = 6;
				break;
		}
		/* Bit 4 and the size field at bit 10 — match/size encoding per
		 * the v8-M DWT; TODO confirm field names against the ARM. */
		comparator->function = comparator->function | (1 << 4) |
				(data_size << 10);
	}

	/* FUNCTION write arms the comparator. */
	target_write_u32(target, comparator->dwt_comparator_address + 8,
		comparator->function);

	LOG_TARGET_DEBUG(target, "Watchpoint (ID %d) DWT%d 0x%08x 0x%x 0x%05x",
		watchpoint->unique_id, dwt_num,
		(unsigned) comparator->comp,
		(unsigned) comparator->mask,
		(unsigned) comparator->function);
	return ERROR_OK;
}
2018
2019 static int cortex_m_unset_watchpoint(struct target *target, struct watchpoint *watchpoint)
2020 {
2021 struct cortex_m_common *cortex_m = target_to_cm(target);
2022 struct cortex_m_dwt_comparator *comparator;
2023
2024 if (!watchpoint->is_set) {
2025 LOG_TARGET_WARNING(target, "watchpoint (wpid: %d) not set",
2026 watchpoint->unique_id);
2027 return ERROR_OK;
2028 }
2029
2030 unsigned int dwt_num = watchpoint->number;
2031
2032 LOG_TARGET_DEBUG(target, "Watchpoint (ID %d) DWT%u address: 0x%08x clear",
2033 watchpoint->unique_id, dwt_num,
2034 (unsigned) watchpoint->address);
2035
2036 if (dwt_num >= cortex_m->dwt_num_comp) {
2037 LOG_TARGET_DEBUG(target, "Invalid DWT Comparator number in watchpoint");
2038 return ERROR_OK;
2039 }
2040
2041 comparator = cortex_m->dwt_comparator_list + dwt_num;
2042 comparator->used = false;
2043 comparator->function = 0;
2044 target_write_u32(target, comparator->dwt_comparator_address + 8,
2045 comparator->function);
2046
2047 watchpoint->is_set = false;
2048
2049 return ERROR_OK;
2050 }
2051
2052 int cortex_m_add_watchpoint(struct target *target, struct watchpoint *watchpoint)
2053 {
2054 struct cortex_m_common *cortex_m = target_to_cm(target);
2055
2056 if (cortex_m->dwt_comp_available < 1) {
2057 LOG_TARGET_DEBUG(target, "no comparators?");
2058 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2059 }
2060
2061 /* REVISIT This DWT may well be able to watch for specific data
2062 * values. Requires comparator #1 to set DATAVMATCH and match
2063 * the data, and another comparator (DATAVADDR0) matching addr.
2064 *
2065 * NOTE: hardware doesn't support data value masking, so we'll need
2066 * to check that mask is zero
2067 */
2068 if (watchpoint->mask != WATCHPOINT_IGNORE_DATA_VALUE_MASK) {
2069 LOG_TARGET_DEBUG(target, "watchpoint value masks not supported");
2070 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2071 }
2072
2073 /* hardware allows address masks of up to 32K */
2074 unsigned mask;
2075
2076 for (mask = 0; mask < 16; mask++) {
2077 if ((1u << mask) == watchpoint->length)
2078 break;
2079 }
2080 if (mask == 16) {
2081 LOG_TARGET_DEBUG(target, "unsupported watchpoint length");
2082 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2083 }
2084 if (watchpoint->address & ((1 << mask) - 1)) {
2085 LOG_TARGET_DEBUG(target, "watchpoint address is unaligned");
2086 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2087 }
2088
2089 cortex_m->dwt_comp_available--;
2090 LOG_TARGET_DEBUG(target, "dwt_comp_available: %d", cortex_m->dwt_comp_available);
2091
2092 return ERROR_OK;
2093 }
2094
2095 int cortex_m_remove_watchpoint(struct target *target, struct watchpoint *watchpoint)
2096 {
2097 struct cortex_m_common *cortex_m = target_to_cm(target);
2098
2099 /* REVISIT why check? DWT can be updated with core running ... */
2100 if (target->state != TARGET_HALTED) {
2101 LOG_TARGET_ERROR(target, "not halted");
2102 return ERROR_TARGET_NOT_HALTED;
2103 }
2104
2105 if (watchpoint->is_set)
2106 cortex_m_unset_watchpoint(target, watchpoint);
2107
2108 cortex_m->dwt_comp_available++;
2109 LOG_TARGET_DEBUG(target, "dwt_comp_available: %d", cortex_m->dwt_comp_available);
2110
2111 return ERROR_OK;
2112 }
2113
2114 static int cortex_m_hit_watchpoint(struct target *target, struct watchpoint **hit_watchpoint)
2115 {
2116 if (target->debug_reason != DBG_REASON_WATCHPOINT)
2117 return ERROR_FAIL;
2118
2119 struct cortex_m_common *cortex_m = target_to_cm(target);
2120
2121 for (struct watchpoint *wp = target->watchpoints; wp; wp = wp->next) {
2122 if (!wp->is_set)
2123 continue;
2124
2125 unsigned int dwt_num = wp->number;
2126 struct cortex_m_dwt_comparator *comparator = cortex_m->dwt_comparator_list + dwt_num;
2127
2128 uint32_t dwt_function;
2129 int retval = target_read_u32(target, comparator->dwt_comparator_address + 8, &dwt_function);
2130 if (retval != ERROR_OK)
2131 return ERROR_FAIL;
2132
2133 /* check the MATCHED bit */
2134 if (dwt_function & BIT(24)) {
2135 *hit_watchpoint = wp;
2136 return ERROR_OK;
2137 }
2138 }
2139
2140 return ERROR_FAIL;
2141 }
2142
2143 void cortex_m_enable_watchpoints(struct target *target)
2144 {
2145 struct watchpoint *watchpoint = target->watchpoints;
2146
2147 /* set any pending watchpoints */
2148 while (watchpoint) {
2149 if (!watchpoint->is_set)
2150 cortex_m_set_watchpoint(target, watchpoint);
2151 watchpoint = watchpoint->next;
2152 }
2153 }
2154
2155 static int cortex_m_read_memory(struct target *target, target_addr_t address,
2156 uint32_t size, uint32_t count, uint8_t *buffer)
2157 {
2158 struct armv7m_common *armv7m = target_to_armv7m(target);
2159
2160 if (armv7m->arm.arch == ARM_ARCH_V6M) {
2161 /* armv6m does not handle unaligned memory access */
2162 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
2163 return ERROR_TARGET_UNALIGNED_ACCESS;
2164 }
2165
2166 return mem_ap_read_buf(armv7m->debug_ap, buffer, size, count, address);
2167 }
2168
2169 static int cortex_m_write_memory(struct target *target, target_addr_t address,
2170 uint32_t size, uint32_t count, const uint8_t *buffer)
2171 {
2172 struct armv7m_common *armv7m = target_to_armv7m(target);
2173
2174 if (armv7m->arm.arch == ARM_ARCH_V6M) {
2175 /* armv6m does not handle unaligned memory access */
2176 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
2177 return ERROR_TARGET_UNALIGNED_ACCESS;
2178 }
2179
2180 return mem_ap_write_buf(armv7m->debug_ap, buffer, size, count, address);
2181 }
2182
/* Target-init hook: build the ARMv7-M register cache and initialize
 * semihosting support for this target. */
static int cortex_m_init_target(struct command_context *cmd_ctx,
	struct target *target)
{
	armv7m_build_reg_cache(target);
	arm_semihosting_init(target);
	return ERROR_OK;
}
2190
2191 void cortex_m_deinit_target(struct target *target)
2192 {
2193 struct cortex_m_common *cortex_m = target_to_cm(target);
2194 struct armv7m_common *armv7m = target_to_armv7m(target);
2195
2196 if (!armv7m->is_hla_target && armv7m->debug_ap)
2197 dap_put_ap(armv7m->debug_ap);
2198
2199 free(cortex_m->fp_comparator_list);
2200
2201 cortex_m_dwt_free(target);
2202 armv7m_free_reg_cache(target);
2203
2204 free(target->private_config);
2205 free(cortex_m);
2206 }
2207
2208 int cortex_m_profiling(struct target *target, uint32_t *samples,
2209 uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
2210 {
2211 struct timeval timeout, now;
2212 struct armv7m_common *armv7m = target_to_armv7m(target);
2213 uint32_t reg_value;
2214 int retval;
2215
2216 retval = target_read_u32(target, DWT_PCSR, &reg_value);
2217 if (retval != ERROR_OK) {
2218 LOG_TARGET_ERROR(target, "Error while reading PCSR");
2219 return retval;
2220 }
2221 if (reg_value == 0) {
2222 LOG_TARGET_INFO(target, "PCSR sampling not supported on this processor.");
2223 return target_profiling_default(target, samples, max_num_samples, num_samples, seconds);
2224 }
2225
2226 gettimeofday(&timeout, NULL);
2227 timeval_add_time(&timeout, seconds, 0);
2228
2229 LOG_TARGET_INFO(target, "Starting Cortex-M profiling. Sampling DWT_PCSR as fast as we can...");
2230
2231 /* Make sure the target is running */
2232 target_poll(target);
2233 if (target->state == TARGET_HALTED)
2234 retval = target_resume(target, 1, 0, 0, 0);
2235
2236 if (retval != ERROR_OK) {
2237 LOG_TARGET_ERROR(target, "Error while resuming target");
2238 return retval;
2239 }
2240
2241 uint32_t sample_count = 0;
2242
2243 for (;;) {
2244 if (armv7m && armv7m->debug_ap) {
2245 uint32_t read_count = max_num_samples - sample_count;
2246 if (read_count > 1024)
2247 read_count = 1024;
2248
2249 retval = mem_ap_read_buf_noincr(armv7m->debug_ap,
2250 (void *)&samples[sample_count],
2251 4, read_count, DWT_PCSR);
2252 sample_count += read_count;
2253 } else {
2254 target_read_u32(target, DWT_PCSR, &samples[sample_count++]);
2255 }
2256
2257 if (retval != ERROR_OK) {
2258 LOG_TARGET_ERROR(target, "Error while reading PCSR");
2259 return retval;
2260 }
2261
2262
2263 gettimeofday(&now, NULL);
2264 if (sample_count >= max_num_samples || timeval_compare(&now, &timeout) > 0) {
2265 LOG_TARGET_INFO(target, "Profiling completed. %" PRIu32 " samples.", sample_count);
2266 break;
2267 }
2268 }
2269
2270 *num_samples = sample_count;
2271 return retval;
2272 }
2273
2274
2275 /* REVISIT cache valid/dirty bits are unmaintained. We could set "valid"
2276 * on r/w if the core is not running, and clear on resume or reset ... or
2277 * at least, in a post_restore_context() method.
2278 */
2279
/* Per-register runtime state attached to each cached DWT register. */
struct dwt_reg_state {
	struct target *target;	/* target the register belongs to */
	uint32_t addr;		/* address of the DWT register */
	uint8_t value[4];	/* scratch/cache */
};
2285
2286 static int cortex_m_dwt_get_reg(struct reg *reg)
2287 {
2288 struct dwt_reg_state *state = reg->arch_info;
2289
2290 uint32_t tmp;
2291 int retval = target_read_u32(state->target, state->addr, &tmp);
2292 if (retval != ERROR_OK)
2293 return retval;
2294
2295 buf_set_u32(state->value, 0, 32, tmp);
2296 return ERROR_OK;
2297 }
2298
2299 static int cortex_m_dwt_set_reg(struct reg *reg, uint8_t *buf)
2300 {
2301 struct dwt_reg_state *state = reg->arch_info;
2302
2303 return target_write_u32(state->target, state->addr,
2304 buf_get_u32(buf, 0, reg->size));
2305 }
2306
/* Static description of one DWT register: address, cache name, bit width. */
struct dwt_reg {
	uint32_t addr;
	const char *name;
	unsigned size;
};
2312
/* DWT registers common to every DWT implementation. */
static const struct dwt_reg dwt_base_regs[] = {
	{ DWT_CTRL, "dwt_ctrl", 32, },
	/* NOTE that Erratum 532314 (fixed r2p0) affects CYCCNT: it wrongly
	 * increments while the core is asleep.
	 */
	{ DWT_CYCCNT, "dwt_cyccnt", 32, },
	/* plus some 8 bit counters, useful for profiling with TPIU */
};
2321
/* Register descriptions for up to 16 DWT comparators.  Each comparator
 * occupies a 0x10-byte stride of COMP/MASK/FUNCTION registers; the mask
 * register is described as 4 bits wide. */
static const struct dwt_reg dwt_comp[] = {
#define DWT_COMPARATOR(i) \
		{ DWT_COMP0 + 0x10 * (i), "dwt_" #i "_comp", 32, }, \
		{ DWT_MASK0 + 0x10 * (i), "dwt_" #i "_mask", 4, }, \
		{ DWT_FUNCTION0 + 0x10 * (i), "dwt_" #i "_function", 32, }
	DWT_COMPARATOR(0),
	DWT_COMPARATOR(1),
	DWT_COMPARATOR(2),
	DWT_COMPARATOR(3),
	DWT_COMPARATOR(4),
	DWT_COMPARATOR(5),
	DWT_COMPARATOR(6),
	DWT_COMPARATOR(7),
	DWT_COMPARATOR(8),
	DWT_COMPARATOR(9),
	DWT_COMPARATOR(10),
	DWT_COMPARATOR(11),
	DWT_COMPARATOR(12),
	DWT_COMPARATOR(13),
	DWT_COMPARATOR(14),
	DWT_COMPARATOR(15),
#undef DWT_COMPARATOR
};
2345
/* Accessors backing entries of the DWT register cache. */
static const struct reg_arch_type dwt_reg_type = {
	.get = cortex_m_dwt_get_reg,
	.set = cortex_m_dwt_set_reg,
};
2350
2351 static void cortex_m_dwt_addreg(struct target *t, struct reg *r, const struct dwt_reg *d)
2352 {
2353 struct dwt_reg_state *state;
2354
2355 state = calloc(1, sizeof(*state));
2356 if (!state)
2357 return;
2358 state->addr = d->addr;
2359 state->target = t;
2360
2361 r->name = d->name;
2362 r->size = d->size;
2363 r->value = state->value;
2364 r->arch_info = state;
2365 r->type = &dwt_reg_type;
2366 }
2367
/* Discover the DWT unit, allocate comparator bookkeeping and build the
 * "Cortex-M DWT registers" cache.
 *
 * A zero DWT_CTRL readback means no DWT is present; otherwise the number
 * of comparators is taken from DWT_CTRL[31:28].  On any allocation
 * failure all partial allocations are released via the fail0/fail1
 * labels (note: the gotos jump backwards into the if-bodies) and
 * dwt_num_comp is reset to zero so the unit appears absent.
 */
static void cortex_m_dwt_setup(struct cortex_m_common *cm, struct target *target)
{
	uint32_t dwtcr;
	struct reg_cache *cache;
	struct cortex_m_dwt_comparator *comparator;
	int reg;

	target_read_u32(target, DWT_CTRL, &dwtcr);
	LOG_TARGET_DEBUG(target, "DWT_CTRL: 0x%" PRIx32, dwtcr);
	if (!dwtcr) {
		LOG_TARGET_DEBUG(target, "no DWT");
		return;
	}

	target_read_u32(target, DWT_DEVARCH, &cm->dwt_devarch);
	LOG_TARGET_DEBUG(target, "DWT_DEVARCH: 0x%" PRIx32, cm->dwt_devarch);

	/* Comparator count lives in DWT_CTRL[31:28]. */
	cm->dwt_num_comp = (dwtcr >> 28) & 0xF;
	cm->dwt_comp_available = cm->dwt_num_comp;
	cm->dwt_comparator_list = calloc(cm->dwt_num_comp,
			sizeof(struct cortex_m_dwt_comparator));
	if (!cm->dwt_comparator_list) {
fail0:
		cm->dwt_num_comp = 0;
		LOG_TARGET_ERROR(target, "out of mem");
		return;
	}

	cache = calloc(1, sizeof(*cache));
	if (!cache) {
fail1:
		free(cm->dwt_comparator_list);
		goto fail0;
	}
	cache->name = "Cortex-M DWT registers";
	/* Two base registers plus COMP/MASK/FUNCTION per comparator. */
	cache->num_regs = 2 + cm->dwt_num_comp * 3;
	cache->reg_list = calloc(cache->num_regs, sizeof(*cache->reg_list));
	if (!cache->reg_list) {
		free(cache);
		goto fail1;
	}

	for (reg = 0; reg < 2; reg++)
		cortex_m_dwt_addreg(target, cache->reg_list + reg,
			dwt_base_regs + reg);

	comparator = cm->dwt_comparator_list;
	for (unsigned int i = 0; i < cm->dwt_num_comp; i++, comparator++) {
		int j;

		comparator->dwt_comparator_address = DWT_COMP0 + 0x10 * i;
		for (j = 0; j < 3; j++, reg++)
			cortex_m_dwt_addreg(target, cache->reg_list + reg,
				dwt_comp + 3 * i + j);

		/* make sure we clear any watchpoints enabled on the target */
		target_write_u32(target, comparator->dwt_comparator_address + 8, 0);
	}

	*register_get_last_cache_p(&target->reg_cache) = cache;
	cm->dwt_cache = cache;

	LOG_TARGET_DEBUG(target, "DWT dwtcr 0x%" PRIx32 ", comp %d, watch%s",
		dwtcr, cm->dwt_num_comp,
		(dwtcr & (0xf << 24)) ? " only" : "/trigger");

	/* REVISIT: if num_comp > 1, check whether comparator #1 can
	 * implement single-address data value watchpoints ... so we
	 * won't need to check it later, when asked to set one up.
	 */
}
2439
2440 static void cortex_m_dwt_free(struct target *target)
2441 {
2442 struct cortex_m_common *cm = target_to_cm(target);
2443 struct reg_cache *cache = cm->dwt_cache;
2444
2445 free(cm->dwt_comparator_list);
2446 cm->dwt_comparator_list = NULL;
2447 cm->dwt_num_comp = 0;
2448
2449 if (cache) {
2450 register_unlink_cache(&target->reg_cache, cache);
2451
2452 if (cache->reg_list) {
2453 for (size_t i = 0; i < cache->num_regs; i++)
2454 free(cache->reg_list[i].arch_info);
2455 free(cache->reg_list);
2456 }
2457 free(cache);
2458 }
2459 cm->dwt_cache = NULL;
2460 }
2461
2462 static bool cortex_m_has_tz(struct target *target)
2463 {
2464 struct armv7m_common *armv7m = target_to_armv7m(target);
2465 uint32_t dauthstatus;
2466
2467 if (armv7m->arm.arch != ARM_ARCH_V8M)
2468 return false;
2469
2470 int retval = target_read_u32(target, DAUTHSTATUS, &dauthstatus);
2471 if (retval != ERROR_OK) {
2472 LOG_WARNING("Error reading DAUTHSTATUS register");
2473 return false;
2474 }
2475 return (dauthstatus & DAUTHSTATUS_SID_MASK) != 0;
2476 }
2477
2478 #define MVFR0 0xe000ef40
2479 #define MVFR1 0xe000ef44
2480
2481 #define MVFR0_DEFAULT_M4 0x10110021
2482 #define MVFR1_DEFAULT_M4 0x11000011
2483
2484 #define MVFR0_DEFAULT_M7_SP 0x10110021
2485 #define MVFR0_DEFAULT_M7_DP 0x10110221
2486 #define MVFR1_DEFAULT_M7_SP 0x11000011
2487 #define MVFR1_DEFAULT_M7_DP 0x12000011
2488
2489 static int cortex_m_find_mem_ap(struct adiv5_dap *swjdp,
2490 struct adiv5_ap **debug_ap)
2491 {
2492 if (dap_find_get_ap(swjdp, AP_TYPE_AHB3_AP, debug_ap) == ERROR_OK)
2493 return ERROR_OK;
2494
2495 return dap_find_get_ap(swjdp, AP_TYPE_AHB5_AP, debug_ap);
2496 }
2497
/* Examine a Cortex-M target: acquire/initialize the debug MEM-AP (unless
 * this is an HLA target), identify the core from CPUID, probe FPU and
 * TrustZone features, enable debug via DHCSR, configure DEMCR/trace, and
 * set up the FPB and DWT units.  The heavy identification work runs only
 * once, guarded by target_was_examined(). */
int cortex_m_examine(struct target *target)
{
	int retval;
	uint32_t cpuid, fpcr, mvfr0, mvfr1;
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct adiv5_dap *swjdp = cortex_m->armv7m.arm.dap;
	struct armv7m_common *armv7m = target_to_armv7m(target);

	/* hla_target shares the examine handler but does not support
	 * all its calls */
	if (!armv7m->is_hla_target) {
		if (!armv7m->debug_ap) {
			if (cortex_m->apsel == DP_APSEL_INVALID) {
				/* Search for the MEM-AP */
				retval = cortex_m_find_mem_ap(swjdp, &armv7m->debug_ap);
				if (retval != ERROR_OK) {
					LOG_TARGET_ERROR(target, "Could not find MEM-AP to control the core");
					return retval;
				}
			} else {
				/* AP was pinned by configuration. */
				armv7m->debug_ap = dap_get_ap(swjdp, cortex_m->apsel);
				if (!armv7m->debug_ap) {
					LOG_ERROR("Cannot get AP");
					return ERROR_FAIL;
				}
			}
		}

		armv7m->debug_ap->memaccess_tck = 8;

		retval = mem_ap_init(armv7m->debug_ap);
		if (retval != ERROR_OK)
			return retval;
	}

	if (!target_was_examined(target)) {
		target_set_examined(target);

		/* Read from Device Identification Registers */
		retval = target_read_u32(target, CPUID, &cpuid);
		if (retval != ERROR_OK)
			return retval;

		/* Inspect implementor/part to look for recognized cores */
		unsigned int impl_part = cpuid & (ARM_CPUID_IMPLEMENTOR_MASK | ARM_CPUID_PARTNO_MASK);

		for (unsigned int n = 0; n < ARRAY_SIZE(cortex_m_parts); n++) {
			if (impl_part == cortex_m_parts[n].impl_part) {
				cortex_m->core_info = &cortex_m_parts[n];
				break;
			}
		}

		if (!cortex_m->core_info) {
			LOG_TARGET_ERROR(target, "Cortex-M CPUID: 0x%x is unrecognized", cpuid);
			return ERROR_FAIL;
		}

		armv7m->arm.arch = cortex_m->core_info->arch;

		LOG_TARGET_INFO(target, "%s r%" PRId8 "p%" PRId8 " processor detected",
				cortex_m->core_info->name,
				(uint8_t)((cpuid >> 20) & 0xf),
				(uint8_t)((cpuid >> 0) & 0xf));

		/* Cortex-M7 r0p0/r0p1 single-step erratum workaround flag. */
		cortex_m->maskints_erratum = false;
		if (impl_part == CORTEX_M7_PARTNO) {
			uint8_t rev, patch;
			rev = (cpuid >> 20) & 0xf;
			patch = (cpuid >> 0) & 0xf;
			if ((rev == 0) && (patch < 2)) {
				LOG_TARGET_WARNING(target, "Silicon bug: single stepping may enter pending exception handler!");
				cortex_m->maskints_erratum = true;
			}
		}
		LOG_TARGET_DEBUG(target, "cpuid: 0x%8.8" PRIx32 "", cpuid);

		if (cortex_m->core_info->flags & CORTEX_M_F_HAS_FPV4) {
			target_read_u32(target, MVFR0, &mvfr0);
			target_read_u32(target, MVFR1, &mvfr1);

			/* test for floating point feature on Cortex-M4 */
			if ((mvfr0 == MVFR0_DEFAULT_M4) && (mvfr1 == MVFR1_DEFAULT_M4)) {
				LOG_TARGET_DEBUG(target, "%s floating point feature FPv4_SP found", cortex_m->core_info->name);
				armv7m->fp_feature = FPV4_SP;
			}
		} else if (cortex_m->core_info->flags & CORTEX_M_F_HAS_FPV5) {
			target_read_u32(target, MVFR0, &mvfr0);
			target_read_u32(target, MVFR1, &mvfr1);

			/* test for floating point features on Cortex-M7 */
			if ((mvfr0 == MVFR0_DEFAULT_M7_SP) && (mvfr1 == MVFR1_DEFAULT_M7_SP)) {
				LOG_TARGET_DEBUG(target, "%s floating point feature FPv5_SP found", cortex_m->core_info->name);
				armv7m->fp_feature = FPV5_SP;
			} else if ((mvfr0 == MVFR0_DEFAULT_M7_DP) && (mvfr1 == MVFR1_DEFAULT_M7_DP)) {
				LOG_TARGET_DEBUG(target, "%s floating point feature FPv5_DP found", cortex_m->core_info->name);
				armv7m->fp_feature = FPV5_DP;
			}
		}

		/* VECTRESET is supported only on ARMv7-M cores */
		cortex_m->vectreset_supported = armv7m->arm.arch == ARM_ARCH_V7M;

		/* Check for FPU, otherwise mark FPU register as non-existent */
		if (armv7m->fp_feature == FP_NONE)
			for (size_t idx = ARMV7M_FPU_FIRST_REG; idx <= ARMV7M_FPU_LAST_REG; idx++)
				armv7m->arm.core_cache->reg_list[idx].exist = false;

		/* Hide the ARMv8-M security-extension registers on non-TZ parts. */
		if (!cortex_m_has_tz(target))
			for (size_t idx = ARMV8M_FIRST_REG; idx <= ARMV8M_LAST_REG; idx++)
				armv7m->arm.core_cache->reg_list[idx].exist = false;

		if (!armv7m->is_hla_target) {
			if (cortex_m->core_info->flags & CORTEX_M_F_TAR_AUTOINCR_BLOCK_4K)
				/* Cortex-M3/M4 have 4096 bytes autoincrement range,
				 * s. ARM IHI 0031C: MEM-AP 7.2.2 */
				armv7m->debug_ap->tar_autoincr_block = (1 << 12);
		}

		retval = target_read_u32(target, DCB_DHCSR, &cortex_m->dcb_dhcsr);
		if (retval != ERROR_OK)
			return retval;

		/* Don't cumulate sticky S_RESET_ST at the very first read of DHCSR
		 * as S_RESET_ST may indicate a reset that happened long time ago
		 * (most probably the power-on reset before OpenOCD was started).
		 * As we are just initializing the debug system we do not need
		 * to call cortex_m_endreset_event() in the following poll.
		 */
		if (!cortex_m->dcb_dhcsr_sticky_is_recent) {
			cortex_m->dcb_dhcsr_sticky_is_recent = true;
			if (cortex_m->dcb_dhcsr & S_RESET_ST) {
				LOG_TARGET_DEBUG(target, "reset happened some time ago, ignore");
				cortex_m->dcb_dhcsr &= ~S_RESET_ST;
			}
		}
		cortex_m_cumulate_dhcsr_sticky(cortex_m, cortex_m->dcb_dhcsr);

		if (!(cortex_m->dcb_dhcsr & C_DEBUGEN)) {
			/* Enable debug requests */
			uint32_t dhcsr = (cortex_m->dcb_dhcsr | C_DEBUGEN) & ~(C_HALT | C_STEP | C_MASKINTS);

			retval = target_write_u32(target, DCB_DHCSR, DBGKEY | (dhcsr & 0x0000FFFFUL));
			if (retval != ERROR_OK)
				return retval;
			cortex_m->dcb_dhcsr = dhcsr;
		}

		/* Configure trace modules */
		retval = target_write_u32(target, DCB_DEMCR, TRCENA | armv7m->demcr);
		if (retval != ERROR_OK)
			return retval;

		if (armv7m->trace_config.itm_deferred_config)
			armv7m_trace_itm_config(target);

		/* NOTE: FPB and DWT are both optional. */

		/* Setup FPB */
		target_read_u32(target, FP_CTRL, &fpcr);
		/* bits [14:12] and [7:4] */
		cortex_m->fp_num_code = ((fpcr >> 8) & 0x70) | ((fpcr >> 4) & 0xF);
		cortex_m->fp_num_lit = (fpcr >> 8) & 0xF;
		/* Detect flash patch revision, see RM DDI 0403E.b page C1-817.
		   Revision is zero base, fp_rev == 1 means Rev.2 ! */
		cortex_m->fp_rev = (fpcr >> 28) & 0xf;
		free(cortex_m->fp_comparator_list);
		/* NOTE(review): calloc result is not checked here — on OOM the
		 * loop below would dereference NULL; consider adding a check. */
		cortex_m->fp_comparator_list = calloc(
				cortex_m->fp_num_code + cortex_m->fp_num_lit,
				sizeof(struct cortex_m_fp_comparator));
		cortex_m->fpb_enabled = fpcr & 1;
		for (unsigned int i = 0; i < cortex_m->fp_num_code + cortex_m->fp_num_lit; i++) {
			cortex_m->fp_comparator_list[i].type =
				(i < cortex_m->fp_num_code) ? FPCR_CODE : FPCR_LITERAL;
			cortex_m->fp_comparator_list[i].fpcr_address = FP_COMP0 + 4 * i;

			/* make sure we clear any breakpoints enabled on the target */
			target_write_u32(target, cortex_m->fp_comparator_list[i].fpcr_address, 0);
		}
		LOG_TARGET_DEBUG(target, "FPB fpcr 0x%" PRIx32 ", numcode %i, numlit %i",
				fpcr,
				cortex_m->fp_num_code,
				cortex_m->fp_num_lit);

		/* Setup DWT */
		cortex_m_dwt_free(target);
		cortex_m_dwt_setup(cortex_m, target);

		/* These hardware breakpoints only work for code in flash! */
		LOG_TARGET_INFO(target, "target has %d breakpoints, %d watchpoints",
				cortex_m->fp_num_code,
				cortex_m->dwt_num_comp);
	}

	return ERROR_OK;
}
2694
2695 static int cortex_m_dcc_read(struct target *target, uint8_t *value, uint8_t *ctrl)
2696 {
2697 struct armv7m_common *armv7m = target_to_armv7m(target);
2698 uint16_t dcrdr;
2699 uint8_t buf[2];
2700 int retval;
2701
2702 retval = mem_ap_read_buf_noincr(armv7m->debug_ap, buf, 2, 1, DCB_DCRDR);
2703 if (retval != ERROR_OK)
2704 return retval;
2705
2706 dcrdr = target_buffer_get_u16(target, buf);
2707 *ctrl = (uint8_t)dcrdr;
2708 *value = (uint8_t)(dcrdr >> 8);
2709
2710 LOG_TARGET_DEBUG(target, "data 0x%x ctrl 0x%x", *value, *ctrl);
2711
2712 /* write ack back to software dcc register
2713 * signify we have read data */
2714 if (dcrdr & (1 << 0)) {
2715 target_buffer_set_u16(target, buf, 0);
2716 retval = mem_ap_write_buf_noincr(armv7m->debug_ap, buf, 2, 1, DCB_DCRDR);
2717 if (retval != ERROR_OK)
2718 return retval;
2719 }
2720
2721 return ERROR_OK;
2722 }
2723
2724 static int cortex_m_target_request_data(struct target *target,
2725 uint32_t size, uint8_t *buffer)
2726 {
2727 uint8_t data;
2728 uint8_t ctrl;
2729 uint32_t i;
2730
2731 for (i = 0; i < (size * 4); i++) {
2732 int retval = cortex_m_dcc_read(target, &data, &ctrl);
2733 if (retval != ERROR_OK)
2734 return retval;
2735 buffer[i] = data;
2736 }
2737
2738 return ERROR_OK;
2739 }
2740
2741 static int cortex_m_handle_target_request(void *priv)
2742 {
2743 struct target *target = priv;
2744 if (!target_was_examined(target))
2745 return ERROR_OK;
2746
2747 if (!target->dbg_msg_enabled)
2748 return ERROR_OK;
2749
2750 if (target->state == TARGET_RUNNING) {
2751 uint8_t data;
2752 uint8_t ctrl;
2753 int retval;
2754
2755 retval = cortex_m_dcc_read(target, &data, &ctrl);
2756 if (retval != ERROR_OK)
2757 return retval;
2758
2759 /* check if we have data */
2760 if (ctrl & (1 << 0)) {
2761 uint32_t request;
2762
2763 /* we assume target is quick enough */
2764 request = data;
2765 for (int i = 1; i <= 3; i++) {
2766 retval = cortex_m_dcc_read(target, &data, &ctrl);
2767 if (retval != ERROR_OK)
2768 return retval;
2769 request |= ((uint32_t)data << (i * 8));
2770 }
2771 target_request(target, request);
2772 }
2773 }
2774
2775 return ERROR_OK;
2776 }
2777
2778 static int cortex_m_init_arch_info(struct target *target,
2779 struct cortex_m_common *cortex_m, struct adiv5_dap *dap)
2780 {
2781 struct armv7m_common *armv7m = &cortex_m->armv7m;
2782
2783 armv7m_init_arch_info(target, armv7m);
2784
2785 /* default reset mode is to use srst if fitted
2786 * if not it will use CORTEX_M3_RESET_VECTRESET */
2787 cortex_m->soft_reset_config = CORTEX_M_RESET_VECTRESET;
2788
2789 armv7m->arm.dap = dap;
2790
2791 /* register arch-specific functions */
2792 armv7m->examine_debug_reason = cortex_m_examine_debug_reason;
2793
2794 armv7m->post_debug_entry = NULL;
2795
2796 armv7m->pre_restore_context = NULL;
2797
2798 armv7m->load_core_reg_u32 = cortex_m_load_core_reg_u32;
2799 armv7m->store_core_reg_u32 = cortex_m_store_core_reg_u32;
2800
2801 target_register_timer_callback(cortex_m_handle_target_request, 1,
2802 TARGET_TIMER_TYPE_PERIODIC, target);
2803
2804 return ERROR_OK;
2805 }
2806
2807 static int cortex_m_target_create(struct target *target, Jim_Interp *interp)
2808 {
2809 struct adiv5_private_config *pc;
2810
2811 pc = (struct adiv5_private_config *)target->private_config;
2812 if (adiv5_verify_config(pc) != ERROR_OK)
2813 return ERROR_FAIL;
2814
2815 struct cortex_m_common *cortex_m = calloc(1, sizeof(struct cortex_m_common));
2816 if (!cortex_m) {
2817 LOG_TARGET_ERROR(target, "No memory creating target");
2818 return ERROR_FAIL;
2819 }
2820
2821 cortex_m->common_magic = CORTEX_M_COMMON_MAGIC;
2822 cortex_m->apsel = pc->ap_num;
2823
2824 cortex_m_init_arch_info(target, cortex_m, pc->dap);
2825
2826 return ERROR_OK;
2827 }
2828
2829 /*--------------------------------------------------------------------------*/
2830
2831 static int cortex_m_verify_pointer(struct command_invocation *cmd,
2832 struct cortex_m_common *cm)
2833 {
2834 if (!is_cortex_m_with_dap_access(cm)) {
2835 command_print(cmd, "target is not a Cortex-M");
2836 return ERROR_TARGET_INVALID;
2837 }
2838 return ERROR_OK;
2839 }
2840
2841 /*
2842 * Only stuff below this line should need to verify that its target
2843 * is a Cortex-M3. Everything else should have indirected through the
2844 * cortexm3_target structure, which is only used with CM3 targets.
2845 */
2846
/* "cortex_m vector_catch" command: show or set which fault/reset vectors
 * halt the core via DEMCR vector-catch bits.  Accepts "all", "none", or a
 * list of individual vector names; with no arguments it prints the current
 * catch/ignore state of every vector. */
COMMAND_HANDLER(handle_cortex_m_vector_catch_command)
{
	struct target *target = get_current_target(CMD_CTX);
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = &cortex_m->armv7m;
	uint32_t demcr = 0;
	int retval;

	/* Name-to-bit table for the DEMCR vector catch flags. */
	static const struct {
		char name[10];
		unsigned mask;
	} vec_ids[] = {
		{ "hard_err", VC_HARDERR, },
		{ "int_err", VC_INTERR, },
		{ "bus_err", VC_BUSERR, },
		{ "state_err", VC_STATERR, },
		{ "chk_err", VC_CHKERR, },
		{ "nocp_err", VC_NOCPERR, },
		{ "mm_err", VC_MMERR, },
		{ "reset", VC_CORERESET, },
	};

	retval = cortex_m_verify_pointer(CMD, cortex_m);
	if (retval != ERROR_OK)
		return retval;

	if (!target_was_examined(target)) {
		LOG_TARGET_ERROR(target, "Target not examined yet");
		return ERROR_FAIL;
	}

	retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DEMCR, &demcr);
	if (retval != ERROR_OK)
		return retval;

	if (CMD_ARGC > 0) {
		unsigned catch = 0;

		/* "all"/"none" shortcuts jump straight to the DEMCR update. */
		if (CMD_ARGC == 1) {
			if (strcmp(CMD_ARGV[0], "all") == 0) {
				catch = VC_HARDERR | VC_INTERR | VC_BUSERR
					| VC_STATERR | VC_CHKERR | VC_NOCPERR
					| VC_MMERR | VC_CORERESET;
				goto write;
			} else if (strcmp(CMD_ARGV[0], "none") == 0)
				goto write;
		}
		/* Otherwise OR together the named vectors. */
		while (CMD_ARGC-- > 0) {
			unsigned i;
			for (i = 0; i < ARRAY_SIZE(vec_ids); i++) {
				if (strcmp(CMD_ARGV[CMD_ARGC], vec_ids[i].name) != 0)
					continue;
				catch |= vec_ids[i].mask;
				break;
			}
			if (i == ARRAY_SIZE(vec_ids)) {
				LOG_TARGET_ERROR(target, "No CM3 vector '%s'", CMD_ARGV[CMD_ARGC]);
				return ERROR_COMMAND_SYNTAX_ERROR;
			}
		}
write:
		/* For now, armv7m->demcr only stores vector catch flags. */
		armv7m->demcr = catch;

		demcr &= ~0xffff;
		demcr |= catch;

		/* write, but don't assume it stuck (why not??) */
		retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DEMCR, demcr);
		if (retval != ERROR_OK)
			return retval;
		retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DEMCR, &demcr);
		if (retval != ERROR_OK)
			return retval;

		/* FIXME be sure to clear DEMCR on clean server shutdown.
		 * Otherwise the vector catch hardware could fire when there's
		 * no debugger hooked up, causing much confusion...
		 */
	}

	/* Report the (re-read) state of every vector catch flag. */
	for (unsigned i = 0; i < ARRAY_SIZE(vec_ids); i++) {
		command_print(CMD, "%9s: %s", vec_ids[i].name,
			(demcr & vec_ids[i].mask) ? "catch" : "ignore");
	}

	return ERROR_OK;
}
2935
2936 COMMAND_HANDLER(handle_cortex_m_mask_interrupts_command)
2937 {
2938 struct target *target = get_current_target(CMD_CTX);
2939 struct cortex_m_common *cortex_m = target_to_cm(target);
2940 int retval;
2941
2942 static const struct nvp nvp_maskisr_modes[] = {
2943 { .name = "auto", .value = CORTEX_M_ISRMASK_AUTO },
2944 { .name = "off", .value = CORTEX_M_ISRMASK_OFF },
2945 { .name = "on", .value = CORTEX_M_ISRMASK_ON },
2946 { .name = "steponly", .value = CORTEX_M_ISRMASK_STEPONLY },
2947 { .name = NULL, .value = -1 },
2948 };
2949 const struct nvp *n;
2950
2951
2952 retval = cortex_m_verify_pointer(CMD, cortex_m);
2953 if (retval != ERROR_OK)
2954 return retval;
2955
2956 if (target->state != TARGET_HALTED) {
2957 command_print(CMD, "Error: target must be stopped for \"%s\" command", CMD_NAME);
2958 return ERROR_TARGET_NOT_HALTED;
2959 }
2960
2961 if (CMD_ARGC > 0) {
2962 n = nvp_name2value(nvp_maskisr_modes, CMD_ARGV[0]);
2963 if (!n->name)
2964 return ERROR_COMMAND_SYNTAX_ERROR;
2965 cortex_m->isrmasking_mode = n->value;
2966 cortex_m_set_maskints_for_halt(target);
2967 }
2968
2969 n = nvp_value2name(nvp_maskisr_modes, cortex_m->isrmasking_mode);
2970 command_print(CMD, "cortex_m interrupt mask %s", n->name);
2971
2972 return ERROR_OK;
2973 }
2974
2975 COMMAND_HANDLER(handle_cortex_m_reset_config_command)
2976 {
2977 struct target *target = get_current_target(CMD_CTX);
2978 struct cortex_m_common *cortex_m = target_to_cm(target);
2979 int retval;
2980 char *reset_config;
2981
2982 retval = cortex_m_verify_pointer(CMD, cortex_m);
2983 if (retval != ERROR_OK)
2984 return retval;
2985
2986 if (CMD_ARGC > 0) {
2987 if (strcmp(*CMD_ARGV, "sysresetreq") == 0)
2988 cortex_m->soft_reset_config = CORTEX_M_RESET_SYSRESETREQ;
2989
2990 else if (strcmp(*CMD_ARGV, "vectreset") == 0) {
2991 if (target_was_examined(target)
2992 && !cortex_m->vectreset_supported)
2993 LOG_TARGET_WARNING(target, "VECTRESET is not supported on your Cortex-M core!");
2994 else
2995 cortex_m->soft_reset_config = CORTEX_M_RESET_VECTRESET;
2996
2997 } else
2998 return ERROR_COMMAND_SYNTAX_ERROR;
2999 }
3000
3001 switch (cortex_m->soft_reset_config) {
3002 case CORTEX_M_RESET_SYSRESETREQ:
3003 reset_config = "sysresetreq";
3004 break;
3005
3006 case CORTEX_M_RESET_VECTRESET:
3007 reset_config = "vectreset";
3008 break;
3009
3010 default:
3011 reset_config = "unknown";
3012 break;
3013 }
3014
3015 command_print(CMD, "cortex_m reset_config %s", reset_config);
3016
3017 return ERROR_OK;
3018 }
3019
/* Commands registered under the "cortex_m" group; available in EXEC mode
 * (except reset_config, usable at any time), plus the shared SMP commands. */
static const struct command_registration cortex_m_exec_command_handlers[] = {
	{
		.name = "maskisr",
		.handler = handle_cortex_m_mask_interrupts_command,
		.mode = COMMAND_EXEC,
		.help = "mask cortex_m interrupts",
		.usage = "['auto'|'on'|'off'|'steponly']",
	},
	{
		.name = "vector_catch",
		.handler = handle_cortex_m_vector_catch_command,
		.mode = COMMAND_EXEC,
		.help = "configure hardware vectors to trigger debug entry",
		.usage = "['all'|'none'|('bus_err'|'chk_err'|...)*]",
	},
	{
		.name = "reset_config",
		.handler = handle_cortex_m_reset_config_command,
		.mode = COMMAND_ANY,
		.help = "configure software reset handling",
		.usage = "['sysresetreq'|'vectreset']",
	},
	{
		/* Pull in the generic SMP command set (smp on/off/...). */
		.chain = smp_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
/* Top-level command set for Cortex-M targets: chains in the generic ARMv7-M
 * and trace commands, the deprecated TPIU commands, the "cortex_m" group
 * above, and the RTT target commands. */
static const struct command_registration cortex_m_command_handlers[] = {
	{
		.chain = armv7m_command_handlers,
	},
	{
		.chain = armv7m_trace_command_handlers,
	},
	/* START_DEPRECATED_TPIU */
	{
		.chain = arm_tpiu_deprecated_command_handlers,
	},
	/* END_DEPRECATED_TPIU */
	{
		.name = "cortex_m",
		.mode = COMMAND_EXEC,
		.help = "Cortex-M command group",
		.usage = "",
		.chain = cortex_m_exec_command_handlers,
	},
	{
		.chain = rtt_target_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
3071
/* Target-type vtable for Cortex-M cores: Cortex-M-specific run control,
 * breakpoints/watchpoints and memory access, with the generic ARMv7-M
 * implementations for registers, algorithms and memory checks. */
struct target_type cortexm_target = {
	.name = "cortex_m",

	.poll = cortex_m_poll,
	.arch_state = armv7m_arch_state,

	.target_request_data = cortex_m_target_request_data,

	/* Run control */
	.halt = cortex_m_halt,
	.resume = cortex_m_resume,
	.step = cortex_m_step,

	/* Reset handling (sysresetreq/vectreset per reset_config) */
	.assert_reset = cortex_m_assert_reset,
	.deassert_reset = cortex_m_deassert_reset,
	.soft_reset_halt = cortex_m_soft_reset_halt,

	/* GDB server support */
	.get_gdb_arch = arm_get_gdb_arch,
	.get_gdb_reg_list = armv7m_get_gdb_reg_list,

	/* Memory access */
	.read_memory = cortex_m_read_memory,
	.write_memory = cortex_m_write_memory,
	.checksum_memory = armv7m_checksum_memory,
	.blank_check_memory = armv7m_blank_check_memory,

	/* On-target algorithm execution (flash drivers etc.) */
	.run_algorithm = armv7m_run_algorithm,
	.start_algorithm = armv7m_start_algorithm,
	.wait_algorithm = armv7m_wait_algorithm,

	/* Breakpoints (FPB) and watchpoints (DWT) */
	.add_breakpoint = cortex_m_add_breakpoint,
	.remove_breakpoint = cortex_m_remove_breakpoint,
	.add_watchpoint = cortex_m_add_watchpoint,
	.remove_watchpoint = cortex_m_remove_watchpoint,
	.hit_watchpoint = cortex_m_hit_watchpoint,

	/* Lifecycle */
	.commands = cortex_m_command_handlers,
	.target_create = cortex_m_target_create,
	.target_jim_configure = adiv5_jim_configure,
	.init_target = cortex_m_init_target,
	.examine = cortex_m_examine,
	.deinit_target = cortex_m_deinit_target,

	.profiling = cortex_m_profiling,
};

Linking to existing account procedure

If you already have an account and want to add another login method, you MUST first sign in with your existing account, then change the URL to https://review.openocd.org/login/?link to return to this page; this time it will work for linking. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)