jtag: linuxgpiod: drop extra parenthesis
[openocd.git] / src / target / cortex_m.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2
3 /***************************************************************************
4 * Copyright (C) 2005 by Dominic Rath *
5 * Dominic.Rath@gmx.de *
6 * *
7 * Copyright (C) 2006 by Magnus Lundin *
8 * lundin@mlu.mine.nu *
9 * *
10 * Copyright (C) 2008 by Spencer Oliver *
11 * spen@spen-soft.co.uk *
12 * *
13 * *
14 * Cortex-M3(tm) TRM, ARM DDI 0337E (r1p1) and 0337G (r2p0) *
15 * *
16 ***************************************************************************/
17 #ifdef HAVE_CONFIG_H
18 #include "config.h"
19 #endif
20
21 #include "jtag/interface.h"
22 #include "breakpoints.h"
23 #include "cortex_m.h"
24 #include "target_request.h"
25 #include "target_type.h"
26 #include "arm_adi_v5.h"
27 #include "arm_disassembler.h"
28 #include "register.h"
29 #include "arm_opcodes.h"
30 #include "arm_semihosting.h"
31 #include "smp.h"
32 #include <helper/nvp.h>
33 #include <helper/time_support.h>
34 #include <rtt/rtt.h>
35
36 /* NOTE: most of this should work fine for the Cortex-M1 and
37 * Cortex-M0 cores too, although they're ARMv6-M not ARMv7-M.
38 * Some differences: M0/M1 doesn't have FPB remapping or the
39 * DWT tracing/profiling support. (So the cycle counter will
40 * not be usable; the other stuff isn't currently used here.)
41 *
42 * Although there are some workarounds for errata seen only in r0p0
43 * silicon, such old parts are hard to find and thus not much tested
44 * any longer.
45 */
46
47 /* Timeout for register r/w */
48 #define DHCSR_S_REGRDY_TIMEOUT (500)
49
/* Supported Cortex-M Cores
 *
 * Lookup table keyed by CPUID implementer/part number (.impl_part).
 * .name is the human-readable core name, .arch the ARM architecture
 * profile, and .flags optional capability bits (FPU variant present,
 * TAR auto-increment block size quirk).
 */
static const struct cortex_m_part_info cortex_m_parts[] = {
	{
		.impl_part = CORTEX_M0_PARTNO,
		.name = "Cortex-M0",
		.arch = ARM_ARCH_V6M,
	},
	{
		.impl_part = CORTEX_M0P_PARTNO,
		.name = "Cortex-M0+",
		.arch = ARM_ARCH_V6M,
	},
	{
		.impl_part = CORTEX_M1_PARTNO,
		.name = "Cortex-M1",
		.arch = ARM_ARCH_V6M,
	},
	{
		.impl_part = CORTEX_M3_PARTNO,
		.name = "Cortex-M3",
		.arch = ARM_ARCH_V7M,
		.flags = CORTEX_M_F_TAR_AUTOINCR_BLOCK_4K,
	},
	{
		.impl_part = CORTEX_M4_PARTNO,
		.name = "Cortex-M4",
		.arch = ARM_ARCH_V7M,
		.flags = CORTEX_M_F_HAS_FPV4 | CORTEX_M_F_TAR_AUTOINCR_BLOCK_4K,
	},
	{
		.impl_part = CORTEX_M7_PARTNO,
		.name = "Cortex-M7",
		.arch = ARM_ARCH_V7M,
		.flags = CORTEX_M_F_HAS_FPV5,
	},
	{
		.impl_part = CORTEX_M23_PARTNO,
		.name = "Cortex-M23",
		.arch = ARM_ARCH_V8M,
	},
	{
		.impl_part = CORTEX_M33_PARTNO,
		.name = "Cortex-M33",
		.arch = ARM_ARCH_V8M,
		.flags = CORTEX_M_F_HAS_FPV5,
	},
	{
		.impl_part = CORTEX_M35P_PARTNO,
		.name = "Cortex-M35P",
		.arch = ARM_ARCH_V8M,
		.flags = CORTEX_M_F_HAS_FPV5,
	},
	{
		.impl_part = CORTEX_M55_PARTNO,
		.name = "Cortex-M55",
		.arch = ARM_ARCH_V8M,
		.flags = CORTEX_M_F_HAS_FPV5,
	},
	{
		.impl_part = CORTEX_M85_PARTNO,
		.name = "Cortex-M85",
		.arch = ARM_ARCH_V8M,
		.flags = CORTEX_M_F_HAS_FPV5,
	},
	{
		.impl_part = STAR_MC1_PARTNO,
		.name = "STAR-MC1",
		.arch = ARM_ARCH_V8M,
		.flags = CORTEX_M_F_HAS_FPV5,
	},
	{
		.impl_part = INFINEON_SLX2_PARTNO,
		.name = "Infineon-SLx2",
		.arch = ARM_ARCH_V8M,
	},
	{
		.impl_part = REALTEK_M200_PARTNO,
		.name = "Real-M200 (KM0)",
		.arch = ARM_ARCH_V8M,
	},
	{
		.impl_part = REALTEK_M300_PARTNO,
		.name = "Real-M300 (KM4)",
		.arch = ARM_ARCH_V8M,
		.flags = CORTEX_M_F_HAS_FPV5,
	},
};
137
138 /* forward declarations */
139 static int cortex_m_store_core_reg_u32(struct target *target,
140 uint32_t num, uint32_t value);
141 static void cortex_m_dwt_free(struct target *target);
142
/** The DCB DHCSR register contains the S_RETIRE_ST and S_RESET_ST bits,
 * which are cleared on a read.  Call this helper each time DHCSR is read
 * so that an S_RESET_ST seen in any read is preserved (accumulated into
 * cortex_m->dcb_dhcsr_cumulated_sticky) even after later reads clear it
 * in the hardware register.
 */
static inline void cortex_m_cumulate_dhcsr_sticky(struct cortex_m_common *cortex_m,
	uint32_t dhcsr)
{
	/* OR-accumulate: sticky bits are only ever added here, never cleared */
	cortex_m->dcb_dhcsr_cumulated_sticky |= dhcsr;
}
152
153 /** Read DCB DHCSR register to cortex_m->dcb_dhcsr and cumulate
154 * sticky bits in cortex_m->dcb_dhcsr_cumulated_sticky
155 */
156 static int cortex_m_read_dhcsr_atomic_sticky(struct target *target)
157 {
158 struct cortex_m_common *cortex_m = target_to_cm(target);
159 struct armv7m_common *armv7m = target_to_armv7m(target);
160
161 int retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DHCSR,
162 &cortex_m->dcb_dhcsr);
163 if (retval != ERROR_OK)
164 return retval;
165
166 cortex_m_cumulate_dhcsr_sticky(cortex_m, cortex_m->dcb_dhcsr);
167 return ERROR_OK;
168 }
169
/** Read one core register, selected by DCRSR register selector, into *value.
 *
 * Requests the transfer by writing regsel to DCB_DCRSR, then polls
 * DHCSR.S_REGRDY (up to DHCSR_S_REGRDY_TIMEOUT ms, calling keep_alive()
 * between iterations) while pre-reading DCB_DCRDR.  If any polling
 * iteration was needed, cortex_m->slow_register_read is set so callers
 * switch to the slow read path.  DCRDR is saved and restored around the
 * access when the emulated DCC channel is active.
 *
 * Returns ERROR_OK, a DAP error, or ERROR_TIMEOUT_REACHED.
 */
static int cortex_m_load_core_reg_u32(struct target *target,
	uint32_t regsel, uint32_t *value)
{
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = target_to_armv7m(target);
	int retval;
	uint32_t dcrdr, tmp_value;
	int64_t then;

	/* because the DCB_DCRDR is used for the emulated dcc channel
	 * we have to save/restore the DCB_DCRDR when used */
	if (target->dbg_msg_enabled) {
		retval = mem_ap_read_u32(armv7m->debug_ap, DCB_DCRDR, &dcrdr);
		if (retval != ERROR_OK)
			return retval;
	}

	/* request the register transfer */
	retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DCRSR, regsel);
	if (retval != ERROR_OK)
		return retval;

	/* check if value from register is ready and pre-read it */
	then = timeval_ms();
	while (1) {
		retval = mem_ap_read_u32(armv7m->debug_ap, DCB_DHCSR,
			&cortex_m->dcb_dhcsr);
		if (retval != ERROR_OK)
			return retval;
		retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DCRDR,
			&tmp_value);
		if (retval != ERROR_OK)
			return retval;
		cortex_m_cumulate_dhcsr_sticky(cortex_m, cortex_m->dcb_dhcsr);
		if (cortex_m->dcb_dhcsr & S_REGRDY)
			break;
		cortex_m->slow_register_read = true; /* Polling (still) needed. */
		if (timeval_ms() > then + DHCSR_S_REGRDY_TIMEOUT) {
			LOG_TARGET_ERROR(target, "Timeout waiting for DCRDR transfer ready");
			return ERROR_TIMEOUT_REACHED;
		}
		keep_alive();
	}

	/* DCRDR read in the same iteration that saw S_REGRDY is the value */
	*value = tmp_value;

	if (target->dbg_msg_enabled) {
		/* restore DCB_DCRDR - this needs to be in a separate
		 * transaction otherwise the emulated DCC channel breaks */
		if (retval == ERROR_OK)
			retval = mem_ap_write_atomic_u32(armv7m->debug_ap, DCB_DCRDR, dcrdr);
	}

	return retval;
}
224
225 static int cortex_m_slow_read_all_regs(struct target *target)
226 {
227 struct cortex_m_common *cortex_m = target_to_cm(target);
228 struct armv7m_common *armv7m = target_to_armv7m(target);
229 const unsigned int num_regs = armv7m->arm.core_cache->num_regs;
230
231 /* Opportunistically restore fast read, it'll revert to slow
232 * if any register needed polling in cortex_m_load_core_reg_u32(). */
233 cortex_m->slow_register_read = false;
234
235 for (unsigned int reg_id = 0; reg_id < num_regs; reg_id++) {
236 struct reg *r = &armv7m->arm.core_cache->reg_list[reg_id];
237 if (r->exist) {
238 int retval = armv7m->arm.read_core_reg(target, r, reg_id, ARM_MODE_ANY);
239 if (retval != ERROR_OK)
240 return retval;
241 }
242 }
243
244 if (!cortex_m->slow_register_read)
245 LOG_TARGET_DEBUG(target, "Switching back to fast register reads");
246
247 return ERROR_OK;
248 }
249
250 static int cortex_m_queue_reg_read(struct target *target, uint32_t regsel,
251 uint32_t *reg_value, uint32_t *dhcsr)
252 {
253 struct armv7m_common *armv7m = target_to_armv7m(target);
254 int retval;
255
256 retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DCRSR, regsel);
257 if (retval != ERROR_OK)
258 return retval;
259
260 retval = mem_ap_read_u32(armv7m->debug_ap, DCB_DHCSR, dhcsr);
261 if (retval != ERROR_OK)
262 return retval;
263
264 return mem_ap_read_u32(armv7m->debug_ap, DCB_DCRDR, reg_value);
265 }
266
/** Read all core-cache registers in one queued DAP transaction.
 *
 * Phase 1 queues DCRSR/DHCSR/DCRDR accesses for every existing register
 * (two words per 64-bit FP register), phase 2 runs the queue, phase 3
 * verifies every queued DHCSR snapshot had S_REGRDY set, and phase 4
 * unpacks the values into the register cache.
 *
 * Returns ERROR_TIMEOUT_REACHED if any register was not ready, signalling
 * the caller to fall back to the slow (polled) read path.
 */
static int cortex_m_fast_read_all_regs(struct target *target)
{
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = target_to_armv7m(target);
	int retval;
	uint32_t dcrdr;

	/* because the DCB_DCRDR is used for the emulated dcc channel
	 * we have to save/restore the DCB_DCRDR when used */
	if (target->dbg_msg_enabled) {
		retval = mem_ap_read_u32(armv7m->debug_ap, DCB_DCRDR, &dcrdr);
		if (retval != ERROR_OK)
			return retval;
	}

	const unsigned int num_regs = armv7m->arm.core_cache->num_regs;
	const unsigned int n_r32 = ARMV7M_LAST_REG - ARMV7M_CORE_FIRST_REG + 1
		+ ARMV7M_FPU_LAST_REG - ARMV7M_FPU_FIRST_REG + 1;
	/* we need one 32-bit word for each register except FP D0..D15, which
	 * need two words */
	uint32_t r_vals[n_r32];
	uint32_t dhcsr[n_r32];

	unsigned int wi = 0; /* write index to r_vals and dhcsr arrays */
	unsigned int reg_id; /* register index in the reg_list, ARMV7M_R0... */
	for (reg_id = 0; reg_id < num_regs; reg_id++) {
		struct reg *r = &armv7m->arm.core_cache->reg_list[reg_id];
		if (!r->exist)
			continue; /* skip non existent registers */

		if (r->size <= 8) {
			/* Any 8-bit or shorter register is unpacked from a 32-bit
			 * container register. Skip it now. */
			continue;
		}

		uint32_t regsel = armv7m_map_id_to_regsel(reg_id);
		retval = cortex_m_queue_reg_read(target, regsel, &r_vals[wi],
			&dhcsr[wi]);
		if (retval != ERROR_OK)
			return retval;
		wi++;

		assert(r->size == 32 || r->size == 64);
		if (r->size == 32)
			continue; /* done with 32-bit register */

		assert(reg_id >= ARMV7M_FPU_FIRST_REG && reg_id <= ARMV7M_FPU_LAST_REG);
		/* the odd part of FP register (S1, S3...) */
		retval = cortex_m_queue_reg_read(target, regsel + 1, &r_vals[wi],
			&dhcsr[wi]);
		if (retval != ERROR_OK)
			return retval;
		wi++;
	}

	assert(wi <= n_r32);

	/* run everything queued above in one transaction */
	retval = dap_run(armv7m->debug_ap->dap);
	if (retval != ERROR_OK)
		return retval;

	if (target->dbg_msg_enabled) {
		/* restore DCB_DCRDR - this needs to be in a separate
		 * transaction otherwise the emulated DCC channel breaks */
		retval = mem_ap_write_atomic_u32(armv7m->debug_ap, DCB_DCRDR, dcrdr);
		if (retval != ERROR_OK)
			return retval;
	}

	/* verify each queued transfer was ready when its DHCSR was sampled */
	bool not_ready = false;
	for (unsigned int i = 0; i < wi; i++) {
		if ((dhcsr[i] & S_REGRDY) == 0) {
			not_ready = true;
			LOG_TARGET_DEBUG(target, "Register %u was not ready during fast read", i);
		}
		cortex_m_cumulate_dhcsr_sticky(cortex_m, dhcsr[i]);
	}

	if (not_ready) {
		/* Any register was not ready,
		 * fall back to slow read with S_REGRDY polling */
		return ERROR_TIMEOUT_REACHED;
	}

	LOG_TARGET_DEBUG(target, "read %u 32-bit registers", wi);

	unsigned int ri = 0; /* read index from r_vals array */
	for (reg_id = 0; reg_id < num_regs; reg_id++) {
		struct reg *r = &armv7m->arm.core_cache->reg_list[reg_id];
		if (!r->exist)
			continue; /* skip non existent registers */

		r->dirty = false;

		unsigned int reg32_id;
		uint32_t offset;
		if (armv7m_map_reg_packing(reg_id, &reg32_id, &offset)) {
			/* Unpack a partial register from 32-bit container register */
			struct reg *r32 = &armv7m->arm.core_cache->reg_list[reg32_id];

			/* The container register ought to precede all regs unpacked
			 * from it in the reg_list. So the value should be ready
			 * to unpack */
			assert(r32->valid);
			buf_cpy(r32->value + offset, r->value, r->size);

		} else {
			assert(r->size == 32 || r->size == 64);
			buf_set_u32(r->value, 0, 32, r_vals[ri++]);

			if (r->size == 64) {
				assert(reg_id >= ARMV7M_FPU_FIRST_REG && reg_id <= ARMV7M_FPU_LAST_REG);
				/* the odd part of FP register (S1, S3...) */
				buf_set_u32(r->value + 4, 0, 32, r_vals[ri++]);
			}
		}
		r->valid = true;
	}
	/* every queued word must have been consumed */
	assert(ri == wi);

	return retval;
}
390
/** Write one core register, selected by DCRSR register selector.
 *
 * Writes the value to DCB_DCRDR, triggers the transfer by writing
 * regsel | DCRSR_WNR to DCB_DCRSR, then polls DHCSR.S_REGRDY (up to
 * DHCSR_S_REGRDY_TIMEOUT ms, with keep_alive()) until the transfer
 * completes.  DCRDR is saved and restored around the access when the
 * emulated DCC channel is active.
 *
 * Returns ERROR_OK, a DAP error, or ERROR_TIMEOUT_REACHED.
 */
static int cortex_m_store_core_reg_u32(struct target *target,
	uint32_t regsel, uint32_t value)
{
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = target_to_armv7m(target);
	int retval;
	uint32_t dcrdr;
	int64_t then;

	/* because the DCB_DCRDR is used for the emulated dcc channel
	 * we have to save/restore the DCB_DCRDR when used */
	if (target->dbg_msg_enabled) {
		retval = mem_ap_read_u32(armv7m->debug_ap, DCB_DCRDR, &dcrdr);
		if (retval != ERROR_OK)
			return retval;
	}

	/* stage the value to transfer */
	retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DCRDR, value);
	if (retval != ERROR_OK)
		return retval;

	/* DCRSR_WNR selects the write direction of the transfer */
	retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DCRSR, regsel | DCRSR_WNR);
	if (retval != ERROR_OK)
		return retval;

	/* check if value is written into register */
	then = timeval_ms();
	while (1) {
		retval = cortex_m_read_dhcsr_atomic_sticky(target);
		if (retval != ERROR_OK)
			return retval;
		if (cortex_m->dcb_dhcsr & S_REGRDY)
			break;
		if (timeval_ms() > then + DHCSR_S_REGRDY_TIMEOUT) {
			LOG_TARGET_ERROR(target, "Timeout waiting for DCRDR transfer ready");
			return ERROR_TIMEOUT_REACHED;
		}
		keep_alive();
	}

	if (target->dbg_msg_enabled) {
		/* restore DCB_DCRDR - this needs to be in a separate
		 * transaction otherwise the emulated DCC channel breaks */
		if (retval == ERROR_OK)
			retval = mem_ap_write_atomic_u32(armv7m->debug_ap, DCB_DCRDR, dcrdr);
	}

	return retval;
}
440
441 static int cortex_m_write_debug_halt_mask(struct target *target,
442 uint32_t mask_on, uint32_t mask_off)
443 {
444 struct cortex_m_common *cortex_m = target_to_cm(target);
445 struct armv7m_common *armv7m = &cortex_m->armv7m;
446
447 /* mask off status bits */
448 cortex_m->dcb_dhcsr &= ~((0xFFFFul << 16) | mask_off);
449 /* create new register mask */
450 cortex_m->dcb_dhcsr |= DBGKEY | C_DEBUGEN | mask_on;
451
452 return mem_ap_write_atomic_u32(armv7m->debug_ap, DCB_DHCSR, cortex_m->dcb_dhcsr);
453 }
454
455 static int cortex_m_set_maskints(struct target *target, bool mask)
456 {
457 struct cortex_m_common *cortex_m = target_to_cm(target);
458 if (!!(cortex_m->dcb_dhcsr & C_MASKINTS) != mask)
459 return cortex_m_write_debug_halt_mask(target, mask ? C_MASKINTS : 0, mask ? 0 : C_MASKINTS);
460 else
461 return ERROR_OK;
462 }
463
464 static int cortex_m_set_maskints_for_halt(struct target *target)
465 {
466 struct cortex_m_common *cortex_m = target_to_cm(target);
467 switch (cortex_m->isrmasking_mode) {
468 case CORTEX_M_ISRMASK_AUTO:
469 /* interrupts taken at resume, whether for step or run -> no mask */
470 return cortex_m_set_maskints(target, false);
471
472 case CORTEX_M_ISRMASK_OFF:
473 /* interrupts never masked */
474 return cortex_m_set_maskints(target, false);
475
476 case CORTEX_M_ISRMASK_ON:
477 /* interrupts always masked */
478 return cortex_m_set_maskints(target, true);
479
480 case CORTEX_M_ISRMASK_STEPONLY:
481 /* interrupts masked for single step only -> mask now if MASKINTS
482 * erratum, otherwise only mask before stepping */
483 return cortex_m_set_maskints(target, cortex_m->maskints_erratum);
484 }
485 return ERROR_OK;
486 }
487
488 static int cortex_m_set_maskints_for_run(struct target *target)
489 {
490 switch (target_to_cm(target)->isrmasking_mode) {
491 case CORTEX_M_ISRMASK_AUTO:
492 /* interrupts taken at resume, whether for step or run -> no mask */
493 return cortex_m_set_maskints(target, false);
494
495 case CORTEX_M_ISRMASK_OFF:
496 /* interrupts never masked */
497 return cortex_m_set_maskints(target, false);
498
499 case CORTEX_M_ISRMASK_ON:
500 /* interrupts always masked */
501 return cortex_m_set_maskints(target, true);
502
503 case CORTEX_M_ISRMASK_STEPONLY:
504 /* interrupts masked for single step only -> no mask */
505 return cortex_m_set_maskints(target, false);
506 }
507 return ERROR_OK;
508 }
509
510 static int cortex_m_set_maskints_for_step(struct target *target)
511 {
512 switch (target_to_cm(target)->isrmasking_mode) {
513 case CORTEX_M_ISRMASK_AUTO:
514 /* the auto-interrupt should already be done -> mask */
515 return cortex_m_set_maskints(target, true);
516
517 case CORTEX_M_ISRMASK_OFF:
518 /* interrupts never masked */
519 return cortex_m_set_maskints(target, false);
520
521 case CORTEX_M_ISRMASK_ON:
522 /* interrupts always masked */
523 return cortex_m_set_maskints(target, true);
524
525 case CORTEX_M_ISRMASK_STEPONLY:
526 /* interrupts masked for single step only -> mask */
527 return cortex_m_set_maskints(target, true);
528 }
529 return ERROR_OK;
530 }
531
532 static int cortex_m_clear_halt(struct target *target)
533 {
534 struct cortex_m_common *cortex_m = target_to_cm(target);
535 struct armv7m_common *armv7m = &cortex_m->armv7m;
536 int retval;
537
538 /* clear step if any */
539 cortex_m_write_debug_halt_mask(target, C_HALT, C_STEP);
540
541 /* Read Debug Fault Status Register */
542 retval = mem_ap_read_atomic_u32(armv7m->debug_ap, NVIC_DFSR, &cortex_m->nvic_dfsr);
543 if (retval != ERROR_OK)
544 return retval;
545
546 /* Clear Debug Fault Status */
547 retval = mem_ap_write_atomic_u32(armv7m->debug_ap, NVIC_DFSR, cortex_m->nvic_dfsr);
548 if (retval != ERROR_OK)
549 return retval;
550 LOG_TARGET_DEBUG(target, "NVIC_DFSR 0x%" PRIx32 "", cortex_m->nvic_dfsr);
551
552 return ERROR_OK;
553 }
554
/** Execute a single instruction on a halted core.
 *
 * Sets C_MASKINTS first (if not already set) in a separate DHCSR write,
 * then sets C_STEP while clearing C_HALT, and finally restores DHCSR via
 * cortex_m_clear_halt().  Errors from the final restore are intentionally
 * not propagated (best effort).
 */
static int cortex_m_single_step_core(struct target *target)
{
	struct cortex_m_common *cortex_m = target_to_cm(target);
	int retval;

	/* Mask interrupts before clearing halt, if not done already. This avoids
	 * Erratum 377497 (fixed in r1p0) where setting MASKINTS while clearing
	 * HALT can put the core into an unknown state.
	 */
	if (!(cortex_m->dcb_dhcsr & C_MASKINTS)) {
		retval = cortex_m_write_debug_halt_mask(target, C_MASKINTS, 0);
		if (retval != ERROR_OK)
			return retval;
	}
	/* step: set C_STEP, release C_HALT in a single write */
	retval = cortex_m_write_debug_halt_mask(target, C_STEP, C_HALT);
	if (retval != ERROR_OK)
		return retval;
	LOG_TARGET_DEBUG(target, "single step");

	/* restore dhcsr reg */
	cortex_m_clear_halt(target);

	return ERROR_OK;
}
579
580 static int cortex_m_enable_fpb(struct target *target)
581 {
582 int retval = target_write_u32(target, FP_CTRL, 3);
583 if (retval != ERROR_OK)
584 return retval;
585
586 /* check the fpb is actually enabled */
587 uint32_t fpctrl;
588 retval = target_read_u32(target, FP_CTRL, &fpctrl);
589 if (retval != ERROR_OK)
590 return retval;
591
592 if (fpctrl & 1)
593 return ERROR_OK;
594
595 return ERROR_FAIL;
596 }
597
/** Re-establish the debug configuration after a reset was detected.
 *
 * Re-enables debug requests (C_DEBUGEN) if needed, restores the interrupt
 * mask policy for a running CPU, rewrites DEMCR (TRCENA plus the cached
 * vector-catch bits), re-enables the FPB, replays all cached FPB and DWT
 * comparator registers, invalidates the register cache, and re-reads
 * DHCSR.  Returns ERROR_OK or the first failing access result.
 */
static int cortex_m_endreset_event(struct target *target)
{
	int retval;
	uint32_t dcb_demcr;
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = &cortex_m->armv7m;
	struct adiv5_dap *swjdp = cortex_m->armv7m.arm.dap;
	struct cortex_m_fp_comparator *fp_list = cortex_m->fp_comparator_list;
	struct cortex_m_dwt_comparator *dwt_list = cortex_m->dwt_comparator_list;

	/* REVISIT The four debug monitor bits are currently ignored... */
	retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DEMCR, &dcb_demcr);
	if (retval != ERROR_OK)
		return retval;
	LOG_TARGET_DEBUG(target, "DCB_DEMCR = 0x%8.8" PRIx32 "", dcb_demcr);

	/* this register is used for emulated dcc channel */
	retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DCRDR, 0);
	if (retval != ERROR_OK)
		return retval;

	retval = cortex_m_read_dhcsr_atomic_sticky(target);
	if (retval != ERROR_OK)
		return retval;

	if (!(cortex_m->dcb_dhcsr & C_DEBUGEN)) {
		/* Enable debug requests */
		retval = cortex_m_write_debug_halt_mask(target, 0, C_HALT | C_STEP | C_MASKINTS);
		if (retval != ERROR_OK)
			return retval;
	}

	/* Restore proper interrupt masking setting for running CPU. */
	cortex_m_set_maskints_for_run(target);

	/* Enable features controlled by ITM and DWT blocks, and catch only
	 * the vectors we were told to pay attention to.
	 *
	 * Target firmware is responsible for all fault handling policy
	 * choices *EXCEPT* explicitly scripted overrides like "vector_catch"
	 * or manual updates to the NVIC SHCSR and CCR registers.
	 */
	retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DEMCR, TRCENA | armv7m->demcr);
	if (retval != ERROR_OK)
		return retval;

	/* Paranoia: evidently some (early?) chips don't preserve all the
	 * debug state (including FPB, DWT, etc) across reset...
	 */

	/* Enable FPB */
	retval = cortex_m_enable_fpb(target);
	if (retval != ERROR_OK) {
		LOG_TARGET_ERROR(target, "Failed to enable the FPB");
		return retval;
	}

	cortex_m->fpb_enabled = true;

	/* Restore FPB registers */
	for (unsigned int i = 0; i < cortex_m->fp_num_code + cortex_m->fp_num_lit; i++) {
		retval = target_write_u32(target, fp_list[i].fpcr_address, fp_list[i].fpcr_value);
		if (retval != ERROR_OK)
			return retval;
	}

	/* Restore DWT registers: comparator, mask, function for each unit */
	for (unsigned int i = 0; i < cortex_m->dwt_num_comp; i++) {
		retval = target_write_u32(target, dwt_list[i].dwt_comparator_address + 0,
			dwt_list[i].comp);
		if (retval != ERROR_OK)
			return retval;
		retval = target_write_u32(target, dwt_list[i].dwt_comparator_address + 4,
			dwt_list[i].mask);
		if (retval != ERROR_OK)
			return retval;
		retval = target_write_u32(target, dwt_list[i].dwt_comparator_address + 8,
			dwt_list[i].function);
		if (retval != ERROR_OK)
			return retval;
	}
	retval = dap_run(swjdp);
	if (retval != ERROR_OK)
		return retval;

	register_cache_invalidate(armv7m->arm.core_cache);

	/* TODO: invalidate also working areas (needed in the case of detected reset).
	 * Doing so will require flash drivers to test if working area
	 * is still valid in all target algo calling loops.
	 */

	/* make sure we have latest dhcsr flags */
	retval = cortex_m_read_dhcsr_atomic_sticky(target);
	if (retval != ERROR_OK)
		return retval;

	return retval;
}
697
698 static int cortex_m_examine_debug_reason(struct target *target)
699 {
700 struct cortex_m_common *cortex_m = target_to_cm(target);
701
702 /* THIS IS NOT GOOD, TODO - better logic for detection of debug state reason
703 * only check the debug reason if we don't know it already */
704
705 if ((target->debug_reason != DBG_REASON_DBGRQ)
706 && (target->debug_reason != DBG_REASON_SINGLESTEP)) {
707 if (cortex_m->nvic_dfsr & DFSR_BKPT) {
708 target->debug_reason = DBG_REASON_BREAKPOINT;
709 if (cortex_m->nvic_dfsr & DFSR_DWTTRAP)
710 target->debug_reason = DBG_REASON_WPTANDBKPT;
711 } else if (cortex_m->nvic_dfsr & DFSR_DWTTRAP)
712 target->debug_reason = DBG_REASON_WATCHPOINT;
713 else if (cortex_m->nvic_dfsr & DFSR_VCATCH)
714 target->debug_reason = DBG_REASON_BREAKPOINT;
715 else if (cortex_m->nvic_dfsr & DFSR_EXTERNAL)
716 target->debug_reason = DBG_REASON_DBGRQ;
717 else /* HALTED */
718 target->debug_reason = DBG_REASON_UNDEFINED;
719 }
720
721 return ERROR_OK;
722 }
723
/** Queue reads of the fault status/address registers relevant to the
 * currently active exception (armv7m->exception_number), run the DAP
 * queue, and log the gathered SHCSR/status/CFSR/address values.
 *
 * Only used for diagnostics; the read values are not stored anywhere.
 */
static int cortex_m_examine_exception_reason(struct target *target)
{
	/* default -1 marks "not applicable" in the log output */
	uint32_t shcsr = 0, except_sr = 0, cfsr = -1, except_ar = -1;
	struct armv7m_common *armv7m = target_to_armv7m(target);
	struct adiv5_dap *swjdp = armv7m->arm.dap;
	int retval;

	retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_SHCSR, &shcsr);
	if (retval != ERROR_OK)
		return retval;
	switch (armv7m->exception_number) {
	case 2:	/* NMI */
		break;
	case 3:	/* Hard Fault */
		retval = mem_ap_read_atomic_u32(armv7m->debug_ap, NVIC_HFSR, &except_sr);
		if (retval != ERROR_OK)
			return retval;
		/* forced hard fault: fetch the escalated fault's CFSR too */
		if (except_sr & 0x40000000) {
			retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_CFSR, &cfsr);
			if (retval != ERROR_OK)
				return retval;
		}
		break;
	case 4:	/* Memory Management */
		retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_CFSR, &except_sr);
		if (retval != ERROR_OK)
			return retval;
		retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_MMFAR, &except_ar);
		if (retval != ERROR_OK)
			return retval;
		break;
	case 5:	/* Bus Fault */
		retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_CFSR, &except_sr);
		if (retval != ERROR_OK)
			return retval;
		retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_BFAR, &except_ar);
		if (retval != ERROR_OK)
			return retval;
		break;
	case 6:	/* Usage Fault */
		retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_CFSR, &except_sr);
		if (retval != ERROR_OK)
			return retval;
		break;
	case 7:	/* Secure Fault */
		retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_SFSR, &except_sr);
		if (retval != ERROR_OK)
			return retval;
		retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_SFAR, &except_ar);
		if (retval != ERROR_OK)
			return retval;
		break;
	case 11:	/* SVCall */
		break;
	case 12:	/* Debug Monitor */
		retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_DFSR, &except_sr);
		if (retval != ERROR_OK)
			return retval;
		break;
	case 14:	/* PendSV */
		break;
	case 15:	/* SysTick */
		break;
	default:
		except_sr = 0;
		break;
	}
	retval = dap_run(swjdp);
	if (retval == ERROR_OK)
		LOG_TARGET_DEBUG(target, "%s SHCSR 0x%" PRIx32 ", SR 0x%" PRIx32
			", CFSR 0x%" PRIx32 ", AR 0x%" PRIx32,
			armv7m_exception_string(armv7m->exception_number),
			shcsr, except_sr, cfsr, except_ar);
	return retval;
}
799
/** Handle the transition into the halted/debug state.
 *
 * Applies the halt-time interrupt mask policy, clears the halt/step
 * state, refreshes DHCSR, determines the debug reason, reads DSCSR on
 * ARMv8-M to determine security state, loads all registers into the core
 * cache (fast path first, slow path as fallback), and derives the core
 * mode and register map from XPSR/CONTROL.  Finally invokes the optional
 * post_debug_entry hook.
 */
static int cortex_m_debug_entry(struct target *target)
{
	uint32_t xpsr;
	int retval;
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = &cortex_m->armv7m;
	struct arm *arm = &armv7m->arm;
	struct reg *r;

	LOG_TARGET_DEBUG(target, " ");

	/* Do this really early to minimize the window where the MASKINTS erratum
	 * can pile up pending interrupts. */
	cortex_m_set_maskints_for_halt(target);

	cortex_m_clear_halt(target);

	retval = cortex_m_read_dhcsr_atomic_sticky(target);
	if (retval != ERROR_OK)
		return retval;

	retval = armv7m->examine_debug_reason(target);
	if (retval != ERROR_OK)
		return retval;

	/* examine PE security state */
	uint32_t dscsr = 0;
	if (armv7m->arm.arch == ARM_ARCH_V8M) {
		retval = mem_ap_read_u32(armv7m->debug_ap, DCB_DSCSR, &dscsr);
		if (retval != ERROR_OK)
			return retval;
	}

	/* Load all registers to arm.core_cache */
	if (!cortex_m->slow_register_read) {
		retval = cortex_m_fast_read_all_regs(target);
		if (retval == ERROR_TIMEOUT_REACHED) {
			/* fast read saw a not-ready register: degrade to polling */
			cortex_m->slow_register_read = true;
			LOG_TARGET_DEBUG(target, "Switched to slow register read");
		}
	}

	if (cortex_m->slow_register_read)
		retval = cortex_m_slow_read_all_regs(target);

	if (retval != ERROR_OK)
		return retval;

	r = arm->cpsr;
	xpsr = buf_get_u32(r->value, 0, 32);

	/* Are we in an exception handler */
	if (xpsr & 0x1FF) {
		armv7m->exception_number = (xpsr & 0x1FF);

		arm->core_mode = ARM_MODE_HANDLER;
		arm->map = armv7m_msp_reg_map;
	} else {
		unsigned control = buf_get_u32(arm->core_cache
				->reg_list[ARMV7M_CONTROL].value, 0, 3);

		/* is this thread privileged? */
		arm->core_mode = control & 1
			? ARM_MODE_USER_THREAD
			: ARM_MODE_THREAD;

		/* which stack is it using? */
		if (control & 2)
			arm->map = armv7m_psp_reg_map;
		else
			arm->map = armv7m_msp_reg_map;

		armv7m->exception_number = 0;
	}

	if (armv7m->exception_number)
		cortex_m_examine_exception_reason(target);

	bool secure_state = (dscsr & DSCSR_CDS) == DSCSR_CDS;
	LOG_TARGET_DEBUG(target, "entered debug state in core mode: %s at PC 0x%" PRIx32
		", cpu in %s state, target->state: %s",
		arm_mode_name(arm->core_mode),
		buf_get_u32(arm->pc->value, 0, 32),
		secure_state ? "Secure" : "Non-Secure",
		target_state_name(target));

	if (armv7m->post_debug_entry) {
		retval = armv7m->post_debug_entry(target);
		if (retval != ERROR_OK)
			return retval;
	}

	return ERROR_OK;
}
894
/** Poll one target: read DHCSR and update target->state accordingly.
 *
 * Handles, in order: lockup recovery (forces a halt), external reset
 * detection via the accumulated sticky S_RESET_ST, end-of-reset
 * processing, entry into the halted state (with semihosting and
 * halt-event dispatch, postponed under SMP), recovery of an unknown
 * state, and detection of an externally triggered resume.
 */
static int cortex_m_poll_one(struct target *target)
{
	int detected_failure = ERROR_OK;
	int retval = ERROR_OK;
	enum target_state prev_target_state = target->state;
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = &cortex_m->armv7m;

	/* Read from Debug Halting Control and Status Register */
	retval = cortex_m_read_dhcsr_atomic_sticky(target);
	if (retval != ERROR_OK) {
		target->state = TARGET_UNKNOWN;
		return retval;
	}

	/* Recover from lockup. See ARMv7-M architecture spec,
	 * section B1.5.15 "Unrecoverable exception cases".
	 */
	if (cortex_m->dcb_dhcsr & S_LOCKUP) {
		LOG_TARGET_ERROR(target, "clearing lockup after double fault");
		cortex_m_write_debug_halt_mask(target, C_HALT, 0);
		target->debug_reason = DBG_REASON_DBGRQ;

		/* We have to execute the rest (the "finally" equivalent, but
		 * still throw this exception again).
		 */
		detected_failure = ERROR_FAIL;

		/* refresh status bits */
		retval = cortex_m_read_dhcsr_atomic_sticky(target);
		if (retval != ERROR_OK)
			return retval;
	}

	/* sticky reset bit set since the last poll -> external reset */
	if (cortex_m->dcb_dhcsr_cumulated_sticky & S_RESET_ST) {
		cortex_m->dcb_dhcsr_cumulated_sticky &= ~S_RESET_ST;
		if (target->state != TARGET_RESET) {
			target->state = TARGET_RESET;
			LOG_TARGET_INFO(target, "external reset detected");
		}
		return ERROR_OK;
	}

	if (target->state == TARGET_RESET) {
		/* Cannot switch context while running so endreset is
		 * called with target->state == TARGET_RESET
		 */
		LOG_TARGET_DEBUG(target, "Exit from reset with dcb_dhcsr 0x%" PRIx32,
			cortex_m->dcb_dhcsr);
		retval = cortex_m_endreset_event(target);
		if (retval != ERROR_OK) {
			target->state = TARGET_UNKNOWN;
			return retval;
		}
		target->state = TARGET_RUNNING;
		prev_target_state = TARGET_RUNNING;
	}

	if (cortex_m->dcb_dhcsr & S_HALT) {
		target->state = TARGET_HALTED;

		if ((prev_target_state == TARGET_RUNNING) || (prev_target_state == TARGET_RESET)) {
			retval = cortex_m_debug_entry(target);

			/* arm_semihosting needs to know registers, don't run if debug entry returned error */
			if (retval == ERROR_OK && arm_semihosting(target, &retval) != 0)
				return retval;

			if (target->smp) {
				/* under SMP the halt event is dispatched later,
				 * after all cores have been halted */
				LOG_TARGET_DEBUG(target, "postpone target event 'halted'");
				target->smp_halt_event_postponed = true;
			} else {
				/* regardless of errors returned in previous code update state */
				target_call_event_callbacks(target, TARGET_EVENT_HALTED);
			}
		}
		if (prev_target_state == TARGET_DEBUG_RUNNING) {
			retval = cortex_m_debug_entry(target);

			target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
		}
		if (retval != ERROR_OK)
			return retval;
	}

	if (target->state == TARGET_UNKNOWN) {
		/* Check if processor is retiring instructions or sleeping.
		 * Unlike S_RESET_ST here we test if the target *is* running now,
		 * not if it has been running (possibly in the past). Instructions are
		 * typically processed much faster than OpenOCD polls DHCSR so S_RETIRE_ST
		 * is read always 1. That's the reason not to use dcb_dhcsr_cumulated_sticky.
		 */
		if (cortex_m->dcb_dhcsr & S_RETIRE_ST || cortex_m->dcb_dhcsr & S_SLEEP) {
			target->state = TARGET_RUNNING;
			retval = ERROR_OK;
		}
	}

	/* Check that target is truly halted, since the target could be resumed externally */
	if ((prev_target_state == TARGET_HALTED) && !(cortex_m->dcb_dhcsr & S_HALT)) {
		/* registers are now invalid */
		register_cache_invalidate(armv7m->arm.core_cache);

		target->state = TARGET_RUNNING;
		LOG_TARGET_WARNING(target, "external resume detected");
		target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
		retval = ERROR_OK;
	}

	/* Did we detect a failure condition that we cleared? */
	if (detected_failure != ERROR_OK)
		retval = detected_failure;
	return retval;
}
1009
1010 static int cortex_m_halt_one(struct target *target);
1011
1012 static int cortex_m_smp_halt_all(struct list_head *smp_targets)
1013 {
1014 int retval = ERROR_OK;
1015 struct target_list *head;
1016
1017 foreach_smp_target(head, smp_targets) {
1018 struct target *curr = head->target;
1019 if (!target_was_examined(curr))
1020 continue;
1021 if (curr->state == TARGET_HALTED)
1022 continue;
1023
1024 int ret2 = cortex_m_halt_one(curr);
1025 if (retval == ERROR_OK)
1026 retval = ret2; /* store the first error code ignore others */
1027 }
1028 return retval;
1029 }
1030
1031 static int cortex_m_smp_post_halt_poll(struct list_head *smp_targets)
1032 {
1033 int retval = ERROR_OK;
1034 struct target_list *head;
1035
1036 foreach_smp_target(head, smp_targets) {
1037 struct target *curr = head->target;
1038 if (!target_was_examined(curr))
1039 continue;
1040 /* skip targets that were already halted */
1041 if (curr->state == TARGET_HALTED)
1042 continue;
1043
1044 int ret2 = cortex_m_poll_one(curr);
1045 if (retval == ERROR_OK)
1046 retval = ret2; /* store the first error code ignore others */
1047 }
1048 return retval;
1049 }
1050
1051 static int cortex_m_poll_smp(struct list_head *smp_targets)
1052 {
1053 int retval = ERROR_OK;
1054 struct target_list *head;
1055 bool halted = false;
1056
1057 foreach_smp_target(head, smp_targets) {
1058 struct target *curr = head->target;
1059 if (curr->smp_halt_event_postponed) {
1060 halted = true;
1061 break;
1062 }
1063 }
1064
1065 if (halted) {
1066 retval = cortex_m_smp_halt_all(smp_targets);
1067
1068 int ret2 = cortex_m_smp_post_halt_poll(smp_targets);
1069 if (retval == ERROR_OK)
1070 retval = ret2; /* store the first error code ignore others */
1071
1072 foreach_smp_target(head, smp_targets) {
1073 struct target *curr = head->target;
1074 if (!curr->smp_halt_event_postponed)
1075 continue;
1076
1077 curr->smp_halt_event_postponed = false;
1078 if (curr->state == TARGET_HALTED) {
1079 LOG_TARGET_DEBUG(curr, "sending postponed target event 'halted'");
1080 target_call_event_callbacks(curr, TARGET_EVENT_HALTED);
1081 }
1082 }
1083 /* There is no need to set gdb_service->target
1084 * as hwthread_update_threads() selects an interesting thread
1085 * by its own
1086 */
1087 }
1088 return retval;
1089 }
1090
1091 static int cortex_m_poll(struct target *target)
1092 {
1093 int retval = cortex_m_poll_one(target);
1094
1095 if (target->smp) {
1096 struct target_list *last;
1097 last = list_last_entry(target->smp_targets, struct target_list, lh);
1098 if (target == last->target)
1099 /* After the last target in SMP group has been polled
1100 * check for postponed halted events and eventually halt and re-poll
1101 * other targets */
1102 cortex_m_poll_smp(target->smp_targets);
1103 }
1104 return retval;
1105 }
1106
1107 static int cortex_m_halt_one(struct target *target)
1108 {
1109 int retval;
1110 LOG_TARGET_DEBUG(target, "target->state: %s", target_state_name(target));
1111
1112 if (target->state == TARGET_HALTED) {
1113 LOG_TARGET_DEBUG(target, "target was already halted");
1114 return ERROR_OK;
1115 }
1116
1117 if (target->state == TARGET_UNKNOWN)
1118 LOG_TARGET_WARNING(target, "target was in unknown state when halt was requested");
1119
1120 /* Write to Debug Halting Control and Status Register */
1121 retval = cortex_m_write_debug_halt_mask(target, C_HALT, 0);
1122
1123 /* Do this really early to minimize the window where the MASKINTS erratum
1124 * can pile up pending interrupts. */
1125 cortex_m_set_maskints_for_halt(target);
1126
1127 target->debug_reason = DBG_REASON_DBGRQ;
1128
1129 return retval;
1130 }
1131
1132 static int cortex_m_halt(struct target *target)
1133 {
1134 if (target->smp)
1135 return cortex_m_smp_halt_all(target->smp_targets);
1136 else
1137 return cortex_m_halt_one(target);
1138 }
1139
/**
 * Reset only the Cortex-M core (via AIRCR.VECTRESET) and catch the reset
 * vector so the core halts immediately after reset.
 *
 * Peripherals are NOT reset; see the warning below. Requires a core that
 * supports VECTRESET (ARMv7-M; not available on ARMv6-M / some ARMv8-M).
 *
 * @param target the target to reset-halt
 * @returns ERROR_OK on success or when the halt was not observed within
 * the ~100 ms polling window (the loop falls through silently); ERROR_FAIL
 * when VECTRESET is unsupported; other errors from AP accesses.
 */
static int cortex_m_soft_reset_halt(struct target *target)
{
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = &cortex_m->armv7m;
	int retval, timeout = 0;

	/* on single cortex_m MCU soft_reset_halt should be avoided as same functionality
	 * can be obtained by using 'reset halt' and 'cortex_m reset_config vectreset'.
	 * As this reset only uses VC_CORERESET it would only ever reset the cortex_m
	 * core, not the peripherals */
	LOG_TARGET_DEBUG(target, "soft_reset_halt is discouraged, please use 'reset halt' instead.");

	if (!cortex_m->vectreset_supported) {
		LOG_TARGET_ERROR(target, "VECTRESET is not supported on this Cortex-M core");
		return ERROR_FAIL;
	}

	/* Set C_DEBUGEN */
	retval = cortex_m_write_debug_halt_mask(target, 0, C_STEP | C_MASKINTS);
	if (retval != ERROR_OK)
		return retval;

	/* Enter debug state on reset; restore DEMCR in endreset_event() */
	retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DEMCR,
			TRCENA | VC_HARDERR | VC_BUSERR | VC_CORERESET);
	if (retval != ERROR_OK)
		return retval;

	/* Request a core-only reset */
	retval = mem_ap_write_atomic_u32(armv7m->debug_ap, NVIC_AIRCR,
			AIRCR_VECTKEY | AIRCR_VECTRESET);
	if (retval != ERROR_OK)
		return retval;
	target->state = TARGET_RESET;

	/* registers are now invalid */
	register_cache_invalidate(cortex_m->armv7m.arm.core_cache);

	/* Poll (about 100 ms, 1 ms per iteration) for the halt-on-vector-catch */
	while (timeout < 100) {
		retval = cortex_m_read_dhcsr_atomic_sticky(target);
		if (retval == ERROR_OK) {
			retval = mem_ap_read_atomic_u32(armv7m->debug_ap, NVIC_DFSR,
					&cortex_m->nvic_dfsr);
			if (retval != ERROR_OK)
				return retval;
			/* Halted AND the halt was caused by vector catch: done */
			if ((cortex_m->dcb_dhcsr & S_HALT)
				&& (cortex_m->nvic_dfsr & DFSR_VCATCH)) {
				LOG_TARGET_DEBUG(target, "system reset-halted, DHCSR 0x%08" PRIx32 ", DFSR 0x%08" PRIx32,
						cortex_m->dcb_dhcsr, cortex_m->nvic_dfsr);
				cortex_m_poll(target);
				/* FIXME restore user's vector catch config */
				return ERROR_OK;
			} else {
				LOG_TARGET_DEBUG(target, "waiting for system reset-halt, "
						"DHCSR 0x%08" PRIx32 ", %d ms",
						cortex_m->dcb_dhcsr, timeout);
			}
		}
		timeout++;
		alive_sleep(1);
	}

	/* NOTE(review): timing out here still returns ERROR_OK — presumably
	 * intentional best-effort behavior; confirm before changing. */
	return ERROR_OK;
}
1204
1205 void cortex_m_enable_breakpoints(struct target *target)
1206 {
1207 struct breakpoint *breakpoint = target->breakpoints;
1208
1209 /* set any pending breakpoints */
1210 while (breakpoint) {
1211 if (!breakpoint->is_set)
1212 cortex_m_set_breakpoint(target, breakpoint);
1213 breakpoint = breakpoint->next;
1214 }
1215 }
1216
/**
 * Prepare a halted core for resume: re-arm breakpoints/watchpoints, set up
 * the resume PC (and PRIMASK/xPSR.T for debug execution), write back the
 * register cache, and optionally single-step over a breakpoint sitting at
 * the resume address. Does NOT actually start the core — see
 * cortex_m_restart_one().
 *
 * @param current            true: resume at current pc; false: resume at *address
 * @param address            in: resume address when !current; out: the pc the
 *                           core will actually resume at when current
 * @param handle_breakpoints step over a breakpoint at the resume pc
 * @param debug_execution    prepare for algorithm execution: working areas and
 *                           breakpoints are left untouched, interrupts masked
 *                           via PRIMASK
 * @returns ERROR_OK or ERROR_TARGET_NOT_HALTED / context-restore errors
 */
static int cortex_m_restore_one(struct target *target, bool current,
	target_addr_t *address, bool handle_breakpoints, bool debug_execution)
{
	struct armv7m_common *armv7m = target_to_armv7m(target);
	struct breakpoint *breakpoint = NULL;
	uint32_t resume_pc;
	struct reg *r;

	if (target->state != TARGET_HALTED) {
		LOG_TARGET_ERROR(target, "not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* Normal resume: free working areas and re-arm all pending BPs/WPs */
	if (!debug_execution) {
		target_free_all_working_areas(target);
		cortex_m_enable_breakpoints(target);
		cortex_m_enable_watchpoints(target);
	}

	if (debug_execution) {
		r = armv7m->arm.core_cache->reg_list + ARMV7M_PRIMASK;

		/* Disable interrupts */
		/* We disable interrupts in the PRIMASK register instead of
		 * masking with C_MASKINTS. This is probably the same issue
		 * as Cortex-M3 Erratum 377493 (fixed in r1p0): C_MASKINTS
		 * in parallel with disabled interrupts can cause local faults
		 * to not be taken.
		 *
		 * This breaks non-debug (application) execution if not
		 * called from armv7m_start_algorithm() which saves registers.
		 */
		buf_set_u32(r->value, 0, 1, 1);
		r->dirty = true;
		r->valid = true;

		/* Make sure we are in Thumb mode, set xPSR.T bit */
		/* armv7m_start_algorithm() initializes entire xPSR register.
		 * This duplicity handles the case when cortex_m_resume()
		 * is used with the debug_execution flag directly,
		 * not called through armv7m_start_algorithm().
		 */
		r = armv7m->arm.cpsr;
		buf_set_u32(r->value, 24, 1, 1);
		r->dirty = true;
		r->valid = true;
	}

	/* current = 1: continue on current pc, otherwise continue at <address> */
	r = armv7m->arm.pc;
	if (!current) {
		buf_set_u32(r->value, 0, 32, *address);
		r->dirty = true;
		r->valid = true;
	}

	/* if we halted last time due to a bkpt instruction
	 * then we have to manually step over it, otherwise
	 * the core will break again */

	if (!breakpoint_find(target, buf_get_u32(r->value, 0, 32))
		&& !debug_execution)
		armv7m_maybe_skip_bkpt_inst(target, NULL);

	resume_pc = buf_get_u32(r->value, 0, 32);
	if (current)
		*address = resume_pc;	/* report the effective resume pc to the caller */

	/* flush dirty cached registers back to the core */
	int retval = armv7m_restore_context(target);
	if (retval != ERROR_OK)
		return retval;

	/* the front-end may request us not to handle breakpoints */
	if (handle_breakpoints) {
		/* Single step past breakpoint at current address */
		breakpoint = breakpoint_find(target, resume_pc);
		if (breakpoint) {
			LOG_TARGET_DEBUG(target, "unset breakpoint at " TARGET_ADDR_FMT " (ID: %" PRIu32 ")",
				breakpoint->address,
				breakpoint->unique_id);
			retval = cortex_m_unset_breakpoint(target, breakpoint);
			if (retval == ERROR_OK)
				retval = cortex_m_single_step_core(target);
			/* always try to re-arm the breakpoint, even after a failed step */
			int ret2 = cortex_m_set_breakpoint(target, breakpoint);
			if (retval != ERROR_OK)
				return retval;
			if (ret2 != ERROR_OK)
				return ret2;
		}
	}

	return ERROR_OK;
}
1310
1311 static int cortex_m_restart_one(struct target *target, bool debug_execution)
1312 {
1313 struct armv7m_common *armv7m = target_to_armv7m(target);
1314
1315 /* Restart core */
1316 cortex_m_set_maskints_for_run(target);
1317 cortex_m_write_debug_halt_mask(target, 0, C_HALT);
1318
1319 target->debug_reason = DBG_REASON_NOTHALTED;
1320 /* registers are now invalid */
1321 register_cache_invalidate(armv7m->arm.core_cache);
1322
1323 if (!debug_execution) {
1324 target->state = TARGET_RUNNING;
1325 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1326 } else {
1327 target->state = TARGET_DEBUG_RUNNING;
1328 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
1329 }
1330
1331 return ERROR_OK;
1332 }
1333
1334 static int cortex_m_restore_smp(struct target *target, bool handle_breakpoints)
1335 {
1336 struct target_list *head;
1337 target_addr_t address;
1338 foreach_smp_target(head, target->smp_targets) {
1339 struct target *curr = head->target;
1340 /* skip calling target */
1341 if (curr == target)
1342 continue;
1343 if (!target_was_examined(curr))
1344 continue;
1345 /* skip running targets */
1346 if (curr->state == TARGET_RUNNING)
1347 continue;
1348
1349 int retval = cortex_m_restore_one(curr, true, &address,
1350 handle_breakpoints, false);
1351 if (retval != ERROR_OK)
1352 return retval;
1353
1354 retval = cortex_m_restart_one(curr, false);
1355 if (retval != ERROR_OK)
1356 return retval;
1357
1358 LOG_TARGET_DEBUG(curr, "SMP resumed at " TARGET_ADDR_FMT, address);
1359 }
1360 return ERROR_OK;
1361 }
1362
1363 static int cortex_m_resume(struct target *target, int current,
1364 target_addr_t address, int handle_breakpoints, int debug_execution)
1365 {
1366 int retval = cortex_m_restore_one(target, !!current, &address, !!handle_breakpoints, !!debug_execution);
1367 if (retval != ERROR_OK) {
1368 LOG_TARGET_ERROR(target, "context restore failed, aborting resume");
1369 return retval;
1370 }
1371
1372 if (target->smp && !debug_execution) {
1373 retval = cortex_m_restore_smp(target, !!handle_breakpoints);
1374 if (retval != ERROR_OK)
1375 LOG_WARNING("resume of a SMP target failed, trying to resume current one");
1376 }
1377
1378 cortex_m_restart_one(target, !!debug_execution);
1379 if (retval != ERROR_OK) {
1380 LOG_TARGET_ERROR(target, "resume failed");
1381 return retval;
1382 }
1383
1384 LOG_TARGET_DEBUG(target, "%sresumed at " TARGET_ADDR_FMT,
1385 debug_execution ? "debug " : "", address);
1386
1387 return ERROR_OK;
1388 }
1389
1390 /* int irqstepcount = 0; */
/**
 * Single-step one instruction on a halted Cortex-M core.
 *
 * In CORTEX_M_ISRMASK_AUTO mode, pending interrupts are first allowed to
 * execute behind a temporary breakpoint placed at the current pc, and only
 * then is the instruction stepped with interrupts masked — see the long
 * in-body comment for the full rationale.
 *
 * @param current            1: step at current pc, 0: set pc to @p address first
 * @param address            step address when @p current == 0
 * @param handle_breakpoints temporarily remove a breakpoint at the step pc
 * @returns ERROR_OK, ERROR_TARGET_NOT_HALTED, or an AP access error
 */
static int cortex_m_step(struct target *target, int current,
	target_addr_t address, int handle_breakpoints)
{
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = &cortex_m->armv7m;
	struct breakpoint *breakpoint = NULL;
	struct reg *pc = armv7m->arm.pc;
	bool bkpt_inst_found = false;
	int retval;
	bool isr_timed_out = false;

	if (target->state != TARGET_HALTED) {
		LOG_TARGET_ERROR(target, "not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* Just one of SMP cores will step. Set the gdb control
	 * target to current one or gdb miss gdb-end event */
	if (target->smp && target->gdb_service)
		target->gdb_service->target = target;

	/* current = 1: continue on current pc, otherwise continue at <address> */
	if (!current) {
		buf_set_u32(pc->value, 0, 32, address);
		pc->dirty = true;
		pc->valid = true;
	}

	uint32_t pc_value = buf_get_u32(pc->value, 0, 32);

	/* the front-end may request us not to handle breakpoints */
	if (handle_breakpoints) {
		breakpoint = breakpoint_find(target, pc_value);
		if (breakpoint)
			cortex_m_unset_breakpoint(target, breakpoint);
	}

	/* if pc points at a BKPT instruction, advance past it instead of stepping */
	armv7m_maybe_skip_bkpt_inst(target, &bkpt_inst_found);

	target->debug_reason = DBG_REASON_SINGLESTEP;

	armv7m_restore_context(target);

	target_call_event_callbacks(target, TARGET_EVENT_RESUMED);

	/* if no bkpt instruction is found at pc then we can perform
	 * a normal step, otherwise we have to manually step over the bkpt
	 * instruction - as such simulate a step */
	if (bkpt_inst_found == false) {
		if (cortex_m->isrmasking_mode != CORTEX_M_ISRMASK_AUTO) {
			/* Automatic ISR masking mode off: Just step over the next
			 * instruction, with interrupts on or off as appropriate. */
			cortex_m_set_maskints_for_step(target);
			cortex_m_write_debug_halt_mask(target, C_STEP, C_HALT);
		} else {
			/* Process interrupts during stepping in a way they don't interfere
			 * debugging.
			 *
			 * Principle:
			 *
			 * Set a temporary break point at the current pc and let the core run
			 * with interrupts enabled. Pending interrupts get served and we run
			 * into the breakpoint again afterwards. Then we step over the next
			 * instruction with interrupts disabled.
			 *
			 * If the pending interrupts don't complete within time, we leave the
			 * core running. This may happen if the interrupts trigger faster
			 * than the core can process them or the handler doesn't return.
			 *
			 * If no more breakpoints are available we simply do a step with
			 * interrupts enabled.
			 *
			 */

			/* 2012-09-29 ph
			 *
			 * If a break point is already set on the lower half word then a break point on
			 * the upper half word will not break again when the core is restarted. So we
			 * just step over the instruction with interrupts disabled.
			 *
			 * The documentation has no information about this, it was found by observation
			 * on STM32F1 and STM32F2. Proper explanation welcome. STM32F0 doesn't seem to
			 * suffer from this problem.
			 *
			 * To add some confusion: pc_value has bit 0 always set, while the breakpoint
			 * address has it always cleared. The former is done to indicate thumb mode
			 * to gdb.
			 *
			 */
			if ((pc_value & 0x02) && breakpoint_find(target, pc_value & ~0x03)) {
				LOG_TARGET_DEBUG(target, "Stepping over next instruction with interrupts disabled");
				cortex_m_write_debug_halt_mask(target, C_HALT | C_MASKINTS, 0);
				cortex_m_write_debug_halt_mask(target, C_STEP, C_HALT);
				/* Re-enable interrupts if appropriate */
				cortex_m_write_debug_halt_mask(target, C_HALT, 0);
				cortex_m_set_maskints_for_halt(target);
			} else {

				/* Set a temporary break point */
				if (breakpoint) {
					retval = cortex_m_set_breakpoint(target, breakpoint);
				} else {
					enum breakpoint_type type = BKPT_HARD;
					if (cortex_m->fp_rev == 0 && pc_value > 0x1FFFFFFF) {
						/* FPB rev.1 cannot handle such addr, try BKPT instr */
						type = BKPT_SOFT;
					}
					retval = breakpoint_add(target, pc_value, 2, type);
				}

				bool tmp_bp_set = (retval == ERROR_OK);

				/* No more breakpoints left, just do a step */
				if (!tmp_bp_set) {
					cortex_m_set_maskints_for_step(target);
					cortex_m_write_debug_halt_mask(target, C_STEP, C_HALT);
					/* Re-enable interrupts if appropriate */
					cortex_m_write_debug_halt_mask(target, C_HALT, 0);
					cortex_m_set_maskints_for_halt(target);
				} else {
					/* Start the core */
					LOG_TARGET_DEBUG(target, "Starting core to serve pending interrupts");
					int64_t t_start = timeval_ms();
					cortex_m_set_maskints_for_run(target);
					cortex_m_write_debug_halt_mask(target, 0, C_HALT | C_STEP);

					/* Wait for pending handlers to complete or timeout */
					do {
						retval = cortex_m_read_dhcsr_atomic_sticky(target);
						if (retval != ERROR_OK) {
							target->state = TARGET_UNKNOWN;
							return retval;
						}
						/* give the handlers up to 500 ms to finish */
						isr_timed_out = ((timeval_ms() - t_start) > 500);
					} while (!((cortex_m->dcb_dhcsr & S_HALT) || isr_timed_out));

					/* only remove breakpoint if we created it */
					if (breakpoint)
						cortex_m_unset_breakpoint(target, breakpoint);
					else {
						/* Remove the temporary breakpoint */
						breakpoint_remove(target, pc_value);
					}

					if (isr_timed_out) {
						LOG_TARGET_DEBUG(target, "Interrupt handlers didn't complete within time, "
							"leaving target running");
					} else {
						/* Step over next instruction with interrupts disabled */
						cortex_m_set_maskints_for_step(target);
						cortex_m_write_debug_halt_mask(target,
							C_HALT | C_MASKINTS,
							0);
						cortex_m_write_debug_halt_mask(target, C_STEP, C_HALT);
						/* Re-enable interrupts if appropriate */
						cortex_m_write_debug_halt_mask(target, C_HALT, 0);
						cortex_m_set_maskints_for_halt(target);
					}
				}
			}
		}
	}

	retval = cortex_m_read_dhcsr_atomic_sticky(target);
	if (retval != ERROR_OK)
		return retval;

	/* registers are now invalid */
	register_cache_invalidate(armv7m->arm.core_cache);

	/* re-arm the breakpoint we removed at the beginning, if any */
	if (breakpoint)
		cortex_m_set_breakpoint(target, breakpoint);

	if (isr_timed_out) {
		/* Leave the core running. The user has to stop execution manually. */
		target->debug_reason = DBG_REASON_NOTHALTED;
		target->state = TARGET_RUNNING;
		return ERROR_OK;
	}

	LOG_TARGET_DEBUG(target, "target stepped dcb_dhcsr = 0x%" PRIx32
		" nvic_icsr = 0x%" PRIx32,
		cortex_m->dcb_dhcsr, cortex_m->nvic_icsr);

	retval = cortex_m_debug_entry(target);
	if (retval != ERROR_OK)
		return retval;
	target_call_event_callbacks(target, TARGET_EVENT_HALTED);

	LOG_TARGET_DEBUG(target, "target stepped dcb_dhcsr = 0x%" PRIx32
		" nvic_icsr = 0x%" PRIx32,
		cortex_m->dcb_dhcsr, cortex_m->nvic_icsr);

	return ERROR_OK;
}
1586
/**
 * Assert reset on a Cortex-M target.
 *
 * Depending on configuration this uses hardware SRST, or one of the software
 * mechanisms (AIRCR.SYSRESETREQ / AIRCR.VECTRESET). A TARGET_EVENT_RESET_ASSERT
 * script handler overrides the whole procedure. Many AP access errors are
 * deliberately NOT propagated so that reset processing can continue; see the
 * inline comments.
 *
 * @returns ERROR_OK (also in several degraded-but-recoverable situations),
 * ERROR_FAIL when neither a debug AP nor SRST is available.
 */
static int cortex_m_assert_reset(struct target *target)
{
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = &cortex_m->armv7m;
	enum cortex_m_soft_reset_config reset_config = cortex_m->soft_reset_config;

	LOG_TARGET_DEBUG(target, "target->state: %s,%s examined",
		target_state_name(target),
		target_was_examined(target) ? "" : " not");

	enum reset_types jtag_reset_config = jtag_get_reset_config();

	if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT)) {
		/* allow scripts to override the reset event */

		target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
		register_cache_invalidate(cortex_m->armv7m.arm.core_cache);
		target->state = TARGET_RESET;

		return ERROR_OK;
	}

	/* some cores support connecting while srst is asserted
	 * use that mode if it has been configured */

	bool srst_asserted = false;

	if ((jtag_reset_config & RESET_HAS_SRST) &&
		((jtag_reset_config & RESET_SRST_NO_GATING)
		|| (!armv7m->debug_ap && !target->defer_examine))) {
		/* If we have no debug_ap, asserting SRST is the only thing
		 * we can do now */
		adapter_assert_reset();
		srst_asserted = true;
	}

	/* TODO: replace the hack calling target_examine_one()
	 * as soon as a better reset framework is available */
	if (!target_was_examined(target) && !target->defer_examine
		&& srst_asserted && (jtag_reset_config & RESET_SRST_NO_GATING)) {
		LOG_TARGET_DEBUG(target, "Trying to re-examine under reset");
		target_examine_one(target);
	}

	/* We need at least debug_ap to go further.
	 * Inform user and bail out if we don't have one. */
	if (!armv7m->debug_ap) {
		if (srst_asserted) {
			if (target->reset_halt)
				LOG_TARGET_ERROR(target, "Debug AP not available, will not halt after reset!");

			/* Do not propagate error: reset was asserted, proceed to deassert! */
			target->state = TARGET_RESET;
			register_cache_invalidate(cortex_m->armv7m.arm.core_cache);
			return ERROR_OK;

		} else {
			LOG_TARGET_ERROR(target, "Debug AP not available, reset NOT asserted!");
			return ERROR_FAIL;
		}
	}

	/* Enable debug requests */
	int retval = cortex_m_read_dhcsr_atomic_sticky(target);

	/* Store important errors instead of failing and proceed to reset assert */

	if (retval != ERROR_OK || !(cortex_m->dcb_dhcsr & C_DEBUGEN))
		retval = cortex_m_write_debug_halt_mask(target, 0, C_HALT | C_STEP | C_MASKINTS);

	/* If the processor is sleeping in a WFI or WFE instruction, the
	 * C_HALT bit must be asserted to regain control */
	if (retval == ERROR_OK && (cortex_m->dcb_dhcsr & S_SLEEP))
		retval = cortex_m_write_debug_halt_mask(target, C_HALT, 0);

	mem_ap_write_u32(armv7m->debug_ap, DCB_DCRDR, 0);
	/* Ignore less important errors */

	if (!target->reset_halt) {
		/* Set/Clear C_MASKINTS in a separate operation */
		cortex_m_set_maskints_for_run(target);

		/* clear any debug flags before resuming */
		cortex_m_clear_halt(target);

		/* clear C_HALT in dhcsr reg */
		cortex_m_write_debug_halt_mask(target, 0, C_HALT);
	} else {
		/* Halt in debug on reset; endreset_event() restores DEMCR.
		 *
		 * REVISIT catching BUSERR presumably helps to defend against
		 * bad vector table entries. Should this include MMERR or
		 * other flags too?
		 */
		int retval2;
		retval2 = mem_ap_write_atomic_u32(armv7m->debug_ap, DCB_DEMCR,
				TRCENA | VC_HARDERR | VC_BUSERR | VC_CORERESET);
		if (retval != ERROR_OK || retval2 != ERROR_OK)
			LOG_TARGET_INFO(target, "AP write error, reset will not halt");
	}

	if (jtag_reset_config & RESET_HAS_SRST) {
		/* default to asserting srst */
		if (!srst_asserted)
			adapter_assert_reset();

		/* srst is asserted, ignore AP access errors */
		retval = ERROR_OK;
	} else {
		/* Use a standard Cortex-M software reset mechanism.
		 * We default to using VECTRESET.
		 * This has the disadvantage of not resetting the peripherals, so a
		 * reset-init event handler is needed to perform any peripheral resets.
		 */
		if (!cortex_m->vectreset_supported
				&& reset_config == CORTEX_M_RESET_VECTRESET) {
			reset_config = CORTEX_M_RESET_SYSRESETREQ;
			LOG_TARGET_WARNING(target, "VECTRESET is not supported on this Cortex-M core, using SYSRESETREQ instead.");
			LOG_TARGET_WARNING(target, "Set 'cortex_m reset_config sysresetreq'.");
		}

		LOG_TARGET_DEBUG(target, "Using Cortex-M %s", (reset_config == CORTEX_M_RESET_SYSRESETREQ)
			? "SYSRESETREQ" : "VECTRESET");

		if (reset_config == CORTEX_M_RESET_VECTRESET) {
			LOG_TARGET_WARNING(target, "Only resetting the Cortex-M core, use a reset-init event "
				"handler to reset any peripherals or configure hardware srst support.");
		}

		int retval3;
		retval3 = mem_ap_write_atomic_u32(armv7m->debug_ap, NVIC_AIRCR,
				AIRCR_VECTKEY | ((reset_config == CORTEX_M_RESET_SYSRESETREQ)
				? AIRCR_SYSRESETREQ : AIRCR_VECTRESET));
		if (retval3 != ERROR_OK)
			LOG_TARGET_DEBUG(target, "Ignoring AP write error right after reset");

		retval3 = dap_dp_init_or_reconnect(armv7m->debug_ap->dap);
		if (retval3 != ERROR_OK) {
			LOG_TARGET_ERROR(target, "DP initialisation failed");
			/* The error return value must not be propagated in this case.
			 * SYSRESETREQ or VECTRESET have been possibly triggered
			 * so reset processing should continue */
		} else {
			/* I do not know why this is necessary, but it
			 * fixes strange effects (step/resume cause NMI
			 * after reset) on LM3S6918 -- Michael Schwingen
			 */
			uint32_t tmp;
			mem_ap_read_atomic_u32(armv7m->debug_ap, NVIC_AIRCR, &tmp);
		}
	}

	target->state = TARGET_RESET;
	jtag_sleep(50000);

	register_cache_invalidate(cortex_m->armv7m.arm.core_cache);

	return retval;
}
1746
1747 static int cortex_m_deassert_reset(struct target *target)
1748 {
1749 struct armv7m_common *armv7m = &target_to_cm(target)->armv7m;
1750
1751 LOG_TARGET_DEBUG(target, "target->state: %s,%s examined",
1752 target_state_name(target),
1753 target_was_examined(target) ? "" : " not");
1754
1755 /* deassert reset lines */
1756 adapter_deassert_reset();
1757
1758 enum reset_types jtag_reset_config = jtag_get_reset_config();
1759
1760 if ((jtag_reset_config & RESET_HAS_SRST) &&
1761 !(jtag_reset_config & RESET_SRST_NO_GATING) &&
1762 armv7m->debug_ap) {
1763
1764 int retval = dap_dp_init_or_reconnect(armv7m->debug_ap->dap);
1765 if (retval != ERROR_OK) {
1766 LOG_TARGET_ERROR(target, "DP initialisation failed");
1767 return retval;
1768 }
1769 }
1770
1771 return ERROR_OK;
1772 }
1773
1774 int cortex_m_set_breakpoint(struct target *target, struct breakpoint *breakpoint)
1775 {
1776 int retval;
1777 unsigned int fp_num = 0;
1778 struct cortex_m_common *cortex_m = target_to_cm(target);
1779 struct cortex_m_fp_comparator *comparator_list = cortex_m->fp_comparator_list;
1780
1781 if (breakpoint->is_set) {
1782 LOG_TARGET_WARNING(target, "breakpoint (BPID: %" PRIu32 ") already set", breakpoint->unique_id);
1783 return ERROR_OK;
1784 }
1785
1786 if (breakpoint->type == BKPT_HARD) {
1787 uint32_t fpcr_value;
1788 while (comparator_list[fp_num].used && (fp_num < cortex_m->fp_num_code))
1789 fp_num++;
1790 if (fp_num >= cortex_m->fp_num_code) {
1791 LOG_TARGET_ERROR(target, "Can not find free FPB Comparator!");
1792 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1793 }
1794 breakpoint_hw_set(breakpoint, fp_num);
1795 fpcr_value = breakpoint->address | 1;
1796 if (cortex_m->fp_rev == 0) {
1797 if (breakpoint->address > 0x1FFFFFFF) {
1798 LOG_TARGET_ERROR(target, "Cortex-M Flash Patch Breakpoint rev.1 "
1799 "cannot handle HW breakpoint above address 0x1FFFFFFE");
1800 return ERROR_FAIL;
1801 }
1802 uint32_t hilo;
1803 hilo = (breakpoint->address & 0x2) ? FPCR_REPLACE_BKPT_HIGH : FPCR_REPLACE_BKPT_LOW;
1804 fpcr_value = (fpcr_value & 0x1FFFFFFC) | hilo | 1;
1805 } else if (cortex_m->fp_rev > 1) {
1806 LOG_TARGET_ERROR(target, "Unhandled Cortex-M Flash Patch Breakpoint architecture revision");
1807 return ERROR_FAIL;
1808 }
1809 comparator_list[fp_num].used = true;
1810 comparator_list[fp_num].fpcr_value = fpcr_value;
1811 target_write_u32(target, comparator_list[fp_num].fpcr_address,
1812 comparator_list[fp_num].fpcr_value);
1813 LOG_TARGET_DEBUG(target, "fpc_num %i fpcr_value 0x%" PRIx32 "",
1814 fp_num,
1815 comparator_list[fp_num].fpcr_value);
1816 if (!cortex_m->fpb_enabled) {
1817 LOG_TARGET_DEBUG(target, "FPB wasn't enabled, do it now");
1818 retval = cortex_m_enable_fpb(target);
1819 if (retval != ERROR_OK) {
1820 LOG_TARGET_ERROR(target, "Failed to enable the FPB");
1821 return retval;
1822 }
1823
1824 cortex_m->fpb_enabled = true;
1825 }
1826 } else if (breakpoint->type == BKPT_SOFT) {
1827 uint8_t code[4];
1828
1829 /* NOTE: on ARMv6-M and ARMv7-M, BKPT(0xab) is used for
1830 * semihosting; don't use that. Otherwise the BKPT
1831 * parameter is arbitrary.
1832 */
1833 buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1834 retval = target_read_memory(target,
1835 breakpoint->address & 0xFFFFFFFE,
1836 breakpoint->length, 1,
1837 breakpoint->orig_instr);
1838 if (retval != ERROR_OK)
1839 return retval;
1840 retval = target_write_memory(target,
1841 breakpoint->address & 0xFFFFFFFE,
1842 breakpoint->length, 1,
1843 code);
1844 if (retval != ERROR_OK)
1845 return retval;
1846 breakpoint->is_set = true;
1847 }
1848
1849 LOG_TARGET_DEBUG(target, "BPID: %" PRIu32 ", Type: %d, Address: " TARGET_ADDR_FMT " Length: %d (n=%u)",
1850 breakpoint->unique_id,
1851 (int)(breakpoint->type),
1852 breakpoint->address,
1853 breakpoint->length,
1854 (breakpoint->type == BKPT_SOFT) ? 0 : breakpoint->number);
1855
1856 return ERROR_OK;
1857 }
1858
1859 int cortex_m_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
1860 {
1861 int retval;
1862 struct cortex_m_common *cortex_m = target_to_cm(target);
1863 struct cortex_m_fp_comparator *comparator_list = cortex_m->fp_comparator_list;
1864
1865 if (!breakpoint->is_set) {
1866 LOG_TARGET_WARNING(target, "breakpoint not set");
1867 return ERROR_OK;
1868 }
1869
1870 LOG_TARGET_DEBUG(target, "BPID: %" PRIu32 ", Type: %d, Address: " TARGET_ADDR_FMT " Length: %d (n=%u)",
1871 breakpoint->unique_id,
1872 (int)(breakpoint->type),
1873 breakpoint->address,
1874 breakpoint->length,
1875 (breakpoint->type == BKPT_SOFT) ? 0 : breakpoint->number);
1876
1877 if (breakpoint->type == BKPT_HARD) {
1878 unsigned int fp_num = breakpoint->number;
1879 if (fp_num >= cortex_m->fp_num_code) {
1880 LOG_TARGET_DEBUG(target, "Invalid FP Comparator number in breakpoint");
1881 return ERROR_OK;
1882 }
1883 comparator_list[fp_num].used = false;
1884 comparator_list[fp_num].fpcr_value = 0;
1885 target_write_u32(target, comparator_list[fp_num].fpcr_address,
1886 comparator_list[fp_num].fpcr_value);
1887 } else {
1888 /* restore original instruction (kept in target endianness) */
1889 retval = target_write_memory(target, breakpoint->address & 0xFFFFFFFE,
1890 breakpoint->length, 1,
1891 breakpoint->orig_instr);
1892 if (retval != ERROR_OK)
1893 return retval;
1894 }
1895 breakpoint->is_set = false;
1896
1897 return ERROR_OK;
1898 }
1899
1900 int cortex_m_add_breakpoint(struct target *target, struct breakpoint *breakpoint)
1901 {
1902 if (breakpoint->length == 3) {
1903 LOG_TARGET_DEBUG(target, "Using a two byte breakpoint for 32bit Thumb-2 request");
1904 breakpoint->length = 2;
1905 }
1906
1907 if ((breakpoint->length != 2)) {
1908 LOG_TARGET_INFO(target, "only breakpoints of two bytes length supported");
1909 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1910 }
1911
1912 return cortex_m_set_breakpoint(target, breakpoint);
1913 }
1914
1915 int cortex_m_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1916 {
1917 if (!breakpoint->is_set)
1918 return ERROR_OK;
1919
1920 return cortex_m_unset_breakpoint(target, breakpoint);
1921 }
1922
/* Program a watchpoint into a free DWT comparator.
 * Two programming models are handled: the legacy (pre-ARMv8-M v2.x) DWT
 * with a separate MASK register, and the ARMv8-M v2.0/v2.1 DWT where the
 * match size is encoded in the FUNCTION register itself.
 * Returns ERROR_FAIL when no comparator is free. */
static int cortex_m_set_watchpoint(struct target *target, struct watchpoint *watchpoint)
{
	unsigned int dwt_num = 0;
	struct cortex_m_common *cortex_m = target_to_cm(target);

	/* REVISIT Don't fully trust these "not used" records ... users
	 * may set up breakpoints by hand, e.g. dual-address data value
	 * watchpoint using comparator #1; comparator #0 matching cycle
	 * count; send data trace info through ITM and TPIU; etc
	 */
	struct cortex_m_dwt_comparator *comparator;

	/* scan for the first comparator not marked as used */
	for (comparator = cortex_m->dwt_comparator_list;
		comparator->used && dwt_num < cortex_m->dwt_num_comp;
		comparator++, dwt_num++)
		continue;
	if (dwt_num >= cortex_m->dwt_num_comp) {
		LOG_TARGET_ERROR(target, "Can not find free DWT Comparator");
		return ERROR_FAIL;
	}
	comparator->used = true;
	watchpoint_set(watchpoint, dwt_num);

	/* program the address to match (DWT_COMPn, offset 0) */
	comparator->comp = watchpoint->address;
	target_write_u32(target, comparator->dwt_comparator_address + 0,
		comparator->comp);

	/* legacy DWT: size expressed as a power-of-two address mask in DWT_MASKn */
	if ((cortex_m->dwt_devarch & 0x1FFFFF) != DWT_DEVARCH_ARMV8M_V2_0
		&& (cortex_m->dwt_devarch & 0x1FFFFF) != DWT_DEVARCH_ARMV8M_V2_1) {
		uint32_t mask = 0, temp;

		/* watchpoint params were validated earlier */
		temp = watchpoint->length;
		while (temp) {
			temp >>= 1;
			mask++;
		}
		mask--;

		comparator->mask = mask;
		target_write_u32(target, comparator->dwt_comparator_address + 4,
			comparator->mask);

		/* FUNCTION values 5/6/7: match on read/write/either access */
		switch (watchpoint->rw) {
			case WPT_READ:
				comparator->function = 5;
				break;
			case WPT_WRITE:
				comparator->function = 6;
				break;
			case WPT_ACCESS:
				comparator->function = 7;
				break;
		}
	} else {
		/* ARMv8-M v2.x DWT: size is encoded in FUNCTION (bits 10+),
		 * plus an enable-style bit 4; match type uses values 4/5/6.
		 * NOTE(review): field names per ARMv8-M ARM — confirm against spec. */
		uint32_t data_size = watchpoint->length >> 1;
		comparator->mask = (watchpoint->length >> 1) | 1;

		switch (watchpoint->rw) {
			case WPT_ACCESS:
				comparator->function = 4;
				break;
			case WPT_WRITE:
				comparator->function = 5;
				break;
			case WPT_READ:
				comparator->function = 6;
				break;
		}
		comparator->function = comparator->function | (1 << 4) |
			(data_size << 10);
	}

	/* arm the comparator (DWT_FUNCTIONn, offset 8) */
	target_write_u32(target, comparator->dwt_comparator_address + 8,
		comparator->function);

	LOG_TARGET_DEBUG(target, "Watchpoint (ID %d) DWT%d 0x%08x 0x%x 0x%05x",
		watchpoint->unique_id, dwt_num,
		(unsigned) comparator->comp,
		(unsigned) comparator->mask,
		(unsigned) comparator->function);
	return ERROR_OK;
}
2006
2007 static int cortex_m_unset_watchpoint(struct target *target, struct watchpoint *watchpoint)
2008 {
2009 struct cortex_m_common *cortex_m = target_to_cm(target);
2010 struct cortex_m_dwt_comparator *comparator;
2011
2012 if (!watchpoint->is_set) {
2013 LOG_TARGET_WARNING(target, "watchpoint (wpid: %d) not set",
2014 watchpoint->unique_id);
2015 return ERROR_OK;
2016 }
2017
2018 unsigned int dwt_num = watchpoint->number;
2019
2020 LOG_TARGET_DEBUG(target, "Watchpoint (ID %d) DWT%u address: 0x%08x clear",
2021 watchpoint->unique_id, dwt_num,
2022 (unsigned) watchpoint->address);
2023
2024 if (dwt_num >= cortex_m->dwt_num_comp) {
2025 LOG_TARGET_DEBUG(target, "Invalid DWT Comparator number in watchpoint");
2026 return ERROR_OK;
2027 }
2028
2029 comparator = cortex_m->dwt_comparator_list + dwt_num;
2030 comparator->used = false;
2031 comparator->function = 0;
2032 target_write_u32(target, comparator->dwt_comparator_address + 8,
2033 comparator->function);
2034
2035 watchpoint->is_set = false;
2036
2037 return ERROR_OK;
2038 }
2039
2040 int cortex_m_add_watchpoint(struct target *target, struct watchpoint *watchpoint)
2041 {
2042 struct cortex_m_common *cortex_m = target_to_cm(target);
2043
2044 if (cortex_m->dwt_comp_available < 1) {
2045 LOG_TARGET_DEBUG(target, "no comparators?");
2046 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2047 }
2048
2049 /* REVISIT This DWT may well be able to watch for specific data
2050 * values. Requires comparator #1 to set DATAVMATCH and match
2051 * the data, and another comparator (DATAVADDR0) matching addr.
2052 *
2053 * NOTE: hardware doesn't support data value masking, so we'll need
2054 * to check that mask is zero
2055 */
2056 if (watchpoint->mask != WATCHPOINT_IGNORE_DATA_VALUE_MASK) {
2057 LOG_TARGET_DEBUG(target, "watchpoint value masks not supported");
2058 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2059 }
2060
2061 /* hardware allows address masks of up to 32K */
2062 unsigned mask;
2063
2064 for (mask = 0; mask < 16; mask++) {
2065 if ((1u << mask) == watchpoint->length)
2066 break;
2067 }
2068 if (mask == 16) {
2069 LOG_TARGET_DEBUG(target, "unsupported watchpoint length");
2070 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2071 }
2072 if (watchpoint->address & ((1 << mask) - 1)) {
2073 LOG_TARGET_DEBUG(target, "watchpoint address is unaligned");
2074 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2075 }
2076
2077 cortex_m->dwt_comp_available--;
2078 LOG_TARGET_DEBUG(target, "dwt_comp_available: %d", cortex_m->dwt_comp_available);
2079
2080 return ERROR_OK;
2081 }
2082
2083 int cortex_m_remove_watchpoint(struct target *target, struct watchpoint *watchpoint)
2084 {
2085 struct cortex_m_common *cortex_m = target_to_cm(target);
2086
2087 /* REVISIT why check? DWT can be updated with core running ... */
2088 if (target->state != TARGET_HALTED) {
2089 LOG_TARGET_ERROR(target, "not halted");
2090 return ERROR_TARGET_NOT_HALTED;
2091 }
2092
2093 if (watchpoint->is_set)
2094 cortex_m_unset_watchpoint(target, watchpoint);
2095
2096 cortex_m->dwt_comp_available++;
2097 LOG_TARGET_DEBUG(target, "dwt_comp_available: %d", cortex_m->dwt_comp_available);
2098
2099 return ERROR_OK;
2100 }
2101
2102 static int cortex_m_hit_watchpoint(struct target *target, struct watchpoint **hit_watchpoint)
2103 {
2104 if (target->debug_reason != DBG_REASON_WATCHPOINT)
2105 return ERROR_FAIL;
2106
2107 struct cortex_m_common *cortex_m = target_to_cm(target);
2108
2109 for (struct watchpoint *wp = target->watchpoints; wp; wp = wp->next) {
2110 if (!wp->is_set)
2111 continue;
2112
2113 unsigned int dwt_num = wp->number;
2114 struct cortex_m_dwt_comparator *comparator = cortex_m->dwt_comparator_list + dwt_num;
2115
2116 uint32_t dwt_function;
2117 int retval = target_read_u32(target, comparator->dwt_comparator_address + 8, &dwt_function);
2118 if (retval != ERROR_OK)
2119 return ERROR_FAIL;
2120
2121 /* check the MATCHED bit */
2122 if (dwt_function & BIT(24)) {
2123 *hit_watchpoint = wp;
2124 return ERROR_OK;
2125 }
2126 }
2127
2128 return ERROR_FAIL;
2129 }
2130
2131 void cortex_m_enable_watchpoints(struct target *target)
2132 {
2133 struct watchpoint *watchpoint = target->watchpoints;
2134
2135 /* set any pending watchpoints */
2136 while (watchpoint) {
2137 if (!watchpoint->is_set)
2138 cortex_m_set_watchpoint(target, watchpoint);
2139 watchpoint = watchpoint->next;
2140 }
2141 }
2142
2143 static int cortex_m_read_memory(struct target *target, target_addr_t address,
2144 uint32_t size, uint32_t count, uint8_t *buffer)
2145 {
2146 struct armv7m_common *armv7m = target_to_armv7m(target);
2147
2148 if (armv7m->arm.arch == ARM_ARCH_V6M) {
2149 /* armv6m does not handle unaligned memory access */
2150 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
2151 return ERROR_TARGET_UNALIGNED_ACCESS;
2152 }
2153
2154 return mem_ap_read_buf(armv7m->debug_ap, buffer, size, count, address);
2155 }
2156
2157 static int cortex_m_write_memory(struct target *target, target_addr_t address,
2158 uint32_t size, uint32_t count, const uint8_t *buffer)
2159 {
2160 struct armv7m_common *armv7m = target_to_armv7m(target);
2161
2162 if (armv7m->arm.arch == ARM_ARCH_V6M) {
2163 /* armv6m does not handle unaligned memory access */
2164 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
2165 return ERROR_TARGET_UNALIGNED_ACCESS;
2166 }
2167
2168 return mem_ap_write_buf(armv7m->debug_ap, buffer, size, count, address);
2169 }
2170
/* Target init callback: build the ARMv7-M register cache and set up
 * semihosting support for this target. */
static int cortex_m_init_target(struct command_context *cmd_ctx,
	struct target *target)
{
	armv7m_build_reg_cache(target);
	arm_semihosting_init(target);
	return ERROR_OK;
}
2178
2179 void cortex_m_deinit_target(struct target *target)
2180 {
2181 struct cortex_m_common *cortex_m = target_to_cm(target);
2182 struct armv7m_common *armv7m = target_to_armv7m(target);
2183
2184 if (!armv7m->is_hla_target && armv7m->debug_ap)
2185 dap_put_ap(armv7m->debug_ap);
2186
2187 free(cortex_m->fp_comparator_list);
2188
2189 cortex_m_dwt_free(target);
2190 armv7m_free_reg_cache(target);
2191
2192 free(target->private_config);
2193 free(cortex_m);
2194 }
2195
2196 int cortex_m_profiling(struct target *target, uint32_t *samples,
2197 uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
2198 {
2199 struct timeval timeout, now;
2200 struct armv7m_common *armv7m = target_to_armv7m(target);
2201 uint32_t reg_value;
2202 int retval;
2203
2204 retval = target_read_u32(target, DWT_PCSR, &reg_value);
2205 if (retval != ERROR_OK) {
2206 LOG_TARGET_ERROR(target, "Error while reading PCSR");
2207 return retval;
2208 }
2209 if (reg_value == 0) {
2210 LOG_TARGET_INFO(target, "PCSR sampling not supported on this processor.");
2211 return target_profiling_default(target, samples, max_num_samples, num_samples, seconds);
2212 }
2213
2214 gettimeofday(&timeout, NULL);
2215 timeval_add_time(&timeout, seconds, 0);
2216
2217 LOG_TARGET_INFO(target, "Starting Cortex-M profiling. Sampling DWT_PCSR as fast as we can...");
2218
2219 /* Make sure the target is running */
2220 target_poll(target);
2221 if (target->state == TARGET_HALTED)
2222 retval = target_resume(target, 1, 0, 0, 0);
2223
2224 if (retval != ERROR_OK) {
2225 LOG_TARGET_ERROR(target, "Error while resuming target");
2226 return retval;
2227 }
2228
2229 uint32_t sample_count = 0;
2230
2231 for (;;) {
2232 if (armv7m && armv7m->debug_ap) {
2233 uint32_t read_count = max_num_samples - sample_count;
2234 if (read_count > 1024)
2235 read_count = 1024;
2236
2237 retval = mem_ap_read_buf_noincr(armv7m->debug_ap,
2238 (void *)&samples[sample_count],
2239 4, read_count, DWT_PCSR);
2240 sample_count += read_count;
2241 } else {
2242 target_read_u32(target, DWT_PCSR, &samples[sample_count++]);
2243 }
2244
2245 if (retval != ERROR_OK) {
2246 LOG_TARGET_ERROR(target, "Error while reading PCSR");
2247 return retval;
2248 }
2249
2250
2251 gettimeofday(&now, NULL);
2252 if (sample_count >= max_num_samples || timeval_compare(&now, &timeout) > 0) {
2253 LOG_TARGET_INFO(target, "Profiling completed. %" PRIu32 " samples.", sample_count);
2254 break;
2255 }
2256 }
2257
2258 *num_samples = sample_count;
2259 return retval;
2260 }
2261
2262
2263 /* REVISIT cache valid/dirty bits are unmaintained. We could set "valid"
2264 * on r/w if the core is not running, and clear on resume or reset ... or
2265 * at least, in a post_restore_context() method.
2266 */
2267
/* Per-register backing state for a DWT register exposed through the
 * OpenOCD register cache. */
struct dwt_reg_state {
	struct target *target;	/* target whose DWT this register belongs to */
	uint32_t addr;		/* memory-mapped address of the DWT register */
	uint8_t value[4];	/* scratch/cache */
};
2273
2274 static int cortex_m_dwt_get_reg(struct reg *reg)
2275 {
2276 struct dwt_reg_state *state = reg->arch_info;
2277
2278 uint32_t tmp;
2279 int retval = target_read_u32(state->target, state->addr, &tmp);
2280 if (retval != ERROR_OK)
2281 return retval;
2282
2283 buf_set_u32(state->value, 0, 32, tmp);
2284 return ERROR_OK;
2285 }
2286
2287 static int cortex_m_dwt_set_reg(struct reg *reg, uint8_t *buf)
2288 {
2289 struct dwt_reg_state *state = reg->arch_info;
2290
2291 return target_write_u32(state->target, state->addr,
2292 buf_get_u32(buf, 0, reg->size));
2293 }
2294
/* Static description of one DWT register: its address, the name it gets
 * in the register cache, and its width in bits. */
struct dwt_reg {
	uint32_t addr;
	const char *name;
	unsigned size;
};
2300
/* DWT registers present independently of the number of comparators. */
static const struct dwt_reg dwt_base_regs[] = {
	{ DWT_CTRL, "dwt_ctrl", 32, },
	/* NOTE that Erratum 532314 (fixed r2p0) affects CYCCNT: it wrongly
	 * increments while the core is asleep.
	 */
	{ DWT_CYCCNT, "dwt_cyccnt", 32, },
	/* plus some 8 bit counters, useful for profiling with TPIU */
};
2309
/* COMP/MASK/FUNCTION register triples for the architectural maximum of 16
 * DWT comparators; cortex_m_dwt_setup() only registers the first
 * dwt_num_comp triples found on the actual device. */
static const struct dwt_reg dwt_comp[] = {
#define DWT_COMPARATOR(i) \
	{ DWT_COMP0 + 0x10 * (i), "dwt_" #i "_comp", 32, }, \
	{ DWT_MASK0 + 0x10 * (i), "dwt_" #i "_mask", 4, }, \
	{ DWT_FUNCTION0 + 0x10 * (i), "dwt_" #i "_function", 32, }
	DWT_COMPARATOR(0),
	DWT_COMPARATOR(1),
	DWT_COMPARATOR(2),
	DWT_COMPARATOR(3),
	DWT_COMPARATOR(4),
	DWT_COMPARATOR(5),
	DWT_COMPARATOR(6),
	DWT_COMPARATOR(7),
	DWT_COMPARATOR(8),
	DWT_COMPARATOR(9),
	DWT_COMPARATOR(10),
	DWT_COMPARATOR(11),
	DWT_COMPARATOR(12),
	DWT_COMPARATOR(13),
	DWT_COMPARATOR(14),
	DWT_COMPARATOR(15),
#undef DWT_COMPARATOR
};
2333
/* Register-cache accessor backend shared by all DWT registers. */
static const struct reg_arch_type dwt_reg_type = {
	.get = cortex_m_dwt_get_reg,
	.set = cortex_m_dwt_set_reg,
};
2338
2339 static void cortex_m_dwt_addreg(struct target *t, struct reg *r, const struct dwt_reg *d)
2340 {
2341 struct dwt_reg_state *state;
2342
2343 state = calloc(1, sizeof(*state));
2344 if (!state)
2345 return;
2346 state->addr = d->addr;
2347 state->target = t;
2348
2349 r->name = d->name;
2350 r->size = d->size;
2351 r->value = state->value;
2352 r->arch_info = state;
2353 r->type = &dwt_reg_type;
2354 }
2355
/* Probe the DWT unit, allocate the comparator bookkeeping list, and build
 * a register cache exposing the DWT registers. Any comparator still armed
 * on the target from a previous session is cleared.
 * On allocation failure all partial state is rolled back via the nested
 * goto labels and dwt_num_comp is reset to 0. */
static void cortex_m_dwt_setup(struct cortex_m_common *cm, struct target *target)
{
	uint32_t dwtcr;
	struct reg_cache *cache;
	struct cortex_m_dwt_comparator *comparator;
	int reg;

	target_read_u32(target, DWT_CTRL, &dwtcr);
	LOG_TARGET_DEBUG(target, "DWT_CTRL: 0x%" PRIx32, dwtcr);
	if (!dwtcr) {
		/* DWT_CTRL reading as zero: no DWT on this device */
		LOG_TARGET_DEBUG(target, "no DWT");
		return;
	}

	target_read_u32(target, DWT_DEVARCH, &cm->dwt_devarch);
	LOG_TARGET_DEBUG(target, "DWT_DEVARCH: 0x%" PRIx32, cm->dwt_devarch);

	/* number of comparators, DWT_CTRL[31:28] */
	cm->dwt_num_comp = (dwtcr >> 28) & 0xF;
	cm->dwt_comp_available = cm->dwt_num_comp;
	cm->dwt_comparator_list = calloc(cm->dwt_num_comp,
			sizeof(struct cortex_m_dwt_comparator));
	if (!cm->dwt_comparator_list) {
fail0:
		cm->dwt_num_comp = 0;
		LOG_TARGET_ERROR(target, "out of mem");
		return;
	}

	cache = calloc(1, sizeof(*cache));
	if (!cache) {
fail1:
		free(cm->dwt_comparator_list);
		goto fail0;
	}
	cache->name = "Cortex-M DWT registers";
	/* DWT_CTRL + DWT_CYCCNT, plus comp/mask/function per comparator */
	cache->num_regs = 2 + cm->dwt_num_comp * 3;
	cache->reg_list = calloc(cache->num_regs, sizeof(*cache->reg_list));
	if (!cache->reg_list) {
		free(cache);
		goto fail1;
	}

	for (reg = 0; reg < 2; reg++)
		cortex_m_dwt_addreg(target, cache->reg_list + reg,
			dwt_base_regs + reg);

	comparator = cm->dwt_comparator_list;
	for (unsigned int i = 0; i < cm->dwt_num_comp; i++, comparator++) {
		int j;

		comparator->dwt_comparator_address = DWT_COMP0 + 0x10 * i;
		for (j = 0; j < 3; j++, reg++)
			cortex_m_dwt_addreg(target, cache->reg_list + reg,
				dwt_comp + 3 * i + j);

		/* make sure we clear any watchpoints enabled on the target */
		target_write_u32(target, comparator->dwt_comparator_address + 8, 0);
	}

	/* append the new cache to the target's register cache chain */
	*register_get_last_cache_p(&target->reg_cache) = cache;
	cm->dwt_cache = cache;

	LOG_TARGET_DEBUG(target, "DWT dwtcr 0x%" PRIx32 ", comp %d, watch%s",
		dwtcr, cm->dwt_num_comp,
		(dwtcr & (0xf << 24)) ? " only" : "/trigger");

	/* REVISIT: if num_comp > 1, check whether comparator #1 can
	 * implement single-address data value watchpoints ... so we
	 * won't need to check it later, when asked to set one up.
	 */
}
2427
2428 static void cortex_m_dwt_free(struct target *target)
2429 {
2430 struct cortex_m_common *cm = target_to_cm(target);
2431 struct reg_cache *cache = cm->dwt_cache;
2432
2433 free(cm->dwt_comparator_list);
2434 cm->dwt_comparator_list = NULL;
2435 cm->dwt_num_comp = 0;
2436
2437 if (cache) {
2438 register_unlink_cache(&target->reg_cache, cache);
2439
2440 if (cache->reg_list) {
2441 for (size_t i = 0; i < cache->num_regs; i++)
2442 free(cache->reg_list[i].arch_info);
2443 free(cache->reg_list);
2444 }
2445 free(cache);
2446 }
2447 cm->dwt_cache = NULL;
2448 }
2449
2450 static bool cortex_m_has_tz(struct target *target)
2451 {
2452 struct armv7m_common *armv7m = target_to_armv7m(target);
2453 uint32_t dauthstatus;
2454
2455 if (armv7m->arm.arch != ARM_ARCH_V8M)
2456 return false;
2457
2458 int retval = target_read_u32(target, DAUTHSTATUS, &dauthstatus);
2459 if (retval != ERROR_OK) {
2460 LOG_WARNING("Error reading DAUTHSTATUS register");
2461 return false;
2462 }
2463 return (dauthstatus & DAUTHSTATUS_SID_MASK) != 0;
2464 }
2465
2466
/* Media and VFP Feature Registers, read during examine to detect the
 * FPU (single/double precision) and MVE configuration of the core. */
#define MVFR0 0xE000EF40
#define MVFR0_SP_MASK 0x000000F0
#define MVFR0_SP 0x00000020
#define MVFR0_DP_MASK 0x00000F00
#define MVFR0_DP 0x00000200

#define MVFR1 0xE000EF44
#define MVFR1_MVE_MASK 0x00000F00
#define MVFR1_MVE_I 0x00000100
#define MVFR1_MVE_F 0x00000200
2477
2478 static int cortex_m_find_mem_ap(struct adiv5_dap *swjdp,
2479 struct adiv5_ap **debug_ap)
2480 {
2481 if (dap_find_get_ap(swjdp, AP_TYPE_AHB3_AP, debug_ap) == ERROR_OK)
2482 return ERROR_OK;
2483
2484 return dap_find_get_ap(swjdp, AP_TYPE_AHB5_AP, debug_ap);
2485 }
2486
/* Examine handler: acquire and initialize the MEM-AP (non-HLA targets),
 * identify the core from CPUID, detect FPU/MVE support, enable debug via
 * DHCSR, configure trace, and set up the FPB and DWT units.
 * The identification/setup part runs only once (guarded by
 * target_was_examined()); the AP setup part re-runs on re-examine. */
int cortex_m_examine(struct target *target)
{
	int retval;
	uint32_t cpuid, fpcr;
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct adiv5_dap *swjdp = cortex_m->armv7m.arm.dap;
	struct armv7m_common *armv7m = target_to_armv7m(target);

	/* hla_target shares the examine handler but does not support
	 * all its calls */
	if (!armv7m->is_hla_target) {
		if (!armv7m->debug_ap) {
			if (cortex_m->apsel == DP_APSEL_INVALID) {
				/* Search for the MEM-AP */
				retval = cortex_m_find_mem_ap(swjdp, &armv7m->debug_ap);
				if (retval != ERROR_OK) {
					LOG_TARGET_ERROR(target, "Could not find MEM-AP to control the core");
					return retval;
				}
			} else {
				/* user explicitly selected an AP number */
				armv7m->debug_ap = dap_get_ap(swjdp, cortex_m->apsel);
				if (!armv7m->debug_ap) {
					LOG_ERROR("Cannot get AP");
					return ERROR_FAIL;
				}
			}
		}

		armv7m->debug_ap->memaccess_tck = 8;

		retval = mem_ap_init(armv7m->debug_ap);
		if (retval != ERROR_OK)
			return retval;
	}

	if (!target_was_examined(target)) {
		target_set_examined(target);

		/* Read from Device Identification Registers */
		retval = target_read_u32(target, CPUID, &cpuid);
		if (retval != ERROR_OK)
			return retval;

		/* Inspect implementor/part to look for recognized cores */
		unsigned int impl_part = cpuid & (ARM_CPUID_IMPLEMENTOR_MASK | ARM_CPUID_PARTNO_MASK);

		for (unsigned int n = 0; n < ARRAY_SIZE(cortex_m_parts); n++) {
			if (impl_part == cortex_m_parts[n].impl_part) {
				cortex_m->core_info = &cortex_m_parts[n];
				break;
			}
		}

		if (!cortex_m->core_info) {
			LOG_TARGET_ERROR(target, "Cortex-M CPUID: 0x%x is unrecognized", cpuid);
			return ERROR_FAIL;
		}

		armv7m->arm.arch = cortex_m->core_info->arch;

		LOG_TARGET_INFO(target, "%s r%" PRId8 "p%" PRId8 " processor detected",
				cortex_m->core_info->name,
				(uint8_t)((cpuid >> 20) & 0xf),
				(uint8_t)((cpuid >> 0) & 0xf));

		/* Cortex-M7 r0p0/r0p1: flag the single-step-enters-pending-
		 * exception silicon bug so stepping code can mask interrupts */
		cortex_m->maskints_erratum = false;
		if (impl_part == CORTEX_M7_PARTNO) {
			uint8_t rev, patch;
			rev = (cpuid >> 20) & 0xf;
			patch = (cpuid >> 0) & 0xf;
			if ((rev == 0) && (patch < 2)) {
				LOG_TARGET_WARNING(target, "Silicon bug: single stepping may enter pending exception handler!");
				cortex_m->maskints_erratum = true;
			}
		}
		LOG_TARGET_DEBUG(target, "cpuid: 0x%8.8" PRIx32 "", cpuid);

		/* Detect the FPU/MVE configuration from MVFR0/MVFR1 */
		if (cortex_m->core_info->flags & CORTEX_M_F_HAS_FPV4) {
			uint32_t mvfr0;
			target_read_u32(target, MVFR0, &mvfr0);

			if ((mvfr0 & MVFR0_SP_MASK) == MVFR0_SP) {
				LOG_TARGET_DEBUG(target, "%s floating point feature FPv4_SP found",
						cortex_m->core_info->name);
				armv7m->fp_feature = FPV4_SP;
			}
		} else if (cortex_m->core_info->flags & CORTEX_M_F_HAS_FPV5) {
			uint32_t mvfr0, mvfr1;
			target_read_u32(target, MVFR0, &mvfr0);
			target_read_u32(target, MVFR1, &mvfr1);

			if ((mvfr0 & MVFR0_DP_MASK) == MVFR0_DP) {
				if ((mvfr1 & MVFR1_MVE_MASK) == MVFR1_MVE_F) {
					LOG_TARGET_DEBUG(target, "%s floating point feature FPv5_DP + MVE-F found",
							cortex_m->core_info->name);
					armv7m->fp_feature = FPV5_MVE_F;
				} else {
					LOG_TARGET_DEBUG(target, "%s floating point feature FPv5_DP found",
							cortex_m->core_info->name);
					armv7m->fp_feature = FPV5_DP;
				}
			} else if ((mvfr0 & MVFR0_SP_MASK) == MVFR0_SP) {
				LOG_TARGET_DEBUG(target, "%s floating point feature FPv5_SP found",
						cortex_m->core_info->name);
				armv7m->fp_feature = FPV5_SP;
			} else if ((mvfr1 & MVFR1_MVE_MASK) == MVFR1_MVE_I) {
				LOG_TARGET_DEBUG(target, "%s floating point feature MVE-I found",
						cortex_m->core_info->name);
				armv7m->fp_feature = FPV5_MVE_I;
			}
		}

		/* VECTRESET is supported only on ARMv7-M cores */
		cortex_m->vectreset_supported = armv7m->arm.arch == ARM_ARCH_V7M;

		/* Check for FPU, otherwise mark FPU register as non-existent */
		if (armv7m->fp_feature == FP_NONE)
			for (size_t idx = ARMV7M_FPU_FIRST_REG; idx <= ARMV7M_FPU_LAST_REG; idx++)
				armv7m->arm.core_cache->reg_list[idx].exist = false;

		/* hide the security-extension registers when TrustZone is absent */
		if (!cortex_m_has_tz(target))
			for (size_t idx = ARMV8M_FIRST_REG; idx <= ARMV8M_LAST_REG; idx++)
				armv7m->arm.core_cache->reg_list[idx].exist = false;

		if (!armv7m->is_hla_target) {
			if (cortex_m->core_info->flags & CORTEX_M_F_TAR_AUTOINCR_BLOCK_4K)
				/* Cortex-M3/M4 have 4096 bytes autoincrement range,
				 * s. ARM IHI 0031C: MEM-AP 7.2.2 */
				armv7m->debug_ap->tar_autoincr_block = (1 << 12);
		}

		retval = target_read_u32(target, DCB_DHCSR, &cortex_m->dcb_dhcsr);
		if (retval != ERROR_OK)
			return retval;

		/* Don't cumulate sticky S_RESET_ST at the very first read of DHCSR
		 * as S_RESET_ST may indicate a reset that happened long time ago
		 * (most probably the power-on reset before OpenOCD was started).
		 * As we are just initializing the debug system we do not need
		 * to call cortex_m_endreset_event() in the following poll.
		 */
		if (!cortex_m->dcb_dhcsr_sticky_is_recent) {
			cortex_m->dcb_dhcsr_sticky_is_recent = true;
			if (cortex_m->dcb_dhcsr & S_RESET_ST) {
				LOG_TARGET_DEBUG(target, "reset happened some time ago, ignore");
				cortex_m->dcb_dhcsr &= ~S_RESET_ST;
			}
		}
		cortex_m_cumulate_dhcsr_sticky(cortex_m, cortex_m->dcb_dhcsr);

		if (!(cortex_m->dcb_dhcsr & C_DEBUGEN)) {
			/* Enable debug requests */
			uint32_t dhcsr = (cortex_m->dcb_dhcsr | C_DEBUGEN) & ~(C_HALT | C_STEP | C_MASKINTS);

			/* DHCSR writes require the DBGKEY in the upper half */
			retval = target_write_u32(target, DCB_DHCSR, DBGKEY | (dhcsr & 0x0000FFFFUL));
			if (retval != ERROR_OK)
				return retval;
			cortex_m->dcb_dhcsr = dhcsr;
		}

		/* Configure trace modules */
		retval = target_write_u32(target, DCB_DEMCR, TRCENA | armv7m->demcr);
		if (retval != ERROR_OK)
			return retval;

		if (armv7m->trace_config.itm_deferred_config)
			armv7m_trace_itm_config(target);

		/* NOTE: FPB and DWT are both optional. */

		/* Setup FPB */
		target_read_u32(target, FP_CTRL, &fpcr);
		/* bits [14:12] and [7:4] */
		cortex_m->fp_num_code = ((fpcr >> 8) & 0x70) | ((fpcr >> 4) & 0xF);
		cortex_m->fp_num_lit = (fpcr >> 8) & 0xF;
		/* Detect flash patch revision, see RM DDI 0403E.b page C1-817.
		   Revision is zero base, fp_rev == 1 means Rev.2 ! */
		cortex_m->fp_rev = (fpcr >> 28) & 0xf;
		free(cortex_m->fp_comparator_list);
		cortex_m->fp_comparator_list = calloc(
				cortex_m->fp_num_code + cortex_m->fp_num_lit,
				sizeof(struct cortex_m_fp_comparator));
		cortex_m->fpb_enabled = fpcr & 1;
		for (unsigned int i = 0; i < cortex_m->fp_num_code + cortex_m->fp_num_lit; i++) {
			cortex_m->fp_comparator_list[i].type =
				(i < cortex_m->fp_num_code) ? FPCR_CODE : FPCR_LITERAL;
			cortex_m->fp_comparator_list[i].fpcr_address = FP_COMP0 + 4 * i;

			/* make sure we clear any breakpoints enabled on the target */
			target_write_u32(target, cortex_m->fp_comparator_list[i].fpcr_address, 0);
		}
		LOG_TARGET_DEBUG(target, "FPB fpcr 0x%" PRIx32 ", numcode %i, numlit %i",
			fpcr,
			cortex_m->fp_num_code,
			cortex_m->fp_num_lit);

		/* Setup DWT */
		cortex_m_dwt_free(target);
		cortex_m_dwt_setup(cortex_m, target);

		/* These hardware breakpoints only work for code in flash! */
		LOG_TARGET_INFO(target, "target has %d breakpoints, %d watchpoints",
			cortex_m->fp_num_code,
			cortex_m->dwt_num_comp);
	}

	return ERROR_OK;
}
2695
2696 static int cortex_m_dcc_read(struct target *target, uint8_t *value, uint8_t *ctrl)
2697 {
2698 struct armv7m_common *armv7m = target_to_armv7m(target);
2699 uint16_t dcrdr;
2700 uint8_t buf[2];
2701 int retval;
2702
2703 retval = mem_ap_read_buf_noincr(armv7m->debug_ap, buf, 2, 1, DCB_DCRDR);
2704 if (retval != ERROR_OK)
2705 return retval;
2706
2707 dcrdr = target_buffer_get_u16(target, buf);
2708 *ctrl = (uint8_t)dcrdr;
2709 *value = (uint8_t)(dcrdr >> 8);
2710
2711 LOG_TARGET_DEBUG(target, "data 0x%x ctrl 0x%x", *value, *ctrl);
2712
2713 /* write ack back to software dcc register
2714 * signify we have read data */
2715 if (dcrdr & (1 << 0)) {
2716 target_buffer_set_u16(target, buf, 0);
2717 retval = mem_ap_write_buf_noincr(armv7m->debug_ap, buf, 2, 1, DCB_DCRDR);
2718 if (retval != ERROR_OK)
2719 return retval;
2720 }
2721
2722 return ERROR_OK;
2723 }
2724
2725 static int cortex_m_target_request_data(struct target *target,
2726 uint32_t size, uint8_t *buffer)
2727 {
2728 uint8_t data;
2729 uint8_t ctrl;
2730 uint32_t i;
2731
2732 for (i = 0; i < (size * 4); i++) {
2733 int retval = cortex_m_dcc_read(target, &data, &ctrl);
2734 if (retval != ERROR_OK)
2735 return retval;
2736 buffer[i] = data;
2737 }
2738
2739 return ERROR_OK;
2740 }
2741
2742 static int cortex_m_handle_target_request(void *priv)
2743 {
2744 struct target *target = priv;
2745 if (!target_was_examined(target))
2746 return ERROR_OK;
2747
2748 if (!target->dbg_msg_enabled)
2749 return ERROR_OK;
2750
2751 if (target->state == TARGET_RUNNING) {
2752 uint8_t data;
2753 uint8_t ctrl;
2754 int retval;
2755
2756 retval = cortex_m_dcc_read(target, &data, &ctrl);
2757 if (retval != ERROR_OK)
2758 return retval;
2759
2760 /* check if we have data */
2761 if (ctrl & (1 << 0)) {
2762 uint32_t request;
2763
2764 /* we assume target is quick enough */
2765 request = data;
2766 for (int i = 1; i <= 3; i++) {
2767 retval = cortex_m_dcc_read(target, &data, &ctrl);
2768 if (retval != ERROR_OK)
2769 return retval;
2770 request |= ((uint32_t)data << (i * 8));
2771 }
2772 target_request(target, request);
2773 }
2774 }
2775
2776 return ERROR_OK;
2777 }
2778
2779 static int cortex_m_init_arch_info(struct target *target,
2780 struct cortex_m_common *cortex_m, struct adiv5_dap *dap)
2781 {
2782 struct armv7m_common *armv7m = &cortex_m->armv7m;
2783
2784 armv7m_init_arch_info(target, armv7m);
2785
2786 /* default reset mode is to use srst if fitted
2787 * if not it will use CORTEX_M_RESET_VECTRESET */
2788 cortex_m->soft_reset_config = CORTEX_M_RESET_VECTRESET;
2789
2790 armv7m->arm.dap = dap;
2791
2792 /* register arch-specific functions */
2793 armv7m->examine_debug_reason = cortex_m_examine_debug_reason;
2794
2795 armv7m->post_debug_entry = NULL;
2796
2797 armv7m->pre_restore_context = NULL;
2798
2799 armv7m->load_core_reg_u32 = cortex_m_load_core_reg_u32;
2800 armv7m->store_core_reg_u32 = cortex_m_store_core_reg_u32;
2801
2802 target_register_timer_callback(cortex_m_handle_target_request, 1,
2803 TARGET_TIMER_TYPE_PERIODIC, target);
2804
2805 return ERROR_OK;
2806 }
2807
2808 static int cortex_m_target_create(struct target *target, Jim_Interp *interp)
2809 {
2810 struct adiv5_private_config *pc;
2811
2812 pc = (struct adiv5_private_config *)target->private_config;
2813 if (adiv5_verify_config(pc) != ERROR_OK)
2814 return ERROR_FAIL;
2815
2816 struct cortex_m_common *cortex_m = calloc(1, sizeof(struct cortex_m_common));
2817 if (!cortex_m) {
2818 LOG_TARGET_ERROR(target, "No memory creating target");
2819 return ERROR_FAIL;
2820 }
2821
2822 cortex_m->common_magic = CORTEX_M_COMMON_MAGIC;
2823 cortex_m->apsel = pc->ap_num;
2824
2825 cortex_m_init_arch_info(target, cortex_m, pc->dap);
2826
2827 return ERROR_OK;
2828 }
2829
2830 /*--------------------------------------------------------------------------*/
2831
2832 static int cortex_m_verify_pointer(struct command_invocation *cmd,
2833 struct cortex_m_common *cm)
2834 {
2835 if (!is_cortex_m_with_dap_access(cm)) {
2836 command_print(cmd, "target is not a Cortex-M");
2837 return ERROR_TARGET_INVALID;
2838 }
2839 return ERROR_OK;
2840 }
2841
2842 /*
2843 * Only stuff below this line should need to verify that its target
2844 * is a Cortex-M with available DAP access (not a HLA adapter).
2845 */
2846
/* 'cortex_m vector_catch' command: select which fault/reset classes trap
 * into debug state via the DEMCR VC_* bits. With arguments, the full set
 * is replaced ("all", "none", or a list of names); without arguments the
 * state read back from the device is printed. */
COMMAND_HANDLER(handle_cortex_m_vector_catch_command)
{
	struct target *target = get_current_target(CMD_CTX);
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = &cortex_m->armv7m;
	uint32_t demcr = 0;
	int retval;

	/* command-name to DEMCR vector-catch bit mapping */
	static const struct {
		char name[10];
		unsigned mask;
	} vec_ids[] = {
		{ "hard_err", VC_HARDERR, },
		{ "int_err", VC_INTERR, },
		{ "bus_err", VC_BUSERR, },
		{ "state_err", VC_STATERR, },
		{ "chk_err", VC_CHKERR, },
		{ "nocp_err", VC_NOCPERR, },
		{ "mm_err", VC_MMERR, },
		{ "reset", VC_CORERESET, },
	};

	retval = cortex_m_verify_pointer(CMD, cortex_m);
	if (retval != ERROR_OK)
		return retval;

	if (!target_was_examined(target)) {
		LOG_TARGET_ERROR(target, "Target not examined yet");
		return ERROR_FAIL;
	}

	retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DEMCR, &demcr);
	if (retval != ERROR_OK)
		return retval;

	if (CMD_ARGC > 0) {
		unsigned catch = 0;

		/* "all"/"none" shortcuts skip the per-name lookup */
		if (CMD_ARGC == 1) {
			if (strcmp(CMD_ARGV[0], "all") == 0) {
				catch = VC_HARDERR | VC_INTERR | VC_BUSERR
					| VC_STATERR | VC_CHKERR | VC_NOCPERR
					| VC_MMERR | VC_CORERESET;
				goto write;
			} else if (strcmp(CMD_ARGV[0], "none") == 0)
				goto write;
		}
		/* translate each remaining argument into its VC_* bit */
		while (CMD_ARGC-- > 0) {
			unsigned i;
			for (i = 0; i < ARRAY_SIZE(vec_ids); i++) {
				if (strcmp(CMD_ARGV[CMD_ARGC], vec_ids[i].name) != 0)
					continue;
				catch |= vec_ids[i].mask;
				break;
			}
			if (i == ARRAY_SIZE(vec_ids)) {
				LOG_TARGET_ERROR(target, "No Cortex-M vector '%s'", CMD_ARGV[CMD_ARGC]);
				return ERROR_COMMAND_SYNTAX_ERROR;
			}
		}
write:
		/* For now, armv7m->demcr only stores vector catch flags. */
		armv7m->demcr = catch;

		demcr &= ~0xffff;
		demcr |= catch;

		/* write, but don't assume it stuck (why not??) */
		retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DEMCR, demcr);
		if (retval != ERROR_OK)
			return retval;
		retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DEMCR, &demcr);
		if (retval != ERROR_OK)
			return retval;

		/* FIXME be sure to clear DEMCR on clean server shutdown.
		 * Otherwise the vector catch hardware could fire when there's
		 * no debugger hooked up, causing much confusion...
		 */
	}

	/* print the state as read back from the device */
	for (unsigned i = 0; i < ARRAY_SIZE(vec_ids); i++) {
		command_print(CMD, "%9s: %s", vec_ids[i].name,
			(demcr & vec_ids[i].mask) ? "catch" : "ignore");
	}

	return ERROR_OK;
}
2935
2936 COMMAND_HANDLER(handle_cortex_m_mask_interrupts_command)
2937 {
2938 struct target *target = get_current_target(CMD_CTX);
2939 struct cortex_m_common *cortex_m = target_to_cm(target);
2940 int retval;
2941
2942 static const struct nvp nvp_maskisr_modes[] = {
2943 { .name = "auto", .value = CORTEX_M_ISRMASK_AUTO },
2944 { .name = "off", .value = CORTEX_M_ISRMASK_OFF },
2945 { .name = "on", .value = CORTEX_M_ISRMASK_ON },
2946 { .name = "steponly", .value = CORTEX_M_ISRMASK_STEPONLY },
2947 { .name = NULL, .value = -1 },
2948 };
2949 const struct nvp *n;
2950
2951
2952 retval = cortex_m_verify_pointer(CMD, cortex_m);
2953 if (retval != ERROR_OK)
2954 return retval;
2955
2956 if (target->state != TARGET_HALTED) {
2957 command_print(CMD, "Error: target must be stopped for \"%s\" command", CMD_NAME);
2958 return ERROR_TARGET_NOT_HALTED;
2959 }
2960
2961 if (CMD_ARGC > 0) {
2962 n = nvp_name2value(nvp_maskisr_modes, CMD_ARGV[0]);
2963 if (!n->name)
2964 return ERROR_COMMAND_SYNTAX_ERROR;
2965 cortex_m->isrmasking_mode = n->value;
2966 cortex_m_set_maskints_for_halt(target);
2967 }
2968
2969 n = nvp_value2name(nvp_maskisr_modes, cortex_m->isrmasking_mode);
2970 command_print(CMD, "cortex_m interrupt mask %s", n->name);
2971
2972 return ERROR_OK;
2973 }
2974
2975 COMMAND_HANDLER(handle_cortex_m_reset_config_command)
2976 {
2977 struct target *target = get_current_target(CMD_CTX);
2978 struct cortex_m_common *cortex_m = target_to_cm(target);
2979 int retval;
2980 char *reset_config;
2981
2982 retval = cortex_m_verify_pointer(CMD, cortex_m);
2983 if (retval != ERROR_OK)
2984 return retval;
2985
2986 if (CMD_ARGC > 0) {
2987 if (strcmp(*CMD_ARGV, "sysresetreq") == 0)
2988 cortex_m->soft_reset_config = CORTEX_M_RESET_SYSRESETREQ;
2989
2990 else if (strcmp(*CMD_ARGV, "vectreset") == 0) {
2991 if (target_was_examined(target)
2992 && !cortex_m->vectreset_supported)
2993 LOG_TARGET_WARNING(target, "VECTRESET is not supported on your Cortex-M core!");
2994 else
2995 cortex_m->soft_reset_config = CORTEX_M_RESET_VECTRESET;
2996
2997 } else
2998 return ERROR_COMMAND_SYNTAX_ERROR;
2999 }
3000
3001 switch (cortex_m->soft_reset_config) {
3002 case CORTEX_M_RESET_SYSRESETREQ:
3003 reset_config = "sysresetreq";
3004 break;
3005
3006 case CORTEX_M_RESET_VECTRESET:
3007 reset_config = "vectreset";
3008 break;
3009
3010 default:
3011 reset_config = "unknown";
3012 break;
3013 }
3014
3015 command_print(CMD, "cortex_m reset_config %s", reset_config);
3016
3017 return ERROR_OK;
3018 }
3019
/* Sub-commands of the 'cortex_m' command group (EXEC-mode except where noted),
 * plus the shared SMP command chain. */
static const struct command_registration cortex_m_exec_command_handlers[] = {
	{
		.name = "maskisr",
		.handler = handle_cortex_m_mask_interrupts_command,
		.mode = COMMAND_EXEC,
		.help = "mask cortex_m interrupts",
		.usage = "['auto'|'on'|'off'|'steponly']",
	},
	{
		.name = "vector_catch",
		.handler = handle_cortex_m_vector_catch_command,
		.mode = COMMAND_EXEC,
		.help = "configure hardware vectors to trigger debug entry",
		.usage = "['all'|'none'|('bus_err'|'chk_err'|...)*]",
	},
	{
		/* COMMAND_ANY: usable from config stage as well as at runtime. */
		.name = "reset_config",
		.handler = handle_cortex_m_reset_config_command,
		.mode = COMMAND_ANY,
		.help = "configure software reset handling",
		.usage = "['sysresetreq'|'vectreset']",
	},
	{
		.chain = smp_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
/* Top-level command registrations for Cortex-M targets: chains in the generic
 * ARMv7-M, trace, deprecated TPIU and RTT handlers, and exposes the
 * 'cortex_m' group defined above. */
static const struct command_registration cortex_m_command_handlers[] = {
	{
		.chain = armv7m_command_handlers,
	},
	{
		.chain = armv7m_trace_command_handlers,
	},
	/* START_DEPRECATED_TPIU */
	{
		.chain = arm_tpiu_deprecated_command_handlers,
	},
	/* END_DEPRECATED_TPIU */
	{
		.name = "cortex_m",
		.mode = COMMAND_EXEC,
		.help = "Cortex-M command group",
		.usage = "",
		.chain = cortex_m_exec_command_handlers,
	},
	{
		.chain = rtt_target_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
3071
/* Target driver vtable for Cortex-M cores.  Cortex-M-specific operations use
 * the cortex_m_* callbacks; operations shared with other ARMv7-M targets
 * delegate to the armv7m_* / arm_* helpers. */
struct target_type cortexm_target = {
	.name = "cortex_m",

	.poll = cortex_m_poll,
	.arch_state = armv7m_arch_state,

	.target_request_data = cortex_m_target_request_data,

	/* Run control. */
	.halt = cortex_m_halt,
	.resume = cortex_m_resume,
	.step = cortex_m_step,

	/* Reset handling. */
	.assert_reset = cortex_m_assert_reset,
	.deassert_reset = cortex_m_deassert_reset,
	.soft_reset_halt = cortex_m_soft_reset_halt,

	/* GDB integration. */
	.get_gdb_arch = arm_get_gdb_arch,
	.get_gdb_reg_list = armv7m_get_gdb_reg_list,

	/* Memory access. */
	.read_memory = cortex_m_read_memory,
	.write_memory = cortex_m_write_memory,
	.checksum_memory = armv7m_checksum_memory,
	.blank_check_memory = armv7m_blank_check_memory,

	/* On-target algorithm execution (e.g. used by flash drivers). */
	.run_algorithm = armv7m_run_algorithm,
	.start_algorithm = armv7m_start_algorithm,
	.wait_algorithm = armv7m_wait_algorithm,

	/* Breakpoints and watchpoints. */
	.add_breakpoint = cortex_m_add_breakpoint,
	.remove_breakpoint = cortex_m_remove_breakpoint,
	.add_watchpoint = cortex_m_add_watchpoint,
	.remove_watchpoint = cortex_m_remove_watchpoint,
	.hit_watchpoint = cortex_m_hit_watchpoint,

	/* Lifecycle. */
	.commands = cortex_m_command_handlers,
	.target_create = cortex_m_target_create,
	.target_jim_configure = adiv5_jim_configure,
	.init_target = cortex_m_init_target,
	.examine = cortex_m_examine,
	.deinit_target = cortex_m_deinit_target,

	.profiling = cortex_m_profiling,
};

Linking to existing account procedure

If you already have an account and want to add another login method, you MUST first sign in with your existing account, then change the URL to https://review.openocd.org/login/?link to return to this page; this time it will link the new login method to your existing account. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)