target/cortex_m: support Infineon Cortex-M33 from SLx2 MCU
[openocd.git] / src / target / cortex_m.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2
3 /***************************************************************************
4 * Copyright (C) 2005 by Dominic Rath *
5 * Dominic.Rath@gmx.de *
6 * *
7 * Copyright (C) 2006 by Magnus Lundin *
8 * lundin@mlu.mine.nu *
9 * *
10 * Copyright (C) 2008 by Spencer Oliver *
11 * spen@spen-soft.co.uk *
12 * *
13 * *
14 * Cortex-M3(tm) TRM, ARM DDI 0337E (r1p1) and 0337G (r2p0) *
15 * *
16 ***************************************************************************/
17 #ifdef HAVE_CONFIG_H
18 #include "config.h"
19 #endif
20
21 #include "jtag/interface.h"
22 #include "breakpoints.h"
23 #include "cortex_m.h"
24 #include "target_request.h"
25 #include "target_type.h"
26 #include "arm_adi_v5.h"
27 #include "arm_disassembler.h"
28 #include "register.h"
29 #include "arm_opcodes.h"
30 #include "arm_semihosting.h"
31 #include "smp.h"
32 #include <helper/nvp.h>
33 #include <helper/time_support.h>
34 #include <rtt/rtt.h>
35
36 /* NOTE: most of this should work fine for the Cortex-M1 and
37 * Cortex-M0 cores too, although they're ARMv6-M not ARMv7-M.
38 * Some differences: M0/M1 doesn't have FPB remapping or the
39 * DWT tracing/profiling support. (So the cycle counter will
40 * not be usable; the other stuff isn't currently used here.)
41 *
42 * Although there are some workarounds for errata seen only in r0p0
43 * silicon, such old parts are hard to find and thus not much tested
44 * any longer.
45 */
46
/* Timeout (in ms) waiting for DHCSR.S_REGRDY during a core register r/w
 * through the DCRSR/DCRDR debug register interface */
#define DHCSR_S_REGRDY_TIMEOUT (500)
49
/* Supported Cortex-M Cores: maps a CPUID implementer/part number to the
 * core's display name, architecture generation and feature flags
 * (FPU variant, TAR auto-increment block size) */
static const struct cortex_m_part_info cortex_m_parts[] = {
	{
		.impl_part = CORTEX_M0_PARTNO,
		.name = "Cortex-M0",
		.arch = ARM_ARCH_V6M,
	},
	{
		.impl_part = CORTEX_M0P_PARTNO,
		.name = "Cortex-M0+",
		.arch = ARM_ARCH_V6M,
	},
	{
		.impl_part = CORTEX_M1_PARTNO,
		.name = "Cortex-M1",
		.arch = ARM_ARCH_V6M,
	},
	{
		.impl_part = CORTEX_M3_PARTNO,
		.name = "Cortex-M3",
		.arch = ARM_ARCH_V7M,
		.flags = CORTEX_M_F_TAR_AUTOINCR_BLOCK_4K,
	},
	{
		.impl_part = CORTEX_M4_PARTNO,
		.name = "Cortex-M4",
		.arch = ARM_ARCH_V7M,
		.flags = CORTEX_M_F_HAS_FPV4 | CORTEX_M_F_TAR_AUTOINCR_BLOCK_4K,
	},
	{
		.impl_part = CORTEX_M7_PARTNO,
		.name = "Cortex-M7",
		.arch = ARM_ARCH_V7M,
		.flags = CORTEX_M_F_HAS_FPV5,
	},
	{
		.impl_part = CORTEX_M23_PARTNO,
		.name = "Cortex-M23",
		.arch = ARM_ARCH_V8M,
	},
	{
		.impl_part = CORTEX_M33_PARTNO,
		.name = "Cortex-M33",
		.arch = ARM_ARCH_V8M,
		.flags = CORTEX_M_F_HAS_FPV5,
	},
	{
		.impl_part = CORTEX_M35P_PARTNO,
		.name = "Cortex-M35P",
		.arch = ARM_ARCH_V8M,
		.flags = CORTEX_M_F_HAS_FPV5,
	},
	{
		.impl_part = CORTEX_M55_PARTNO,
		.name = "Cortex-M55",
		.arch = ARM_ARCH_V8M,
		.flags = CORTEX_M_F_HAS_FPV5,
	},
	{
		.impl_part = STAR_MC1_PARTNO,
		.name = "STAR-MC1",
		.arch = ARM_ARCH_V8M,
		.flags = CORTEX_M_F_HAS_FPV5,
	},
	{
		.impl_part = INFINEON_SLX2_PARTNO,
		.name = "Infineon-SLx2",
		.arch = ARM_ARCH_V8M,
	},
	{
		.impl_part = REALTEK_M200_PARTNO,
		.name = "Real-M200 (KM0)",
		.arch = ARM_ARCH_V8M,
	},
	{
		.impl_part = REALTEK_M300_PARTNO,
		.name = "Real-M300 (KM4)",
		.arch = ARM_ARCH_V8M,
		.flags = CORTEX_M_F_HAS_FPV5,
	},
};
131
132 /* forward declarations */
133 static int cortex_m_store_core_reg_u32(struct target *target,
134 uint32_t num, uint32_t value);
135 static void cortex_m_dwt_free(struct target *target);
136
/** DCB DHCSR register contains S_RETIRE_ST and S_RESET_ST bits cleared
 * on a read. Call this helper function each time DHCSR is read
 * to preserve S_RESET_ST state in case a reset event was detected.
 */
static inline void cortex_m_cumulate_dhcsr_sticky(struct cortex_m_common *cortex_m,
	uint32_t dhcsr)
{
	/* ORs the whole word, not just the sticky bits; callers test only
	 * the sticky flags they care about (e.g. S_RESET_ST in poll) */
	cortex_m->dcb_dhcsr_cumulated_sticky |= dhcsr;
}
146
147 /** Read DCB DHCSR register to cortex_m->dcb_dhcsr and cumulate
148 * sticky bits in cortex_m->dcb_dhcsr_cumulated_sticky
149 */
150 static int cortex_m_read_dhcsr_atomic_sticky(struct target *target)
151 {
152 struct cortex_m_common *cortex_m = target_to_cm(target);
153 struct armv7m_common *armv7m = target_to_armv7m(target);
154
155 int retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DHCSR,
156 &cortex_m->dcb_dhcsr);
157 if (retval != ERROR_OK)
158 return retval;
159
160 cortex_m_cumulate_dhcsr_sticky(cortex_m, cortex_m->dcb_dhcsr);
161 return ERROR_OK;
162 }
163
/** Read one core register through the DCRSR/DCRDR debug register
 * interface, polling DHCSR.S_REGRDY until the transfer completes.
 *
 * @param target target to read from
 * @param regsel value for the DCRSR REGSEL field selecting the register
 * @param value receives the 32-bit register content
 * @returns ERROR_OK, an AP access error, or ERROR_TIMEOUT_REACHED if
 * S_REGRDY does not assert within DHCSR_S_REGRDY_TIMEOUT ms
 */
static int cortex_m_load_core_reg_u32(struct target *target,
	uint32_t regsel, uint32_t *value)
{
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = target_to_armv7m(target);
	int retval;
	uint32_t dcrdr, tmp_value;
	int64_t then;

	/* because the DCB_DCRDR is used for the emulated dcc channel
	 * we have to save/restore the DCB_DCRDR when used */
	if (target->dbg_msg_enabled) {
		retval = mem_ap_read_u32(armv7m->debug_ap, DCB_DCRDR, &dcrdr);
		if (retval != ERROR_OK)
			return retval;
	}

	/* request the register transfer */
	retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DCRSR, regsel);
	if (retval != ERROR_OK)
		return retval;

	/* check if value from register is ready and pre-read it */
	then = timeval_ms();
	while (1) {
		retval = mem_ap_read_u32(armv7m->debug_ap, DCB_DHCSR,
				&cortex_m->dcb_dhcsr);
		if (retval != ERROR_OK)
			return retval;
		/* DCRDR is read in the same pass; its value is consumed only
		 * after S_REGRDY confirms the transfer completed */
		retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DCRDR,
				&tmp_value);
		if (retval != ERROR_OK)
			return retval;
		cortex_m_cumulate_dhcsr_sticky(cortex_m, cortex_m->dcb_dhcsr);
		if (cortex_m->dcb_dhcsr & S_REGRDY)
			break;
		cortex_m->slow_register_read = true; /* Polling (still) needed. */
		if (timeval_ms() > then + DHCSR_S_REGRDY_TIMEOUT) {
			LOG_TARGET_ERROR(target, "Timeout waiting for DCRDR transfer ready");
			return ERROR_TIMEOUT_REACHED;
		}
		keep_alive();
	}

	*value = tmp_value;

	if (target->dbg_msg_enabled) {
		/* restore DCB_DCRDR - this needs to be in a separate
		 * transaction otherwise the emulated DCC channel breaks */
		if (retval == ERROR_OK)
			retval = mem_ap_write_atomic_u32(armv7m->debug_ap, DCB_DCRDR, dcrdr);
	}

	return retval;
}
218
219 static int cortex_m_slow_read_all_regs(struct target *target)
220 {
221 struct cortex_m_common *cortex_m = target_to_cm(target);
222 struct armv7m_common *armv7m = target_to_armv7m(target);
223 const unsigned int num_regs = armv7m->arm.core_cache->num_regs;
224
225 /* Opportunistically restore fast read, it'll revert to slow
226 * if any register needed polling in cortex_m_load_core_reg_u32(). */
227 cortex_m->slow_register_read = false;
228
229 for (unsigned int reg_id = 0; reg_id < num_regs; reg_id++) {
230 struct reg *r = &armv7m->arm.core_cache->reg_list[reg_id];
231 if (r->exist) {
232 int retval = armv7m->arm.read_core_reg(target, r, reg_id, ARM_MODE_ANY);
233 if (retval != ERROR_OK)
234 return retval;
235 }
236 }
237
238 if (!cortex_m->slow_register_read)
239 LOG_TARGET_DEBUG(target, "Switching back to fast register reads");
240
241 return ERROR_OK;
242 }
243
244 static int cortex_m_queue_reg_read(struct target *target, uint32_t regsel,
245 uint32_t *reg_value, uint32_t *dhcsr)
246 {
247 struct armv7m_common *armv7m = target_to_armv7m(target);
248 int retval;
249
250 retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DCRSR, regsel);
251 if (retval != ERROR_OK)
252 return retval;
253
254 retval = mem_ap_read_u32(armv7m->debug_ap, DCB_DHCSR, dhcsr);
255 if (retval != ERROR_OK)
256 return retval;
257
258 return mem_ap_read_u32(armv7m->debug_ap, DCB_DCRDR, reg_value);
259 }
260
/** Read all core registers in one batched DAP transaction, without
 * polling S_REGRDY between transfers.
 *
 * Queues a DCRSR write plus DHCSR/DCRDR reads per register, runs the
 * whole batch, then verifies every captured DHCSR had S_REGRDY set.
 *
 * @returns ERROR_OK on success, an AP access error, or
 * ERROR_TIMEOUT_REACHED if any register was not ready - the caller
 * then falls back to the slow, polled read path.
 */
static int cortex_m_fast_read_all_regs(struct target *target)
{
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = target_to_armv7m(target);
	int retval;
	uint32_t dcrdr;

	/* because the DCB_DCRDR is used for the emulated dcc channel
	 * we have to save/restore the DCB_DCRDR when used */
	if (target->dbg_msg_enabled) {
		retval = mem_ap_read_u32(armv7m->debug_ap, DCB_DCRDR, &dcrdr);
		if (retval != ERROR_OK)
			return retval;
	}

	const unsigned int num_regs = armv7m->arm.core_cache->num_regs;
	const unsigned int n_r32 = ARMV7M_LAST_REG - ARMV7M_CORE_FIRST_REG + 1
			+ ARMV7M_FPU_LAST_REG - ARMV7M_FPU_FIRST_REG + 1;
	/* we need one 32-bit word for each register except FP D0..D15, which
	 * need two words */
	uint32_t r_vals[n_r32];
	uint32_t dhcsr[n_r32];

	unsigned int wi = 0; /* write index to r_vals and dhcsr arrays */
	unsigned int reg_id; /* register index in the reg_list, ARMV7M_R0... */
	for (reg_id = 0; reg_id < num_regs; reg_id++) {
		struct reg *r = &armv7m->arm.core_cache->reg_list[reg_id];
		if (!r->exist)
			continue;	/* skip non existent registers */

		if (r->size <= 8) {
			/* Any 8-bit or shorter register is unpacked from a 32-bit
			 * container register. Skip it now. */
			continue;
		}

		uint32_t regsel = armv7m_map_id_to_regsel(reg_id);
		retval = cortex_m_queue_reg_read(target, regsel, &r_vals[wi],
				&dhcsr[wi]);
		if (retval != ERROR_OK)
			return retval;
		wi++;

		assert(r->size == 32 || r->size == 64);
		if (r->size == 32)
			continue;	/* done with 32-bit register */

		assert(reg_id >= ARMV7M_FPU_FIRST_REG && reg_id <= ARMV7M_FPU_LAST_REG);
		/* the odd part of FP register (S1, S3...) */
		retval = cortex_m_queue_reg_read(target, regsel + 1, &r_vals[wi],
				&dhcsr[wi]);
		if (retval != ERROR_OK)
			return retval;
		wi++;
	}

	assert(wi <= n_r32);

	/* execute all queued transfers in one shot */
	retval = dap_run(armv7m->debug_ap->dap);
	if (retval != ERROR_OK)
		return retval;

	if (target->dbg_msg_enabled) {
		/* restore DCB_DCRDR - this needs to be in a separate
		 * transaction otherwise the emulated DCC channel breaks */
		retval = mem_ap_write_atomic_u32(armv7m->debug_ap, DCB_DCRDR, dcrdr);
		if (retval != ERROR_OK)
			return retval;
	}

	/* check S_REGRDY captured alongside each DCRDR value */
	bool not_ready = false;
	for (unsigned int i = 0; i < wi; i++) {
		if ((dhcsr[i] & S_REGRDY) == 0) {
			not_ready = true;
			LOG_TARGET_DEBUG(target, "Register %u was not ready during fast read", i);
		}
		cortex_m_cumulate_dhcsr_sticky(cortex_m, dhcsr[i]);
	}

	if (not_ready) {
		/* Any register was not ready,
		 * fall back to slow read with S_REGRDY polling */
		return ERROR_TIMEOUT_REACHED;
	}

	LOG_TARGET_DEBUG(target, "read %u 32-bit registers", wi);

	/* distribute the raw words into the register cache */
	unsigned int ri = 0; /* read index from r_vals array */
	for (reg_id = 0; reg_id < num_regs; reg_id++) {
		struct reg *r = &armv7m->arm.core_cache->reg_list[reg_id];
		if (!r->exist)
			continue;	/* skip non existent registers */

		r->dirty = false;

		unsigned int reg32_id;
		uint32_t offset;
		if (armv7m_map_reg_packing(reg_id, &reg32_id, &offset)) {
			/* Unpack a partial register from 32-bit container register */
			struct reg *r32 = &armv7m->arm.core_cache->reg_list[reg32_id];

			/* The container register ought to precede all regs unpacked
			 * from it in the reg_list. So the value should be ready
			 * to unpack */
			assert(r32->valid);
			buf_cpy(r32->value + offset, r->value, r->size);

		} else {
			assert(r->size == 32 || r->size == 64);
			buf_set_u32(r->value, 0, 32, r_vals[ri++]);

			if (r->size == 64) {
				assert(reg_id >= ARMV7M_FPU_FIRST_REG && reg_id <= ARMV7M_FPU_LAST_REG);
				/* the odd part of FP register (S1, S3...) */
				buf_set_u32(r->value + 4, 0, 32, r_vals[ri++]);
			}
		}
		r->valid = true;
	}
	assert(ri == wi);

	return retval;
}
384
/** Write one core register through the DCRDR/DCRSR debug register
 * interface, polling DHCSR.S_REGRDY until the core accepts the value.
 *
 * @param target target to write to
 * @param regsel value for the DCRSR REGSEL field selecting the register
 * @param value 32-bit value to write
 * @returns ERROR_OK, an AP access error, or ERROR_TIMEOUT_REACHED if
 * S_REGRDY does not assert within DHCSR_S_REGRDY_TIMEOUT ms
 */
static int cortex_m_store_core_reg_u32(struct target *target,
	uint32_t regsel, uint32_t value)
{
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = target_to_armv7m(target);
	int retval;
	uint32_t dcrdr;
	int64_t then;

	/* because the DCB_DCRDR is used for the emulated dcc channel
	 * we have to save/restore the DCB_DCRDR when used */
	if (target->dbg_msg_enabled) {
		retval = mem_ap_read_u32(armv7m->debug_ap, DCB_DCRDR, &dcrdr);
		if (retval != ERROR_OK)
			return retval;
	}

	/* stage the data, then request the transfer with WNR set */
	retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DCRDR, value);
	if (retval != ERROR_OK)
		return retval;

	retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DCRSR, regsel | DCRSR_WNR);
	if (retval != ERROR_OK)
		return retval;

	/* check if value is written into register */
	then = timeval_ms();
	while (1) {
		retval = cortex_m_read_dhcsr_atomic_sticky(target);
		if (retval != ERROR_OK)
			return retval;
		if (cortex_m->dcb_dhcsr & S_REGRDY)
			break;
		if (timeval_ms() > then + DHCSR_S_REGRDY_TIMEOUT) {
			LOG_TARGET_ERROR(target, "Timeout waiting for DCRDR transfer ready");
			return ERROR_TIMEOUT_REACHED;
		}
		keep_alive();
	}

	if (target->dbg_msg_enabled) {
		/* restore DCB_DCRDR - this needs to be in a separate
		 * transaction otherwise the emulated DCC channel breaks */
		if (retval == ERROR_OK)
			retval = mem_ap_write_atomic_u32(armv7m->debug_ap, DCB_DCRDR, dcrdr);
	}

	return retval;
}
434
435 static int cortex_m_write_debug_halt_mask(struct target *target,
436 uint32_t mask_on, uint32_t mask_off)
437 {
438 struct cortex_m_common *cortex_m = target_to_cm(target);
439 struct armv7m_common *armv7m = &cortex_m->armv7m;
440
441 /* mask off status bits */
442 cortex_m->dcb_dhcsr &= ~((0xFFFFul << 16) | mask_off);
443 /* create new register mask */
444 cortex_m->dcb_dhcsr |= DBGKEY | C_DEBUGEN | mask_on;
445
446 return mem_ap_write_atomic_u32(armv7m->debug_ap, DCB_DHCSR, cortex_m->dcb_dhcsr);
447 }
448
449 static int cortex_m_set_maskints(struct target *target, bool mask)
450 {
451 struct cortex_m_common *cortex_m = target_to_cm(target);
452 if (!!(cortex_m->dcb_dhcsr & C_MASKINTS) != mask)
453 return cortex_m_write_debug_halt_mask(target, mask ? C_MASKINTS : 0, mask ? 0 : C_MASKINTS);
454 else
455 return ERROR_OK;
456 }
457
458 static int cortex_m_set_maskints_for_halt(struct target *target)
459 {
460 struct cortex_m_common *cortex_m = target_to_cm(target);
461 switch (cortex_m->isrmasking_mode) {
462 case CORTEX_M_ISRMASK_AUTO:
463 /* interrupts taken at resume, whether for step or run -> no mask */
464 return cortex_m_set_maskints(target, false);
465
466 case CORTEX_M_ISRMASK_OFF:
467 /* interrupts never masked */
468 return cortex_m_set_maskints(target, false);
469
470 case CORTEX_M_ISRMASK_ON:
471 /* interrupts always masked */
472 return cortex_m_set_maskints(target, true);
473
474 case CORTEX_M_ISRMASK_STEPONLY:
475 /* interrupts masked for single step only -> mask now if MASKINTS
476 * erratum, otherwise only mask before stepping */
477 return cortex_m_set_maskints(target, cortex_m->maskints_erratum);
478 }
479 return ERROR_OK;
480 }
481
482 static int cortex_m_set_maskints_for_run(struct target *target)
483 {
484 switch (target_to_cm(target)->isrmasking_mode) {
485 case CORTEX_M_ISRMASK_AUTO:
486 /* interrupts taken at resume, whether for step or run -> no mask */
487 return cortex_m_set_maskints(target, false);
488
489 case CORTEX_M_ISRMASK_OFF:
490 /* interrupts never masked */
491 return cortex_m_set_maskints(target, false);
492
493 case CORTEX_M_ISRMASK_ON:
494 /* interrupts always masked */
495 return cortex_m_set_maskints(target, true);
496
497 case CORTEX_M_ISRMASK_STEPONLY:
498 /* interrupts masked for single step only -> no mask */
499 return cortex_m_set_maskints(target, false);
500 }
501 return ERROR_OK;
502 }
503
504 static int cortex_m_set_maskints_for_step(struct target *target)
505 {
506 switch (target_to_cm(target)->isrmasking_mode) {
507 case CORTEX_M_ISRMASK_AUTO:
508 /* the auto-interrupt should already be done -> mask */
509 return cortex_m_set_maskints(target, true);
510
511 case CORTEX_M_ISRMASK_OFF:
512 /* interrupts never masked */
513 return cortex_m_set_maskints(target, false);
514
515 case CORTEX_M_ISRMASK_ON:
516 /* interrupts always masked */
517 return cortex_m_set_maskints(target, true);
518
519 case CORTEX_M_ISRMASK_STEPONLY:
520 /* interrupts masked for single step only -> mask */
521 return cortex_m_set_maskints(target, true);
522 }
523 return ERROR_OK;
524 }
525
526 static int cortex_m_clear_halt(struct target *target)
527 {
528 struct cortex_m_common *cortex_m = target_to_cm(target);
529 struct armv7m_common *armv7m = &cortex_m->armv7m;
530 int retval;
531
532 /* clear step if any */
533 cortex_m_write_debug_halt_mask(target, C_HALT, C_STEP);
534
535 /* Read Debug Fault Status Register */
536 retval = mem_ap_read_atomic_u32(armv7m->debug_ap, NVIC_DFSR, &cortex_m->nvic_dfsr);
537 if (retval != ERROR_OK)
538 return retval;
539
540 /* Clear Debug Fault Status */
541 retval = mem_ap_write_atomic_u32(armv7m->debug_ap, NVIC_DFSR, cortex_m->nvic_dfsr);
542 if (retval != ERROR_OK)
543 return retval;
544 LOG_TARGET_DEBUG(target, "NVIC_DFSR 0x%" PRIx32 "", cortex_m->nvic_dfsr);
545
546 return ERROR_OK;
547 }
548
/** Execute exactly one instruction on a halted core with interrupts
 * masked, then restore DHCSR via cortex_m_clear_halt().
 *
 * NOTE(review): the return value of cortex_m_clear_halt() is ignored
 * here; the step itself has already happened at that point.
 */
static int cortex_m_single_step_core(struct target *target)
{
	struct cortex_m_common *cortex_m = target_to_cm(target);
	int retval;

	/* Mask interrupts before clearing halt, if not done already. This avoids
	 * Erratum 377497 (fixed in r1p0) where setting MASKINTS while clearing
	 * HALT can put the core into an unknown state.
	 */
	if (!(cortex_m->dcb_dhcsr & C_MASKINTS)) {
		retval = cortex_m_write_debug_halt_mask(target, C_MASKINTS, 0);
		if (retval != ERROR_OK)
			return retval;
	}
	/* setting C_STEP while dropping C_HALT performs the step */
	retval = cortex_m_write_debug_halt_mask(target, C_STEP, C_HALT);
	if (retval != ERROR_OK)
		return retval;
	LOG_TARGET_DEBUG(target, "single step");

	/* restore dhcsr reg */
	cortex_m_clear_halt(target);

	return ERROR_OK;
}
573
574 static int cortex_m_enable_fpb(struct target *target)
575 {
576 int retval = target_write_u32(target, FP_CTRL, 3);
577 if (retval != ERROR_OK)
578 return retval;
579
580 /* check the fpb is actually enabled */
581 uint32_t fpctrl;
582 retval = target_read_u32(target, FP_CTRL, &fpctrl);
583 if (retval != ERROR_OK)
584 return retval;
585
586 if (fpctrl & 1)
587 return ERROR_OK;
588
589 return ERROR_FAIL;
590 }
591
/** Re-initialize the debug infrastructure after a reset event was
 * detected: re-enable debug requests, restore the interrupt masking
 * policy, DEMCR vector catch settings, FPB and DWT comparator state,
 * then invalidate the register cache.
 */
static int cortex_m_endreset_event(struct target *target)
{
	int retval;
	uint32_t dcb_demcr;
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = &cortex_m->armv7m;
	struct adiv5_dap *swjdp = cortex_m->armv7m.arm.dap;
	struct cortex_m_fp_comparator *fp_list = cortex_m->fp_comparator_list;
	struct cortex_m_dwt_comparator *dwt_list = cortex_m->dwt_comparator_list;

	/* REVISIT The four debug monitor bits are currently ignored... */
	retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DEMCR, &dcb_demcr);
	if (retval != ERROR_OK)
		return retval;
	LOG_TARGET_DEBUG(target, "DCB_DEMCR = 0x%8.8" PRIx32 "", dcb_demcr);

	/* this register is used for emulated dcc channel */
	retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DCRDR, 0);
	if (retval != ERROR_OK)
		return retval;

	retval = cortex_m_read_dhcsr_atomic_sticky(target);
	if (retval != ERROR_OK)
		return retval;

	if (!(cortex_m->dcb_dhcsr & C_DEBUGEN)) {
		/* Enable debug requests */
		retval = cortex_m_write_debug_halt_mask(target, 0, C_HALT | C_STEP | C_MASKINTS);
		if (retval != ERROR_OK)
			return retval;
	}

	/* Restore proper interrupt masking setting for running CPU. */
	cortex_m_set_maskints_for_run(target);

	/* Enable features controlled by ITM and DWT blocks, and catch only
	 * the vectors we were told to pay attention to.
	 *
	 * Target firmware is responsible for all fault handling policy
	 * choices *EXCEPT* explicitly scripted overrides like "vector_catch"
	 * or manual updates to the NVIC SHCSR and CCR registers.
	 */
	retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DEMCR, TRCENA | armv7m->demcr);
	if (retval != ERROR_OK)
		return retval;

	/* Paranoia: evidently some (early?) chips don't preserve all the
	 * debug state (including FPB, DWT, etc) across reset...
	 */

	/* Enable FPB */
	retval = cortex_m_enable_fpb(target);
	if (retval != ERROR_OK) {
		LOG_TARGET_ERROR(target, "Failed to enable the FPB");
		return retval;
	}

	cortex_m->fpb_enabled = true;

	/* Restore FPB registers */
	for (unsigned int i = 0; i < cortex_m->fp_num_code + cortex_m->fp_num_lit; i++) {
		retval = target_write_u32(target, fp_list[i].fpcr_address, fp_list[i].fpcr_value);
		if (retval != ERROR_OK)
			return retval;
	}

	/* Restore DWT registers: comparator, mask and function words for
	 * each comparator unit */
	for (unsigned int i = 0; i < cortex_m->dwt_num_comp; i++) {
		retval = target_write_u32(target, dwt_list[i].dwt_comparator_address + 0,
				dwt_list[i].comp);
		if (retval != ERROR_OK)
			return retval;
		retval = target_write_u32(target, dwt_list[i].dwt_comparator_address + 4,
				dwt_list[i].mask);
		if (retval != ERROR_OK)
			return retval;
		retval = target_write_u32(target, dwt_list[i].dwt_comparator_address + 8,
				dwt_list[i].function);
		if (retval != ERROR_OK)
			return retval;
	}
	retval = dap_run(swjdp);
	if (retval != ERROR_OK)
		return retval;

	/* cached register values are stale after a reset */
	register_cache_invalidate(armv7m->arm.core_cache);

	/* TODO: invalidate also working areas (needed in the case of detected reset).
	 * Doing so will require flash drivers to test if working area
	 * is still valid in all target algo calling loops.
	 */

	/* make sure we have latest dhcsr flags */
	retval = cortex_m_read_dhcsr_atomic_sticky(target);
	if (retval != ERROR_OK)
		return retval;

	return retval;
}
691
692 static int cortex_m_examine_debug_reason(struct target *target)
693 {
694 struct cortex_m_common *cortex_m = target_to_cm(target);
695
696 /* THIS IS NOT GOOD, TODO - better logic for detection of debug state reason
697 * only check the debug reason if we don't know it already */
698
699 if ((target->debug_reason != DBG_REASON_DBGRQ)
700 && (target->debug_reason != DBG_REASON_SINGLESTEP)) {
701 if (cortex_m->nvic_dfsr & DFSR_BKPT) {
702 target->debug_reason = DBG_REASON_BREAKPOINT;
703 if (cortex_m->nvic_dfsr & DFSR_DWTTRAP)
704 target->debug_reason = DBG_REASON_WPTANDBKPT;
705 } else if (cortex_m->nvic_dfsr & DFSR_DWTTRAP)
706 target->debug_reason = DBG_REASON_WATCHPOINT;
707 else if (cortex_m->nvic_dfsr & DFSR_VCATCH)
708 target->debug_reason = DBG_REASON_BREAKPOINT;
709 else if (cortex_m->nvic_dfsr & DFSR_EXTERNAL)
710 target->debug_reason = DBG_REASON_DBGRQ;
711 else /* HALTED */
712 target->debug_reason = DBG_REASON_UNDEFINED;
713 }
714
715 return ERROR_OK;
716 }
717
/** Read and log the status register (and, where one exists, the fault
 * address register) of the exception currently active according to
 * armv7m->exception_number. Reads are queued and executed in one
 * dap_run() batch.
 */
static int cortex_m_examine_exception_reason(struct target *target)
{
	uint32_t shcsr = 0, except_sr = 0, cfsr = -1, except_ar = -1;
	struct armv7m_common *armv7m = target_to_armv7m(target);
	struct adiv5_dap *swjdp = armv7m->arm.dap;
	int retval;

	retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_SHCSR, &shcsr);
	if (retval != ERROR_OK)
		return retval;
	switch (armv7m->exception_number) {
		case 2:	/* NMI */
			break;
		case 3:	/* Hard Fault */
			retval = mem_ap_read_atomic_u32(armv7m->debug_ap, NVIC_HFSR, &except_sr);
			if (retval != ERROR_OK)
				return retval;
			/* FORCED bit set: the hard fault escalated from a
			 * configurable fault, so CFSR holds the real cause */
			if (except_sr & 0x40000000) {
				retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_CFSR, &cfsr);
				if (retval != ERROR_OK)
					return retval;
			}
			break;
		case 4:	/* Memory Management */
			retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_CFSR, &except_sr);
			if (retval != ERROR_OK)
				return retval;
			retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_MMFAR, &except_ar);
			if (retval != ERROR_OK)
				return retval;
			break;
		case 5:	/* Bus Fault */
			retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_CFSR, &except_sr);
			if (retval != ERROR_OK)
				return retval;
			retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_BFAR, &except_ar);
			if (retval != ERROR_OK)
				return retval;
			break;
		case 6:	/* Usage Fault */
			retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_CFSR, &except_sr);
			if (retval != ERROR_OK)
				return retval;
			break;
		case 7:	/* Secure Fault */
			retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_SFSR, &except_sr);
			if (retval != ERROR_OK)
				return retval;
			retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_SFAR, &except_ar);
			if (retval != ERROR_OK)
				return retval;
			break;
		case 11:	/* SVCall */
			break;
		case 12:	/* Debug Monitor */
			retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_DFSR, &except_sr);
			if (retval != ERROR_OK)
				return retval;
			break;
		case 14:	/* PendSV */
			break;
		case 15:	/* SysTick */
			break;
		default:
			except_sr = 0;
			break;
	}
	retval = dap_run(swjdp);
	if (retval == ERROR_OK)
		LOG_TARGET_DEBUG(target, "%s SHCSR 0x%" PRIx32 ", SR 0x%" PRIx32
			", CFSR 0x%" PRIx32 ", AR 0x%" PRIx32,
			armv7m_exception_string(armv7m->exception_number),
			shcsr, except_sr, cfsr, except_ar);
	return retval;
}
793
/** Handle the transition into debug state: clear the halt cause, read
 * back all core registers, derive core mode, active exception number
 * and (on ARMv8-M) security state, then run the optional
 * post_debug_entry hook.
 */
static int cortex_m_debug_entry(struct target *target)
{
	uint32_t xpsr;
	int retval;
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = &cortex_m->armv7m;
	struct arm *arm = &armv7m->arm;
	struct reg *r;

	LOG_TARGET_DEBUG(target, " ");

	/* Do this really early to minimize the window where the MASKINTS erratum
	 * can pile up pending interrupts. */
	cortex_m_set_maskints_for_halt(target);

	cortex_m_clear_halt(target);

	retval = cortex_m_read_dhcsr_atomic_sticky(target);
	if (retval != ERROR_OK)
		return retval;

	retval = armv7m->examine_debug_reason(target);
	if (retval != ERROR_OK)
		return retval;

	/* examine PE security state */
	uint32_t dscsr = 0;
	if (armv7m->arm.arch == ARM_ARCH_V8M) {
		retval = mem_ap_read_u32(armv7m->debug_ap, DCB_DSCSR, &dscsr);
		if (retval != ERROR_OK)
			return retval;
	}

	/* Load all registers to arm.core_cache; prefer the batched fast
	 * path and fall back to polled reads on timeout */
	if (!cortex_m->slow_register_read) {
		retval = cortex_m_fast_read_all_regs(target);
		if (retval == ERROR_TIMEOUT_REACHED) {
			cortex_m->slow_register_read = true;
			LOG_TARGET_DEBUG(target, "Switched to slow register read");
		}
	}

	if (cortex_m->slow_register_read)
		retval = cortex_m_slow_read_all_regs(target);

	if (retval != ERROR_OK)
		return retval;

	r = arm->cpsr;
	xpsr = buf_get_u32(r->value, 0, 32);

	/* Are we in an exception handler? The low 9 bits of xPSR hold the
	 * active exception number. */
	if (xpsr & 0x1FF) {
		armv7m->exception_number = (xpsr & 0x1FF);

		arm->core_mode = ARM_MODE_HANDLER;
		arm->map = armv7m_msp_reg_map;
	} else {
		unsigned control = buf_get_u32(arm->core_cache
				->reg_list[ARMV7M_CONTROL].value, 0, 3);

		/* is this thread privileged? */
		arm->core_mode = control & 1
			? ARM_MODE_USER_THREAD
			: ARM_MODE_THREAD;

		/* which stack is it using? */
		if (control & 2)
			arm->map = armv7m_psp_reg_map;
		else
			arm->map = armv7m_msp_reg_map;

		armv7m->exception_number = 0;
	}

	if (armv7m->exception_number)
		cortex_m_examine_exception_reason(target);

	bool secure_state = (dscsr & DSCSR_CDS) == DSCSR_CDS;
	LOG_TARGET_DEBUG(target, "entered debug state in core mode: %s at PC 0x%" PRIx32
		", cpu in %s state, target->state: %s",
		arm_mode_name(arm->core_mode),
		buf_get_u32(arm->pc->value, 0, 32),
		secure_state ? "Secure" : "Non-Secure",
		target_state_name(target));

	if (armv7m->post_debug_entry) {
		retval = armv7m->post_debug_entry(target);
		if (retval != ERROR_OK)
			return retval;
	}

	return ERROR_OK;
}
888
/** Poll a single target: refresh DHCSR, recover from lockup, detect
 * external reset or resume, drive the TARGET_* state machine and run
 * debug entry (plus semihosting and halt events) when a halt is seen.
 */
static int cortex_m_poll_one(struct target *target)
{
	int detected_failure = ERROR_OK;
	int retval = ERROR_OK;
	enum target_state prev_target_state = target->state;
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = &cortex_m->armv7m;

	/* Read from Debug Halting Control and Status Register */
	retval = cortex_m_read_dhcsr_atomic_sticky(target);
	if (retval != ERROR_OK) {
		target->state = TARGET_UNKNOWN;
		return retval;
	}

	/* Recover from lockup. See ARMv7-M architecture spec,
	 * section B1.5.15 "Unrecoverable exception cases".
	 */
	if (cortex_m->dcb_dhcsr & S_LOCKUP) {
		LOG_TARGET_ERROR(target, "clearing lockup after double fault");
		cortex_m_write_debug_halt_mask(target, C_HALT, 0);
		target->debug_reason = DBG_REASON_DBGRQ;

		/* We have to execute the rest (the "finally" equivalent, but
		 * still throw this exception again).
		 */
		detected_failure = ERROR_FAIL;

		/* refresh status bits */
		retval = cortex_m_read_dhcsr_atomic_sticky(target);
		if (retval != ERROR_OK)
			return retval;
	}

	/* S_RESET_ST is sticky; consume it from the cumulated copy */
	if (cortex_m->dcb_dhcsr_cumulated_sticky & S_RESET_ST) {
		cortex_m->dcb_dhcsr_cumulated_sticky &= ~S_RESET_ST;
		if (target->state != TARGET_RESET) {
			target->state = TARGET_RESET;
			LOG_TARGET_INFO(target, "external reset detected");
		}
		return ERROR_OK;
	}

	if (target->state == TARGET_RESET) {
		/* Cannot switch context while running so endreset is
		 * called with target->state == TARGET_RESET
		 */
		LOG_TARGET_DEBUG(target, "Exit from reset with dcb_dhcsr 0x%" PRIx32,
			cortex_m->dcb_dhcsr);
		retval = cortex_m_endreset_event(target);
		if (retval != ERROR_OK) {
			target->state = TARGET_UNKNOWN;
			return retval;
		}
		target->state = TARGET_RUNNING;
		prev_target_state = TARGET_RUNNING;
	}

	if (cortex_m->dcb_dhcsr & S_HALT) {
		target->state = TARGET_HALTED;

		if ((prev_target_state == TARGET_RUNNING) || (prev_target_state == TARGET_RESET)) {
			retval = cortex_m_debug_entry(target);

			/* arm_semihosting needs to know registers, don't run if debug entry returned error */
			if (retval == ERROR_OK && arm_semihosting(target, &retval) != 0)
				return retval;

			if (target->smp) {
				/* SMP halt events are delivered together once all
				 * cores of the group have been brought to halt */
				LOG_TARGET_DEBUG(target, "postpone target event 'halted'");
				target->smp_halt_event_postponed = true;
			} else {
				/* regardless of errors returned in previous code update state */
				target_call_event_callbacks(target, TARGET_EVENT_HALTED);
			}
		}
		if (prev_target_state == TARGET_DEBUG_RUNNING) {
			retval = cortex_m_debug_entry(target);

			target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
		}
		if (retval != ERROR_OK)
			return retval;
	}

	if (target->state == TARGET_UNKNOWN) {
		/* Check if processor is retiring instructions or sleeping.
		 * Unlike S_RESET_ST here we test if the target *is* running now,
		 * not if it has been running (possibly in the past). Instructions are
		 * typically processed much faster than OpenOCD polls DHCSR so S_RETIRE_ST
		 * is read always 1. That's the reason not to use dcb_dhcsr_cumulated_sticky.
		 */
		if (cortex_m->dcb_dhcsr & S_RETIRE_ST || cortex_m->dcb_dhcsr & S_SLEEP) {
			target->state = TARGET_RUNNING;
			retval = ERROR_OK;
		}
	}

	/* Check that target is truly halted, since the target could be resumed externally */
	if ((prev_target_state == TARGET_HALTED) && !(cortex_m->dcb_dhcsr & S_HALT)) {
		/* registers are now invalid */
		register_cache_invalidate(armv7m->arm.core_cache);

		target->state = TARGET_RUNNING;
		LOG_TARGET_WARNING(target, "external resume detected");
		target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
		retval = ERROR_OK;
	}

	/* Did we detect a failure condition that we cleared? */
	if (detected_failure != ERROR_OK)
		retval = detected_failure;
	return retval;
}
1003
1004 static int cortex_m_halt_one(struct target *target);
1005
1006 static int cortex_m_smp_halt_all(struct list_head *smp_targets)
1007 {
1008 int retval = ERROR_OK;
1009 struct target_list *head;
1010
1011 foreach_smp_target(head, smp_targets) {
1012 struct target *curr = head->target;
1013 if (!target_was_examined(curr))
1014 continue;
1015 if (curr->state == TARGET_HALTED)
1016 continue;
1017
1018 int ret2 = cortex_m_halt_one(curr);
1019 if (retval == ERROR_OK)
1020 retval = ret2; /* store the first error code ignore others */
1021 }
1022 return retval;
1023 }
1024
1025 static int cortex_m_smp_post_halt_poll(struct list_head *smp_targets)
1026 {
1027 int retval = ERROR_OK;
1028 struct target_list *head;
1029
1030 foreach_smp_target(head, smp_targets) {
1031 struct target *curr = head->target;
1032 if (!target_was_examined(curr))
1033 continue;
1034 /* skip targets that were already halted */
1035 if (curr->state == TARGET_HALTED)
1036 continue;
1037
1038 int ret2 = cortex_m_poll_one(curr);
1039 if (retval == ERROR_OK)
1040 retval = ret2; /* store the first error code ignore others */
1041 }
1042 return retval;
1043 }
1044
1045 static int cortex_m_poll_smp(struct list_head *smp_targets)
1046 {
1047 int retval = ERROR_OK;
1048 struct target_list *head;
1049 bool halted = false;
1050
1051 foreach_smp_target(head, smp_targets) {
1052 struct target *curr = head->target;
1053 if (curr->smp_halt_event_postponed) {
1054 halted = true;
1055 break;
1056 }
1057 }
1058
1059 if (halted) {
1060 retval = cortex_m_smp_halt_all(smp_targets);
1061
1062 int ret2 = cortex_m_smp_post_halt_poll(smp_targets);
1063 if (retval == ERROR_OK)
1064 retval = ret2; /* store the first error code ignore others */
1065
1066 foreach_smp_target(head, smp_targets) {
1067 struct target *curr = head->target;
1068 if (!curr->smp_halt_event_postponed)
1069 continue;
1070
1071 curr->smp_halt_event_postponed = false;
1072 if (curr->state == TARGET_HALTED) {
1073 LOG_TARGET_DEBUG(curr, "sending postponed target event 'halted'");
1074 target_call_event_callbacks(curr, TARGET_EVENT_HALTED);
1075 }
1076 }
1077 /* There is no need to set gdb_service->target
1078 * as hwthread_update_threads() selects an interesting thread
1079 * by its own
1080 */
1081 }
1082 return retval;
1083 }
1084
1085 static int cortex_m_poll(struct target *target)
1086 {
1087 int retval = cortex_m_poll_one(target);
1088
1089 if (target->smp) {
1090 struct target_list *last;
1091 last = list_last_entry(target->smp_targets, struct target_list, lh);
1092 if (target == last->target)
1093 /* After the last target in SMP group has been polled
1094 * check for postponed halted events and eventually halt and re-poll
1095 * other targets */
1096 cortex_m_poll_smp(target->smp_targets);
1097 }
1098 return retval;
1099 }
1100
1101 static int cortex_m_halt_one(struct target *target)
1102 {
1103 LOG_TARGET_DEBUG(target, "target->state: %s", target_state_name(target));
1104
1105 if (target->state == TARGET_HALTED) {
1106 LOG_TARGET_DEBUG(target, "target was already halted");
1107 return ERROR_OK;
1108 }
1109
1110 if (target->state == TARGET_UNKNOWN)
1111 LOG_TARGET_WARNING(target, "target was in unknown state when halt was requested");
1112
1113 if (target->state == TARGET_RESET) {
1114 if ((jtag_get_reset_config() & RESET_SRST_PULLS_TRST) && jtag_get_srst()) {
1115 LOG_TARGET_ERROR(target, "can't request a halt while in reset if nSRST pulls nTRST");
1116 return ERROR_TARGET_FAILURE;
1117 } else {
1118 /* we came here in a reset_halt or reset_init sequence
1119 * debug entry was already prepared in cortex_m3_assert_reset()
1120 */
1121 target->debug_reason = DBG_REASON_DBGRQ;
1122
1123 return ERROR_OK;
1124 }
1125 }
1126
1127 /* Write to Debug Halting Control and Status Register */
1128 cortex_m_write_debug_halt_mask(target, C_HALT, 0);
1129
1130 /* Do this really early to minimize the window where the MASKINTS erratum
1131 * can pile up pending interrupts. */
1132 cortex_m_set_maskints_for_halt(target);
1133
1134 target->debug_reason = DBG_REASON_DBGRQ;
1135
1136 return ERROR_OK;
1137 }
1138
1139 static int cortex_m_halt(struct target *target)
1140 {
1141 if (target->smp)
1142 return cortex_m_smp_halt_all(target->smp_targets);
1143 else
1144 return cortex_m_halt_one(target);
1145 }
1146
/**
 * Perform a core-only soft reset (AIRCR.VECTRESET) and wait for the core
 * to halt via the VC_CORERESET vector catch.
 *
 * Only the Cortex-M core itself is reset, not the peripherals; for that
 * reason 'reset halt' is the preferred command. Returns ERROR_FAIL when
 * VECTRESET is unsupported (ARMv6-M cores), an AP access error, or
 * ERROR_OK — including when the 100 ms wait loop times out without
 * observing the halt (the caller gets no timeout indication).
 */
static int cortex_m_soft_reset_halt(struct target *target)
{
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = &cortex_m->armv7m;
	int retval, timeout = 0;

	/* on single cortex_m MCU soft_reset_halt should be avoided as same functionality
	 * can be obtained by using 'reset halt' and 'cortex_m reset_config vectreset'.
	 * As this reset only uses VC_CORERESET it would only ever reset the cortex_m
	 * core, not the peripherals */
	LOG_TARGET_DEBUG(target, "soft_reset_halt is discouraged, please use 'reset halt' instead.");

	if (!cortex_m->vectreset_supported) {
		LOG_TARGET_ERROR(target, "VECTRESET is not supported on this Cortex-M core");
		return ERROR_FAIL;
	}

	/* Set C_DEBUGEN */
	retval = cortex_m_write_debug_halt_mask(target, 0, C_STEP | C_MASKINTS);
	if (retval != ERROR_OK)
		return retval;

	/* Enter debug state on reset; restore DEMCR in endreset_event() */
	retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DEMCR,
			TRCENA | VC_HARDERR | VC_BUSERR | VC_CORERESET);
	if (retval != ERROR_OK)
		return retval;

	/* Request a core-only reset */
	retval = mem_ap_write_atomic_u32(armv7m->debug_ap, NVIC_AIRCR,
			AIRCR_VECTKEY | AIRCR_VECTRESET);
	if (retval != ERROR_OK)
		return retval;
	target->state = TARGET_RESET;

	/* registers are now invalid */
	register_cache_invalidate(cortex_m->armv7m.arm.core_cache);

	/* Wait up to ~100 ms for S_HALT plus the VCATCH flag that proves the
	 * halt came from the reset vector catch, polling once per ms. */
	while (timeout < 100) {
		retval = cortex_m_read_dhcsr_atomic_sticky(target);
		if (retval == ERROR_OK) {
			retval = mem_ap_read_atomic_u32(armv7m->debug_ap, NVIC_DFSR,
					&cortex_m->nvic_dfsr);
			if (retval != ERROR_OK)
				return retval;
			if ((cortex_m->dcb_dhcsr & S_HALT)
				&& (cortex_m->nvic_dfsr & DFSR_VCATCH)) {
				LOG_TARGET_DEBUG(target, "system reset-halted, DHCSR 0x%08" PRIx32 ", DFSR 0x%08" PRIx32,
					cortex_m->dcb_dhcsr, cortex_m->nvic_dfsr);
				cortex_m_poll(target);
				/* FIXME restore user's vector catch config */
				return ERROR_OK;
			} else {
				LOG_TARGET_DEBUG(target, "waiting for system reset-halt, "
					"DHCSR 0x%08" PRIx32 ", %d ms",
					cortex_m->dcb_dhcsr, timeout);
			}
		}
		timeout++;
		alive_sleep(1);
	}

	return ERROR_OK;
}
1211
1212 void cortex_m_enable_breakpoints(struct target *target)
1213 {
1214 struct breakpoint *breakpoint = target->breakpoints;
1215
1216 /* set any pending breakpoints */
1217 while (breakpoint) {
1218 if (!breakpoint->is_set)
1219 cortex_m_set_breakpoint(target, breakpoint);
1220 breakpoint = breakpoint->next;
1221 }
1222 }
1223
/**
 * Restore one core's context in preparation for resuming it.
 *
 * @param current           continue at the current pc when true, otherwise
 *                          set pc to *address first
 * @param address           in: resume address (when !current);
 *                          out: the pc actually resumed from (when current)
 * @param handle_breakpoints single-step past a breakpoint sitting at the
 *                          resume pc so the core does not re-break immediately
 * @param debug_execution   resuming for an algorithm run: keep working areas
 *                          and breakpoints untouched, mask interrupts via
 *                          PRIMASK and force Thumb mode in xPSR
 * @return ERROR_OK, ERROR_TARGET_NOT_HALTED, or an error from context
 *         restore / breakpoint handling
 *
 * Note: the core is NOT started here; cortex_m_restart_one() does that.
 */
static int cortex_m_restore_one(struct target *target, bool current,
	target_addr_t *address, bool handle_breakpoints, bool debug_execution)
{
	struct armv7m_common *armv7m = target_to_armv7m(target);
	struct breakpoint *breakpoint = NULL;
	uint32_t resume_pc;
	struct reg *r;

	if (target->state != TARGET_HALTED) {
		LOG_TARGET_ERROR(target, "not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* Normal resume: release algorithm scratch memory and re-arm any
	 * breakpoints/watchpoints removed earlier. */
	if (!debug_execution) {
		target_free_all_working_areas(target);
		cortex_m_enable_breakpoints(target);
		cortex_m_enable_watchpoints(target);
	}

	if (debug_execution) {
		r = armv7m->arm.core_cache->reg_list + ARMV7M_PRIMASK;

		/* Disable interrupts */
		/* We disable interrupts in the PRIMASK register instead of
		 * masking with C_MASKINTS. This is probably the same issue
		 * as Cortex-M3 Erratum 377493 (fixed in r1p0): C_MASKINTS
		 * in parallel with disabled interrupts can cause local faults
		 * to not be taken.
		 *
		 * This breaks non-debug (application) execution if not
		 * called from armv7m_start_algorithm() which saves registers.
		 */
		buf_set_u32(r->value, 0, 1, 1);
		r->dirty = true;
		r->valid = true;

		/* Make sure we are in Thumb mode, set xPSR.T bit */
		/* armv7m_start_algorithm() initializes entire xPSR register.
		 * This duplicity handles the case when cortex_m_resume()
		 * is used with the debug_execution flag directly,
		 * not called through armv7m_start_algorithm().
		 */
		r = armv7m->arm.cpsr;
		buf_set_u32(r->value, 24, 1, 1);
		r->dirty = true;
		r->valid = true;
	}

	/* current = 1: continue on current pc, otherwise continue at <address> */
	r = armv7m->arm.pc;
	if (!current) {
		buf_set_u32(r->value, 0, 32, *address);
		r->dirty = true;
		r->valid = true;
	}

	/* if we halted last time due to a bkpt instruction
	 * then we have to manually step over it, otherwise
	 * the core will break again */

	if (!breakpoint_find(target, buf_get_u32(r->value, 0, 32))
		&& !debug_execution)
		armv7m_maybe_skip_bkpt_inst(target, NULL);

	resume_pc = buf_get_u32(r->value, 0, 32);
	if (current)
		*address = resume_pc;

	/* Flush all dirty cached registers back to the core before running. */
	int retval = armv7m_restore_context(target);
	if (retval != ERROR_OK)
		return retval;

	/* the front-end may request us not to handle breakpoints */
	if (handle_breakpoints) {
		/* Single step past breakpoint at current address */
		breakpoint = breakpoint_find(target, resume_pc);
		if (breakpoint) {
			LOG_TARGET_DEBUG(target, "unset breakpoint at " TARGET_ADDR_FMT " (ID: %" PRIu32 ")",
				breakpoint->address,
				breakpoint->unique_id);
			retval = cortex_m_unset_breakpoint(target, breakpoint);
			if (retval == ERROR_OK)
				retval = cortex_m_single_step_core(target);
			/* Always try to re-arm the breakpoint, even if the unset
			 * or single-step failed; report the first error. */
			int ret2 = cortex_m_set_breakpoint(target, breakpoint);
			if (retval != ERROR_OK)
				return retval;
			if (ret2 != ERROR_OK)
				return ret2;
		}
	}

	return ERROR_OK;
}
1317
1318 static int cortex_m_restart_one(struct target *target, bool debug_execution)
1319 {
1320 struct armv7m_common *armv7m = target_to_armv7m(target);
1321
1322 /* Restart core */
1323 cortex_m_set_maskints_for_run(target);
1324 cortex_m_write_debug_halt_mask(target, 0, C_HALT);
1325
1326 target->debug_reason = DBG_REASON_NOTHALTED;
1327 /* registers are now invalid */
1328 register_cache_invalidate(armv7m->arm.core_cache);
1329
1330 if (!debug_execution) {
1331 target->state = TARGET_RUNNING;
1332 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1333 } else {
1334 target->state = TARGET_DEBUG_RUNNING;
1335 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
1336 }
1337
1338 return ERROR_OK;
1339 }
1340
1341 static int cortex_m_restore_smp(struct target *target, bool handle_breakpoints)
1342 {
1343 struct target_list *head;
1344 target_addr_t address;
1345 foreach_smp_target(head, target->smp_targets) {
1346 struct target *curr = head->target;
1347 /* skip calling target */
1348 if (curr == target)
1349 continue;
1350 if (!target_was_examined(curr))
1351 continue;
1352 /* skip running targets */
1353 if (curr->state == TARGET_RUNNING)
1354 continue;
1355
1356 int retval = cortex_m_restore_one(curr, true, &address,
1357 handle_breakpoints, false);
1358 if (retval != ERROR_OK)
1359 return retval;
1360
1361 retval = cortex_m_restart_one(curr, false);
1362 if (retval != ERROR_OK)
1363 return retval;
1364
1365 LOG_TARGET_DEBUG(curr, "SMP resumed at " TARGET_ADDR_FMT, address);
1366 }
1367 return ERROR_OK;
1368 }
1369
1370 static int cortex_m_resume(struct target *target, int current,
1371 target_addr_t address, int handle_breakpoints, int debug_execution)
1372 {
1373 int retval = cortex_m_restore_one(target, !!current, &address, !!handle_breakpoints, !!debug_execution);
1374 if (retval != ERROR_OK) {
1375 LOG_TARGET_ERROR(target, "context restore failed, aborting resume");
1376 return retval;
1377 }
1378
1379 if (target->smp && !debug_execution) {
1380 retval = cortex_m_restore_smp(target, !!handle_breakpoints);
1381 if (retval != ERROR_OK)
1382 LOG_WARNING("resume of a SMP target failed, trying to resume current one");
1383 }
1384
1385 cortex_m_restart_one(target, !!debug_execution);
1386 if (retval != ERROR_OK) {
1387 LOG_TARGET_ERROR(target, "resume failed");
1388 return retval;
1389 }
1390
1391 LOG_TARGET_DEBUG(target, "%sresumed at " TARGET_ADDR_FMT,
1392 debug_execution ? "debug " : "", address);
1393
1394 return ERROR_OK;
1395 }
1396
1397 /* int irqstepcount = 0; */
/**
 * Single-step one instruction.
 *
 * @param current            non-zero: step from current pc, else set pc to
 *                           @p address first
 * @param address            step address used when @p current is zero
 * @param handle_breakpoints non-zero: temporarily remove a breakpoint at pc
 *                           so the core can advance past it
 * @return ERROR_OK, ERROR_TARGET_NOT_HALTED, or an AP access error
 *
 * In CORTEX_M_ISRMASK_AUTO mode, pending interrupt handlers are first
 * allowed to run to completion (bounded by a 500 ms timeout) with a
 * temporary breakpoint parked on the current pc, and only then is the
 * instruction stepped with interrupts masked — see the detailed in-line
 * comments. On ISR timeout the core is deliberately left running.
 */
static int cortex_m_step(struct target *target, int current,
	target_addr_t address, int handle_breakpoints)
{
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = &cortex_m->armv7m;
	struct breakpoint *breakpoint = NULL;
	struct reg *pc = armv7m->arm.pc;
	bool bkpt_inst_found = false;
	int retval;
	bool isr_timed_out = false;

	if (target->state != TARGET_HALTED) {
		LOG_TARGET_ERROR(target, "not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* Just one of SMP cores will step. Set the gdb control
	 * target to current one or gdb miss gdb-end event */
	if (target->smp && target->gdb_service)
		target->gdb_service->target = target;

	/* current = 1: continue on current pc, otherwise continue at <address> */
	if (!current) {
		buf_set_u32(pc->value, 0, 32, address);
		pc->dirty = true;
		pc->valid = true;
	}

	uint32_t pc_value = buf_get_u32(pc->value, 0, 32);

	/* the front-end may request us not to handle breakpoints */
	if (handle_breakpoints) {
		breakpoint = breakpoint_find(target, pc_value);
		if (breakpoint)
			cortex_m_unset_breakpoint(target, breakpoint);
	}

	/* If pc sits on a BKPT instruction, the step is simulated by just
	 * advancing pc past it (bkpt_inst_found becomes true). */
	armv7m_maybe_skip_bkpt_inst(target, &bkpt_inst_found);

	target->debug_reason = DBG_REASON_SINGLESTEP;

	armv7m_restore_context(target);

	target_call_event_callbacks(target, TARGET_EVENT_RESUMED);

	/* if no bkpt instruction is found at pc then we can perform
	 * a normal step, otherwise we have to manually step over the bkpt
	 * instruction - as such simulate a step */
	if (bkpt_inst_found == false) {
		if (cortex_m->isrmasking_mode != CORTEX_M_ISRMASK_AUTO) {
			/* Automatic ISR masking mode off: Just step over the next
			 * instruction, with interrupts on or off as appropriate. */
			cortex_m_set_maskints_for_step(target);
			cortex_m_write_debug_halt_mask(target, C_STEP, C_HALT);
		} else {
			/* Process interrupts during stepping in a way they don't interfere
			 * debugging.
			 *
			 * Principle:
			 *
			 * Set a temporary break point at the current pc and let the core run
			 * with interrupts enabled. Pending interrupts get served and we run
			 * into the breakpoint again afterwards. Then we step over the next
			 * instruction with interrupts disabled.
			 *
			 * If the pending interrupts don't complete within time, we leave the
			 * core running. This may happen if the interrupts trigger faster
			 * than the core can process them or the handler doesn't return.
			 *
			 * If no more breakpoints are available we simply do a step with
			 * interrupts enabled.
			 *
			 */

			/* 2012-09-29 ph
			 *
			 * If a break point is already set on the lower half word then a break point on
			 * the upper half word will not break again when the core is restarted. So we
			 * just step over the instruction with interrupts disabled.
			 *
			 * The documentation has no information about this, it was found by observation
			 * on STM32F1 and STM32F2. Proper explanation welcome. STM32F0 doesn't seem to
			 * suffer from this problem.
			 *
			 * To add some confusion: pc_value has bit 0 always set, while the breakpoint
			 * address has it always cleared. The former is done to indicate thumb mode
			 * to gdb.
			 *
			 */
			if ((pc_value & 0x02) && breakpoint_find(target, pc_value & ~0x03)) {
				LOG_TARGET_DEBUG(target, "Stepping over next instruction with interrupts disabled");
				cortex_m_write_debug_halt_mask(target, C_HALT | C_MASKINTS, 0);
				cortex_m_write_debug_halt_mask(target, C_STEP, C_HALT);
				/* Re-enable interrupts if appropriate */
				cortex_m_write_debug_halt_mask(target, C_HALT, 0);
				cortex_m_set_maskints_for_halt(target);
			} else {

				/* Set a temporary break point */
				if (breakpoint) {
					retval = cortex_m_set_breakpoint(target, breakpoint);
				} else {
					enum breakpoint_type type = BKPT_HARD;
					if (cortex_m->fp_rev == 0 && pc_value > 0x1FFFFFFF) {
						/* FPB rev.1 cannot handle such addr, try BKPT instr */
						type = BKPT_SOFT;
					}
					retval = breakpoint_add(target, pc_value, 2, type);
				}

				bool tmp_bp_set = (retval == ERROR_OK);

				/* No more breakpoints left, just do a step */
				if (!tmp_bp_set) {
					cortex_m_set_maskints_for_step(target);
					cortex_m_write_debug_halt_mask(target, C_STEP, C_HALT);
					/* Re-enable interrupts if appropriate */
					cortex_m_write_debug_halt_mask(target, C_HALT, 0);
					cortex_m_set_maskints_for_halt(target);
				} else {
					/* Start the core */
					LOG_TARGET_DEBUG(target, "Starting core to serve pending interrupts");
					int64_t t_start = timeval_ms();
					cortex_m_set_maskints_for_run(target);
					cortex_m_write_debug_halt_mask(target, 0, C_HALT | C_STEP);

					/* Wait for pending handlers to complete or timeout */
					do {
						retval = cortex_m_read_dhcsr_atomic_sticky(target);
						if (retval != ERROR_OK) {
							target->state = TARGET_UNKNOWN;
							return retval;
						}
						isr_timed_out = ((timeval_ms() - t_start) > 500);
					} while (!((cortex_m->dcb_dhcsr & S_HALT) || isr_timed_out));

					/* only remove breakpoint if we created it */
					if (breakpoint)
						cortex_m_unset_breakpoint(target, breakpoint);
					else {
						/* Remove the temporary breakpoint */
						breakpoint_remove(target, pc_value);
					}

					if (isr_timed_out) {
						LOG_TARGET_DEBUG(target, "Interrupt handlers didn't complete within time, "
							"leaving target running");
					} else {
						/* Step over next instruction with interrupts disabled */
						cortex_m_set_maskints_for_step(target);
						cortex_m_write_debug_halt_mask(target,
							C_HALT | C_MASKINTS,
							0);
						cortex_m_write_debug_halt_mask(target, C_STEP, C_HALT);
						/* Re-enable interrupts if appropriate */
						cortex_m_write_debug_halt_mask(target, C_HALT, 0);
						cortex_m_set_maskints_for_halt(target);
					}
				}
			}
		}
	}

	retval = cortex_m_read_dhcsr_atomic_sticky(target);
	if (retval != ERROR_OK)
		return retval;

	/* registers are now invalid */
	register_cache_invalidate(armv7m->arm.core_cache);

	/* Re-arm the breakpoint we removed at the original pc, if any. */
	if (breakpoint)
		cortex_m_set_breakpoint(target, breakpoint);

	if (isr_timed_out) {
		/* Leave the core running. The user has to stop execution manually. */
		target->debug_reason = DBG_REASON_NOTHALTED;
		target->state = TARGET_RUNNING;
		return ERROR_OK;
	}

	LOG_TARGET_DEBUG(target, "target stepped dcb_dhcsr = 0x%" PRIx32
		" nvic_icsr = 0x%" PRIx32,
		cortex_m->dcb_dhcsr, cortex_m->nvic_icsr);

	retval = cortex_m_debug_entry(target);
	if (retval != ERROR_OK)
		return retval;
	target_call_event_callbacks(target, TARGET_EVENT_HALTED);

	LOG_TARGET_DEBUG(target, "target stepped dcb_dhcsr = 0x%" PRIx32
		" nvic_icsr = 0x%" PRIx32,
		cortex_m->dcb_dhcsr, cortex_m->nvic_icsr);

	return ERROR_OK;
}
1593
/**
 * Assert reset on the target.
 *
 * Prefers hardware SRST when available; otherwise falls back to a software
 * reset via AIRCR (SYSRESETREQ, or VECTRESET which resets only the core).
 * A custom TARGET_EVENT_RESET_ASSERT handler, when defined, overrides the
 * whole sequence. AP access errors during preparation are stored and
 * deferred rather than aborting, so reset is asserted whenever possible;
 * they are only propagated at the end (and suppressed entirely when SRST
 * was asserted).
 */
static int cortex_m_assert_reset(struct target *target)
{
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = &cortex_m->armv7m;
	enum cortex_m_soft_reset_config reset_config = cortex_m->soft_reset_config;

	LOG_TARGET_DEBUG(target, "target->state: %s,%s examined",
		target_state_name(target),
		target_was_examined(target) ? "" : " not");

	enum reset_types jtag_reset_config = jtag_get_reset_config();

	if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT)) {
		/* allow scripts to override the reset event */

		target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
		register_cache_invalidate(cortex_m->armv7m.arm.core_cache);
		target->state = TARGET_RESET;

		return ERROR_OK;
	}

	/* some cores support connecting while srst is asserted
	 * use that mode if it has been configured */

	bool srst_asserted = false;

	if ((jtag_reset_config & RESET_HAS_SRST) &&
		((jtag_reset_config & RESET_SRST_NO_GATING) || !armv7m->debug_ap)) {
		/* If we have no debug_ap, asserting SRST is the only thing
		 * we can do now */
		adapter_assert_reset();
		srst_asserted = true;
	}

	/* TODO: replace the hack calling target_examine_one()
	 * as soon as a better reset framework is available */
	if (!target_was_examined(target) && !target->defer_examine
		&& srst_asserted && (jtag_reset_config & RESET_SRST_NO_GATING)) {
		LOG_TARGET_DEBUG(target, "Trying to re-examine under reset");
		target_examine_one(target);
	}

	/* We need at least debug_ap to go further.
	 * Inform user and bail out if we don't have one. */
	if (!armv7m->debug_ap) {
		if (srst_asserted) {
			if (target->reset_halt)
				LOG_TARGET_ERROR(target, "Debug AP not available, will not halt after reset!");

			/* Do not propagate error: reset was asserted, proceed to deassert! */
			target->state = TARGET_RESET;
			register_cache_invalidate(cortex_m->armv7m.arm.core_cache);
			return ERROR_OK;

		} else {
			LOG_TARGET_ERROR(target, "Debug AP not available, reset NOT asserted!");
			return ERROR_FAIL;
		}
	}

	/* Enable debug requests */
	int retval = cortex_m_read_dhcsr_atomic_sticky(target);

	/* Store important errors instead of failing and proceed to reset assert */

	if (retval != ERROR_OK || !(cortex_m->dcb_dhcsr & C_DEBUGEN))
		retval = cortex_m_write_debug_halt_mask(target, 0, C_HALT | C_STEP | C_MASKINTS);

	/* If the processor is sleeping in a WFI or WFE instruction, the
	 * C_HALT bit must be asserted to regain control */
	if (retval == ERROR_OK && (cortex_m->dcb_dhcsr & S_SLEEP))
		retval = cortex_m_write_debug_halt_mask(target, C_HALT, 0);

	mem_ap_write_u32(armv7m->debug_ap, DCB_DCRDR, 0);
	/* Ignore less important errors */

	if (!target->reset_halt) {
		/* Set/Clear C_MASKINTS in a separate operation */
		cortex_m_set_maskints_for_run(target);

		/* clear any debug flags before resuming */
		cortex_m_clear_halt(target);

		/* clear C_HALT in dhcsr reg */
		cortex_m_write_debug_halt_mask(target, 0, C_HALT);
	} else {
		/* Halt in debug on reset; endreset_event() restores DEMCR.
		 *
		 * REVISIT catching BUSERR presumably helps to defend against
		 * bad vector table entries. Should this include MMERR or
		 * other flags too?
		 */
		int retval2;
		retval2 = mem_ap_write_atomic_u32(armv7m->debug_ap, DCB_DEMCR,
				TRCENA | VC_HARDERR | VC_BUSERR | VC_CORERESET);
		if (retval != ERROR_OK || retval2 != ERROR_OK)
			LOG_TARGET_INFO(target, "AP write error, reset will not halt");
	}

	if (jtag_reset_config & RESET_HAS_SRST) {
		/* default to asserting srst */
		if (!srst_asserted)
			adapter_assert_reset();

		/* srst is asserted, ignore AP access errors */
		retval = ERROR_OK;
	} else {
		/* Use a standard Cortex-M3 software reset mechanism.
		 * We default to using VECTRESET as it is supported on all current cores
		 * (except Cortex-M0, M0+ and M1 which support SYSRESETREQ only!)
		 * This has the disadvantage of not resetting the peripherals, so a
		 * reset-init event handler is needed to perform any peripheral resets.
		 */
		if (!cortex_m->vectreset_supported
				&& reset_config == CORTEX_M_RESET_VECTRESET) {
			reset_config = CORTEX_M_RESET_SYSRESETREQ;
			LOG_TARGET_WARNING(target, "VECTRESET is not supported on this Cortex-M core, using SYSRESETREQ instead.");
			LOG_TARGET_WARNING(target, "Set 'cortex_m reset_config sysresetreq'.");
		}

		LOG_TARGET_DEBUG(target, "Using Cortex-M %s", (reset_config == CORTEX_M_RESET_SYSRESETREQ)
			? "SYSRESETREQ" : "VECTRESET");

		if (reset_config == CORTEX_M_RESET_VECTRESET) {
			LOG_TARGET_WARNING(target, "Only resetting the Cortex-M core, use a reset-init event "
				"handler to reset any peripherals or configure hardware srst support.");
		}

		int retval3;
		retval3 = mem_ap_write_atomic_u32(armv7m->debug_ap, NVIC_AIRCR,
				AIRCR_VECTKEY | ((reset_config == CORTEX_M_RESET_SYSRESETREQ)
				? AIRCR_SYSRESETREQ : AIRCR_VECTRESET));
		if (retval3 != ERROR_OK)
			LOG_TARGET_DEBUG(target, "Ignoring AP write error right after reset");

		retval3 = dap_dp_init_or_reconnect(armv7m->debug_ap->dap);
		if (retval3 != ERROR_OK) {
			LOG_TARGET_ERROR(target, "DP initialisation failed");
			/* The error return value must not be propagated in this case.
			 * SYSRESETREQ or VECTRESET have been possibly triggered
			 * so reset processing should continue */
		} else {
			/* I do not know why this is necessary, but it
			 * fixes strange effects (step/resume cause NMI
			 * after reset) on LM3S6918 -- Michael Schwingen
			 */
			uint32_t tmp;
			mem_ap_read_atomic_u32(armv7m->debug_ap, NVIC_AIRCR, &tmp);
		}
	}

	target->state = TARGET_RESET;
	/* Give the reset line time to settle before touching the core again. */
	jtag_sleep(50000);

	register_cache_invalidate(cortex_m->armv7m.arm.core_cache);

	/* now return stored error code if any */
	if (retval != ERROR_OK)
		return retval;

	if (target->reset_halt && target_was_examined(target)) {
		retval = target_halt(target);
		if (retval != ERROR_OK)
			return retval;
	}

	return ERROR_OK;
}
1763
1764 static int cortex_m_deassert_reset(struct target *target)
1765 {
1766 struct armv7m_common *armv7m = &target_to_cm(target)->armv7m;
1767
1768 LOG_TARGET_DEBUG(target, "target->state: %s,%s examined",
1769 target_state_name(target),
1770 target_was_examined(target) ? "" : " not");
1771
1772 /* deassert reset lines */
1773 adapter_deassert_reset();
1774
1775 enum reset_types jtag_reset_config = jtag_get_reset_config();
1776
1777 if ((jtag_reset_config & RESET_HAS_SRST) &&
1778 !(jtag_reset_config & RESET_SRST_NO_GATING) &&
1779 armv7m->debug_ap) {
1780
1781 int retval = dap_dp_init_or_reconnect(armv7m->debug_ap->dap);
1782 if (retval != ERROR_OK) {
1783 LOG_TARGET_ERROR(target, "DP initialisation failed");
1784 return retval;
1785 }
1786 }
1787
1788 return ERROR_OK;
1789 }
1790
1791 int cortex_m_set_breakpoint(struct target *target, struct breakpoint *breakpoint)
1792 {
1793 int retval;
1794 unsigned int fp_num = 0;
1795 struct cortex_m_common *cortex_m = target_to_cm(target);
1796 struct cortex_m_fp_comparator *comparator_list = cortex_m->fp_comparator_list;
1797
1798 if (breakpoint->is_set) {
1799 LOG_TARGET_WARNING(target, "breakpoint (BPID: %" PRIu32 ") already set", breakpoint->unique_id);
1800 return ERROR_OK;
1801 }
1802
1803 if (breakpoint->type == BKPT_HARD) {
1804 uint32_t fpcr_value;
1805 while (comparator_list[fp_num].used && (fp_num < cortex_m->fp_num_code))
1806 fp_num++;
1807 if (fp_num >= cortex_m->fp_num_code) {
1808 LOG_TARGET_ERROR(target, "Can not find free FPB Comparator!");
1809 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1810 }
1811 breakpoint_hw_set(breakpoint, fp_num);
1812 fpcr_value = breakpoint->address | 1;
1813 if (cortex_m->fp_rev == 0) {
1814 if (breakpoint->address > 0x1FFFFFFF) {
1815 LOG_TARGET_ERROR(target, "Cortex-M Flash Patch Breakpoint rev.1 "
1816 "cannot handle HW breakpoint above address 0x1FFFFFFE");
1817 return ERROR_FAIL;
1818 }
1819 uint32_t hilo;
1820 hilo = (breakpoint->address & 0x2) ? FPCR_REPLACE_BKPT_HIGH : FPCR_REPLACE_BKPT_LOW;
1821 fpcr_value = (fpcr_value & 0x1FFFFFFC) | hilo | 1;
1822 } else if (cortex_m->fp_rev > 1) {
1823 LOG_TARGET_ERROR(target, "Unhandled Cortex-M Flash Patch Breakpoint architecture revision");
1824 return ERROR_FAIL;
1825 }
1826 comparator_list[fp_num].used = true;
1827 comparator_list[fp_num].fpcr_value = fpcr_value;
1828 target_write_u32(target, comparator_list[fp_num].fpcr_address,
1829 comparator_list[fp_num].fpcr_value);
1830 LOG_TARGET_DEBUG(target, "fpc_num %i fpcr_value 0x%" PRIx32 "",
1831 fp_num,
1832 comparator_list[fp_num].fpcr_value);
1833 if (!cortex_m->fpb_enabled) {
1834 LOG_TARGET_DEBUG(target, "FPB wasn't enabled, do it now");
1835 retval = cortex_m_enable_fpb(target);
1836 if (retval != ERROR_OK) {
1837 LOG_TARGET_ERROR(target, "Failed to enable the FPB");
1838 return retval;
1839 }
1840
1841 cortex_m->fpb_enabled = true;
1842 }
1843 } else if (breakpoint->type == BKPT_SOFT) {
1844 uint8_t code[4];
1845
1846 /* NOTE: on ARMv6-M and ARMv7-M, BKPT(0xab) is used for
1847 * semihosting; don't use that. Otherwise the BKPT
1848 * parameter is arbitrary.
1849 */
1850 buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1851 retval = target_read_memory(target,
1852 breakpoint->address & 0xFFFFFFFE,
1853 breakpoint->length, 1,
1854 breakpoint->orig_instr);
1855 if (retval != ERROR_OK)
1856 return retval;
1857 retval = target_write_memory(target,
1858 breakpoint->address & 0xFFFFFFFE,
1859 breakpoint->length, 1,
1860 code);
1861 if (retval != ERROR_OK)
1862 return retval;
1863 breakpoint->is_set = true;
1864 }
1865
1866 LOG_TARGET_DEBUG(target, "BPID: %" PRIu32 ", Type: %d, Address: " TARGET_ADDR_FMT " Length: %d (n=%u)",
1867 breakpoint->unique_id,
1868 (int)(breakpoint->type),
1869 breakpoint->address,
1870 breakpoint->length,
1871 (breakpoint->type == BKPT_SOFT) ? 0 : breakpoint->number);
1872
1873 return ERROR_OK;
1874 }
1875
1876 int cortex_m_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
1877 {
1878 int retval;
1879 struct cortex_m_common *cortex_m = target_to_cm(target);
1880 struct cortex_m_fp_comparator *comparator_list = cortex_m->fp_comparator_list;
1881
1882 if (!breakpoint->is_set) {
1883 LOG_TARGET_WARNING(target, "breakpoint not set");
1884 return ERROR_OK;
1885 }
1886
1887 LOG_TARGET_DEBUG(target, "BPID: %" PRIu32 ", Type: %d, Address: " TARGET_ADDR_FMT " Length: %d (n=%u)",
1888 breakpoint->unique_id,
1889 (int)(breakpoint->type),
1890 breakpoint->address,
1891 breakpoint->length,
1892 (breakpoint->type == BKPT_SOFT) ? 0 : breakpoint->number);
1893
1894 if (breakpoint->type == BKPT_HARD) {
1895 unsigned int fp_num = breakpoint->number;
1896 if (fp_num >= cortex_m->fp_num_code) {
1897 LOG_TARGET_DEBUG(target, "Invalid FP Comparator number in breakpoint");
1898 return ERROR_OK;
1899 }
1900 comparator_list[fp_num].used = false;
1901 comparator_list[fp_num].fpcr_value = 0;
1902 target_write_u32(target, comparator_list[fp_num].fpcr_address,
1903 comparator_list[fp_num].fpcr_value);
1904 } else {
1905 /* restore original instruction (kept in target endianness) */
1906 retval = target_write_memory(target, breakpoint->address & 0xFFFFFFFE,
1907 breakpoint->length, 1,
1908 breakpoint->orig_instr);
1909 if (retval != ERROR_OK)
1910 return retval;
1911 }
1912 breakpoint->is_set = false;
1913
1914 return ERROR_OK;
1915 }
1916
1917 int cortex_m_add_breakpoint(struct target *target, struct breakpoint *breakpoint)
1918 {
1919 if (breakpoint->length == 3) {
1920 LOG_TARGET_DEBUG(target, "Using a two byte breakpoint for 32bit Thumb-2 request");
1921 breakpoint->length = 2;
1922 }
1923
1924 if ((breakpoint->length != 2)) {
1925 LOG_TARGET_INFO(target, "only breakpoints of two bytes length supported");
1926 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1927 }
1928
1929 return cortex_m_set_breakpoint(target, breakpoint);
1930 }
1931
1932 int cortex_m_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1933 {
1934 if (!breakpoint->is_set)
1935 return ERROR_OK;
1936
1937 return cortex_m_unset_breakpoint(target, breakpoint);
1938 }
1939
/* Program a watchpoint into a free DWT comparator.
 *
 * Two encodings are used depending on the DWT architecture revision
 * read from DWT_DEVARCH: the legacy (pre-ARMv8-M v2.x) MASK+FUNCTION
 * scheme, and the newer scheme where size and action are folded into
 * the FUNCTION register.  Returns ERROR_FAIL when no comparator is
 * free, ERROR_OK otherwise.
 */
static int cortex_m_set_watchpoint(struct target *target, struct watchpoint *watchpoint)
{
	unsigned int dwt_num = 0;
	struct cortex_m_common *cortex_m = target_to_cm(target);

	/* REVISIT Don't fully trust these "not used" records ... users
	 * may set up breakpoints by hand, e.g. dual-address data value
	 * watchpoint using comparator #1; comparator #0 matching cycle
	 * count; send data trace info through ITM and TPIU; etc
	 */
	struct cortex_m_dwt_comparator *comparator;

	/* scan the comparator list for the first entry not marked used */
	for (comparator = cortex_m->dwt_comparator_list;
		comparator->used && dwt_num < cortex_m->dwt_num_comp;
		comparator++, dwt_num++)
		continue;
	if (dwt_num >= cortex_m->dwt_num_comp) {
		LOG_TARGET_ERROR(target, "Can not find free DWT Comparator");
		return ERROR_FAIL;
	}
	comparator->used = true;
	watchpoint_set(watchpoint, dwt_num);

	/* COMP register (offset 0): the address to match */
	comparator->comp = watchpoint->address;
	target_write_u32(target, comparator->dwt_comparator_address + 0,
		comparator->comp);

	if ((cortex_m->dwt_devarch & 0x1FFFFF) != DWT_DEVARCH_ARMV8M_V2_0
			&& (cortex_m->dwt_devarch & 0x1FFFFF) != DWT_DEVARCH_ARMV8M_V2_1) {
		/* Legacy DWT: MASK register holds log2(length), FUNCTION
		 * holds a small action code. */
		uint32_t mask = 0, temp;

		/* watchpoint params were validated earlier */
		temp = watchpoint->length;
		while (temp) {
			temp >>= 1;
			mask++;
		}
		mask--;

		comparator->mask = mask;
		target_write_u32(target, comparator->dwt_comparator_address + 4,
			comparator->mask);

		switch (watchpoint->rw) {
		case WPT_READ:
			comparator->function = 5;
			break;
		case WPT_WRITE:
			comparator->function = 6;
			break;
		case WPT_ACCESS:
			comparator->function = 7;
			break;
		}
	} else {
		/* ARMv8-M v2.0/v2.1 DWT: size and action are encoded in
		 * FUNCTION; the cached "mask" field is reused for the
		 * size-derived value written below. */
		uint32_t data_size = watchpoint->length >> 1;
		comparator->mask = (watchpoint->length >> 1) | 1;

		switch (watchpoint->rw) {
		case WPT_ACCESS:
			comparator->function = 4;
			break;
		case WPT_WRITE:
			comparator->function = 5;
			break;
		case WPT_READ:
			comparator->function = 6;
			break;
		}
		/* set ACTION (bit 4) and DATAVSIZE (bits 11:10) fields */
		comparator->function = comparator->function | (1 << 4) |
				(data_size << 10);
	}

	/* FUNCTION register (offset 8): arming the comparator */
	target_write_u32(target, comparator->dwt_comparator_address + 8,
		comparator->function);

	LOG_TARGET_DEBUG(target, "Watchpoint (ID %d) DWT%d 0x%08x 0x%x 0x%05x",
		watchpoint->unique_id, dwt_num,
		(unsigned) comparator->comp,
		(unsigned) comparator->mask,
		(unsigned) comparator->function);
	return ERROR_OK;
}
2023
2024 static int cortex_m_unset_watchpoint(struct target *target, struct watchpoint *watchpoint)
2025 {
2026 struct cortex_m_common *cortex_m = target_to_cm(target);
2027 struct cortex_m_dwt_comparator *comparator;
2028
2029 if (!watchpoint->is_set) {
2030 LOG_TARGET_WARNING(target, "watchpoint (wpid: %d) not set",
2031 watchpoint->unique_id);
2032 return ERROR_OK;
2033 }
2034
2035 unsigned int dwt_num = watchpoint->number;
2036
2037 LOG_TARGET_DEBUG(target, "Watchpoint (ID %d) DWT%u address: 0x%08x clear",
2038 watchpoint->unique_id, dwt_num,
2039 (unsigned) watchpoint->address);
2040
2041 if (dwt_num >= cortex_m->dwt_num_comp) {
2042 LOG_TARGET_DEBUG(target, "Invalid DWT Comparator number in watchpoint");
2043 return ERROR_OK;
2044 }
2045
2046 comparator = cortex_m->dwt_comparator_list + dwt_num;
2047 comparator->used = false;
2048 comparator->function = 0;
2049 target_write_u32(target, comparator->dwt_comparator_address + 8,
2050 comparator->function);
2051
2052 watchpoint->is_set = false;
2053
2054 return ERROR_OK;
2055 }
2056
2057 int cortex_m_add_watchpoint(struct target *target, struct watchpoint *watchpoint)
2058 {
2059 struct cortex_m_common *cortex_m = target_to_cm(target);
2060
2061 if (cortex_m->dwt_comp_available < 1) {
2062 LOG_TARGET_DEBUG(target, "no comparators?");
2063 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2064 }
2065
2066 /* REVISIT This DWT may well be able to watch for specific data
2067 * values. Requires comparator #1 to set DATAVMATCH and match
2068 * the data, and another comparator (DATAVADDR0) matching addr.
2069 *
2070 * NOTE: hardware doesn't support data value masking, so we'll need
2071 * to check that mask is zero
2072 */
2073 if (watchpoint->mask != WATCHPOINT_IGNORE_DATA_VALUE_MASK) {
2074 LOG_TARGET_DEBUG(target, "watchpoint value masks not supported");
2075 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2076 }
2077
2078 /* hardware allows address masks of up to 32K */
2079 unsigned mask;
2080
2081 for (mask = 0; mask < 16; mask++) {
2082 if ((1u << mask) == watchpoint->length)
2083 break;
2084 }
2085 if (mask == 16) {
2086 LOG_TARGET_DEBUG(target, "unsupported watchpoint length");
2087 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2088 }
2089 if (watchpoint->address & ((1 << mask) - 1)) {
2090 LOG_TARGET_DEBUG(target, "watchpoint address is unaligned");
2091 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2092 }
2093
2094 cortex_m->dwt_comp_available--;
2095 LOG_TARGET_DEBUG(target, "dwt_comp_available: %d", cortex_m->dwt_comp_available);
2096
2097 return ERROR_OK;
2098 }
2099
2100 int cortex_m_remove_watchpoint(struct target *target, struct watchpoint *watchpoint)
2101 {
2102 struct cortex_m_common *cortex_m = target_to_cm(target);
2103
2104 /* REVISIT why check? DWT can be updated with core running ... */
2105 if (target->state != TARGET_HALTED) {
2106 LOG_TARGET_ERROR(target, "not halted");
2107 return ERROR_TARGET_NOT_HALTED;
2108 }
2109
2110 if (watchpoint->is_set)
2111 cortex_m_unset_watchpoint(target, watchpoint);
2112
2113 cortex_m->dwt_comp_available++;
2114 LOG_TARGET_DEBUG(target, "dwt_comp_available: %d", cortex_m->dwt_comp_available);
2115
2116 return ERROR_OK;
2117 }
2118
2119 static int cortex_m_hit_watchpoint(struct target *target, struct watchpoint **hit_watchpoint)
2120 {
2121 if (target->debug_reason != DBG_REASON_WATCHPOINT)
2122 return ERROR_FAIL;
2123
2124 struct cortex_m_common *cortex_m = target_to_cm(target);
2125
2126 for (struct watchpoint *wp = target->watchpoints; wp; wp = wp->next) {
2127 if (!wp->is_set)
2128 continue;
2129
2130 unsigned int dwt_num = wp->number;
2131 struct cortex_m_dwt_comparator *comparator = cortex_m->dwt_comparator_list + dwt_num;
2132
2133 uint32_t dwt_function;
2134 int retval = target_read_u32(target, comparator->dwt_comparator_address + 8, &dwt_function);
2135 if (retval != ERROR_OK)
2136 return ERROR_FAIL;
2137
2138 /* check the MATCHED bit */
2139 if (dwt_function & BIT(24)) {
2140 *hit_watchpoint = wp;
2141 return ERROR_OK;
2142 }
2143 }
2144
2145 return ERROR_FAIL;
2146 }
2147
2148 void cortex_m_enable_watchpoints(struct target *target)
2149 {
2150 struct watchpoint *watchpoint = target->watchpoints;
2151
2152 /* set any pending watchpoints */
2153 while (watchpoint) {
2154 if (!watchpoint->is_set)
2155 cortex_m_set_watchpoint(target, watchpoint);
2156 watchpoint = watchpoint->next;
2157 }
2158 }
2159
2160 static int cortex_m_read_memory(struct target *target, target_addr_t address,
2161 uint32_t size, uint32_t count, uint8_t *buffer)
2162 {
2163 struct armv7m_common *armv7m = target_to_armv7m(target);
2164
2165 if (armv7m->arm.arch == ARM_ARCH_V6M) {
2166 /* armv6m does not handle unaligned memory access */
2167 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
2168 return ERROR_TARGET_UNALIGNED_ACCESS;
2169 }
2170
2171 return mem_ap_read_buf(armv7m->debug_ap, buffer, size, count, address);
2172 }
2173
2174 static int cortex_m_write_memory(struct target *target, target_addr_t address,
2175 uint32_t size, uint32_t count, const uint8_t *buffer)
2176 {
2177 struct armv7m_common *armv7m = target_to_armv7m(target);
2178
2179 if (armv7m->arm.arch == ARM_ARCH_V6M) {
2180 /* armv6m does not handle unaligned memory access */
2181 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
2182 return ERROR_TARGET_UNALIGNED_ACCESS;
2183 }
2184
2185 return mem_ap_write_buf(armv7m->debug_ap, buffer, size, count, address);
2186 }
2187
/* Target-type init hook: build the ARMv7-M register cache and set up
 * semihosting support.  Both helpers are infallible here, so this
 * always reports ERROR_OK. */
static int cortex_m_init_target(struct command_context *cmd_ctx,
	struct target *target)
{
	armv7m_build_reg_cache(target);
	arm_semihosting_init(target);
	return ERROR_OK;
}
2195
/* Target-type deinit hook: release the debug AP reference (DAP-based
 * targets only), free comparator/DWT/register-cache state, and finally
 * free the private config and the cortex_m instance itself. */
void cortex_m_deinit_target(struct target *target)
{
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = target_to_armv7m(target);

	/* HLA targets never acquired an AP reference, so nothing to put */
	if (!armv7m->is_hla_target && armv7m->debug_ap)
		dap_put_ap(armv7m->debug_ap);

	free(cortex_m->fp_comparator_list);

	cortex_m_dwt_free(target);
	armv7m_free_reg_cache(target);

	free(target->private_config);
	free(cortex_m);
}
2212
2213 int cortex_m_profiling(struct target *target, uint32_t *samples,
2214 uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
2215 {
2216 struct timeval timeout, now;
2217 struct armv7m_common *armv7m = target_to_armv7m(target);
2218 uint32_t reg_value;
2219 int retval;
2220
2221 retval = target_read_u32(target, DWT_PCSR, &reg_value);
2222 if (retval != ERROR_OK) {
2223 LOG_TARGET_ERROR(target, "Error while reading PCSR");
2224 return retval;
2225 }
2226 if (reg_value == 0) {
2227 LOG_TARGET_INFO(target, "PCSR sampling not supported on this processor.");
2228 return target_profiling_default(target, samples, max_num_samples, num_samples, seconds);
2229 }
2230
2231 gettimeofday(&timeout, NULL);
2232 timeval_add_time(&timeout, seconds, 0);
2233
2234 LOG_TARGET_INFO(target, "Starting Cortex-M profiling. Sampling DWT_PCSR as fast as we can...");
2235
2236 /* Make sure the target is running */
2237 target_poll(target);
2238 if (target->state == TARGET_HALTED)
2239 retval = target_resume(target, 1, 0, 0, 0);
2240
2241 if (retval != ERROR_OK) {
2242 LOG_TARGET_ERROR(target, "Error while resuming target");
2243 return retval;
2244 }
2245
2246 uint32_t sample_count = 0;
2247
2248 for (;;) {
2249 if (armv7m && armv7m->debug_ap) {
2250 uint32_t read_count = max_num_samples - sample_count;
2251 if (read_count > 1024)
2252 read_count = 1024;
2253
2254 retval = mem_ap_read_buf_noincr(armv7m->debug_ap,
2255 (void *)&samples[sample_count],
2256 4, read_count, DWT_PCSR);
2257 sample_count += read_count;
2258 } else {
2259 target_read_u32(target, DWT_PCSR, &samples[sample_count++]);
2260 }
2261
2262 if (retval != ERROR_OK) {
2263 LOG_TARGET_ERROR(target, "Error while reading PCSR");
2264 return retval;
2265 }
2266
2267
2268 gettimeofday(&now, NULL);
2269 if (sample_count >= max_num_samples || timeval_compare(&now, &timeout) > 0) {
2270 LOG_TARGET_INFO(target, "Profiling completed. %" PRIu32 " samples.", sample_count);
2271 break;
2272 }
2273 }
2274
2275 *num_samples = sample_count;
2276 return retval;
2277 }
2278
2279
2280 /* REVISIT cache valid/dirty bits are unmaintained. We could set "valid"
2281 * on r/w if the core is not running, and clear on resume or reset ... or
2282 * at least, in a post_restore_context() method.
2283 */
2284
/* Per-register backing state for an entry in the DWT register cache. */
struct dwt_reg_state {
	struct target *target;	/* target this DWT register belongs to */
	uint32_t addr;		/* memory-mapped address of the DWT register */
	uint8_t value[4];	/* scratch/cache */
};
2290
2291 static int cortex_m_dwt_get_reg(struct reg *reg)
2292 {
2293 struct dwt_reg_state *state = reg->arch_info;
2294
2295 uint32_t tmp;
2296 int retval = target_read_u32(state->target, state->addr, &tmp);
2297 if (retval != ERROR_OK)
2298 return retval;
2299
2300 buf_set_u32(state->value, 0, 32, tmp);
2301 return ERROR_OK;
2302 }
2303
2304 static int cortex_m_dwt_set_reg(struct reg *reg, uint8_t *buf)
2305 {
2306 struct dwt_reg_state *state = reg->arch_info;
2307
2308 return target_write_u32(state->target, state->addr,
2309 buf_get_u32(buf, 0, reg->size));
2310 }
2311
/* Static descriptor for one DWT register exposed in the reg cache. */
struct dwt_reg {
	uint32_t addr;		/* memory-mapped register address */
	const char *name;	/* name shown to the user */
	unsigned size;		/* register width in bits */
};
2317
/* DWT registers common to all comparator counts: control and the
 * cycle counter. */
static const struct dwt_reg dwt_base_regs[] = {
	{ DWT_CTRL, "dwt_ctrl", 32, },
	/* NOTE that Erratum 532314 (fixed r2p0) affects CYCCNT:  it wrongly
	 * increments while the core is asleep.
	 */
	{ DWT_CYCCNT, "dwt_cyccnt", 32, },
	/* plus some 8 bit counters, useful for profiling with TPIU */
};
2326
/* Per-comparator register triples (COMP/MASK/FUNCTION), generated for
 * the architectural maximum of 16 comparators; only the first
 * dwt_num_comp triples are actually wired into the cache.  Note the
 * mask registers are declared 4 bits wide. */
static const struct dwt_reg dwt_comp[] = {
#define DWT_COMPARATOR(i) \
		{ DWT_COMP0 + 0x10 * (i), "dwt_" #i "_comp", 32, }, \
		{ DWT_MASK0 + 0x10 * (i), "dwt_" #i "_mask", 4, }, \
		{ DWT_FUNCTION0 + 0x10 * (i), "dwt_" #i "_function", 32, }
	DWT_COMPARATOR(0),
	DWT_COMPARATOR(1),
	DWT_COMPARATOR(2),
	DWT_COMPARATOR(3),
	DWT_COMPARATOR(4),
	DWT_COMPARATOR(5),
	DWT_COMPARATOR(6),
	DWT_COMPARATOR(7),
	DWT_COMPARATOR(8),
	DWT_COMPARATOR(9),
	DWT_COMPARATOR(10),
	DWT_COMPARATOR(11),
	DWT_COMPARATOR(12),
	DWT_COMPARATOR(13),
	DWT_COMPARATOR(14),
	DWT_COMPARATOR(15),
#undef DWT_COMPARATOR
};
2350
/* reg cache accessors shared by every DWT register entry */
static const struct reg_arch_type dwt_reg_type = {
	.get = cortex_m_dwt_get_reg,
	.set = cortex_m_dwt_set_reg,
};
2355
2356 static void cortex_m_dwt_addreg(struct target *t, struct reg *r, const struct dwt_reg *d)
2357 {
2358 struct dwt_reg_state *state;
2359
2360 state = calloc(1, sizeof(*state));
2361 if (!state)
2362 return;
2363 state->addr = d->addr;
2364 state->target = t;
2365
2366 r->name = d->name;
2367 r->size = d->size;
2368 r->value = state->value;
2369 r->arch_info = state;
2370 r->type = &dwt_reg_type;
2371 }
2372
/* Discover the DWT unit, allocate comparator bookkeeping, and expose
 * the DWT registers through a dedicated register cache appended to the
 * target's cache chain.  Any enabled comparators are cleared so stale
 * watchpoints from a previous session cannot fire.  Failure is not
 * fatal: on any allocation error the target simply ends up with
 * dwt_num_comp == 0 (no DWT support). */
static void cortex_m_dwt_setup(struct cortex_m_common *cm, struct target *target)
{
	uint32_t dwtcr;
	struct reg_cache *cache;
	struct cortex_m_dwt_comparator *comparator;
	int reg;

	target_read_u32(target, DWT_CTRL, &dwtcr);
	LOG_TARGET_DEBUG(target, "DWT_CTRL: 0x%" PRIx32, dwtcr);
	if (!dwtcr) {
		/* DWT_CTRL reading as zero means no DWT is implemented */
		LOG_TARGET_DEBUG(target, "no DWT");
		return;
	}

	/* DEVARCH distinguishes DWT revisions; used when programming
	 * watchpoints (cortex_m_set_watchpoint) */
	target_read_u32(target, DWT_DEVARCH, &cm->dwt_devarch);
	LOG_TARGET_DEBUG(target, "DWT_DEVARCH: 0x%" PRIx32, cm->dwt_devarch);

	/* NUMCOMP field: number of implemented comparators */
	cm->dwt_num_comp = (dwtcr >> 28) & 0xF;
	cm->dwt_comp_available = cm->dwt_num_comp;
	cm->dwt_comparator_list = calloc(cm->dwt_num_comp,
			sizeof(struct cortex_m_dwt_comparator));
	if (!cm->dwt_comparator_list) {
fail0:
		cm->dwt_num_comp = 0;
		LOG_TARGET_ERROR(target, "out of mem");
		return;
	}

	cache = calloc(1, sizeof(*cache));
	if (!cache) {
fail1:
		free(cm->dwt_comparator_list);
		goto fail0;
	}
	cache->name = "Cortex-M DWT registers";
	/* 2 base registers + COMP/MASK/FUNCTION per comparator */
	cache->num_regs = 2 + cm->dwt_num_comp * 3;
	cache->reg_list = calloc(cache->num_regs, sizeof(*cache->reg_list));
	if (!cache->reg_list) {
		free(cache);
		goto fail1;
	}

	for (reg = 0; reg < 2; reg++)
		cortex_m_dwt_addreg(target, cache->reg_list + reg,
			dwt_base_regs + reg);

	comparator = cm->dwt_comparator_list;
	for (unsigned int i = 0; i < cm->dwt_num_comp; i++, comparator++) {
		int j;

		comparator->dwt_comparator_address = DWT_COMP0 + 0x10 * i;
		for (j = 0; j < 3; j++, reg++)
			cortex_m_dwt_addreg(target, cache->reg_list + reg,
				dwt_comp + 3 * i + j);

		/* make sure we clear any watchpoints enabled on the target */
		target_write_u32(target, comparator->dwt_comparator_address + 8, 0);
	}

	*register_get_last_cache_p(&target->reg_cache) = cache;
	cm->dwt_cache = cache;

	LOG_TARGET_DEBUG(target, "DWT dwtcr 0x%" PRIx32 ", comp %d, watch%s",
		dwtcr, cm->dwt_num_comp,
		(dwtcr & (0xf << 24)) ? " only" : "/trigger");

	/* REVISIT:  if num_comp > 1, check whether comparator #1 can
	 * implement single-address data value watchpoints ... so we
	 * won't need to check it later, when asked to set one up.
	 */
}
2444
2445 static void cortex_m_dwt_free(struct target *target)
2446 {
2447 struct cortex_m_common *cm = target_to_cm(target);
2448 struct reg_cache *cache = cm->dwt_cache;
2449
2450 free(cm->dwt_comparator_list);
2451 cm->dwt_comparator_list = NULL;
2452 cm->dwt_num_comp = 0;
2453
2454 if (cache) {
2455 register_unlink_cache(&target->reg_cache, cache);
2456
2457 if (cache->reg_list) {
2458 for (size_t i = 0; i < cache->num_regs; i++)
2459 free(cache->reg_list[i].arch_info);
2460 free(cache->reg_list);
2461 }
2462 free(cache);
2463 }
2464 cm->dwt_cache = NULL;
2465 }
2466
2467 static bool cortex_m_has_tz(struct target *target)
2468 {
2469 struct armv7m_common *armv7m = target_to_armv7m(target);
2470 uint32_t dauthstatus;
2471
2472 if (armv7m->arm.arch != ARM_ARCH_V8M)
2473 return false;
2474
2475 int retval = target_read_u32(target, DAUTHSTATUS, &dauthstatus);
2476 if (retval != ERROR_OK) {
2477 LOG_WARNING("Error reading DAUTHSTATUS register");
2478 return false;
2479 }
2480 return (dauthstatus & DAUTHSTATUS_SID_MASK) != 0;
2481 }
2482
2483 #define MVFR0 0xe000ef40
2484 #define MVFR1 0xe000ef44
2485
2486 #define MVFR0_DEFAULT_M4 0x10110021
2487 #define MVFR1_DEFAULT_M4 0x11000011
2488
2489 #define MVFR0_DEFAULT_M7_SP 0x10110021
2490 #define MVFR0_DEFAULT_M7_DP 0x10110221
2491 #define MVFR1_DEFAULT_M7_SP 0x11000011
2492 #define MVFR1_DEFAULT_M7_DP 0x12000011
2493
2494 static int cortex_m_find_mem_ap(struct adiv5_dap *swjdp,
2495 struct adiv5_ap **debug_ap)
2496 {
2497 if (dap_find_get_ap(swjdp, AP_TYPE_AHB3_AP, debug_ap) == ERROR_OK)
2498 return ERROR_OK;
2499
2500 return dap_find_get_ap(swjdp, AP_TYPE_AHB5_AP, debug_ap);
2501 }
2502
/* Examine handler: acquire the MEM-AP (DAP targets), identify the core
 * from CPUID, probe FPU/TrustZone features, enable debug, and set up
 * the FPB and DWT units.  Shared with hla_target, which skips the AP
 * handling.  Identification and unit setup run only on the first
 * examine (target_was_examined guard). */
int cortex_m_examine(struct target *target)
{
	int retval;
	uint32_t cpuid, fpcr, mvfr0, mvfr1;
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct adiv5_dap *swjdp = cortex_m->armv7m.arm.dap;
	struct armv7m_common *armv7m = target_to_armv7m(target);

	/* hla_target shares the examine handler but does not support
	 * all its calls */
	if (!armv7m->is_hla_target) {
		if (!armv7m->debug_ap) {
			if (cortex_m->apsel == DP_APSEL_INVALID) {
				/* Search for the MEM-AP */
				retval = cortex_m_find_mem_ap(swjdp, &armv7m->debug_ap);
				if (retval != ERROR_OK) {
					LOG_TARGET_ERROR(target, "Could not find MEM-AP to control the core");
					return retval;
				}
			} else {
				/* user pinned the AP via config; honor it */
				armv7m->debug_ap = dap_get_ap(swjdp, cortex_m->apsel);
				if (!armv7m->debug_ap) {
					LOG_ERROR("Cannot get AP");
					return ERROR_FAIL;
				}
			}
		}

		armv7m->debug_ap->memaccess_tck = 8;

		retval = mem_ap_init(armv7m->debug_ap);
		if (retval != ERROR_OK)
			return retval;
	}

	if (!target_was_examined(target)) {
		target_set_examined(target);

		/* Read from Device Identification Registers */
		retval = target_read_u32(target, CPUID, &cpuid);
		if (retval != ERROR_OK)
			return retval;

		/* Inspect implementor/part to look for recognized cores  */
		unsigned int impl_part = cpuid & (ARM_CPUID_IMPLEMENTOR_MASK | ARM_CPUID_PARTNO_MASK);

		for (unsigned int n = 0; n < ARRAY_SIZE(cortex_m_parts); n++) {
			if (impl_part == cortex_m_parts[n].impl_part) {
				cortex_m->core_info = &cortex_m_parts[n];
				break;
			}
		}

		if (!cortex_m->core_info) {
			LOG_TARGET_ERROR(target, "Cortex-M CPUID: 0x%x is unrecognized", cpuid);
			return ERROR_FAIL;
		}

		armv7m->arm.arch = cortex_m->core_info->arch;

		/* CPUID[23:20] is the revision, CPUID[3:0] the patch level */
		LOG_TARGET_INFO(target, "%s r%" PRId8 "p%" PRId8 " processor detected",
				cortex_m->core_info->name,
				(uint8_t)((cpuid >> 20) & 0xf),
				(uint8_t)((cpuid >> 0) & 0xf));

		/* Cortex-M7 r0p0/r0p1 erratum: single step may enter a
		 * pending exception handler; worked around via MASKINTS */
		cortex_m->maskints_erratum = false;
		if (impl_part == CORTEX_M7_PARTNO) {
			uint8_t rev, patch;
			rev = (cpuid >> 20) & 0xf;
			patch = (cpuid >> 0) & 0xf;
			if ((rev == 0) && (patch < 2)) {
				LOG_TARGET_WARNING(target, "Silicon bug: single stepping may enter pending exception handler!");
				cortex_m->maskints_erratum = true;
			}
		}
		LOG_TARGET_DEBUG(target, "cpuid: 0x%8.8" PRIx32 "", cpuid);

		/* Probe the FPU by comparing the media/VFP feature registers
		 * against the known default values for each core family */
		if (cortex_m->core_info->flags & CORTEX_M_F_HAS_FPV4) {
			target_read_u32(target, MVFR0, &mvfr0);
			target_read_u32(target, MVFR1, &mvfr1);

			/* test for floating point feature on Cortex-M4 */
			if ((mvfr0 == MVFR0_DEFAULT_M4) && (mvfr1 == MVFR1_DEFAULT_M4)) {
				LOG_TARGET_DEBUG(target, "%s floating point feature FPv4_SP found", cortex_m->core_info->name);
				armv7m->fp_feature = FPV4_SP;
			}
		} else if (cortex_m->core_info->flags & CORTEX_M_F_HAS_FPV5) {
			target_read_u32(target, MVFR0, &mvfr0);
			target_read_u32(target, MVFR1, &mvfr1);

			/* test for floating point features on Cortex-M7 */
			if ((mvfr0 == MVFR0_DEFAULT_M7_SP) && (mvfr1 == MVFR1_DEFAULT_M7_SP)) {
				LOG_TARGET_DEBUG(target, "%s floating point feature FPv5_SP found", cortex_m->core_info->name);
				armv7m->fp_feature = FPV5_SP;
			} else if ((mvfr0 == MVFR0_DEFAULT_M7_DP) && (mvfr1 == MVFR1_DEFAULT_M7_DP)) {
				LOG_TARGET_DEBUG(target, "%s floating point feature FPv5_DP found", cortex_m->core_info->name);
				armv7m->fp_feature = FPV5_DP;
			}
		}

		/* VECTRESET is supported only on ARMv7-M cores */
		cortex_m->vectreset_supported = armv7m->arm.arch == ARM_ARCH_V7M;

		/* Check for FPU, otherwise mark FPU register as non-existent */
		if (armv7m->fp_feature == FP_NONE)
			for (size_t idx = ARMV7M_FPU_FIRST_REG; idx <= ARMV7M_FPU_LAST_REG; idx++)
				armv7m->arm.core_cache->reg_list[idx].exist = false;

		/* likewise hide the v8-M security-extension registers when
		 * TrustZone is absent */
		if (!cortex_m_has_tz(target))
			for (size_t idx = ARMV8M_FIRST_REG; idx <= ARMV8M_LAST_REG; idx++)
				armv7m->arm.core_cache->reg_list[idx].exist = false;

		if (!armv7m->is_hla_target) {
			if (cortex_m->core_info->flags & CORTEX_M_F_TAR_AUTOINCR_BLOCK_4K)
				/* Cortex-M3/M4 have 4096 bytes autoincrement range,
				 * s. ARM IHI 0031C: MEM-AP 7.2.2 */
				armv7m->debug_ap->tar_autoincr_block = (1 << 12);
		}

		retval = target_read_u32(target, DCB_DHCSR, &cortex_m->dcb_dhcsr);
		if (retval != ERROR_OK)
			return retval;

		/* Don't cumulate sticky S_RESET_ST at the very first read of DHCSR
		 * as S_RESET_ST may indicate a reset that happened long time ago
		 * (most probably the power-on reset before OpenOCD was started).
		 * As we are just initializing the debug system we do not need
		 * to call cortex_m_endreset_event() in the following poll.
		 */
		if (!cortex_m->dcb_dhcsr_sticky_is_recent) {
			cortex_m->dcb_dhcsr_sticky_is_recent = true;
			if (cortex_m->dcb_dhcsr & S_RESET_ST) {
				LOG_TARGET_DEBUG(target, "reset happened some time ago, ignore");
				cortex_m->dcb_dhcsr &= ~S_RESET_ST;
			}
		}
		cortex_m_cumulate_dhcsr_sticky(cortex_m, cortex_m->dcb_dhcsr);

		if (!(cortex_m->dcb_dhcsr & C_DEBUGEN)) {
			/* Enable debug requests */
			uint32_t dhcsr = (cortex_m->dcb_dhcsr | C_DEBUGEN) & ~(C_HALT | C_STEP | C_MASKINTS);

			/* DHCSR writes need the DBGKEY in the upper half */
			retval = target_write_u32(target, DCB_DHCSR, DBGKEY | (dhcsr & 0x0000FFFFUL));
			if (retval != ERROR_OK)
				return retval;
			cortex_m->dcb_dhcsr = dhcsr;
		}

		/* Configure trace modules */
		retval = target_write_u32(target, DCB_DEMCR, TRCENA | armv7m->demcr);
		if (retval != ERROR_OK)
			return retval;

		if (armv7m->trace_config.itm_deferred_config)
			armv7m_trace_itm_config(target);

		/* NOTE: FPB and DWT are both optional. */

		/* Setup FPB */
		target_read_u32(target, FP_CTRL, &fpcr);
		/* bits [14:12] and [7:4] */
		cortex_m->fp_num_code = ((fpcr >> 8) & 0x70) | ((fpcr >> 4) & 0xF);
		cortex_m->fp_num_lit = (fpcr >> 8) & 0xF;
		/* Detect flash patch revision, see RM DDI 0403E.b page C1-817.
		   Revision is zero base, fp_rev == 1 means Rev.2 ! */
		cortex_m->fp_rev = (fpcr >> 28) & 0xf;
		free(cortex_m->fp_comparator_list);
		/* NOTE(review): this calloc result is not checked; a failed
		 * allocation would crash in the loop below — confirm/fix */
		cortex_m->fp_comparator_list = calloc(
				cortex_m->fp_num_code + cortex_m->fp_num_lit,
				sizeof(struct cortex_m_fp_comparator));
		cortex_m->fpb_enabled = fpcr & 1;
		for (unsigned int i = 0; i < cortex_m->fp_num_code + cortex_m->fp_num_lit; i++) {
			cortex_m->fp_comparator_list[i].type =
				(i < cortex_m->fp_num_code) ? FPCR_CODE : FPCR_LITERAL;
			cortex_m->fp_comparator_list[i].fpcr_address = FP_COMP0 + 4 * i;

			/* make sure we clear any breakpoints enabled on the target */
			target_write_u32(target, cortex_m->fp_comparator_list[i].fpcr_address, 0);
		}
		LOG_TARGET_DEBUG(target, "FPB fpcr 0x%" PRIx32 ", numcode %i, numlit %i",
			fpcr,
			cortex_m->fp_num_code,
			cortex_m->fp_num_lit);

		/* Setup DWT */
		cortex_m_dwt_free(target);
		cortex_m_dwt_setup(cortex_m, target);

		/* These hardware breakpoints only work for code in flash! */
		LOG_TARGET_INFO(target, "target has %d breakpoints, %d watchpoints",
			cortex_m->fp_num_code,
			cortex_m->dwt_num_comp);
	}

	return ERROR_OK;
}
2699
2700 static int cortex_m_dcc_read(struct target *target, uint8_t *value, uint8_t *ctrl)
2701 {
2702 struct armv7m_common *armv7m = target_to_armv7m(target);
2703 uint16_t dcrdr;
2704 uint8_t buf[2];
2705 int retval;
2706
2707 retval = mem_ap_read_buf_noincr(armv7m->debug_ap, buf, 2, 1, DCB_DCRDR);
2708 if (retval != ERROR_OK)
2709 return retval;
2710
2711 dcrdr = target_buffer_get_u16(target, buf);
2712 *ctrl = (uint8_t)dcrdr;
2713 *value = (uint8_t)(dcrdr >> 8);
2714
2715 LOG_TARGET_DEBUG(target, "data 0x%x ctrl 0x%x", *value, *ctrl);
2716
2717 /* write ack back to software dcc register
2718 * signify we have read data */
2719 if (dcrdr & (1 << 0)) {
2720 target_buffer_set_u16(target, buf, 0);
2721 retval = mem_ap_write_buf_noincr(armv7m->debug_ap, buf, 2, 1, DCB_DCRDR);
2722 if (retval != ERROR_OK)
2723 return retval;
2724 }
2725
2726 return ERROR_OK;
2727 }
2728
2729 static int cortex_m_target_request_data(struct target *target,
2730 uint32_t size, uint8_t *buffer)
2731 {
2732 uint8_t data;
2733 uint8_t ctrl;
2734 uint32_t i;
2735
2736 for (i = 0; i < (size * 4); i++) {
2737 int retval = cortex_m_dcc_read(target, &data, &ctrl);
2738 if (retval != ERROR_OK)
2739 return retval;
2740 buffer[i] = data;
2741 }
2742
2743 return ERROR_OK;
2744 }
2745
2746 static int cortex_m_handle_target_request(void *priv)
2747 {
2748 struct target *target = priv;
2749 if (!target_was_examined(target))
2750 return ERROR_OK;
2751
2752 if (!target->dbg_msg_enabled)
2753 return ERROR_OK;
2754
2755 if (target->state == TARGET_RUNNING) {
2756 uint8_t data;
2757 uint8_t ctrl;
2758 int retval;
2759
2760 retval = cortex_m_dcc_read(target, &data, &ctrl);
2761 if (retval != ERROR_OK)
2762 return retval;
2763
2764 /* check if we have data */
2765 if (ctrl & (1 << 0)) {
2766 uint32_t request;
2767
2768 /* we assume target is quick enough */
2769 request = data;
2770 for (int i = 1; i <= 3; i++) {
2771 retval = cortex_m_dcc_read(target, &data, &ctrl);
2772 if (retval != ERROR_OK)
2773 return retval;
2774 request |= ((uint32_t)data << (i * 8));
2775 }
2776 target_request(target, request);
2777 }
2778 }
2779
2780 return ERROR_OK;
2781 }
2782
/* Initialize the cortex_m_common/armv7m arch structures: wire up the
 * DAP, the register access callbacks, and the periodic DCC polling
 * timer.  Always returns ERROR_OK. */
static int cortex_m_init_arch_info(struct target *target,
	struct cortex_m_common *cortex_m, struct adiv5_dap *dap)
{
	struct armv7m_common *armv7m = &cortex_m->armv7m;

	armv7m_init_arch_info(target, armv7m);

	/* default reset mode is to use srst if fitted
	 * if not it will use CORTEX_M3_RESET_VECTRESET */
	cortex_m->soft_reset_config = CORTEX_M_RESET_VECTRESET;

	armv7m->arm.dap = dap;

	/* register arch-specific functions */
	armv7m->examine_debug_reason = cortex_m_examine_debug_reason;

	/* no extra work needed around debug entry / context restore */
	armv7m->post_debug_entry = NULL;

	armv7m->pre_restore_context = NULL;

	armv7m->load_core_reg_u32 = cortex_m_load_core_reg_u32;
	armv7m->store_core_reg_u32 = cortex_m_store_core_reg_u32;

	/* poll the DCC for debug messages every millisecond */
	target_register_timer_callback(cortex_m_handle_target_request, 1,
		TARGET_TIMER_TYPE_PERIODIC, target);

	return ERROR_OK;
}
2811
2812 static int cortex_m_target_create(struct target *target, Jim_Interp *interp)
2813 {
2814 struct adiv5_private_config *pc;
2815
2816 pc = (struct adiv5_private_config *)target->private_config;
2817 if (adiv5_verify_config(pc) != ERROR_OK)
2818 return ERROR_FAIL;
2819
2820 struct cortex_m_common *cortex_m = calloc(1, sizeof(struct cortex_m_common));
2821 if (!cortex_m) {
2822 LOG_TARGET_ERROR(target, "No memory creating target");
2823 return ERROR_FAIL;
2824 }
2825
2826 cortex_m->common_magic = CORTEX_M_COMMON_MAGIC;
2827 cortex_m->apsel = pc->ap_num;
2828
2829 cortex_m_init_arch_info(target, cortex_m, pc->dap);
2830
2831 return ERROR_OK;
2832 }
2833
2834 /*--------------------------------------------------------------------------*/
2835
2836 static int cortex_m_verify_pointer(struct command_invocation *cmd,
2837 struct cortex_m_common *cm)
2838 {
2839 if (!is_cortex_m_with_dap_access(cm)) {
2840 command_print(cmd, "target is not a Cortex-M");
2841 return ERROR_TARGET_INVALID;
2842 }
2843 return ERROR_OK;
2844 }
2845
2846 /*
2847 * Only stuff below this line should need to verify that its target
2848 * is a Cortex-M3. Everything else should have indirected through the
2849 * cortexm3_target structure, which is only used with CM3 targets.
2850 */
2851
/* "cortex_m vector_catch" command: with arguments, program the DEMCR
 * vector catch bits ("all", "none", or a list of fault names); always
 * print the resulting catch/ignore state of every vector. */
COMMAND_HANDLER(handle_cortex_m_vector_catch_command)
{
	struct target *target = get_current_target(CMD_CTX);
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = &cortex_m->armv7m;
	uint32_t demcr = 0;
	int retval;

	/* name-to-DEMCR-bit mapping for the user-visible vector names */
	static const struct {
		char name[10];
		unsigned mask;
	} vec_ids[] = {
		{ "hard_err", VC_HARDERR, },
		{ "int_err", VC_INTERR, },
		{ "bus_err", VC_BUSERR, },
		{ "state_err", VC_STATERR, },
		{ "chk_err", VC_CHKERR, },
		{ "nocp_err", VC_NOCPERR, },
		{ "mm_err", VC_MMERR, },
		{ "reset", VC_CORERESET, },
	};

	retval = cortex_m_verify_pointer(CMD, cortex_m);
	if (retval != ERROR_OK)
		return retval;

	if (!target_was_examined(target)) {
		LOG_TARGET_ERROR(target, "Target not examined yet");
		return ERROR_FAIL;
	}

	retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DEMCR, &demcr);
	if (retval != ERROR_OK)
		return retval;

	if (CMD_ARGC > 0) {
		unsigned catch = 0;

		/* "all"/"none" shortcuts skip the per-name lookup below */
		if (CMD_ARGC == 1) {
			if (strcmp(CMD_ARGV[0], "all") == 0) {
				catch = VC_HARDERR | VC_INTERR | VC_BUSERR
					| VC_STATERR | VC_CHKERR | VC_NOCPERR
					| VC_MMERR | VC_CORERESET;
				goto write;
			} else if (strcmp(CMD_ARGV[0], "none") == 0)
				goto write;
		}
		/* otherwise OR together the mask of every named vector */
		while (CMD_ARGC-- > 0) {
			unsigned i;
			for (i = 0; i < ARRAY_SIZE(vec_ids); i++) {
				if (strcmp(CMD_ARGV[CMD_ARGC], vec_ids[i].name) != 0)
					continue;
				catch |= vec_ids[i].mask;
				break;
			}
			if (i == ARRAY_SIZE(vec_ids)) {
				LOG_TARGET_ERROR(target, "No CM3 vector '%s'", CMD_ARGV[CMD_ARGC]);
				return ERROR_COMMAND_SYNTAX_ERROR;
			}
		}
write:
		/* For now, armv7m->demcr only stores vector catch flags. */
		armv7m->demcr = catch;

		demcr &= ~0xffff;
		demcr |= catch;

		/* write, but don't assume it stuck (why not??) */
		retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DEMCR, demcr);
		if (retval != ERROR_OK)
			return retval;
		retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DEMCR, &demcr);
		if (retval != ERROR_OK)
			return retval;

		/* FIXME be sure to clear DEMCR on clean server shutdown.
		 * Otherwise the vector catch hardware could fire when there's
		 * no debugger hooked up, causing much confusion...
		 */
	}

	/* report the current (possibly just-written) state per vector */
	for (unsigned i = 0; i < ARRAY_SIZE(vec_ids); i++) {
		command_print(CMD, "%9s: %s", vec_ids[i].name,
			(demcr & vec_ids[i].mask) ? "catch" : "ignore");
	}

	return ERROR_OK;
}
2940
2941 COMMAND_HANDLER(handle_cortex_m_mask_interrupts_command)
2942 {
2943 struct target *target = get_current_target(CMD_CTX);
2944 struct cortex_m_common *cortex_m = target_to_cm(target);
2945 int retval;
2946
2947 static const struct nvp nvp_maskisr_modes[] = {
2948 { .name = "auto", .value = CORTEX_M_ISRMASK_AUTO },
2949 { .name = "off", .value = CORTEX_M_ISRMASK_OFF },
2950 { .name = "on", .value = CORTEX_M_ISRMASK_ON },
2951 { .name = "steponly", .value = CORTEX_M_ISRMASK_STEPONLY },
2952 { .name = NULL, .value = -1 },
2953 };
2954 const struct nvp *n;
2955
2956
2957 retval = cortex_m_verify_pointer(CMD, cortex_m);
2958 if (retval != ERROR_OK)
2959 return retval;
2960
2961 if (target->state != TARGET_HALTED) {
2962 command_print(CMD, "Error: target must be stopped for \"%s\" command", CMD_NAME);
2963 return ERROR_TARGET_NOT_HALTED;
2964 }
2965
2966 if (CMD_ARGC > 0) {
2967 n = nvp_name2value(nvp_maskisr_modes, CMD_ARGV[0]);
2968 if (!n->name)
2969 return ERROR_COMMAND_SYNTAX_ERROR;
2970 cortex_m->isrmasking_mode = n->value;
2971 cortex_m_set_maskints_for_halt(target);
2972 }
2973
2974 n = nvp_value2name(nvp_maskisr_modes, cortex_m->isrmasking_mode);
2975 command_print(CMD, "cortex_m interrupt mask %s", n->name);
2976
2977 return ERROR_OK;
2978 }
2979
2980 COMMAND_HANDLER(handle_cortex_m_reset_config_command)
2981 {
2982 struct target *target = get_current_target(CMD_CTX);
2983 struct cortex_m_common *cortex_m = target_to_cm(target);
2984 int retval;
2985 char *reset_config;
2986
2987 retval = cortex_m_verify_pointer(CMD, cortex_m);
2988 if (retval != ERROR_OK)
2989 return retval;
2990
2991 if (CMD_ARGC > 0) {
2992 if (strcmp(*CMD_ARGV, "sysresetreq") == 0)
2993 cortex_m->soft_reset_config = CORTEX_M_RESET_SYSRESETREQ;
2994
2995 else if (strcmp(*CMD_ARGV, "vectreset") == 0) {
2996 if (target_was_examined(target)
2997 && !cortex_m->vectreset_supported)
2998 LOG_TARGET_WARNING(target, "VECTRESET is not supported on your Cortex-M core!");
2999 else
3000 cortex_m->soft_reset_config = CORTEX_M_RESET_VECTRESET;
3001
3002 } else
3003 return ERROR_COMMAND_SYNTAX_ERROR;
3004 }
3005
3006 switch (cortex_m->soft_reset_config) {
3007 case CORTEX_M_RESET_SYSRESETREQ:
3008 reset_config = "sysresetreq";
3009 break;
3010
3011 case CORTEX_M_RESET_VECTRESET:
3012 reset_config = "vectreset";
3013 break;
3014
3015 default:
3016 reset_config = "unknown";
3017 break;
3018 }
3019
3020 command_print(CMD, "cortex_m reset_config %s", reset_config);
3021
3022 return ERROR_OK;
3023 }
3024
/* Sub-commands of the 'cortex_m' command group (registered below in
 * cortex_m_command_handlers). */
static const struct command_registration cortex_m_exec_command_handlers[] = {
	{
		/* Control interrupt masking policy during step/resume. */
		.name = "maskisr",
		.handler = handle_cortex_m_mask_interrupts_command,
		.mode = COMMAND_EXEC,
		.help = "mask cortex_m interrupts",
		.usage = "['auto'|'on'|'off'|'steponly']",
	},
	{
		/* Select which fault/reset vectors trigger debug entry. */
		.name = "vector_catch",
		.handler = handle_cortex_m_vector_catch_command,
		.mode = COMMAND_EXEC,
		.help = "configure hardware vectors to trigger debug entry",
		.usage = "['all'|'none'|('bus_err'|'chk_err'|...)*]",
	},
	{
		/* Choose the soft-reset mechanism; usable before examine,
		 * hence COMMAND_ANY rather than COMMAND_EXEC. */
		.name = "reset_config",
		.handler = handle_cortex_m_reset_config_command,
		.mode = COMMAND_ANY,
		.help = "configure software reset handling",
		.usage = "['sysresetreq'|'vectreset']",
	},
	{
		/* Pull in the generic SMP commands. */
		.chain = smp_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
/* Top-level command set for Cortex-M targets: the shared ARMv7-M and
 * trace commands, the 'cortex_m' group above, and RTT support. */
static const struct command_registration cortex_m_command_handlers[] = {
	{
		.chain = armv7m_command_handlers,
	},
	{
		.chain = armv7m_trace_command_handlers,
	},
	/* START_DEPRECATED_TPIU */
	{
		.chain = arm_tpiu_deprecated_command_handlers,
	},
	/* END_DEPRECATED_TPIU */
	{
		.name = "cortex_m",
		.mode = COMMAND_EXEC,
		.help = "Cortex-M command group",
		.usage = "",
		.chain = cortex_m_exec_command_handlers,
	},
	{
		.chain = rtt_target_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
3076
/* Target driver vtable for the 'cortex_m' target type: wires the generic
 * target framework to the Cortex-M specific implementations in this file
 * and to the shared ARMv7-M helpers. */
struct target_type cortexm_target = {
	.name = "cortex_m",

	/* State polling and reporting */
	.poll = cortex_m_poll,
	.arch_state = armv7m_arch_state,

	.target_request_data = cortex_m_target_request_data,

	/* Run control */
	.halt = cortex_m_halt,
	.resume = cortex_m_resume,
	.step = cortex_m_step,

	/* Reset handling */
	.assert_reset = cortex_m_assert_reset,
	.deassert_reset = cortex_m_deassert_reset,
	.soft_reset_halt = cortex_m_soft_reset_halt,

	/* GDB integration */
	.get_gdb_arch = arm_get_gdb_arch,
	.get_gdb_reg_list = armv7m_get_gdb_reg_list,

	/* Memory access */
	.read_memory = cortex_m_read_memory,
	.write_memory = cortex_m_write_memory,
	.checksum_memory = armv7m_checksum_memory,
	.blank_check_memory = armv7m_blank_check_memory,

	/* Target-resident algorithm execution (flash drivers etc.) */
	.run_algorithm = armv7m_run_algorithm,
	.start_algorithm = armv7m_start_algorithm,
	.wait_algorithm = armv7m_wait_algorithm,

	/* Breakpoints and watchpoints */
	.add_breakpoint = cortex_m_add_breakpoint,
	.remove_breakpoint = cortex_m_remove_breakpoint,
	.add_watchpoint = cortex_m_add_watchpoint,
	.remove_watchpoint = cortex_m_remove_watchpoint,
	.hit_watchpoint = cortex_m_hit_watchpoint,

	/* Lifecycle and configuration */
	.commands = cortex_m_command_handlers,
	.target_create = cortex_m_target_create,
	.target_jim_configure = adiv5_jim_configure,
	.init_target = cortex_m_init_target,
	.examine = cortex_m_examine,
	.deinit_target = cortex_m_deinit_target,

	.profiling = cortex_m_profiling,
};

Linking to existing account procedure

If you already have an account and want to add another login method you MUST first sign in with your existing account and then change URL to read https://review.openocd.org/login/?link to get to this page again but this time it'll work for linking. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)