target/cortex_m: prevent segmentation fault in cortex_m_poll()
[openocd.git] / src / target / cortex_m.c
1 /* SPDX-License-Identifier: GPL-2.0-or-later */
2
3 /***************************************************************************
4 * Copyright (C) 2005 by Dominic Rath *
5 * Dominic.Rath@gmx.de *
6 * *
7 * Copyright (C) 2006 by Magnus Lundin *
8 * lundin@mlu.mine.nu *
9 * *
10 * Copyright (C) 2008 by Spencer Oliver *
11 * spen@spen-soft.co.uk *
12 * *
13 * *
14 * Cortex-M3(tm) TRM, ARM DDI 0337E (r1p1) and 0337G (r2p0) *
15 * *
16 ***************************************************************************/
17 #ifdef HAVE_CONFIG_H
18 #include "config.h"
19 #endif
20
21 #include "jtag/interface.h"
22 #include "breakpoints.h"
23 #include "cortex_m.h"
24 #include "target_request.h"
25 #include "target_type.h"
26 #include "arm_adi_v5.h"
27 #include "arm_disassembler.h"
28 #include "register.h"
29 #include "arm_opcodes.h"
30 #include "arm_semihosting.h"
31 #include <helper/time_support.h>
32 #include <rtt/rtt.h>
33
34 /* NOTE: most of this should work fine for the Cortex-M1 and
35 * Cortex-M0 cores too, although they're ARMv6-M not ARMv7-M.
36 * Some differences: M0/M1 doesn't have FPB remapping or the
37 * DWT tracing/profiling support. (So the cycle counter will
38 * not be usable; the other stuff isn't currently used here.)
39 *
40 * Although there are some workarounds for errata seen only in r0p0
41 * silicon, such old parts are hard to find and thus not much tested
42 * any longer.
43 */
44
45 /* Timeout for register r/w */
46 #define DHCSR_S_REGRDY_TIMEOUT (500)
47
/* Supported Cortex-M Cores: maps the CPUID PARTNO to a display name,
 * the implemented architecture version and per-core feature flags
 * (FPU variant, TAR auto-increment block size). */
static const struct cortex_m_part_info cortex_m_parts[] = {
	{
		.partno = CORTEX_M0_PARTNO,
		.name = "Cortex-M0",
		.arch = ARM_ARCH_V6M,
	},
	{
		.partno = CORTEX_M0P_PARTNO,
		.name = "Cortex-M0+",
		.arch = ARM_ARCH_V6M,
	},
	{
		.partno = CORTEX_M1_PARTNO,
		.name = "Cortex-M1",
		.arch = ARM_ARCH_V6M,
	},
	{
		.partno = CORTEX_M3_PARTNO,
		.name = "Cortex-M3",
		.arch = ARM_ARCH_V7M,
		.flags = CORTEX_M_F_TAR_AUTOINCR_BLOCK_4K,
	},
	{
		.partno = CORTEX_M4_PARTNO,
		.name = "Cortex-M4",
		.arch = ARM_ARCH_V7M,
		.flags = CORTEX_M_F_HAS_FPV4 | CORTEX_M_F_TAR_AUTOINCR_BLOCK_4K,
	},
	{
		.partno = CORTEX_M7_PARTNO,
		.name = "Cortex-M7",
		.arch = ARM_ARCH_V7M,
		.flags = CORTEX_M_F_HAS_FPV5,
	},
	{
		.partno = CORTEX_M23_PARTNO,
		.name = "Cortex-M23",
		.arch = ARM_ARCH_V8M,
	},
	{
		.partno = CORTEX_M33_PARTNO,
		.name = "Cortex-M33",
		.arch = ARM_ARCH_V8M,
		.flags = CORTEX_M_F_HAS_FPV5,
	},
	{
		.partno = CORTEX_M35P_PARTNO,
		.name = "Cortex-M35P",
		.arch = ARM_ARCH_V8M,
		.flags = CORTEX_M_F_HAS_FPV5,
	},
	{
		.partno = CORTEX_M55_PARTNO,
		.name = "Cortex-M55",
		.arch = ARM_ARCH_V8M,
		.flags = CORTEX_M_F_HAS_FPV5,
	},
};
107
108 /* forward declarations */
109 static int cortex_m_store_core_reg_u32(struct target *target,
110 uint32_t num, uint32_t value);
111 static void cortex_m_dwt_free(struct target *target);
112
/** DCB DHCSR register contains S_RETIRE_ST and S_RESET_ST bits cleared
 * on a read. Call this helper function each time DHCSR is read
 * to preserve S_RESET_ST state in case of a reset event was detected.
 */
static inline void cortex_m_cumulate_dhcsr_sticky(struct cortex_m_common *cortex_m,
		uint32_t dhcsr)
{
	/* OR, not assign: keep any previously seen sticky bits until the
	 * consumer (e.g. cortex_m_poll) explicitly clears them */
	cortex_m->dcb_dhcsr_cumulated_sticky |= dhcsr;
}
122
123 /** Read DCB DHCSR register to cortex_m->dcb_dhcsr and cumulate
124 * sticky bits in cortex_m->dcb_dhcsr_cumulated_sticky
125 */
126 static int cortex_m_read_dhcsr_atomic_sticky(struct target *target)
127 {
128 struct cortex_m_common *cortex_m = target_to_cm(target);
129 struct armv7m_common *armv7m = target_to_armv7m(target);
130
131 int retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DHCSR,
132 &cortex_m->dcb_dhcsr);
133 if (retval != ERROR_OK)
134 return retval;
135
136 cortex_m_cumulate_dhcsr_sticky(cortex_m, cortex_m->dcb_dhcsr);
137 return ERROR_OK;
138 }
139
/** Read one core register (selected by DCRSR regsel) into *value.
 * Polls DHCSR.S_REGRDY until the register file transfer completes or
 * DHCSR_S_REGRDY_TIMEOUT expires. Sets cortex_m->slow_register_read
 * whenever at least one polling iteration was needed, so the caller
 * keeps using this slow path instead of the batched fast read.
 */
static int cortex_m_load_core_reg_u32(struct target *target,
		uint32_t regsel, uint32_t *value)
{
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = target_to_armv7m(target);
	int retval;
	uint32_t dcrdr, tmp_value;
	int64_t then;

	/* because the DCB_DCRDR is used for the emulated dcc channel
	 * we have to save/restore the DCB_DCRDR when used */
	if (target->dbg_msg_enabled) {
		retval = mem_ap_read_u32(armv7m->debug_ap, DCB_DCRDR, &dcrdr);
		if (retval != ERROR_OK)
			return retval;
	}

	/* select the register; hardware then copies it to DCRDR */
	retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DCRSR, regsel);
	if (retval != ERROR_OK)
		return retval;

	/* check if value from register is ready and pre-read it */
	then = timeval_ms();
	while (1) {
		retval = mem_ap_read_u32(armv7m->debug_ap, DCB_DHCSR,
				&cortex_m->dcb_dhcsr);
		if (retval != ERROR_OK)
			return retval;
		/* read DCRDR speculatively in the same pass; it is only kept
		 * once S_REGRDY confirms the transfer finished */
		retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DCRDR,
				&tmp_value);
		if (retval != ERROR_OK)
			return retval;
		cortex_m_cumulate_dhcsr_sticky(cortex_m, cortex_m->dcb_dhcsr);
		if (cortex_m->dcb_dhcsr & S_REGRDY)
			break;
		cortex_m->slow_register_read = true; /* Polling (still) needed. */
		if (timeval_ms() > then + DHCSR_S_REGRDY_TIMEOUT) {
			LOG_TARGET_ERROR(target, "Timeout waiting for DCRDR transfer ready");
			return ERROR_TIMEOUT_REACHED;
		}
		keep_alive();
	}

	*value = tmp_value;

	if (target->dbg_msg_enabled) {
		/* restore DCB_DCRDR - this needs to be in a separate
		 * transaction otherwise the emulated DCC channel breaks */
		if (retval == ERROR_OK)
			retval = mem_ap_write_atomic_u32(armv7m->debug_ap, DCB_DCRDR, dcrdr);
	}

	return retval;
}
194
195 static int cortex_m_slow_read_all_regs(struct target *target)
196 {
197 struct cortex_m_common *cortex_m = target_to_cm(target);
198 struct armv7m_common *armv7m = target_to_armv7m(target);
199 const unsigned int num_regs = armv7m->arm.core_cache->num_regs;
200
201 /* Opportunistically restore fast read, it'll revert to slow
202 * if any register needed polling in cortex_m_load_core_reg_u32(). */
203 cortex_m->slow_register_read = false;
204
205 for (unsigned int reg_id = 0; reg_id < num_regs; reg_id++) {
206 struct reg *r = &armv7m->arm.core_cache->reg_list[reg_id];
207 if (r->exist) {
208 int retval = armv7m->arm.read_core_reg(target, r, reg_id, ARM_MODE_ANY);
209 if (retval != ERROR_OK)
210 return retval;
211 }
212 }
213
214 if (!cortex_m->slow_register_read)
215 LOG_TARGET_DEBUG(target, "Switching back to fast register reads");
216
217 return ERROR_OK;
218 }
219
220 static int cortex_m_queue_reg_read(struct target *target, uint32_t regsel,
221 uint32_t *reg_value, uint32_t *dhcsr)
222 {
223 struct armv7m_common *armv7m = target_to_armv7m(target);
224 int retval;
225
226 retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DCRSR, regsel);
227 if (retval != ERROR_OK)
228 return retval;
229
230 retval = mem_ap_read_u32(armv7m->debug_ap, DCB_DHCSR, dhcsr);
231 if (retval != ERROR_OK)
232 return retval;
233
234 return mem_ap_read_u32(armv7m->debug_ap, DCB_DCRDR, reg_value);
235 }
236
/** Read all core registers in a single batched DAP transaction.
 * Per 32-bit word it queues DCRSR select + DHCSR sample + DCRDR read,
 * runs the whole queue once, then verifies each sampled DHCSR had
 * S_REGRDY set. Returns ERROR_TIMEOUT_REACHED if any register was not
 * ready, letting the caller fall back to the slow polled path.
 */
static int cortex_m_fast_read_all_regs(struct target *target)
{
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = target_to_armv7m(target);
	int retval;
	uint32_t dcrdr;

	/* because the DCB_DCRDR is used for the emulated dcc channel
	 * we have to save/restore the DCB_DCRDR when used */
	if (target->dbg_msg_enabled) {
		retval = mem_ap_read_u32(armv7m->debug_ap, DCB_DCRDR, &dcrdr);
		if (retval != ERROR_OK)
			return retval;
	}

	const unsigned int num_regs = armv7m->arm.core_cache->num_regs;
	const unsigned int n_r32 = ARMV7M_LAST_REG - ARMV7M_CORE_FIRST_REG + 1
			+ ARMV7M_FPU_LAST_REG - ARMV7M_FPU_FIRST_REG + 1;
	/* we need one 32-bit word for each register except FP D0..D15, which
	 * need two words */
	uint32_t r_vals[n_r32];
	uint32_t dhcsr[n_r32];

	unsigned int wi = 0; /* write index to r_vals and dhcsr arrays */
	unsigned int reg_id; /* register index in the reg_list, ARMV7M_R0... */
	for (reg_id = 0; reg_id < num_regs; reg_id++) {
		struct reg *r = &armv7m->arm.core_cache->reg_list[reg_id];
		if (!r->exist)
			continue; /* skip non existent registers */

		if (r->size <= 8) {
			/* Any 8-bit or shorter register is unpacked from a 32-bit
			 * container register. Skip it now. */
			continue;
		}

		uint32_t regsel = armv7m_map_id_to_regsel(reg_id);
		retval = cortex_m_queue_reg_read(target, regsel, &r_vals[wi],
				&dhcsr[wi]);
		if (retval != ERROR_OK)
			return retval;
		wi++;

		assert(r->size == 32 || r->size == 64);
		if (r->size == 32)
			continue; /* done with 32-bit register */

		assert(reg_id >= ARMV7M_FPU_FIRST_REG && reg_id <= ARMV7M_FPU_LAST_REG);
		/* the odd part of FP register (S1, S3...) */
		retval = cortex_m_queue_reg_read(target, regsel + 1, &r_vals[wi],
				&dhcsr[wi]);
		if (retval != ERROR_OK)
			return retval;
		wi++;
	}

	assert(wi <= n_r32);

	/* execute the whole queued batch in one shot */
	retval = dap_run(armv7m->debug_ap->dap);
	if (retval != ERROR_OK)
		return retval;

	if (target->dbg_msg_enabled) {
		/* restore DCB_DCRDR - this needs to be in a separate
		 * transaction otherwise the emulated DCC channel breaks */
		retval = mem_ap_write_atomic_u32(armv7m->debug_ap, DCB_DCRDR, dcrdr);
		if (retval != ERROR_OK)
			return retval;
	}

	/* check the DHCSR sample taken right after each register select */
	bool not_ready = false;
	for (unsigned int i = 0; i < wi; i++) {
		if ((dhcsr[i] & S_REGRDY) == 0) {
			not_ready = true;
			LOG_TARGET_DEBUG(target, "Register %u was not ready during fast read", i);
		}
		cortex_m_cumulate_dhcsr_sticky(cortex_m, dhcsr[i]);
	}

	if (not_ready) {
		/* Any register was not ready,
		 * fall back to slow read with S_REGRDY polling */
		return ERROR_TIMEOUT_REACHED;
	}

	LOG_TARGET_DEBUG(target, "read %u 32-bit registers", wi);

	/* second pass: distribute the raw words into the register cache */
	unsigned int ri = 0; /* read index from r_vals array */
	for (reg_id = 0; reg_id < num_regs; reg_id++) {
		struct reg *r = &armv7m->arm.core_cache->reg_list[reg_id];
		if (!r->exist)
			continue; /* skip non existent registers */

		r->dirty = false;

		unsigned int reg32_id;
		uint32_t offset;
		if (armv7m_map_reg_packing(reg_id, &reg32_id, &offset)) {
			/* Unpack a partial register from 32-bit container register */
			struct reg *r32 = &armv7m->arm.core_cache->reg_list[reg32_id];

			/* The container register ought to precede all regs unpacked
			 * from it in the reg_list. So the value should be ready
			 * to unpack */
			assert(r32->valid);
			buf_cpy(r32->value + offset, r->value, r->size);

		} else {
			assert(r->size == 32 || r->size == 64);
			buf_set_u32(r->value, 0, 32, r_vals[ri++]);

			if (r->size == 64) {
				assert(reg_id >= ARMV7M_FPU_FIRST_REG && reg_id <= ARMV7M_FPU_LAST_REG);
				/* the odd part of FP register (S1, S3...) */
				buf_set_u32(r->value + 4, 0, 32, r_vals[ri++]);
			}
		}
		r->valid = true;
	}
	/* every queued word must have been consumed */
	assert(ri == wi);

	return retval;
}
360
/** Write one core register: load the value into DCRDR, then trigger the
 * transfer by writing DCRSR with DCRSR_WNR and the regsel. Polls
 * DHCSR.S_REGRDY (with DHCSR_S_REGRDY_TIMEOUT) until the write lands.
 */
static int cortex_m_store_core_reg_u32(struct target *target,
		uint32_t regsel, uint32_t value)
{
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = target_to_armv7m(target);
	int retval;
	uint32_t dcrdr;
	int64_t then;

	/* because the DCB_DCRDR is used for the emulated dcc channel
	 * we have to save/restore the DCB_DCRDR when used */
	if (target->dbg_msg_enabled) {
		retval = mem_ap_read_u32(armv7m->debug_ap, DCB_DCRDR, &dcrdr);
		if (retval != ERROR_OK)
			return retval;
	}

	retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DCRDR, value);
	if (retval != ERROR_OK)
		return retval;

	/* WNR = write; the core copies DCRDR into the selected register */
	retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DCRSR, regsel | DCRSR_WNR);
	if (retval != ERROR_OK)
		return retval;

	/* check if value is written into register */
	then = timeval_ms();
	while (1) {
		retval = cortex_m_read_dhcsr_atomic_sticky(target);
		if (retval != ERROR_OK)
			return retval;
		if (cortex_m->dcb_dhcsr & S_REGRDY)
			break;
		if (timeval_ms() > then + DHCSR_S_REGRDY_TIMEOUT) {
			LOG_TARGET_ERROR(target, "Timeout waiting for DCRDR transfer ready");
			return ERROR_TIMEOUT_REACHED;
		}
		keep_alive();
	}

	if (target->dbg_msg_enabled) {
		/* restore DCB_DCRDR - this needs to be in a separate
		 * transaction otherwise the emulated DCC channel breaks */
		if (retval == ERROR_OK)
			retval = mem_ap_write_atomic_u32(armv7m->debug_ap, DCB_DCRDR, dcrdr);
	}

	return retval;
}
410
411 static int cortex_m_write_debug_halt_mask(struct target *target,
412 uint32_t mask_on, uint32_t mask_off)
413 {
414 struct cortex_m_common *cortex_m = target_to_cm(target);
415 struct armv7m_common *armv7m = &cortex_m->armv7m;
416
417 /* mask off status bits */
418 cortex_m->dcb_dhcsr &= ~((0xFFFFul << 16) | mask_off);
419 /* create new register mask */
420 cortex_m->dcb_dhcsr |= DBGKEY | C_DEBUGEN | mask_on;
421
422 return mem_ap_write_atomic_u32(armv7m->debug_ap, DCB_DHCSR, cortex_m->dcb_dhcsr);
423 }
424
425 static int cortex_m_set_maskints(struct target *target, bool mask)
426 {
427 struct cortex_m_common *cortex_m = target_to_cm(target);
428 if (!!(cortex_m->dcb_dhcsr & C_MASKINTS) != mask)
429 return cortex_m_write_debug_halt_mask(target, mask ? C_MASKINTS : 0, mask ? 0 : C_MASKINTS);
430 else
431 return ERROR_OK;
432 }
433
434 static int cortex_m_set_maskints_for_halt(struct target *target)
435 {
436 struct cortex_m_common *cortex_m = target_to_cm(target);
437 switch (cortex_m->isrmasking_mode) {
438 case CORTEX_M_ISRMASK_AUTO:
439 /* interrupts taken at resume, whether for step or run -> no mask */
440 return cortex_m_set_maskints(target, false);
441
442 case CORTEX_M_ISRMASK_OFF:
443 /* interrupts never masked */
444 return cortex_m_set_maskints(target, false);
445
446 case CORTEX_M_ISRMASK_ON:
447 /* interrupts always masked */
448 return cortex_m_set_maskints(target, true);
449
450 case CORTEX_M_ISRMASK_STEPONLY:
451 /* interrupts masked for single step only -> mask now if MASKINTS
452 * erratum, otherwise only mask before stepping */
453 return cortex_m_set_maskints(target, cortex_m->maskints_erratum);
454 }
455 return ERROR_OK;
456 }
457
458 static int cortex_m_set_maskints_for_run(struct target *target)
459 {
460 switch (target_to_cm(target)->isrmasking_mode) {
461 case CORTEX_M_ISRMASK_AUTO:
462 /* interrupts taken at resume, whether for step or run -> no mask */
463 return cortex_m_set_maskints(target, false);
464
465 case CORTEX_M_ISRMASK_OFF:
466 /* interrupts never masked */
467 return cortex_m_set_maskints(target, false);
468
469 case CORTEX_M_ISRMASK_ON:
470 /* interrupts always masked */
471 return cortex_m_set_maskints(target, true);
472
473 case CORTEX_M_ISRMASK_STEPONLY:
474 /* interrupts masked for single step only -> no mask */
475 return cortex_m_set_maskints(target, false);
476 }
477 return ERROR_OK;
478 }
479
480 static int cortex_m_set_maskints_for_step(struct target *target)
481 {
482 switch (target_to_cm(target)->isrmasking_mode) {
483 case CORTEX_M_ISRMASK_AUTO:
484 /* the auto-interrupt should already be done -> mask */
485 return cortex_m_set_maskints(target, true);
486
487 case CORTEX_M_ISRMASK_OFF:
488 /* interrupts never masked */
489 return cortex_m_set_maskints(target, false);
490
491 case CORTEX_M_ISRMASK_ON:
492 /* interrupts always masked */
493 return cortex_m_set_maskints(target, true);
494
495 case CORTEX_M_ISRMASK_STEPONLY:
496 /* interrupts masked for single step only -> mask */
497 return cortex_m_set_maskints(target, true);
498 }
499 return ERROR_OK;
500 }
501
502 static int cortex_m_clear_halt(struct target *target)
503 {
504 struct cortex_m_common *cortex_m = target_to_cm(target);
505 struct armv7m_common *armv7m = &cortex_m->armv7m;
506 int retval;
507
508 /* clear step if any */
509 cortex_m_write_debug_halt_mask(target, C_HALT, C_STEP);
510
511 /* Read Debug Fault Status Register */
512 retval = mem_ap_read_atomic_u32(armv7m->debug_ap, NVIC_DFSR, &cortex_m->nvic_dfsr);
513 if (retval != ERROR_OK)
514 return retval;
515
516 /* Clear Debug Fault Status */
517 retval = mem_ap_write_atomic_u32(armv7m->debug_ap, NVIC_DFSR, cortex_m->nvic_dfsr);
518 if (retval != ERROR_OK)
519 return retval;
520 LOG_TARGET_DEBUG(target, "NVIC_DFSR 0x%" PRIx32 "", cortex_m->nvic_dfsr);
521
522 return ERROR_OK;
523 }
524
/** Execute exactly one instruction with interrupts masked:
 * set C_MASKINTS (if not already set), pulse C_STEP while dropping
 * C_HALT, then restore the halted state via cortex_m_clear_halt().
 */
static int cortex_m_single_step_core(struct target *target)
{
	struct cortex_m_common *cortex_m = target_to_cm(target);
	int retval;

	/* Mask interrupts before clearing halt, if not done already. This avoids
	 * Erratum 377497 (fixed in r1p0) where setting MASKINTS while clearing
	 * HALT can put the core into an unknown state.
	 */
	if (!(cortex_m->dcb_dhcsr & C_MASKINTS)) {
		retval = cortex_m_write_debug_halt_mask(target, C_MASKINTS, 0);
		if (retval != ERROR_OK)
			return retval;
	}
	retval = cortex_m_write_debug_halt_mask(target, C_STEP, C_HALT);
	if (retval != ERROR_OK)
		return retval;
	LOG_TARGET_DEBUG(target, "single step");

	/* restore dhcsr reg */
	/* NOTE(review): cortex_m_clear_halt() result is deliberately(?)
	 * ignored here — confirm best-effort restore is intended */
	cortex_m_clear_halt(target);

	return ERROR_OK;
}
549
550 static int cortex_m_enable_fpb(struct target *target)
551 {
552 int retval = target_write_u32(target, FP_CTRL, 3);
553 if (retval != ERROR_OK)
554 return retval;
555
556 /* check the fpb is actually enabled */
557 uint32_t fpctrl;
558 retval = target_read_u32(target, FP_CTRL, &fpctrl);
559 if (retval != ERROR_OK)
560 return retval;
561
562 if (fpctrl & 1)
563 return ERROR_OK;
564
565 return ERROR_FAIL;
566 }
567
/** Re-initialize debug state after the core leaves reset:
 * re-enable C_DEBUGEN if lost, restore DEMCR/vector catch settings,
 * re-enable the FPB and replay all cached FPB and DWT comparator
 * registers (some chips do not preserve them across reset), then
 * invalidate the register cache and refresh DHCSR.
 */
static int cortex_m_endreset_event(struct target *target)
{
	int retval;
	uint32_t dcb_demcr;
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = &cortex_m->armv7m;
	struct adiv5_dap *swjdp = cortex_m->armv7m.arm.dap;
	struct cortex_m_fp_comparator *fp_list = cortex_m->fp_comparator_list;
	struct cortex_m_dwt_comparator *dwt_list = cortex_m->dwt_comparator_list;

	/* REVISIT The four debug monitor bits are currently ignored... */
	retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DEMCR, &dcb_demcr);
	if (retval != ERROR_OK)
		return retval;
	LOG_TARGET_DEBUG(target, "DCB_DEMCR = 0x%8.8" PRIx32 "", dcb_demcr);

	/* this register is used for emulated dcc channel */
	retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DCRDR, 0);
	if (retval != ERROR_OK)
		return retval;

	retval = cortex_m_read_dhcsr_atomic_sticky(target);
	if (retval != ERROR_OK)
		return retval;

	if (!(cortex_m->dcb_dhcsr & C_DEBUGEN)) {
		/* Enable debug requests */
		retval = cortex_m_write_debug_halt_mask(target, 0, C_HALT | C_STEP | C_MASKINTS);
		if (retval != ERROR_OK)
			return retval;
	}

	/* Restore proper interrupt masking setting for running CPU. */
	cortex_m_set_maskints_for_run(target);

	/* Enable features controlled by ITM and DWT blocks, and catch only
	 * the vectors we were told to pay attention to.
	 *
	 * Target firmware is responsible for all fault handling policy
	 * choices *EXCEPT* explicitly scripted overrides like "vector_catch"
	 * or manual updates to the NVIC SHCSR and CCR registers.
	 */
	retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DEMCR, TRCENA | armv7m->demcr);
	if (retval != ERROR_OK)
		return retval;

	/* Paranoia: evidently some (early?) chips don't preserve all the
	 * debug state (including FPB, DWT, etc) across reset...
	 */

	/* Enable FPB */
	retval = cortex_m_enable_fpb(target);
	if (retval != ERROR_OK) {
		LOG_TARGET_ERROR(target, "Failed to enable the FPB");
		return retval;
	}

	cortex_m->fpb_enabled = true;

	/* Restore FPB registers */
	for (unsigned int i = 0; i < cortex_m->fp_num_code + cortex_m->fp_num_lit; i++) {
		retval = target_write_u32(target, fp_list[i].fpcr_address, fp_list[i].fpcr_value);
		if (retval != ERROR_OK)
			return retval;
	}

	/* Restore DWT registers: comparator, mask and function per unit */
	for (unsigned int i = 0; i < cortex_m->dwt_num_comp; i++) {
		retval = target_write_u32(target, dwt_list[i].dwt_comparator_address + 0,
				dwt_list[i].comp);
		if (retval != ERROR_OK)
			return retval;
		retval = target_write_u32(target, dwt_list[i].dwt_comparator_address + 4,
				dwt_list[i].mask);
		if (retval != ERROR_OK)
			return retval;
		retval = target_write_u32(target, dwt_list[i].dwt_comparator_address + 8,
				dwt_list[i].function);
		if (retval != ERROR_OK)
			return retval;
	}
	/* flush any queued transfers from the writes above */
	retval = dap_run(swjdp);
	if (retval != ERROR_OK)
		return retval;

	/* cached register values are stale after reset */
	register_cache_invalidate(armv7m->arm.core_cache);

	/* make sure we have latest dhcsr flags */
	retval = cortex_m_read_dhcsr_atomic_sticky(target);
	if (retval != ERROR_OK)
		return retval;

	return retval;
}
662
663 static int cortex_m_examine_debug_reason(struct target *target)
664 {
665 struct cortex_m_common *cortex_m = target_to_cm(target);
666
667 /* THIS IS NOT GOOD, TODO - better logic for detection of debug state reason
668 * only check the debug reason if we don't know it already */
669
670 if ((target->debug_reason != DBG_REASON_DBGRQ)
671 && (target->debug_reason != DBG_REASON_SINGLESTEP)) {
672 if (cortex_m->nvic_dfsr & DFSR_BKPT) {
673 target->debug_reason = DBG_REASON_BREAKPOINT;
674 if (cortex_m->nvic_dfsr & DFSR_DWTTRAP)
675 target->debug_reason = DBG_REASON_WPTANDBKPT;
676 } else if (cortex_m->nvic_dfsr & DFSR_DWTTRAP)
677 target->debug_reason = DBG_REASON_WATCHPOINT;
678 else if (cortex_m->nvic_dfsr & DFSR_VCATCH)
679 target->debug_reason = DBG_REASON_BREAKPOINT;
680 else if (cortex_m->nvic_dfsr & DFSR_EXTERNAL)
681 target->debug_reason = DBG_REASON_DBGRQ;
682 else /* HALTED */
683 target->debug_reason = DBG_REASON_UNDEFINED;
684 }
685
686 return ERROR_OK;
687 }
688
/** Log the fault status/address registers relevant to the exception the
 * core halted in (armv7m->exception_number). Reads are queued per
 * exception type and executed by the final dap_run(); the values are
 * only reported via LOG_TARGET_DEBUG.
 */
static int cortex_m_examine_exception_reason(struct target *target)
{
	uint32_t shcsr = 0, except_sr = 0, cfsr = -1, except_ar = -1;
	struct armv7m_common *armv7m = target_to_armv7m(target);
	struct adiv5_dap *swjdp = armv7m->arm.dap;
	int retval;

	retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_SHCSR, &shcsr);
	if (retval != ERROR_OK)
		return retval;
	/* pick the status (and, where defined, address) register matching
	 * the active exception number */
	switch (armv7m->exception_number) {
	case 2:	/* NMI */
		break;
	case 3:	/* Hard Fault */
		retval = mem_ap_read_atomic_u32(armv7m->debug_ap, NVIC_HFSR, &except_sr);
		if (retval != ERROR_OK)
			return retval;
		/* HFSR.FORCED: escalated fault, fetch CFSR for the real cause */
		if (except_sr & 0x40000000) {
			retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_CFSR, &cfsr);
			if (retval != ERROR_OK)
				return retval;
		}
		break;
	case 4:	/* Memory Management */
		retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_CFSR, &except_sr);
		if (retval != ERROR_OK)
			return retval;
		retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_MMFAR, &except_ar);
		if (retval != ERROR_OK)
			return retval;
		break;
	case 5:	/* Bus Fault */
		retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_CFSR, &except_sr);
		if (retval != ERROR_OK)
			return retval;
		retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_BFAR, &except_ar);
		if (retval != ERROR_OK)
			return retval;
		break;
	case 6:	/* Usage Fault */
		retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_CFSR, &except_sr);
		if (retval != ERROR_OK)
			return retval;
		break;
	case 7:	/* Secure Fault */
		retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_SFSR, &except_sr);
		if (retval != ERROR_OK)
			return retval;
		retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_SFAR, &except_ar);
		if (retval != ERROR_OK)
			return retval;
		break;
	case 11:	/* SVCall */
		break;
	case 12:	/* Debug Monitor */
		retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_DFSR, &except_sr);
		if (retval != ERROR_OK)
			return retval;
		break;
	case 14:	/* PendSV */
		break;
	case 15:	/* SysTick */
		break;
	default:
		except_sr = 0;
		break;
	}
	/* execute the queued reads; values are valid only after this */
	retval = dap_run(swjdp);
	if (retval == ERROR_OK)
		LOG_TARGET_DEBUG(target, "%s SHCSR 0x%" PRIx32 ", SR 0x%" PRIx32
			", CFSR 0x%" PRIx32 ", AR 0x%" PRIx32,
			armv7m_exception_string(armv7m->exception_number),
			shcsr, except_sr, cfsr, except_ar);
	return retval;
}
764
/** Common entry path when the core has halted: clears halt status,
 * determines the debug reason and PE security state, loads all core
 * registers into the cache (fast batched path, falling back to slow
 * polled reads), derives the core mode / active exception from xPSR
 * and CONTROL, and runs the optional post_debug_entry hook.
 */
static int cortex_m_debug_entry(struct target *target)
{
	uint32_t xPSR;
	int retval;
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = &cortex_m->armv7m;
	struct arm *arm = &armv7m->arm;
	struct reg *r;

	LOG_TARGET_DEBUG(target, " ");

	/* Do this really early to minimize the window where the MASKINTS erratum
	 * can pile up pending interrupts. */
	cortex_m_set_maskints_for_halt(target);

	cortex_m_clear_halt(target);

	retval = cortex_m_read_dhcsr_atomic_sticky(target);
	if (retval != ERROR_OK)
		return retval;

	retval = armv7m->examine_debug_reason(target);
	if (retval != ERROR_OK)
		return retval;

	/* examine PE security state */
	bool secure_state = false;
	if (armv7m->arm.arch == ARM_ARCH_V8M) {
		uint32_t dscsr;

		retval = mem_ap_read_u32(armv7m->debug_ap, DCB_DSCSR, &dscsr);
		if (retval != ERROR_OK)
			return retval;

		secure_state = (dscsr & DSCSR_CDS) == DSCSR_CDS;
	}

	/* Load all registers to arm.core_cache */
	if (!cortex_m->slow_register_read) {
		retval = cortex_m_fast_read_all_regs(target);
		if (retval == ERROR_TIMEOUT_REACHED) {
			/* fast path saw a not-ready register; retry below */
			cortex_m->slow_register_read = true;
			LOG_TARGET_DEBUG(target, "Switched to slow register read");
		}
	}

	if (cortex_m->slow_register_read)
		retval = cortex_m_slow_read_all_regs(target);

	if (retval != ERROR_OK)
		return retval;

	r = arm->cpsr;
	xPSR = buf_get_u32(r->value, 0, 32);

	/* Are we in an exception handler */
	if (xPSR & 0x1FF) {
		armv7m->exception_number = (xPSR & 0x1FF);

		arm->core_mode = ARM_MODE_HANDLER;
		arm->map = armv7m_msp_reg_map;
	} else {
		unsigned control = buf_get_u32(arm->core_cache
				->reg_list[ARMV7M_CONTROL].value, 0, 3);

		/* is this thread privileged? */
		arm->core_mode = control & 1
			? ARM_MODE_USER_THREAD
			: ARM_MODE_THREAD;

		/* which stack is it using? */
		if (control & 2)
			arm->map = armv7m_psp_reg_map;
		else
			arm->map = armv7m_msp_reg_map;

		armv7m->exception_number = 0;
	}

	if (armv7m->exception_number)
		cortex_m_examine_exception_reason(target);

	LOG_TARGET_DEBUG(target, "entered debug state in core mode: %s at PC 0x%" PRIx32
		", cpu in %s state, target->state: %s",
		arm_mode_name(arm->core_mode),
		buf_get_u32(arm->pc->value, 0, 32),
		secure_state ? "Secure" : "Non-Secure",
		target_state_name(target));

	if (armv7m->post_debug_entry) {
		retval = armv7m->post_debug_entry(target);
		if (retval != ERROR_OK)
			return retval;
	}

	return ERROR_OK;
}
862
/** Periodic poll handler: reads DHCSR and updates target->state,
 * recovering from lockup, detecting external reset/resume, and entering
 * debug state (cortex_m_debug_entry) when a halt is observed.
 */
static int cortex_m_poll(struct target *target)
{
	int detected_failure = ERROR_OK;
	int retval = ERROR_OK;
	enum target_state prev_target_state = target->state;
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = &cortex_m->armv7m;

	/* Check if debug_ap is available to prevent segmentation fault.
	 * If the re-examination after an error does not find a MEM-AP
	 * (e.g. the target stopped communicating), debug_ap pointer
	 * can suddenly become NULL.
	 */
	if (!armv7m->debug_ap) {
		target->state = TARGET_UNKNOWN;
		return ERROR_TARGET_NOT_EXAMINED;
	}

	/* Read from Debug Halting Control and Status Register */
	retval = cortex_m_read_dhcsr_atomic_sticky(target);
	if (retval != ERROR_OK) {
		target->state = TARGET_UNKNOWN;
		return retval;
	}

	/* Recover from lockup. See ARMv7-M architecture spec,
	 * section B1.5.15 "Unrecoverable exception cases".
	 */
	if (cortex_m->dcb_dhcsr & S_LOCKUP) {
		LOG_TARGET_ERROR(target, "clearing lockup after double fault");
		cortex_m_write_debug_halt_mask(target, C_HALT, 0);
		target->debug_reason = DBG_REASON_DBGRQ;

		/* We have to execute the rest (the "finally" equivalent, but
		 * still throw this exception again).
		 */
		detected_failure = ERROR_FAIL;

		/* refresh status bits */
		retval = cortex_m_read_dhcsr_atomic_sticky(target);
		if (retval != ERROR_OK)
			return retval;
	}

	/* consume the cumulated sticky reset flag exactly once */
	if (cortex_m->dcb_dhcsr_cumulated_sticky & S_RESET_ST) {
		cortex_m->dcb_dhcsr_cumulated_sticky &= ~S_RESET_ST;
		if (target->state != TARGET_RESET) {
			target->state = TARGET_RESET;
			LOG_TARGET_INFO(target, "external reset detected");
		}
		return ERROR_OK;
	}

	if (target->state == TARGET_RESET) {
		/* Cannot switch context while running so endreset is
		 * called with target->state == TARGET_RESET
		 */
		LOG_TARGET_DEBUG(target, "Exit from reset with dcb_dhcsr 0x%" PRIx32,
			cortex_m->dcb_dhcsr);
		retval = cortex_m_endreset_event(target);
		if (retval != ERROR_OK) {
			target->state = TARGET_UNKNOWN;
			return retval;
		}
		target->state = TARGET_RUNNING;
		prev_target_state = TARGET_RUNNING;
	}

	if (cortex_m->dcb_dhcsr & S_HALT) {
		target->state = TARGET_HALTED;

		if ((prev_target_state == TARGET_RUNNING) || (prev_target_state == TARGET_RESET)) {
			retval = cortex_m_debug_entry(target);
			if (retval != ERROR_OK)
				return retval;

			/* non-zero means semihosting handled (or errored) the halt */
			if (arm_semihosting(target, &retval) != 0)
				return retval;

			target_call_event_callbacks(target, TARGET_EVENT_HALTED);
		}
		if (prev_target_state == TARGET_DEBUG_RUNNING) {
			retval = cortex_m_debug_entry(target);
			if (retval != ERROR_OK)
				return retval;

			target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
		}
	}

	if (target->state == TARGET_UNKNOWN) {
		/* Check if processor is retiring instructions or sleeping.
		 * Unlike S_RESET_ST here we test if the target *is* running now,
		 * not if it has been running (possibly in the past). Instructions are
		 * typically processed much faster than OpenOCD polls DHCSR so S_RETIRE_ST
		 * is read always 1. That's the reason not to use dcb_dhcsr_cumulated_sticky.
		 */
		if (cortex_m->dcb_dhcsr & S_RETIRE_ST || cortex_m->dcb_dhcsr & S_SLEEP) {
			target->state = TARGET_RUNNING;
			retval = ERROR_OK;
		}
	}

	/* Check that target is truly halted, since the target could be resumed externally */
	if ((prev_target_state == TARGET_HALTED) && !(cortex_m->dcb_dhcsr & S_HALT)) {
		/* registers are now invalid */
		register_cache_invalidate(armv7m->arm.core_cache);

		target->state = TARGET_RUNNING;
		LOG_TARGET_WARNING(target, "external resume detected");
		target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
		retval = ERROR_OK;
	}

	/* Did we detect a failure condition that we cleared? */
	if (detected_failure != ERROR_OK)
		retval = detected_failure;
	return retval;
}
982
/** Request a halt by setting C_HALT in DHCSR. Completion is observed
 * asynchronously by cortex_m_poll(); this only issues the request and
 * records DBG_REASON_DBGRQ. Rejects the request while in reset if
 * nSRST also pulls nTRST (debug logic would be reset too).
 */
static int cortex_m_halt(struct target *target)
{
	LOG_TARGET_DEBUG(target, "target->state: %s", target_state_name(target));

	if (target->state == TARGET_HALTED) {
		LOG_TARGET_DEBUG(target, "target was already halted");
		return ERROR_OK;
	}

	if (target->state == TARGET_UNKNOWN)
		LOG_TARGET_WARNING(target, "target was in unknown state when halt was requested");

	if (target->state == TARGET_RESET) {
		if ((jtag_get_reset_config() & RESET_SRST_PULLS_TRST) && jtag_get_srst()) {
			LOG_TARGET_ERROR(target, "can't request a halt while in reset if nSRST pulls nTRST");
			return ERROR_TARGET_FAILURE;
		} else {
			/* we came here in a reset_halt or reset_init sequence
			 * debug entry was already prepared in cortex_m3_assert_reset()
			 */
			target->debug_reason = DBG_REASON_DBGRQ;

			return ERROR_OK;
		}
	}

	/* Write to Debug Halting Control and Status Register */
	/* NOTE(review): write result is ignored — poll() reports failures */
	cortex_m_write_debug_halt_mask(target, C_HALT, 0);

	/* Do this really early to minimize the window where the MASKINTS erratum
	 * can pile up pending interrupts. */
	cortex_m_set_maskints_for_halt(target);

	target->debug_reason = DBG_REASON_DBGRQ;

	return ERROR_OK;
}
1020
/* Reset only the Cortex-M core (not the peripherals) via NVIC VECTRESET
 * with VC_CORERESET vector catch armed, then poll for up to ~100 ms until
 * the core halts on the reset vector.
 * Returns ERROR_OK on success; NOTE(review): also returns ERROR_OK when
 * the halt is never observed within the timeout.
 */
static int cortex_m_soft_reset_halt(struct target *target)
{
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = &cortex_m->armv7m;
	int retval, timeout = 0;

	/* on single cortex_m MCU soft_reset_halt should be avoided as same functionality
	 * can be obtained by using 'reset halt' and 'cortex_m reset_config vectreset'.
	 * As this reset only uses VC_CORERESET it would only ever reset the cortex_m
	 * core, not the peripherals */
	LOG_TARGET_DEBUG(target, "soft_reset_halt is discouraged, please use 'reset halt' instead.");

	/* VECTRESET does not exist on ARMv6-M parts (M0/M0+/M1). */
	if (!cortex_m->vectreset_supported) {
		LOG_TARGET_ERROR(target, "VECTRESET is not supported on this Cortex-M core");
		return ERROR_FAIL;
	}

	/* Set C_DEBUGEN */
	retval = cortex_m_write_debug_halt_mask(target, 0, C_STEP | C_MASKINTS);
	if (retval != ERROR_OK)
		return retval;

	/* Enter debug state on reset; restore DEMCR in endreset_event() */
	retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DEMCR,
			TRCENA | VC_HARDERR | VC_BUSERR | VC_CORERESET);
	if (retval != ERROR_OK)
		return retval;

	/* Request a core-only reset */
	retval = mem_ap_write_atomic_u32(armv7m->debug_ap, NVIC_AIRCR,
			AIRCR_VECTKEY | AIRCR_VECTRESET);
	if (retval != ERROR_OK)
		return retval;
	target->state = TARGET_RESET;

	/* registers are now invalid */
	register_cache_invalidate(cortex_m->armv7m.arm.core_cache);

	/* Poll DHCSR and DFSR until the vector catch fires or we time out. */
	while (timeout < 100) {
		retval = cortex_m_read_dhcsr_atomic_sticky(target);
		if (retval == ERROR_OK) {
			retval = mem_ap_read_atomic_u32(armv7m->debug_ap, NVIC_DFSR,
					&cortex_m->nvic_dfsr);
			if (retval != ERROR_OK)
				return retval;
			/* Halted with VCATCH set means the reset vector catch hit. */
			if ((cortex_m->dcb_dhcsr & S_HALT)
				&& (cortex_m->nvic_dfsr & DFSR_VCATCH)) {
				LOG_TARGET_DEBUG(target, "system reset-halted, DHCSR 0x%08" PRIx32 ", DFSR 0x%08" PRIx32,
					cortex_m->dcb_dhcsr, cortex_m->nvic_dfsr);
				cortex_m_poll(target);
				/* FIXME restore user's vector catch config */
				return ERROR_OK;
			} else {
				LOG_TARGET_DEBUG(target, "waiting for system reset-halt, "
					"DHCSR 0x%08" PRIx32 ", %d ms",
					cortex_m->dcb_dhcsr, timeout);
			}
		}
		timeout++;
		alive_sleep(1);
	}

	return ERROR_OK;
}
1085
1086 void cortex_m_enable_breakpoints(struct target *target)
1087 {
1088 struct breakpoint *breakpoint = target->breakpoints;
1089
1090 /* set any pending breakpoints */
1091 while (breakpoint) {
1092 if (!breakpoint->is_set)
1093 cortex_m_set_breakpoint(target, breakpoint);
1094 breakpoint = breakpoint->next;
1095 }
1096 }
1097
/* Resume execution.
 * @current: nonzero to resume at the current PC, else resume at @address.
 * @handle_breakpoints: nonzero to single-step over a breakpoint sitting
 *	at the resume PC before restarting the core.
 * @debug_execution: nonzero for algorithm execution — interrupts are
 *	disabled via PRIMASK and TARGET_EVENT_DEBUG_RESUMED is reported.
 * Returns ERROR_OK; NOTE(review): return values of the helper calls
 * (restore_context, write_debug_halt_mask, ...) are not checked here.
 */
static int cortex_m_resume(struct target *target, int current,
	target_addr_t address, int handle_breakpoints, int debug_execution)
{
	struct armv7m_common *armv7m = target_to_armv7m(target);
	struct breakpoint *breakpoint = NULL;
	uint32_t resume_pc;
	struct reg *r;

	if (target->state != TARGET_HALTED) {
		LOG_TARGET_WARNING(target, "target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	if (!debug_execution) {
		target_free_all_working_areas(target);
		cortex_m_enable_breakpoints(target);
		cortex_m_enable_watchpoints(target);
	}

	if (debug_execution) {
		r = armv7m->arm.core_cache->reg_list + ARMV7M_PRIMASK;

		/* Disable interrupts */
		/* We disable interrupts in the PRIMASK register instead of
		 * masking with C_MASKINTS. This is probably the same issue
		 * as Cortex-M3 Erratum 377493 (fixed in r1p0): C_MASKINTS
		 * in parallel with disabled interrupts can cause local faults
		 * to not be taken.
		 *
		 * This breaks non-debug (application) execution if not
		 * called from armv7m_start_algorithm() which saves registers.
		 */
		buf_set_u32(r->value, 0, 1, 1);
		r->dirty = true;
		r->valid = true;

		/* Make sure we are in Thumb mode, set xPSR.T bit */
		/* armv7m_start_algorithm() initializes entire xPSR register.
		 * This duplicity handles the case when cortex_m_resume()
		 * is used with the debug_execution flag directly,
		 * not called through armv7m_start_algorithm().
		 */
		r = armv7m->arm.cpsr;
		buf_set_u32(r->value, 24, 1, 1);
		r->dirty = true;
		r->valid = true;
	}

	/* current = 1: continue on current pc, otherwise continue at <address> */
	r = armv7m->arm.pc;
	if (!current) {
		buf_set_u32(r->value, 0, 32, address);
		r->dirty = true;
		r->valid = true;
	}

	/* if we halted last time due to a bkpt instruction
	 * then we have to manually step over it, otherwise
	 * the core will break again */

	if (!breakpoint_find(target, buf_get_u32(r->value, 0, 32))
		&& !debug_execution)
		armv7m_maybe_skip_bkpt_inst(target, NULL);

	resume_pc = buf_get_u32(r->value, 0, 32);

	/* Write back any dirty cached registers before restarting. */
	armv7m_restore_context(target);

	/* the front-end may request us not to handle breakpoints */
	if (handle_breakpoints) {
		/* Single step past breakpoint at current address */
		breakpoint = breakpoint_find(target, resume_pc);
		if (breakpoint) {
			LOG_TARGET_DEBUG(target, "unset breakpoint at " TARGET_ADDR_FMT " (ID: %" PRIu32 ")",
				breakpoint->address,
				breakpoint->unique_id);
			cortex_m_unset_breakpoint(target, breakpoint);
			cortex_m_single_step_core(target);
			cortex_m_set_breakpoint(target, breakpoint);
		}
	}

	/* Restart core */
	cortex_m_set_maskints_for_run(target);
	cortex_m_write_debug_halt_mask(target, 0, C_HALT);

	target->debug_reason = DBG_REASON_NOTHALTED;

	/* registers are now invalid */
	register_cache_invalidate(armv7m->arm.core_cache);

	if (!debug_execution) {
		target->state = TARGET_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
		LOG_TARGET_DEBUG(target, "target resumed at 0x%" PRIx32 "", resume_pc);
	} else {
		target->state = TARGET_DEBUG_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
		LOG_TARGET_DEBUG(target, "target debug resumed at 0x%" PRIx32 "", resume_pc);
	}

	return ERROR_OK;
}
1201
1202 /* int irqstepcount = 0; */
/* Single-step one instruction.
 * @current: nonzero to step at the current PC, else step at @address.
 * @handle_breakpoints: nonzero to temporarily remove a breakpoint at PC.
 * In CORTEX_M_ISRMASK_AUTO mode, pending interrupts are first allowed to
 * execute to completion (bounded by a 500 ms timeout) before stepping the
 * instruction with interrupts masked; see the detailed comments inline.
 * If the interrupt handlers never complete, the core is left running and
 * ERROR_OK is returned with debug_reason = DBG_REASON_NOTHALTED.
 */
static int cortex_m_step(struct target *target, int current,
	target_addr_t address, int handle_breakpoints)
{
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = &cortex_m->armv7m;
	struct breakpoint *breakpoint = NULL;
	struct reg *pc = armv7m->arm.pc;
	bool bkpt_inst_found = false;
	int retval;
	bool isr_timed_out = false;

	if (target->state != TARGET_HALTED) {
		LOG_TARGET_WARNING(target, "target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* current = 1: continue on current pc, otherwise continue at <address> */
	if (!current) {
		buf_set_u32(pc->value, 0, 32, address);
		pc->dirty = true;
		pc->valid = true;
	}

	uint32_t pc_value = buf_get_u32(pc->value, 0, 32);

	/* the front-end may request us not to handle breakpoints */
	if (handle_breakpoints) {
		breakpoint = breakpoint_find(target, pc_value);
		if (breakpoint)
			cortex_m_unset_breakpoint(target, breakpoint);
	}

	armv7m_maybe_skip_bkpt_inst(target, &bkpt_inst_found);

	target->debug_reason = DBG_REASON_SINGLESTEP;

	/* Flush dirty cached registers back to the core before running. */
	armv7m_restore_context(target);

	target_call_event_callbacks(target, TARGET_EVENT_RESUMED);

	/* if no bkpt instruction is found at pc then we can perform
	 * a normal step, otherwise we have to manually step over the bkpt
	 * instruction - as such simulate a step */
	if (bkpt_inst_found == false) {
		if (cortex_m->isrmasking_mode != CORTEX_M_ISRMASK_AUTO) {
			/* Automatic ISR masking mode off: Just step over the next
			 * instruction, with interrupts on or off as appropriate. */
			cortex_m_set_maskints_for_step(target);
			cortex_m_write_debug_halt_mask(target, C_STEP, C_HALT);
		} else {
			/* Process interrupts during stepping in a way they don't interfere
			 * debugging.
			 *
			 * Principle:
			 *
			 * Set a temporary break point at the current pc and let the core run
			 * with interrupts enabled. Pending interrupts get served and we run
			 * into the breakpoint again afterwards. Then we step over the next
			 * instruction with interrupts disabled.
			 *
			 * If the pending interrupts don't complete within time, we leave the
			 * core running. This may happen if the interrupts trigger faster
			 * than the core can process them or the handler doesn't return.
			 *
			 * If no more breakpoints are available we simply do a step with
			 * interrupts enabled.
			 *
			 */

			/* 2012-09-29 ph
			 *
			 * If a break point is already set on the lower half word then a break point on
			 * the upper half word will not break again when the core is restarted. So we
			 * just step over the instruction with interrupts disabled.
			 *
			 * The documentation has no information about this, it was found by observation
			 * on STM32F1 and STM32F2. Proper explanation welcome. STM32F0 doesn't seem to
			 * suffer from this problem.
			 *
			 * To add some confusion: pc_value has bit 0 always set, while the breakpoint
			 * address has it always cleared. The former is done to indicate thumb mode
			 * to gdb.
			 *
			 */
			if ((pc_value & 0x02) && breakpoint_find(target, pc_value & ~0x03)) {
				LOG_TARGET_DEBUG(target, "Stepping over next instruction with interrupts disabled");
				cortex_m_write_debug_halt_mask(target, C_HALT | C_MASKINTS, 0);
				cortex_m_write_debug_halt_mask(target, C_STEP, C_HALT);
				/* Re-enable interrupts if appropriate */
				cortex_m_write_debug_halt_mask(target, C_HALT, 0);
				cortex_m_set_maskints_for_halt(target);
			} else {

				/* Set a temporary break point */
				if (breakpoint) {
					retval = cortex_m_set_breakpoint(target, breakpoint);
				} else {
					enum breakpoint_type type = BKPT_HARD;
					if (cortex_m->fp_rev == 0 && pc_value > 0x1FFFFFFF) {
						/* FPB rev.1 cannot handle such addr, try BKPT instr */
						type = BKPT_SOFT;
					}
					retval = breakpoint_add(target, pc_value, 2, type);
				}

				bool tmp_bp_set = (retval == ERROR_OK);

				/* No more breakpoints left, just do a step */
				if (!tmp_bp_set) {
					cortex_m_set_maskints_for_step(target);
					cortex_m_write_debug_halt_mask(target, C_STEP, C_HALT);
					/* Re-enable interrupts if appropriate */
					cortex_m_write_debug_halt_mask(target, C_HALT, 0);
					cortex_m_set_maskints_for_halt(target);
				} else {
					/* Start the core */
					LOG_TARGET_DEBUG(target, "Starting core to serve pending interrupts");
					int64_t t_start = timeval_ms();
					cortex_m_set_maskints_for_run(target);
					cortex_m_write_debug_halt_mask(target, 0, C_HALT | C_STEP);

					/* Wait for pending handlers to complete or timeout */
					do {
						retval = cortex_m_read_dhcsr_atomic_sticky(target);
						if (retval != ERROR_OK) {
							target->state = TARGET_UNKNOWN;
							return retval;
						}
						isr_timed_out = ((timeval_ms() - t_start) > 500);
					} while (!((cortex_m->dcb_dhcsr & S_HALT) || isr_timed_out));

					/* only remove breakpoint if we created it */
					if (breakpoint)
						cortex_m_unset_breakpoint(target, breakpoint);
					else {
						/* Remove the temporary breakpoint */
						breakpoint_remove(target, pc_value);
					}

					if (isr_timed_out) {
						LOG_TARGET_DEBUG(target, "Interrupt handlers didn't complete within time, "
							"leaving target running");
					} else {
						/* Step over next instruction with interrupts disabled */
						cortex_m_set_maskints_for_step(target);
						cortex_m_write_debug_halt_mask(target,
							C_HALT | C_MASKINTS,
							0);
						cortex_m_write_debug_halt_mask(target, C_STEP, C_HALT);
						/* Re-enable interrupts if appropriate */
						cortex_m_write_debug_halt_mask(target, C_HALT, 0);
						cortex_m_set_maskints_for_halt(target);
					}
				}
			}
		}
	}

	retval = cortex_m_read_dhcsr_atomic_sticky(target);
	if (retval != ERROR_OK)
		return retval;

	/* registers are now invalid */
	register_cache_invalidate(armv7m->arm.core_cache);

	/* Put back the breakpoint we removed at the top, if any. */
	if (breakpoint)
		cortex_m_set_breakpoint(target, breakpoint);

	if (isr_timed_out) {
		/* Leave the core running. The user has to stop execution manually. */
		target->debug_reason = DBG_REASON_NOTHALTED;
		target->state = TARGET_RUNNING;
		return ERROR_OK;
	}

	LOG_TARGET_DEBUG(target, "target stepped dcb_dhcsr = 0x%" PRIx32
		" nvic_icsr = 0x%" PRIx32,
		cortex_m->dcb_dhcsr, cortex_m->nvic_icsr);

	retval = cortex_m_debug_entry(target);
	if (retval != ERROR_OK)
		return retval;
	target_call_event_callbacks(target, TARGET_EVENT_HALTED);

	LOG_TARGET_DEBUG(target, "target stepped dcb_dhcsr = 0x%" PRIx32
		" nvic_icsr = 0x%" PRIx32,
		cortex_m->dcb_dhcsr, cortex_m->nvic_icsr);

	return ERROR_OK;
}
1393
/* Assert reset on the target.
 * Uses SRST when the adapter has one; otherwise falls back to a software
 * reset through AIRCR (SYSRESETREQ or VECTRESET per the configured
 * soft_reset_config). AP access errors before the actual reset are stored
 * in 'retval' and reported at the end rather than aborting, so the reset
 * itself still proceeds.
 */
static int cortex_m_assert_reset(struct target *target)
{
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = &cortex_m->armv7m;
	enum cortex_m_soft_reset_config reset_config = cortex_m->soft_reset_config;

	LOG_TARGET_DEBUG(target, "target->state: %s",
		target_state_name(target));

	enum reset_types jtag_reset_config = jtag_get_reset_config();

	if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT)) {
		/* allow scripts to override the reset event */

		target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
		register_cache_invalidate(cortex_m->armv7m.arm.core_cache);
		target->state = TARGET_RESET;

		return ERROR_OK;
	}

	/* some cores support connecting while srst is asserted
	 * use that mode is it has been configured */

	bool srst_asserted = false;

	if (!target_was_examined(target)) {
		/* Without a prior examine we cannot touch the debug AP;
		 * SRST is the only reset we can perform. */
		if (jtag_reset_config & RESET_HAS_SRST) {
			adapter_assert_reset();
			if (target->reset_halt)
				LOG_TARGET_ERROR(target, "Target not examined, will not halt after reset!");
			return ERROR_OK;
		} else {
			LOG_TARGET_ERROR(target, "Target not examined, reset NOT asserted!");
			return ERROR_FAIL;
		}
	}

	if ((jtag_reset_config & RESET_HAS_SRST) &&
	    (jtag_reset_config & RESET_SRST_NO_GATING)) {
		adapter_assert_reset();
		srst_asserted = true;
	}

	/* Enable debug requests */
	int retval = cortex_m_read_dhcsr_atomic_sticky(target);

	/* Store important errors instead of failing and proceed to reset assert */

	if (retval != ERROR_OK || !(cortex_m->dcb_dhcsr & C_DEBUGEN))
		retval = cortex_m_write_debug_halt_mask(target, 0, C_HALT | C_STEP | C_MASKINTS);

	/* If the processor is sleeping in a WFI or WFE instruction, the
	 * C_HALT bit must be asserted to regain control */
	if (retval == ERROR_OK && (cortex_m->dcb_dhcsr & S_SLEEP))
		retval = cortex_m_write_debug_halt_mask(target, C_HALT, 0);

	mem_ap_write_u32(armv7m->debug_ap, DCB_DCRDR, 0);
	/* Ignore less important errors */

	if (!target->reset_halt) {
		/* Set/Clear C_MASKINTS in a separate operation */
		cortex_m_set_maskints_for_run(target);

		/* clear any debug flags before resuming */
		cortex_m_clear_halt(target);

		/* clear C_HALT in dhcsr reg */
		cortex_m_write_debug_halt_mask(target, 0, C_HALT);
	} else {
		/* Halt in debug on reset; endreset_event() restores DEMCR.
		 *
		 * REVISIT catching BUSERR presumably helps to defend against
		 * bad vector table entries. Should this include MMERR or
		 * other flags too?
		 */
		int retval2;
		retval2 = mem_ap_write_atomic_u32(armv7m->debug_ap, DCB_DEMCR,
				TRCENA | VC_HARDERR | VC_BUSERR | VC_CORERESET);
		if (retval != ERROR_OK || retval2 != ERROR_OK)
			LOG_TARGET_INFO(target, "AP write error, reset will not halt");
	}

	if (jtag_reset_config & RESET_HAS_SRST) {
		/* default to asserting srst */
		if (!srst_asserted)
			adapter_assert_reset();

		/* srst is asserted, ignore AP access errors */
		retval = ERROR_OK;
	} else {
		/* Use a standard Cortex-M3 software reset mechanism.
		 * We default to using VECTRESET as it is supported on all current cores
		 * (except Cortex-M0, M0+ and M1 which support SYSRESETREQ only!)
		 * This has the disadvantage of not resetting the peripherals, so a
		 * reset-init event handler is needed to perform any peripheral resets.
		 */
		if (!cortex_m->vectreset_supported
				&& reset_config == CORTEX_M_RESET_VECTRESET) {
			reset_config = CORTEX_M_RESET_SYSRESETREQ;
			LOG_TARGET_WARNING(target, "VECTRESET is not supported on this Cortex-M core, using SYSRESETREQ instead.");
			LOG_TARGET_WARNING(target, "Set 'cortex_m reset_config sysresetreq'.");
		}

		LOG_TARGET_DEBUG(target, "Using Cortex-M %s", (reset_config == CORTEX_M_RESET_SYSRESETREQ)
			? "SYSRESETREQ" : "VECTRESET");

		if (reset_config == CORTEX_M_RESET_VECTRESET) {
			LOG_TARGET_WARNING(target, "Only resetting the Cortex-M core, use a reset-init event "
				"handler to reset any peripherals or configure hardware srst support.");
		}

		int retval3;
		retval3 = mem_ap_write_atomic_u32(armv7m->debug_ap, NVIC_AIRCR,
				AIRCR_VECTKEY | ((reset_config == CORTEX_M_RESET_SYSRESETREQ)
				? AIRCR_SYSRESETREQ : AIRCR_VECTRESET));
		if (retval3 != ERROR_OK)
			LOG_TARGET_DEBUG(target, "Ignoring AP write error right after reset");

		retval3 = dap_dp_init_or_reconnect(armv7m->debug_ap->dap);
		if (retval3 != ERROR_OK) {
			LOG_TARGET_ERROR(target, "DP initialisation failed");
			/* The error return value must not be propagated in this case.
			 * SYSRESETREQ or VECTRESET have been possibly triggered
			 * so reset processing should continue */
		} else {
			/* I do not know why this is necessary, but it
			 * fixes strange effects (step/resume cause NMI
			 * after reset) on LM3S6918 -- Michael Schwingen
			 */
			uint32_t tmp;
			mem_ap_read_atomic_u32(armv7m->debug_ap, NVIC_AIRCR, &tmp);
		}
	}

	target->state = TARGET_RESET;
	jtag_sleep(50000);

	/* registers are now invalid */
	register_cache_invalidate(cortex_m->armv7m.arm.core_cache);

	/* now return stored error code if any */
	if (retval != ERROR_OK)
		return retval;

	if (target->reset_halt) {
		retval = target_halt(target);
		if (retval != ERROR_OK)
			return retval;
	}

	return ERROR_OK;
}
1546
1547 static int cortex_m_deassert_reset(struct target *target)
1548 {
1549 struct armv7m_common *armv7m = &target_to_cm(target)->armv7m;
1550
1551 LOG_TARGET_DEBUG(target, "target->state: %s",
1552 target_state_name(target));
1553
1554 /* deassert reset lines */
1555 adapter_deassert_reset();
1556
1557 enum reset_types jtag_reset_config = jtag_get_reset_config();
1558
1559 if ((jtag_reset_config & RESET_HAS_SRST) &&
1560 !(jtag_reset_config & RESET_SRST_NO_GATING) &&
1561 target_was_examined(target)) {
1562
1563 int retval = dap_dp_init_or_reconnect(armv7m->debug_ap->dap);
1564 if (retval != ERROR_OK) {
1565 LOG_TARGET_ERROR(target, "DP initialisation failed");
1566 return retval;
1567 }
1568 }
1569
1570 return ERROR_OK;
1571 }
1572
1573 int cortex_m_set_breakpoint(struct target *target, struct breakpoint *breakpoint)
1574 {
1575 int retval;
1576 unsigned int fp_num = 0;
1577 struct cortex_m_common *cortex_m = target_to_cm(target);
1578 struct cortex_m_fp_comparator *comparator_list = cortex_m->fp_comparator_list;
1579
1580 if (breakpoint->is_set) {
1581 LOG_TARGET_WARNING(target, "breakpoint (BPID: %" PRIu32 ") already set", breakpoint->unique_id);
1582 return ERROR_OK;
1583 }
1584
1585 if (breakpoint->type == BKPT_HARD) {
1586 uint32_t fpcr_value;
1587 while (comparator_list[fp_num].used && (fp_num < cortex_m->fp_num_code))
1588 fp_num++;
1589 if (fp_num >= cortex_m->fp_num_code) {
1590 LOG_TARGET_ERROR(target, "Can not find free FPB Comparator!");
1591 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1592 }
1593 breakpoint_hw_set(breakpoint, fp_num);
1594 fpcr_value = breakpoint->address | 1;
1595 if (cortex_m->fp_rev == 0) {
1596 if (breakpoint->address > 0x1FFFFFFF) {
1597 LOG_TARGET_ERROR(target, "Cortex-M Flash Patch Breakpoint rev.1 "
1598 "cannot handle HW breakpoint above address 0x1FFFFFFE");
1599 return ERROR_FAIL;
1600 }
1601 uint32_t hilo;
1602 hilo = (breakpoint->address & 0x2) ? FPCR_REPLACE_BKPT_HIGH : FPCR_REPLACE_BKPT_LOW;
1603 fpcr_value = (fpcr_value & 0x1FFFFFFC) | hilo | 1;
1604 } else if (cortex_m->fp_rev > 1) {
1605 LOG_TARGET_ERROR(target, "Unhandled Cortex-M Flash Patch Breakpoint architecture revision");
1606 return ERROR_FAIL;
1607 }
1608 comparator_list[fp_num].used = true;
1609 comparator_list[fp_num].fpcr_value = fpcr_value;
1610 target_write_u32(target, comparator_list[fp_num].fpcr_address,
1611 comparator_list[fp_num].fpcr_value);
1612 LOG_TARGET_DEBUG(target, "fpc_num %i fpcr_value 0x%" PRIx32 "",
1613 fp_num,
1614 comparator_list[fp_num].fpcr_value);
1615 if (!cortex_m->fpb_enabled) {
1616 LOG_TARGET_DEBUG(target, "FPB wasn't enabled, do it now");
1617 retval = cortex_m_enable_fpb(target);
1618 if (retval != ERROR_OK) {
1619 LOG_TARGET_ERROR(target, "Failed to enable the FPB");
1620 return retval;
1621 }
1622
1623 cortex_m->fpb_enabled = true;
1624 }
1625 } else if (breakpoint->type == BKPT_SOFT) {
1626 uint8_t code[4];
1627
1628 /* NOTE: on ARMv6-M and ARMv7-M, BKPT(0xab) is used for
1629 * semihosting; don't use that. Otherwise the BKPT
1630 * parameter is arbitrary.
1631 */
1632 buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1633 retval = target_read_memory(target,
1634 breakpoint->address & 0xFFFFFFFE,
1635 breakpoint->length, 1,
1636 breakpoint->orig_instr);
1637 if (retval != ERROR_OK)
1638 return retval;
1639 retval = target_write_memory(target,
1640 breakpoint->address & 0xFFFFFFFE,
1641 breakpoint->length, 1,
1642 code);
1643 if (retval != ERROR_OK)
1644 return retval;
1645 breakpoint->is_set = true;
1646 }
1647
1648 LOG_TARGET_DEBUG(target, "BPID: %" PRIu32 ", Type: %d, Address: " TARGET_ADDR_FMT " Length: %d (n=%u)",
1649 breakpoint->unique_id,
1650 (int)(breakpoint->type),
1651 breakpoint->address,
1652 breakpoint->length,
1653 (breakpoint->type == BKPT_SOFT) ? 0 : breakpoint->number);
1654
1655 return ERROR_OK;
1656 }
1657
1658 int cortex_m_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
1659 {
1660 int retval;
1661 struct cortex_m_common *cortex_m = target_to_cm(target);
1662 struct cortex_m_fp_comparator *comparator_list = cortex_m->fp_comparator_list;
1663
1664 if (!breakpoint->is_set) {
1665 LOG_TARGET_WARNING(target, "breakpoint not set");
1666 return ERROR_OK;
1667 }
1668
1669 LOG_TARGET_DEBUG(target, "BPID: %" PRIu32 ", Type: %d, Address: " TARGET_ADDR_FMT " Length: %d (n=%u)",
1670 breakpoint->unique_id,
1671 (int)(breakpoint->type),
1672 breakpoint->address,
1673 breakpoint->length,
1674 (breakpoint->type == BKPT_SOFT) ? 0 : breakpoint->number);
1675
1676 if (breakpoint->type == BKPT_HARD) {
1677 unsigned int fp_num = breakpoint->number;
1678 if (fp_num >= cortex_m->fp_num_code) {
1679 LOG_TARGET_DEBUG(target, "Invalid FP Comparator number in breakpoint");
1680 return ERROR_OK;
1681 }
1682 comparator_list[fp_num].used = false;
1683 comparator_list[fp_num].fpcr_value = 0;
1684 target_write_u32(target, comparator_list[fp_num].fpcr_address,
1685 comparator_list[fp_num].fpcr_value);
1686 } else {
1687 /* restore original instruction (kept in target endianness) */
1688 retval = target_write_memory(target, breakpoint->address & 0xFFFFFFFE,
1689 breakpoint->length, 1,
1690 breakpoint->orig_instr);
1691 if (retval != ERROR_OK)
1692 return retval;
1693 }
1694 breakpoint->is_set = false;
1695
1696 return ERROR_OK;
1697 }
1698
1699 int cortex_m_add_breakpoint(struct target *target, struct breakpoint *breakpoint)
1700 {
1701 if (breakpoint->length == 3) {
1702 LOG_TARGET_DEBUG(target, "Using a two byte breakpoint for 32bit Thumb-2 request");
1703 breakpoint->length = 2;
1704 }
1705
1706 if ((breakpoint->length != 2)) {
1707 LOG_TARGET_INFO(target, "only breakpoints of two bytes length supported");
1708 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1709 }
1710
1711 return cortex_m_set_breakpoint(target, breakpoint);
1712 }
1713
1714 int cortex_m_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1715 {
1716 if (!breakpoint->is_set)
1717 return ERROR_OK;
1718
1719 return cortex_m_unset_breakpoint(target, breakpoint);
1720 }
1721
1722 static int cortex_m_set_watchpoint(struct target *target, struct watchpoint *watchpoint)
1723 {
1724 unsigned int dwt_num = 0;
1725 struct cortex_m_common *cortex_m = target_to_cm(target);
1726
1727 /* REVISIT Don't fully trust these "not used" records ... users
1728 * may set up breakpoints by hand, e.g. dual-address data value
1729 * watchpoint using comparator #1; comparator #0 matching cycle
1730 * count; send data trace info through ITM and TPIU; etc
1731 */
1732 struct cortex_m_dwt_comparator *comparator;
1733
1734 for (comparator = cortex_m->dwt_comparator_list;
1735 comparator->used && dwt_num < cortex_m->dwt_num_comp;
1736 comparator++, dwt_num++)
1737 continue;
1738 if (dwt_num >= cortex_m->dwt_num_comp) {
1739 LOG_TARGET_ERROR(target, "Can not find free DWT Comparator");
1740 return ERROR_FAIL;
1741 }
1742 comparator->used = true;
1743 watchpoint_set(watchpoint, dwt_num);
1744
1745 comparator->comp = watchpoint->address;
1746 target_write_u32(target, comparator->dwt_comparator_address + 0,
1747 comparator->comp);
1748
1749 if ((cortex_m->dwt_devarch & 0x1FFFFF) != DWT_DEVARCH_ARMV8M) {
1750 uint32_t mask = 0, temp;
1751
1752 /* watchpoint params were validated earlier */
1753 temp = watchpoint->length;
1754 while (temp) {
1755 temp >>= 1;
1756 mask++;
1757 }
1758 mask--;
1759
1760 comparator->mask = mask;
1761 target_write_u32(target, comparator->dwt_comparator_address + 4,
1762 comparator->mask);
1763
1764 switch (watchpoint->rw) {
1765 case WPT_READ:
1766 comparator->function = 5;
1767 break;
1768 case WPT_WRITE:
1769 comparator->function = 6;
1770 break;
1771 case WPT_ACCESS:
1772 comparator->function = 7;
1773 break;
1774 }
1775 } else {
1776 uint32_t data_size = watchpoint->length >> 1;
1777 comparator->mask = (watchpoint->length >> 1) | 1;
1778
1779 switch (watchpoint->rw) {
1780 case WPT_ACCESS:
1781 comparator->function = 4;
1782 break;
1783 case WPT_WRITE:
1784 comparator->function = 5;
1785 break;
1786 case WPT_READ:
1787 comparator->function = 6;
1788 break;
1789 }
1790 comparator->function = comparator->function | (1 << 4) |
1791 (data_size << 10);
1792 }
1793
1794 target_write_u32(target, comparator->dwt_comparator_address + 8,
1795 comparator->function);
1796
1797 LOG_TARGET_DEBUG(target, "Watchpoint (ID %d) DWT%d 0x%08x 0x%x 0x%05x",
1798 watchpoint->unique_id, dwt_num,
1799 (unsigned) comparator->comp,
1800 (unsigned) comparator->mask,
1801 (unsigned) comparator->function);
1802 return ERROR_OK;
1803 }
1804
1805 static int cortex_m_unset_watchpoint(struct target *target, struct watchpoint *watchpoint)
1806 {
1807 struct cortex_m_common *cortex_m = target_to_cm(target);
1808 struct cortex_m_dwt_comparator *comparator;
1809
1810 if (!watchpoint->is_set) {
1811 LOG_TARGET_WARNING(target, "watchpoint (wpid: %d) not set",
1812 watchpoint->unique_id);
1813 return ERROR_OK;
1814 }
1815
1816 unsigned int dwt_num = watchpoint->number;
1817
1818 LOG_TARGET_DEBUG(target, "Watchpoint (ID %d) DWT%u address: 0x%08x clear",
1819 watchpoint->unique_id, dwt_num,
1820 (unsigned) watchpoint->address);
1821
1822 if (dwt_num >= cortex_m->dwt_num_comp) {
1823 LOG_TARGET_DEBUG(target, "Invalid DWT Comparator number in watchpoint");
1824 return ERROR_OK;
1825 }
1826
1827 comparator = cortex_m->dwt_comparator_list + dwt_num;
1828 comparator->used = false;
1829 comparator->function = 0;
1830 target_write_u32(target, comparator->dwt_comparator_address + 8,
1831 comparator->function);
1832
1833 watchpoint->is_set = false;
1834
1835 return ERROR_OK;
1836 }
1837
1838 int cortex_m_add_watchpoint(struct target *target, struct watchpoint *watchpoint)
1839 {
1840 struct cortex_m_common *cortex_m = target_to_cm(target);
1841
1842 if (cortex_m->dwt_comp_available < 1) {
1843 LOG_TARGET_DEBUG(target, "no comparators?");
1844 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1845 }
1846
1847 /* hardware doesn't support data value masking */
1848 if (watchpoint->mask != ~(uint32_t)0) {
1849 LOG_TARGET_DEBUG(target, "watchpoint value masks not supported");
1850 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1851 }
1852
1853 /* hardware allows address masks of up to 32K */
1854 unsigned mask;
1855
1856 for (mask = 0; mask < 16; mask++) {
1857 if ((1u << mask) == watchpoint->length)
1858 break;
1859 }
1860 if (mask == 16) {
1861 LOG_TARGET_DEBUG(target, "unsupported watchpoint length");
1862 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1863 }
1864 if (watchpoint->address & ((1 << mask) - 1)) {
1865 LOG_TARGET_DEBUG(target, "watchpoint address is unaligned");
1866 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1867 }
1868
1869 /* Caller doesn't seem to be able to describe watching for data
1870 * values of zero; that flags "no value".
1871 *
1872 * REVISIT This DWT may well be able to watch for specific data
1873 * values. Requires comparator #1 to set DATAVMATCH and match
1874 * the data, and another comparator (DATAVADDR0) matching addr.
1875 */
1876 if (watchpoint->value) {
1877 LOG_TARGET_DEBUG(target, "data value watchpoint not YET supported");
1878 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1879 }
1880
1881 cortex_m->dwt_comp_available--;
1882 LOG_TARGET_DEBUG(target, "dwt_comp_available: %d", cortex_m->dwt_comp_available);
1883
1884 return ERROR_OK;
1885 }
1886
1887 int cortex_m_remove_watchpoint(struct target *target, struct watchpoint *watchpoint)
1888 {
1889 struct cortex_m_common *cortex_m = target_to_cm(target);
1890
1891 /* REVISIT why check? DWT can be updated with core running ... */
1892 if (target->state != TARGET_HALTED) {
1893 LOG_TARGET_WARNING(target, "target not halted");
1894 return ERROR_TARGET_NOT_HALTED;
1895 }
1896
1897 if (watchpoint->is_set)
1898 cortex_m_unset_watchpoint(target, watchpoint);
1899
1900 cortex_m->dwt_comp_available++;
1901 LOG_TARGET_DEBUG(target, "dwt_comp_available: %d", cortex_m->dwt_comp_available);
1902
1903 return ERROR_OK;
1904 }
1905
1906 int cortex_m_hit_watchpoint(struct target *target, struct watchpoint **hit_watchpoint)
1907 {
1908 if (target->debug_reason != DBG_REASON_WATCHPOINT)
1909 return ERROR_FAIL;
1910
1911 struct cortex_m_common *cortex_m = target_to_cm(target);
1912
1913 for (struct watchpoint *wp = target->watchpoints; wp; wp = wp->next) {
1914 if (!wp->is_set)
1915 continue;
1916
1917 unsigned int dwt_num = wp->number;
1918 struct cortex_m_dwt_comparator *comparator = cortex_m->dwt_comparator_list + dwt_num;
1919
1920 uint32_t dwt_function;
1921 int retval = target_read_u32(target, comparator->dwt_comparator_address + 8, &dwt_function);
1922 if (retval != ERROR_OK)
1923 return ERROR_FAIL;
1924
1925 /* check the MATCHED bit */
1926 if (dwt_function & BIT(24)) {
1927 *hit_watchpoint = wp;
1928 return ERROR_OK;
1929 }
1930 }
1931
1932 return ERROR_FAIL;
1933 }
1934
1935 void cortex_m_enable_watchpoints(struct target *target)
1936 {
1937 struct watchpoint *watchpoint = target->watchpoints;
1938
1939 /* set any pending watchpoints */
1940 while (watchpoint) {
1941 if (!watchpoint->is_set)
1942 cortex_m_set_watchpoint(target, watchpoint);
1943 watchpoint = watchpoint->next;
1944 }
1945 }
1946
1947 static int cortex_m_read_memory(struct target *target, target_addr_t address,
1948 uint32_t size, uint32_t count, uint8_t *buffer)
1949 {
1950 struct armv7m_common *armv7m = target_to_armv7m(target);
1951
1952 if (armv7m->arm.arch == ARM_ARCH_V6M) {
1953 /* armv6m does not handle unaligned memory access */
1954 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1955 return ERROR_TARGET_UNALIGNED_ACCESS;
1956 }
1957
1958 return mem_ap_read_buf(armv7m->debug_ap, buffer, size, count, address);
1959 }
1960
1961 static int cortex_m_write_memory(struct target *target, target_addr_t address,
1962 uint32_t size, uint32_t count, const uint8_t *buffer)
1963 {
1964 struct armv7m_common *armv7m = target_to_armv7m(target);
1965
1966 if (armv7m->arm.arch == ARM_ARCH_V6M) {
1967 /* armv6m does not handle unaligned memory access */
1968 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1969 return ERROR_TARGET_UNALIGNED_ACCESS;
1970 }
1971
1972 return mem_ap_write_buf(armv7m->debug_ap, buffer, size, count, address);
1973 }
1974
1975 static int cortex_m_init_target(struct command_context *cmd_ctx,
1976 struct target *target)
1977 {
1978 armv7m_build_reg_cache(target);
1979 arm_semihosting_init(target);
1980 return ERROR_OK;
1981 }
1982
1983 void cortex_m_deinit_target(struct target *target)
1984 {
1985 struct cortex_m_common *cortex_m = target_to_cm(target);
1986 struct armv7m_common *armv7m = target_to_armv7m(target);
1987
1988 if (!armv7m->is_hla_target && armv7m->debug_ap)
1989 dap_put_ap(armv7m->debug_ap);
1990
1991 free(cortex_m->fp_comparator_list);
1992
1993 cortex_m_dwt_free(target);
1994 armv7m_free_reg_cache(target);
1995
1996 free(target->private_config);
1997 free(cortex_m);
1998 }
1999
2000 int cortex_m_profiling(struct target *target, uint32_t *samples,
2001 uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
2002 {
2003 struct timeval timeout, now;
2004 struct armv7m_common *armv7m = target_to_armv7m(target);
2005 uint32_t reg_value;
2006 int retval;
2007
2008 retval = target_read_u32(target, DWT_PCSR, &reg_value);
2009 if (retval != ERROR_OK) {
2010 LOG_TARGET_ERROR(target, "Error while reading PCSR");
2011 return retval;
2012 }
2013 if (reg_value == 0) {
2014 LOG_TARGET_INFO(target, "PCSR sampling not supported on this processor.");
2015 return target_profiling_default(target, samples, max_num_samples, num_samples, seconds);
2016 }
2017
2018 gettimeofday(&timeout, NULL);
2019 timeval_add_time(&timeout, seconds, 0);
2020
2021 LOG_TARGET_INFO(target, "Starting Cortex-M profiling. Sampling DWT_PCSR as fast as we can...");
2022
2023 /* Make sure the target is running */
2024 target_poll(target);
2025 if (target->state == TARGET_HALTED)
2026 retval = target_resume(target, 1, 0, 0, 0);
2027
2028 if (retval != ERROR_OK) {
2029 LOG_TARGET_ERROR(target, "Error while resuming target");
2030 return retval;
2031 }
2032
2033 uint32_t sample_count = 0;
2034
2035 for (;;) {
2036 if (armv7m && armv7m->debug_ap) {
2037 uint32_t read_count = max_num_samples - sample_count;
2038 if (read_count > 1024)
2039 read_count = 1024;
2040
2041 retval = mem_ap_read_buf_noincr(armv7m->debug_ap,
2042 (void *)&samples[sample_count],
2043 4, read_count, DWT_PCSR);
2044 sample_count += read_count;
2045 } else {
2046 target_read_u32(target, DWT_PCSR, &samples[sample_count++]);
2047 }
2048
2049 if (retval != ERROR_OK) {
2050 LOG_TARGET_ERROR(target, "Error while reading PCSR");
2051 return retval;
2052 }
2053
2054
2055 gettimeofday(&now, NULL);
2056 if (sample_count >= max_num_samples || timeval_compare(&now, &timeout) > 0) {
2057 LOG_TARGET_INFO(target, "Profiling completed. %" PRIu32 " samples.", sample_count);
2058 break;
2059 }
2060 }
2061
2062 *num_samples = sample_count;
2063 return retval;
2064 }
2065
2066
2067 /* REVISIT cache valid/dirty bits are unmaintained. We could set "valid"
2068 * on r/w if the core is not running, and clear on resume or reset ... or
2069 * at least, in a post_restore_context() method.
2070 */
2071
/* Per-register backing state for the DWT register cache: identifies the
 * owning target and memory-mapped address, and holds the cached value
 * buffer that struct reg points into.
 */
struct dwt_reg_state {
	struct target *target;	/* target this DWT register belongs to */
	uint32_t addr;		/* memory-mapped address of the register */
	uint8_t value[4];	/* scratch/cache */
};
2077
2078 static int cortex_m_dwt_get_reg(struct reg *reg)
2079 {
2080 struct dwt_reg_state *state = reg->arch_info;
2081
2082 uint32_t tmp;
2083 int retval = target_read_u32(state->target, state->addr, &tmp);
2084 if (retval != ERROR_OK)
2085 return retval;
2086
2087 buf_set_u32(state->value, 0, 32, tmp);
2088 return ERROR_OK;
2089 }
2090
2091 static int cortex_m_dwt_set_reg(struct reg *reg, uint8_t *buf)
2092 {
2093 struct dwt_reg_state *state = reg->arch_info;
2094
2095 return target_write_u32(state->target, state->addr,
2096 buf_get_u32(buf, 0, reg->size));
2097 }
2098
/* Static description of one DWT register exposed in the cache. */
struct dwt_reg {
	uint32_t addr;		/* memory-mapped register address */
	const char *name;	/* name shown in the register cache */
	unsigned size;		/* register width in bits */
};
2104
/* DWT registers present whenever a DWT unit exists (cached as the first
 * two entries of the DWT register cache). */
static const struct dwt_reg dwt_base_regs[] = {
	{ DWT_CTRL, "dwt_ctrl", 32, },
	/* NOTE that Erratum 532314 (fixed r2p0) affects CYCCNT: it wrongly
	 * increments while the core is asleep.
	 */
	{ DWT_CYCCNT, "dwt_cyccnt", 32, },
	/* plus some 8 bit counters, useful for profiling with TPIU */
};
2113
/* Comparator register triples (comp/mask/function) for up to 16 DWT
 * comparators; each comparator bank is 0x10 bytes apart.  The mask
 * register is declared 4 bits wide; the others are full words.
 */
static const struct dwt_reg dwt_comp[] = {
#define DWT_COMPARATOR(i) \
		{ DWT_COMP0 + 0x10 * (i), "dwt_" #i "_comp", 32, }, \
		{ DWT_MASK0 + 0x10 * (i), "dwt_" #i "_mask", 4, }, \
		{ DWT_FUNCTION0 + 0x10 * (i), "dwt_" #i "_function", 32, }
	DWT_COMPARATOR(0),
	DWT_COMPARATOR(1),
	DWT_COMPARATOR(2),
	DWT_COMPARATOR(3),
	DWT_COMPARATOR(4),
	DWT_COMPARATOR(5),
	DWT_COMPARATOR(6),
	DWT_COMPARATOR(7),
	DWT_COMPARATOR(8),
	DWT_COMPARATOR(9),
	DWT_COMPARATOR(10),
	DWT_COMPARATOR(11),
	DWT_COMPARATOR(12),
	DWT_COMPARATOR(13),
	DWT_COMPARATOR(14),
	DWT_COMPARATOR(15),
#undef DWT_COMPARATOR
};
2137
/* Access vtable connecting DWT cache registers to target memory. */
static const struct reg_arch_type dwt_reg_type = {
	.get = cortex_m_dwt_get_reg,
	.set = cortex_m_dwt_set_reg,
};
2142
2143 static void cortex_m_dwt_addreg(struct target *t, struct reg *r, const struct dwt_reg *d)
2144 {
2145 struct dwt_reg_state *state;
2146
2147 state = calloc(1, sizeof(*state));
2148 if (!state)
2149 return;
2150 state->addr = d->addr;
2151 state->target = t;
2152
2153 r->name = d->name;
2154 r->size = d->size;
2155 r->value = state->value;
2156 r->arch_info = state;
2157 r->type = &dwt_reg_type;
2158 }
2159
/* Probe the DWT unit and expose its registers through a register cache.
 *
 * Reads DWT_CTRL to discover the comparator count, allocates comparator
 * bookkeeping plus a "Cortex-M DWT registers" cache (dwt_ctrl,
 * dwt_cyccnt, and comp/mask/function per comparator), and clears any
 * comparator the hardware may still have armed.  On allocation failure
 * everything is rolled back and dwt_num_comp is forced to 0 so callers
 * see "no DWT".
 */
static void cortex_m_dwt_setup(struct cortex_m_common *cm, struct target *target)
{
	uint32_t dwtcr;
	struct reg_cache *cache;
	struct cortex_m_dwt_comparator *comparator;
	int reg;

	/* DWT_CTRL reading all-zero means no DWT is implemented. */
	target_read_u32(target, DWT_CTRL, &dwtcr);
	LOG_TARGET_DEBUG(target, "DWT_CTRL: 0x%" PRIx32, dwtcr);
	if (!dwtcr) {
		LOG_TARGET_DEBUG(target, "no DWT");
		return;
	}

	target_read_u32(target, DWT_DEVARCH, &cm->dwt_devarch);
	LOG_TARGET_DEBUG(target, "DWT_DEVARCH: 0x%" PRIx32, cm->dwt_devarch);

	/* DWT_CTRL[31:28] gives the number of comparators. */
	cm->dwt_num_comp = (dwtcr >> 28) & 0xF;
	cm->dwt_comp_available = cm->dwt_num_comp;
	cm->dwt_comparator_list = calloc(cm->dwt_num_comp,
			sizeof(struct cortex_m_dwt_comparator));
	if (!cm->dwt_comparator_list) {
		/* Unwind targets for allocation failures below. */
fail0:
		cm->dwt_num_comp = 0;
		LOG_TARGET_ERROR(target, "out of mem");
		return;
	}

	cache = calloc(1, sizeof(*cache));
	if (!cache) {
fail1:
		free(cm->dwt_comparator_list);
		goto fail0;
	}
	cache->name = "Cortex-M DWT registers";
	cache->num_regs = 2 + cm->dwt_num_comp * 3;
	cache->reg_list = calloc(cache->num_regs, sizeof(*cache->reg_list));
	if (!cache->reg_list) {
		free(cache);
		goto fail1;
	}

	/* Base registers first: dwt_ctrl and dwt_cyccnt. */
	for (reg = 0; reg < 2; reg++)
		cortex_m_dwt_addreg(target, cache->reg_list + reg,
			dwt_base_regs + reg);

	comparator = cm->dwt_comparator_list;
	for (unsigned int i = 0; i < cm->dwt_num_comp; i++, comparator++) {
		int j;

		/* Each comparator bank occupies a 0x10-byte stride. */
		comparator->dwt_comparator_address = DWT_COMP0 + 0x10 * i;
		for (j = 0; j < 3; j++, reg++)
			cortex_m_dwt_addreg(target, cache->reg_list + reg,
				dwt_comp + 3 * i + j);

		/* make sure we clear any watchpoints enabled on the target */
		target_write_u32(target, comparator->dwt_comparator_address + 8, 0);
	}

	/* Link the new cache onto the end of the target's cache chain. */
	*register_get_last_cache_p(&target->reg_cache) = cache;
	cm->dwt_cache = cache;

	LOG_TARGET_DEBUG(target, "DWT dwtcr 0x%" PRIx32 ", comp %d, watch%s",
		dwtcr, cm->dwt_num_comp,
		(dwtcr & (0xf << 24)) ? " only" : "/trigger");

	/* REVISIT: if num_comp > 1, check whether comparator #1 can
	 * implement single-address data value watchpoints ... so we
	 * won't need to check it later, when asked to set one up.
	 */
}
2231
2232 static void cortex_m_dwt_free(struct target *target)
2233 {
2234 struct cortex_m_common *cm = target_to_cm(target);
2235 struct reg_cache *cache = cm->dwt_cache;
2236
2237 free(cm->dwt_comparator_list);
2238 cm->dwt_comparator_list = NULL;
2239 cm->dwt_num_comp = 0;
2240
2241 if (cache) {
2242 register_unlink_cache(&target->reg_cache, cache);
2243
2244 if (cache->reg_list) {
2245 for (size_t i = 0; i < cache->num_regs; i++)
2246 free(cache->reg_list[i].arch_info);
2247 free(cache->reg_list);
2248 }
2249 free(cache);
2250 }
2251 cm->dwt_cache = NULL;
2252 }
2253
2254 #define MVFR0 0xe000ef40
2255 #define MVFR1 0xe000ef44
2256
2257 #define MVFR0_DEFAULT_M4 0x10110021
2258 #define MVFR1_DEFAULT_M4 0x11000011
2259
2260 #define MVFR0_DEFAULT_M7_SP 0x10110021
2261 #define MVFR0_DEFAULT_M7_DP 0x10110221
2262 #define MVFR1_DEFAULT_M7_SP 0x11000011
2263 #define MVFR1_DEFAULT_M7_DP 0x12000011
2264
2265 static int cortex_m_find_mem_ap(struct adiv5_dap *swjdp,
2266 struct adiv5_ap **debug_ap)
2267 {
2268 if (dap_find_get_ap(swjdp, AP_TYPE_AHB3_AP, debug_ap) == ERROR_OK)
2269 return ERROR_OK;
2270
2271 return dap_find_get_ap(swjdp, AP_TYPE_AHB5_AP, debug_ap);
2272 }
2273
2274 int cortex_m_examine(struct target *target)
2275 {
2276 int retval;
2277 uint32_t cpuid, fpcr, mvfr0, mvfr1;
2278 struct cortex_m_common *cortex_m = target_to_cm(target);
2279 struct adiv5_dap *swjdp = cortex_m->armv7m.arm.dap;
2280 struct armv7m_common *armv7m = target_to_armv7m(target);
2281
2282 /* hla_target shares the examine handler but does not support
2283 * all its calls */
2284 if (!armv7m->is_hla_target) {
2285 if (armv7m->debug_ap) {
2286 dap_put_ap(armv7m->debug_ap);
2287 armv7m->debug_ap = NULL;
2288 }
2289
2290 if (cortex_m->apsel == DP_APSEL_INVALID) {
2291 /* Search for the MEM-AP */
2292 retval = cortex_m_find_mem_ap(swjdp, &armv7m->debug_ap);
2293 if (retval != ERROR_OK) {
2294 LOG_TARGET_ERROR(target, "Could not find MEM-AP to control the core");
2295 return retval;
2296 }
2297 } else {
2298 armv7m->debug_ap = dap_get_ap(swjdp, cortex_m->apsel);
2299 if (!armv7m->debug_ap) {
2300 LOG_ERROR("Cannot get AP");
2301 return ERROR_FAIL;
2302 }
2303 }
2304
2305 armv7m->debug_ap->memaccess_tck = 8;
2306
2307 retval = mem_ap_init(armv7m->debug_ap);
2308 if (retval != ERROR_OK)
2309 return retval;
2310 }
2311
2312 if (!target_was_examined(target)) {
2313 target_set_examined(target);
2314
2315 /* Read from Device Identification Registers */
2316 retval = target_read_u32(target, CPUID, &cpuid);
2317 if (retval != ERROR_OK)
2318 return retval;
2319
2320 /* Get ARCH and CPU types */
2321 const enum cortex_m_partno core_partno = (cpuid & ARM_CPUID_PARTNO_MASK) >> ARM_CPUID_PARTNO_POS;
2322
2323 for (unsigned int n = 0; n < ARRAY_SIZE(cortex_m_parts); n++) {
2324 if (core_partno == cortex_m_parts[n].partno) {
2325 cortex_m->core_info = &cortex_m_parts[n];
2326 break;
2327 }
2328 }
2329
2330 if (!cortex_m->core_info) {
2331 LOG_TARGET_ERROR(target, "Cortex-M PARTNO 0x%x is unrecognized", core_partno);
2332 return ERROR_FAIL;
2333 }
2334
2335 armv7m->arm.arch = cortex_m->core_info->arch;
2336
2337 LOG_TARGET_INFO(target, "%s r%" PRId8 "p%" PRId8 " processor detected",
2338 cortex_m->core_info->name,
2339 (uint8_t)((cpuid >> 20) & 0xf),
2340 (uint8_t)((cpuid >> 0) & 0xf));
2341
2342 cortex_m->maskints_erratum = false;
2343 if (core_partno == CORTEX_M7_PARTNO) {
2344 uint8_t rev, patch;
2345 rev = (cpuid >> 20) & 0xf;
2346 patch = (cpuid >> 0) & 0xf;
2347 if ((rev == 0) && (patch < 2)) {
2348 LOG_TARGET_WARNING(target, "Silicon bug: single stepping may enter pending exception handler!");
2349 cortex_m->maskints_erratum = true;
2350 }
2351 }
2352 LOG_TARGET_DEBUG(target, "cpuid: 0x%8.8" PRIx32 "", cpuid);
2353
2354 if (cortex_m->core_info->flags & CORTEX_M_F_HAS_FPV4) {
2355 target_read_u32(target, MVFR0, &mvfr0);
2356 target_read_u32(target, MVFR1, &mvfr1);
2357
2358 /* test for floating point feature on Cortex-M4 */
2359 if ((mvfr0 == MVFR0_DEFAULT_M4) && (mvfr1 == MVFR1_DEFAULT_M4)) {
2360 LOG_TARGET_DEBUG(target, "%s floating point feature FPv4_SP found", cortex_m->core_info->name);
2361 armv7m->fp_feature = FPV4_SP;
2362 }
2363 } else if (cortex_m->core_info->flags & CORTEX_M_F_HAS_FPV5) {
2364 target_read_u32(target, MVFR0, &mvfr0);
2365 target_read_u32(target, MVFR1, &mvfr1);
2366
2367 /* test for floating point features on Cortex-M7 */
2368 if ((mvfr0 == MVFR0_DEFAULT_M7_SP) && (mvfr1 == MVFR1_DEFAULT_M7_SP)) {
2369 LOG_TARGET_DEBUG(target, "%s floating point feature FPv5_SP found", cortex_m->core_info->name);
2370 armv7m->fp_feature = FPV5_SP;
2371 } else if ((mvfr0 == MVFR0_DEFAULT_M7_DP) && (mvfr1 == MVFR1_DEFAULT_M7_DP)) {
2372 LOG_TARGET_DEBUG(target, "%s floating point feature FPv5_DP found", cortex_m->core_info->name);
2373 armv7m->fp_feature = FPV5_DP;
2374 }
2375 }
2376
2377 /* VECTRESET is supported only on ARMv7-M cores */
2378 cortex_m->vectreset_supported = armv7m->arm.arch == ARM_ARCH_V7M;
2379
2380 /* Check for FPU, otherwise mark FPU register as non-existent */
2381 if (armv7m->fp_feature == FP_NONE)
2382 for (size_t idx = ARMV7M_FPU_FIRST_REG; idx <= ARMV7M_FPU_LAST_REG; idx++)
2383 armv7m->arm.core_cache->reg_list[idx].exist = false;
2384
2385 if (armv7m->arm.arch != ARM_ARCH_V8M)
2386 for (size_t idx = ARMV8M_FIRST_REG; idx <= ARMV8M_LAST_REG; idx++)
2387 armv7m->arm.core_cache->reg_list[idx].exist = false;
2388
2389 if (!armv7m->is_hla_target) {
2390 if (cortex_m->core_info->flags & CORTEX_M_F_TAR_AUTOINCR_BLOCK_4K)
2391 /* Cortex-M3/M4 have 4096 bytes autoincrement range,
2392 * s. ARM IHI 0031C: MEM-AP 7.2.2 */
2393 armv7m->debug_ap->tar_autoincr_block = (1 << 12);
2394 }
2395
2396 retval = target_read_u32(target, DCB_DHCSR, &cortex_m->dcb_dhcsr);
2397 if (retval != ERROR_OK)
2398 return retval;
2399 cortex_m_cumulate_dhcsr_sticky(cortex_m, cortex_m->dcb_dhcsr);
2400
2401 if (!(cortex_m->dcb_dhcsr & C_DEBUGEN)) {
2402 /* Enable debug requests */
2403 uint32_t dhcsr = (cortex_m->dcb_dhcsr | C_DEBUGEN) & ~(C_HALT | C_STEP | C_MASKINTS);
2404
2405 retval = target_write_u32(target, DCB_DHCSR, DBGKEY | (dhcsr & 0x0000FFFFUL));
2406 if (retval != ERROR_OK)
2407 return retval;
2408 cortex_m->dcb_dhcsr = dhcsr;
2409 }
2410
2411 /* Configure trace modules */
2412 retval = target_write_u32(target, DCB_DEMCR, TRCENA | armv7m->demcr);
2413 if (retval != ERROR_OK)
2414 return retval;
2415
2416 if (armv7m->trace_config.itm_deferred_config)
2417 armv7m_trace_itm_config(target);
2418
2419 /* NOTE: FPB and DWT are both optional. */
2420
2421 /* Setup FPB */
2422 target_read_u32(target, FP_CTRL, &fpcr);
2423 /* bits [14:12] and [7:4] */
2424 cortex_m->fp_num_code = ((fpcr >> 8) & 0x70) | ((fpcr >> 4) & 0xF);
2425 cortex_m->fp_num_lit = (fpcr >> 8) & 0xF;
2426 /* Detect flash patch revision, see RM DDI 0403E.b page C1-817.
2427 Revision is zero base, fp_rev == 1 means Rev.2 ! */
2428 cortex_m->fp_rev = (fpcr >> 28) & 0xf;
2429 free(cortex_m->fp_comparator_list);
2430 cortex_m->fp_comparator_list = calloc(
2431 cortex_m->fp_num_code + cortex_m->fp_num_lit,
2432 sizeof(struct cortex_m_fp_comparator));
2433 cortex_m->fpb_enabled = fpcr & 1;
2434 for (unsigned int i = 0; i < cortex_m->fp_num_code + cortex_m->fp_num_lit; i++) {
2435 cortex_m->fp_comparator_list[i].type =
2436 (i < cortex_m->fp_num_code) ? FPCR_CODE : FPCR_LITERAL;
2437 cortex_m->fp_comparator_list[i].fpcr_address = FP_COMP0 + 4 * i;
2438
2439 /* make sure we clear any breakpoints enabled on the target */
2440 target_write_u32(target, cortex_m->fp_comparator_list[i].fpcr_address, 0);
2441 }
2442 LOG_TARGET_DEBUG(target, "FPB fpcr 0x%" PRIx32 ", numcode %i, numlit %i",
2443 fpcr,
2444 cortex_m->fp_num_code,
2445 cortex_m->fp_num_lit);
2446
2447 /* Setup DWT */
2448 cortex_m_dwt_free(target);
2449 cortex_m_dwt_setup(cortex_m, target);
2450
2451 /* These hardware breakpoints only work for code in flash! */
2452 LOG_TARGET_INFO(target, "target has %d breakpoints, %d watchpoints",
2453 cortex_m->fp_num_code,
2454 cortex_m->dwt_num_comp);
2455 }
2456
2457 return ERROR_OK;
2458 }
2459
2460 static int cortex_m_dcc_read(struct target *target, uint8_t *value, uint8_t *ctrl)
2461 {
2462 struct armv7m_common *armv7m = target_to_armv7m(target);
2463 uint16_t dcrdr;
2464 uint8_t buf[2];
2465 int retval;
2466
2467 retval = mem_ap_read_buf_noincr(armv7m->debug_ap, buf, 2, 1, DCB_DCRDR);
2468 if (retval != ERROR_OK)
2469 return retval;
2470
2471 dcrdr = target_buffer_get_u16(target, buf);
2472 *ctrl = (uint8_t)dcrdr;
2473 *value = (uint8_t)(dcrdr >> 8);
2474
2475 LOG_TARGET_DEBUG(target, "data 0x%x ctrl 0x%x", *value, *ctrl);
2476
2477 /* write ack back to software dcc register
2478 * signify we have read data */
2479 if (dcrdr & (1 << 0)) {
2480 target_buffer_set_u16(target, buf, 0);
2481 retval = mem_ap_write_buf_noincr(armv7m->debug_ap, buf, 2, 1, DCB_DCRDR);
2482 if (retval != ERROR_OK)
2483 return retval;
2484 }
2485
2486 return ERROR_OK;
2487 }
2488
2489 static int cortex_m_target_request_data(struct target *target,
2490 uint32_t size, uint8_t *buffer)
2491 {
2492 uint8_t data;
2493 uint8_t ctrl;
2494 uint32_t i;
2495
2496 for (i = 0; i < (size * 4); i++) {
2497 int retval = cortex_m_dcc_read(target, &data, &ctrl);
2498 if (retval != ERROR_OK)
2499 return retval;
2500 buffer[i] = data;
2501 }
2502
2503 return ERROR_OK;
2504 }
2505
2506 static int cortex_m_handle_target_request(void *priv)
2507 {
2508 struct target *target = priv;
2509 if (!target_was_examined(target))
2510 return ERROR_OK;
2511
2512 if (!target->dbg_msg_enabled)
2513 return ERROR_OK;
2514
2515 if (target->state == TARGET_RUNNING) {
2516 uint8_t data;
2517 uint8_t ctrl;
2518 int retval;
2519
2520 retval = cortex_m_dcc_read(target, &data, &ctrl);
2521 if (retval != ERROR_OK)
2522 return retval;
2523
2524 /* check if we have data */
2525 if (ctrl & (1 << 0)) {
2526 uint32_t request;
2527
2528 /* we assume target is quick enough */
2529 request = data;
2530 for (int i = 1; i <= 3; i++) {
2531 retval = cortex_m_dcc_read(target, &data, &ctrl);
2532 if (retval != ERROR_OK)
2533 return retval;
2534 request |= ((uint32_t)data << (i * 8));
2535 }
2536 target_request(target, request);
2537 }
2538 }
2539
2540 return ERROR_OK;
2541 }
2542
2543 static int cortex_m_init_arch_info(struct target *target,
2544 struct cortex_m_common *cortex_m, struct adiv5_dap *dap)
2545 {
2546 struct armv7m_common *armv7m = &cortex_m->armv7m;
2547
2548 armv7m_init_arch_info(target, armv7m);
2549
2550 /* default reset mode is to use srst if fitted
2551 * if not it will use CORTEX_M3_RESET_VECTRESET */
2552 cortex_m->soft_reset_config = CORTEX_M_RESET_VECTRESET;
2553
2554 armv7m->arm.dap = dap;
2555
2556 /* register arch-specific functions */
2557 armv7m->examine_debug_reason = cortex_m_examine_debug_reason;
2558
2559 armv7m->post_debug_entry = NULL;
2560
2561 armv7m->pre_restore_context = NULL;
2562
2563 armv7m->load_core_reg_u32 = cortex_m_load_core_reg_u32;
2564 armv7m->store_core_reg_u32 = cortex_m_store_core_reg_u32;
2565
2566 target_register_timer_callback(cortex_m_handle_target_request, 1,
2567 TARGET_TIMER_TYPE_PERIODIC, target);
2568
2569 return ERROR_OK;
2570 }
2571
2572 static int cortex_m_target_create(struct target *target, Jim_Interp *interp)
2573 {
2574 struct adiv5_private_config *pc;
2575
2576 pc = (struct adiv5_private_config *)target->private_config;
2577 if (adiv5_verify_config(pc) != ERROR_OK)
2578 return ERROR_FAIL;
2579
2580 struct cortex_m_common *cortex_m = calloc(1, sizeof(struct cortex_m_common));
2581 if (!cortex_m) {
2582 LOG_TARGET_ERROR(target, "No memory creating target");
2583 return ERROR_FAIL;
2584 }
2585
2586 cortex_m->common_magic = CORTEX_M_COMMON_MAGIC;
2587 cortex_m->apsel = pc->ap_num;
2588
2589 cortex_m_init_arch_info(target, cortex_m, pc->dap);
2590
2591 return ERROR_OK;
2592 }
2593
2594 /*--------------------------------------------------------------------------*/
2595
2596 static int cortex_m_verify_pointer(struct command_invocation *cmd,
2597 struct cortex_m_common *cm)
2598 {
2599 if (!is_cortex_m_with_dap_access(cm)) {
2600 command_print(cmd, "target is not a Cortex-M");
2601 return ERROR_TARGET_INVALID;
2602 }
2603 return ERROR_OK;
2604 }
2605
2606 /*
2607 * Only stuff below this line should need to verify that its target
2608 * is a Cortex-M3. Everything else should have indirected through the
2609 * cortexm3_target structure, which is only used with CM3 targets.
2610 */
2611
/* "cortex_m vector_catch" command: show or update which exception
 * vectors (DEMCR VC_* bits) force debug entry.  Arguments are "all",
 * "none", or any list of individual vector names; with no arguments
 * the current configuration is printed.
 */
COMMAND_HANDLER(handle_cortex_m_vector_catch_command)
{
	struct target *target = get_current_target(CMD_CTX);
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = &cortex_m->armv7m;
	uint32_t demcr = 0;
	int retval;

	/* Name <-> DEMCR bit mapping for each catchable vector. */
	static const struct {
		char name[10];
		unsigned mask;
	} vec_ids[] = {
		{ "hard_err", VC_HARDERR, },
		{ "int_err", VC_INTERR, },
		{ "bus_err", VC_BUSERR, },
		{ "state_err", VC_STATERR, },
		{ "chk_err", VC_CHKERR, },
		{ "nocp_err", VC_NOCPERR, },
		{ "mm_err", VC_MMERR, },
		{ "reset", VC_CORERESET, },
	};

	retval = cortex_m_verify_pointer(CMD, cortex_m);
	if (retval != ERROR_OK)
		return retval;

	if (!target_was_examined(target)) {
		LOG_TARGET_ERROR(target, "Target not examined yet");
		return ERROR_FAIL;
	}

	retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DEMCR, &demcr);
	if (retval != ERROR_OK)
		return retval;

	if (CMD_ARGC > 0) {
		unsigned catch = 0;

		/* "all"/"none" shortcuts skip the per-name lookup loop. */
		if (CMD_ARGC == 1) {
			if (strcmp(CMD_ARGV[0], "all") == 0) {
				catch = VC_HARDERR | VC_INTERR | VC_BUSERR
					| VC_STATERR | VC_CHKERR | VC_NOCPERR
					| VC_MMERR | VC_CORERESET;
				goto write;
			} else if (strcmp(CMD_ARGV[0], "none") == 0)
				goto write;
		}
		/* OR in the mask bit for every named vector; note this loop
		 * consumes CMD_ARGC. */
		while (CMD_ARGC-- > 0) {
			unsigned i;
			for (i = 0; i < ARRAY_SIZE(vec_ids); i++) {
				if (strcmp(CMD_ARGV[CMD_ARGC], vec_ids[i].name) != 0)
					continue;
				catch |= vec_ids[i].mask;
				break;
			}
			if (i == ARRAY_SIZE(vec_ids)) {
				LOG_TARGET_ERROR(target, "No CM3 vector '%s'", CMD_ARGV[CMD_ARGC]);
				return ERROR_COMMAND_SYNTAX_ERROR;
			}
		}
write:
		/* For now, armv7m->demcr only stores vector catch flags. */
		armv7m->demcr = catch;

		/* Replace only the low-half vector-catch bits of DEMCR. */
		demcr &= ~0xffff;
		demcr |= catch;

		/* write, but don't assume it stuck (why not??) */
		retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DEMCR, demcr);
		if (retval != ERROR_OK)
			return retval;
		retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DEMCR, &demcr);
		if (retval != ERROR_OK)
			return retval;

		/* FIXME be sure to clear DEMCR on clean server shutdown.
		 * Otherwise the vector catch hardware could fire when there's
		 * no debugger hooked up, causing much confusion...
		 */
	}

	/* Report the (re-read) catch/ignore state of every vector. */
	for (unsigned i = 0; i < ARRAY_SIZE(vec_ids); i++) {
		command_print(CMD, "%9s: %s", vec_ids[i].name,
			(demcr & vec_ids[i].mask) ? "catch" : "ignore");
	}

	return ERROR_OK;
}
2700
2701 COMMAND_HANDLER(handle_cortex_m_mask_interrupts_command)
2702 {
2703 struct target *target = get_current_target(CMD_CTX);
2704 struct cortex_m_common *cortex_m = target_to_cm(target);
2705 int retval;
2706
2707 static const struct jim_nvp nvp_maskisr_modes[] = {
2708 { .name = "auto", .value = CORTEX_M_ISRMASK_AUTO },
2709 { .name = "off", .value = CORTEX_M_ISRMASK_OFF },
2710 { .name = "on", .value = CORTEX_M_ISRMASK_ON },
2711 { .name = "steponly", .value = CORTEX_M_ISRMASK_STEPONLY },
2712 { .name = NULL, .value = -1 },
2713 };
2714 const struct jim_nvp *n;
2715
2716
2717 retval = cortex_m_verify_pointer(CMD, cortex_m);
2718 if (retval != ERROR_OK)
2719 return retval;
2720
2721 if (target->state != TARGET_HALTED) {
2722 command_print(CMD, "target must be stopped for \"%s\" command", CMD_NAME);
2723 return ERROR_OK;
2724 }
2725
2726 if (CMD_ARGC > 0) {
2727 n = jim_nvp_name2value_simple(nvp_maskisr_modes, CMD_ARGV[0]);
2728 if (!n->name)
2729 return ERROR_COMMAND_SYNTAX_ERROR;
2730 cortex_m->isrmasking_mode = n->value;
2731 cortex_m_set_maskints_for_halt(target);
2732 }
2733
2734 n = jim_nvp_value2name_simple(nvp_maskisr_modes, cortex_m->isrmasking_mode);
2735 command_print(CMD, "cortex_m interrupt mask %s", n->name);
2736
2737 return ERROR_OK;
2738 }
2739
2740 COMMAND_HANDLER(handle_cortex_m_reset_config_command)
2741 {
2742 struct target *target = get_current_target(CMD_CTX);
2743 struct cortex_m_common *cortex_m = target_to_cm(target);
2744 int retval;
2745 char *reset_config;
2746
2747 retval = cortex_m_verify_pointer(CMD, cortex_m);
2748 if (retval != ERROR_OK)
2749 return retval;
2750
2751 if (CMD_ARGC > 0) {
2752 if (strcmp(*CMD_ARGV, "sysresetreq") == 0)
2753 cortex_m->soft_reset_config = CORTEX_M_RESET_SYSRESETREQ;
2754
2755 else if (strcmp(*CMD_ARGV, "vectreset") == 0) {
2756 if (target_was_examined(target)
2757 && !cortex_m->vectreset_supported)
2758 LOG_TARGET_WARNING(target, "VECTRESET is not supported on your Cortex-M core!");
2759 else
2760 cortex_m->soft_reset_config = CORTEX_M_RESET_VECTRESET;
2761
2762 } else
2763 return ERROR_COMMAND_SYNTAX_ERROR;
2764 }
2765
2766 switch (cortex_m->soft_reset_config) {
2767 case CORTEX_M_RESET_SYSRESETREQ:
2768 reset_config = "sysresetreq";
2769 break;
2770
2771 case CORTEX_M_RESET_VECTRESET:
2772 reset_config = "vectreset";
2773 break;
2774
2775 default:
2776 reset_config = "unknown";
2777 break;
2778 }
2779
2780 command_print(CMD, "cortex_m reset_config %s", reset_config);
2781
2782 return ERROR_OK;
2783 }
2784
/* Subcommands of the "cortex_m" command group. */
static const struct command_registration cortex_m_exec_command_handlers[] = {
	{
		.name = "maskisr",
		.handler = handle_cortex_m_mask_interrupts_command,
		.mode = COMMAND_EXEC,
		.help = "mask cortex_m interrupts",
		.usage = "['auto'|'on'|'off'|'steponly']",
	},
	{
		.name = "vector_catch",
		.handler = handle_cortex_m_vector_catch_command,
		.mode = COMMAND_EXEC,
		.help = "configure hardware vectors to trigger debug entry",
		.usage = "['all'|'none'|('bus_err'|'chk_err'|...)*]",
	},
	{
		/* COMMAND_ANY: usable before examine, e.g. from config scripts */
		.name = "reset_config",
		.handler = handle_cortex_m_reset_config_command,
		.mode = COMMAND_ANY,
		.help = "configure software reset handling",
		.usage = "['sysresetreq'|'vectreset']",
	},
	COMMAND_REGISTRATION_DONE
};
/* Top-level command set for cortex_m targets: chains the shared ARMv7-M
 * and trace command groups, the deprecated TPIU commands, the
 * "cortex_m" group above, and the RTT commands.
 */
static const struct command_registration cortex_m_command_handlers[] = {
	{
		.chain = armv7m_command_handlers,
	},
	{
		.chain = armv7m_trace_command_handlers,
	},
	/* START_DEPRECATED_TPIU */
	{
		.chain = arm_tpiu_deprecated_command_handlers,
	},
	/* END_DEPRECATED_TPIU */
	{
		.name = "cortex_m",
		.mode = COMMAND_EXEC,
		.help = "Cortex-M command group",
		.usage = "",
		.chain = cortex_m_exec_command_handlers,
	},
	{
		.chain = rtt_target_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
2833
/* Driver hook table for the "cortex_m" target type. */
struct target_type cortexm_target = {
	.name = "cortex_m",

	/* run control */
	.poll = cortex_m_poll,
	.arch_state = armv7m_arch_state,

	.target_request_data = cortex_m_target_request_data,

	.halt = cortex_m_halt,
	.resume = cortex_m_resume,
	.step = cortex_m_step,

	/* reset handling */
	.assert_reset = cortex_m_assert_reset,
	.deassert_reset = cortex_m_deassert_reset,
	.soft_reset_halt = cortex_m_soft_reset_halt,

	/* GDB interface (shared ARMv7-M implementations) */
	.get_gdb_arch = arm_get_gdb_arch,
	.get_gdb_reg_list = armv7m_get_gdb_reg_list,

	/* memory access */
	.read_memory = cortex_m_read_memory,
	.write_memory = cortex_m_write_memory,
	.checksum_memory = armv7m_checksum_memory,
	.blank_check_memory = armv7m_blank_check_memory,

	/* on-target algorithm execution */
	.run_algorithm = armv7m_run_algorithm,
	.start_algorithm = armv7m_start_algorithm,
	.wait_algorithm = armv7m_wait_algorithm,

	/* breakpoints and watchpoints (FPB/DWT based) */
	.add_breakpoint = cortex_m_add_breakpoint,
	.remove_breakpoint = cortex_m_remove_breakpoint,
	.add_watchpoint = cortex_m_add_watchpoint,
	.remove_watchpoint = cortex_m_remove_watchpoint,
	.hit_watchpoint = cortex_m_hit_watchpoint,

	/* lifecycle */
	.commands = cortex_m_command_handlers,
	.target_create = cortex_m_target_create,
	.target_jim_configure = adiv5_jim_configure,
	.init_target = cortex_m_init_target,
	.examine = cortex_m_examine,
	.deinit_target = cortex_m_deinit_target,

	.profiling = cortex_m_profiling,
};

Linking to existing account procedure

If you already have an account and want to add another login method you MUST first sign in with your existing account and then change URL to read https://review.openocd.org/login/?link to get to this page again but this time it'll work for linking. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)