target/cortex_m: check core implementor field
[openocd.git] / src / target / cortex_m.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2
3 /***************************************************************************
4 * Copyright (C) 2005 by Dominic Rath *
5 * Dominic.Rath@gmx.de *
6 * *
7 * Copyright (C) 2006 by Magnus Lundin *
8 * lundin@mlu.mine.nu *
9 * *
10 * Copyright (C) 2008 by Spencer Oliver *
11 * spen@spen-soft.co.uk *
12 * *
13 * *
14 * Cortex-M3(tm) TRM, ARM DDI 0337E (r1p1) and 0337G (r2p0) *
15 * *
16 ***************************************************************************/
17 #ifdef HAVE_CONFIG_H
18 #include "config.h"
19 #endif
20
21 #include "jtag/interface.h"
22 #include "breakpoints.h"
23 #include "cortex_m.h"
24 #include "target_request.h"
25 #include "target_type.h"
26 #include "arm_adi_v5.h"
27 #include "arm_disassembler.h"
28 #include "register.h"
29 #include "arm_opcodes.h"
30 #include "arm_semihosting.h"
31 #include "smp.h"
32 #include <helper/nvp.h>
33 #include <helper/time_support.h>
34 #include <rtt/rtt.h>
35
36 /* NOTE: most of this should work fine for the Cortex-M1 and
37 * Cortex-M0 cores too, although they're ARMv6-M not ARMv7-M.
38 * Some differences: M0/M1 doesn't have FPB remapping or the
39 * DWT tracing/profiling support. (So the cycle counter will
40 * not be usable; the other stuff isn't currently used here.)
41 *
42 * Although there are some workarounds for errata seen only in r0p0
43 * silicon, such old parts are hard to find and thus not much tested
44 * any longer.
45 */
46
47 /* Timeout for register r/w */
48 #define DHCSR_S_REGRDY_TIMEOUT (500)
49
/* Supported Cortex-M Cores
 *
 * Matched during examine against the CPUID implementer/part number field
 * (.impl_part) to pick the display name, the architecture generation and
 * the per-core quirk/feature flags (FPU variant, TAR autoincrement size).
 */
static const struct cortex_m_part_info cortex_m_parts[] = {
	{
		.impl_part = CORTEX_M0_PARTNO,
		.name = "Cortex-M0",
		.arch = ARM_ARCH_V6M,
	},
	{
		.impl_part = CORTEX_M0P_PARTNO,
		.name = "Cortex-M0+",
		.arch = ARM_ARCH_V6M,
	},
	{
		.impl_part = CORTEX_M1_PARTNO,
		.name = "Cortex-M1",
		.arch = ARM_ARCH_V6M,
	},
	{
		.impl_part = CORTEX_M3_PARTNO,
		.name = "Cortex-M3",
		.arch = ARM_ARCH_V7M,
		.flags = CORTEX_M_F_TAR_AUTOINCR_BLOCK_4K,
	},
	{
		.impl_part = CORTEX_M4_PARTNO,
		.name = "Cortex-M4",
		.arch = ARM_ARCH_V7M,
		.flags = CORTEX_M_F_HAS_FPV4 | CORTEX_M_F_TAR_AUTOINCR_BLOCK_4K,
	},
	{
		.impl_part = CORTEX_M7_PARTNO,
		.name = "Cortex-M7",
		.arch = ARM_ARCH_V7M,
		.flags = CORTEX_M_F_HAS_FPV5,
	},
	{
		.impl_part = CORTEX_M23_PARTNO,
		.name = "Cortex-M23",
		.arch = ARM_ARCH_V8M,
	},
	{
		.impl_part = CORTEX_M33_PARTNO,
		.name = "Cortex-M33",
		.arch = ARM_ARCH_V8M,
		.flags = CORTEX_M_F_HAS_FPV5,
	},
	{
		.impl_part = CORTEX_M35P_PARTNO,
		.name = "Cortex-M35P",
		.arch = ARM_ARCH_V8M,
		.flags = CORTEX_M_F_HAS_FPV5,
	},
	{
		.impl_part = CORTEX_M55_PARTNO,
		.name = "Cortex-M55",
		.arch = ARM_ARCH_V8M,
		.flags = CORTEX_M_F_HAS_FPV5,
	},
	{
		.impl_part = STAR_MC1_PARTNO,
		.name = "STAR-MC1",
		.arch = ARM_ARCH_V8M,
		.flags = CORTEX_M_F_HAS_FPV5,
	},
};
115
116 /* forward declarations */
117 static int cortex_m_store_core_reg_u32(struct target *target,
118 uint32_t num, uint32_t value);
119 static void cortex_m_dwt_free(struct target *target);
120
/** DCB DHCSR register contains S_RETIRE_ST and S_RESET_ST bits cleared
 * on a read. Call this helper function each time DHCSR is read
 * to preserve S_RESET_ST state in case of a reset event was detected.
 */
static inline void cortex_m_cumulate_dhcsr_sticky(struct cortex_m_common *cortex_m,
	uint32_t dhcsr)
{
	/* OR-accumulate: once a clear-on-read sticky bit has been observed it
	 * stays set here until the consumer clears dcb_dhcsr_cumulated_sticky */
	cortex_m->dcb_dhcsr_cumulated_sticky |= dhcsr;
}
130
131 /** Read DCB DHCSR register to cortex_m->dcb_dhcsr and cumulate
132 * sticky bits in cortex_m->dcb_dhcsr_cumulated_sticky
133 */
134 static int cortex_m_read_dhcsr_atomic_sticky(struct target *target)
135 {
136 struct cortex_m_common *cortex_m = target_to_cm(target);
137 struct armv7m_common *armv7m = target_to_armv7m(target);
138
139 int retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DHCSR,
140 &cortex_m->dcb_dhcsr);
141 if (retval != ERROR_OK)
142 return retval;
143
144 cortex_m_cumulate_dhcsr_sticky(cortex_m, cortex_m->dcb_dhcsr);
145 return ERROR_OK;
146 }
147
/* Read one core register selected by the DCRSR REGSEL value.
 *
 * Writes the selector to DCRSR, then polls DHCSR S_REGRDY (with timeout
 * and keep_alive) while speculatively pre-reading DCRDR in the same
 * transaction; the pre-read value is accepted once S_REGRDY is seen.
 */
static int cortex_m_load_core_reg_u32(struct target *target,
		uint32_t regsel, uint32_t *value)
{
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = target_to_armv7m(target);
	int retval;
	uint32_t dcrdr, tmp_value;
	int64_t then;

	/* because the DCB_DCRDR is used for the emulated dcc channel
	 * we have to save/restore the DCB_DCRDR when used */
	if (target->dbg_msg_enabled) {
		retval = mem_ap_read_u32(armv7m->debug_ap, DCB_DCRDR, &dcrdr);
		if (retval != ERROR_OK)
			return retval;
	}

	retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DCRSR, regsel);
	if (retval != ERROR_OK)
		return retval;

	/* check if value from register is ready and pre-read it */
	then = timeval_ms();
	while (1) {
		retval = mem_ap_read_u32(armv7m->debug_ap, DCB_DHCSR,
				&cortex_m->dcb_dhcsr);
		if (retval != ERROR_OK)
			return retval;
		/* DCRDR is read in the same DAP flush as DHCSR; the value is
		 * only trusted if S_REGRDY was already set in that snapshot */
		retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DCRDR,
				&tmp_value);
		if (retval != ERROR_OK)
			return retval;
		cortex_m_cumulate_dhcsr_sticky(cortex_m, cortex_m->dcb_dhcsr);
		if (cortex_m->dcb_dhcsr & S_REGRDY)
			break;
		cortex_m->slow_register_read = true; /* Polling (still) needed. */
		if (timeval_ms() > then + DHCSR_S_REGRDY_TIMEOUT) {
			LOG_TARGET_ERROR(target, "Timeout waiting for DCRDR transfer ready");
			return ERROR_TIMEOUT_REACHED;
		}
		keep_alive();
	}

	*value = tmp_value;

	if (target->dbg_msg_enabled) {
		/* restore DCB_DCRDR - this needs to be in a separate
		 * transaction otherwise the emulated DCC channel breaks */
		if (retval == ERROR_OK)
			retval = mem_ap_write_atomic_u32(armv7m->debug_ap, DCB_DCRDR, dcrdr);
	}

	return retval;
}
202
203 static int cortex_m_slow_read_all_regs(struct target *target)
204 {
205 struct cortex_m_common *cortex_m = target_to_cm(target);
206 struct armv7m_common *armv7m = target_to_armv7m(target);
207 const unsigned int num_regs = armv7m->arm.core_cache->num_regs;
208
209 /* Opportunistically restore fast read, it'll revert to slow
210 * if any register needed polling in cortex_m_load_core_reg_u32(). */
211 cortex_m->slow_register_read = false;
212
213 for (unsigned int reg_id = 0; reg_id < num_regs; reg_id++) {
214 struct reg *r = &armv7m->arm.core_cache->reg_list[reg_id];
215 if (r->exist) {
216 int retval = armv7m->arm.read_core_reg(target, r, reg_id, ARM_MODE_ANY);
217 if (retval != ERROR_OK)
218 return retval;
219 }
220 }
221
222 if (!cortex_m->slow_register_read)
223 LOG_TARGET_DEBUG(target, "Switching back to fast register reads");
224
225 return ERROR_OK;
226 }
227
228 static int cortex_m_queue_reg_read(struct target *target, uint32_t regsel,
229 uint32_t *reg_value, uint32_t *dhcsr)
230 {
231 struct armv7m_common *armv7m = target_to_armv7m(target);
232 int retval;
233
234 retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DCRSR, regsel);
235 if (retval != ERROR_OK)
236 return retval;
237
238 retval = mem_ap_read_u32(armv7m->debug_ap, DCB_DHCSR, dhcsr);
239 if (retval != ERROR_OK)
240 return retval;
241
242 return mem_ap_read_u32(armv7m->debug_ap, DCB_DCRDR, reg_value);
243 }
244
/* Read all core registers in one queued DAP batch (no per-register polling).
 *
 * For every register a DHCSR snapshot is captured next to the DCRDR value;
 * after the single dap_run() flush, S_REGRDY of each snapshot is checked.
 * Returns ERROR_TIMEOUT_REACHED if any register was not ready, telling the
 * caller to fall back to cortex_m_slow_read_all_regs().
 */
static int cortex_m_fast_read_all_regs(struct target *target)
{
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = target_to_armv7m(target);
	int retval;
	uint32_t dcrdr;

	/* because the DCB_DCRDR is used for the emulated dcc channel
	 * we have to save/restore the DCB_DCRDR when used */
	if (target->dbg_msg_enabled) {
		retval = mem_ap_read_u32(armv7m->debug_ap, DCB_DCRDR, &dcrdr);
		if (retval != ERROR_OK)
			return retval;
	}

	const unsigned int num_regs = armv7m->arm.core_cache->num_regs;
	const unsigned int n_r32 = ARMV7M_LAST_REG - ARMV7M_CORE_FIRST_REG + 1
			+ ARMV7M_FPU_LAST_REG - ARMV7M_FPU_FIRST_REG + 1;
	/* we need one 32-bit word for each register except FP D0..D15, which
	 * need two words */
	uint32_t r_vals[n_r32];
	uint32_t dhcsr[n_r32];

	unsigned int wi = 0; /* write index to r_vals and dhcsr arrays */
	unsigned int reg_id; /* register index in the reg_list, ARMV7M_R0... */
	for (reg_id = 0; reg_id < num_regs; reg_id++) {
		struct reg *r = &armv7m->arm.core_cache->reg_list[reg_id];
		if (!r->exist)
			continue; /* skip non existent registers */

		if (r->size <= 8) {
			/* Any 8-bit or shorter register is unpacked from a 32-bit
			 * container register. Skip it now. */
			continue;
		}

		uint32_t regsel = armv7m_map_id_to_regsel(reg_id);
		retval = cortex_m_queue_reg_read(target, regsel, &r_vals[wi],
				&dhcsr[wi]);
		if (retval != ERROR_OK)
			return retval;
		wi++;

		assert(r->size == 32 || r->size == 64);
		if (r->size == 32)
			continue; /* done with 32-bit register */

		assert(reg_id >= ARMV7M_FPU_FIRST_REG && reg_id <= ARMV7M_FPU_LAST_REG);
		/* the odd part of FP register (S1, S3...) */
		retval = cortex_m_queue_reg_read(target, regsel + 1, &r_vals[wi],
				&dhcsr[wi]);
		if (retval != ERROR_OK)
			return retval;
		wi++;
	}

	assert(wi <= n_r32);

	/* flush the whole queued batch in one go */
	retval = dap_run(armv7m->debug_ap->dap);
	if (retval != ERROR_OK)
		return retval;

	if (target->dbg_msg_enabled) {
		/* restore DCB_DCRDR - this needs to be in a separate
		 * transaction otherwise the emulated DCC channel breaks */
		retval = mem_ap_write_atomic_u32(armv7m->debug_ap, DCB_DCRDR, dcrdr);
		if (retval != ERROR_OK)
			return retval;
	}

	/* verify each captured DHCSR snapshot showed S_REGRDY */
	bool not_ready = false;
	for (unsigned int i = 0; i < wi; i++) {
		if ((dhcsr[i] & S_REGRDY) == 0) {
			not_ready = true;
			LOG_TARGET_DEBUG(target, "Register %u was not ready during fast read", i);
		}
		cortex_m_cumulate_dhcsr_sticky(cortex_m, dhcsr[i]);
	}

	if (not_ready) {
		/* Any register was not ready,
		 * fall back to slow read with S_REGRDY polling */
		return ERROR_TIMEOUT_REACHED;
	}

	LOG_TARGET_DEBUG(target, "read %u 32-bit registers", wi);

	/* distribute the raw words into the register cache */
	unsigned int ri = 0; /* read index from r_vals array */
	for (reg_id = 0; reg_id < num_regs; reg_id++) {
		struct reg *r = &armv7m->arm.core_cache->reg_list[reg_id];
		if (!r->exist)
			continue; /* skip non existent registers */

		r->dirty = false;

		unsigned int reg32_id;
		uint32_t offset;
		if (armv7m_map_reg_packing(reg_id, &reg32_id, &offset)) {
			/* Unpack a partial register from 32-bit container register */
			struct reg *r32 = &armv7m->arm.core_cache->reg_list[reg32_id];

			/* The container register ought to precede all regs unpacked
			 * from it in the reg_list. So the value should be ready
			 * to unpack */
			assert(r32->valid);
			buf_cpy(r32->value + offset, r->value, r->size);

		} else {
			assert(r->size == 32 || r->size == 64);
			buf_set_u32(r->value, 0, 32, r_vals[ri++]);

			if (r->size == 64) {
				assert(reg_id >= ARMV7M_FPU_FIRST_REG && reg_id <= ARMV7M_FPU_LAST_REG);
				/* the odd part of FP register (S1, S3...) */
				buf_set_u32(r->value + 4, 0, 32, r_vals[ri++]);
			}
		}
		r->valid = true;
	}
	assert(ri == wi);

	return retval;
}
368
/* Write one core register selected by the DCRSR REGSEL value.
 *
 * Stores the value in DCRDR, starts the transfer by writing DCRSR with
 * the WNR bit set, then polls DHCSR S_REGRDY (with timeout and
 * keep_alive) to confirm completion.
 */
static int cortex_m_store_core_reg_u32(struct target *target,
		uint32_t regsel, uint32_t value)
{
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = target_to_armv7m(target);
	int retval;
	uint32_t dcrdr;
	int64_t then;

	/* because the DCB_DCRDR is used for the emulated dcc channel
	 * we have to save/restore the DCB_DCRDR when used */
	if (target->dbg_msg_enabled) {
		retval = mem_ap_read_u32(armv7m->debug_ap, DCB_DCRDR, &dcrdr);
		if (retval != ERROR_OK)
			return retval;
	}

	retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DCRDR, value);
	if (retval != ERROR_OK)
		return retval;

	retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DCRSR, regsel | DCRSR_WNR);
	if (retval != ERROR_OK)
		return retval;

	/* check if value is written into register */
	then = timeval_ms();
	while (1) {
		retval = cortex_m_read_dhcsr_atomic_sticky(target);
		if (retval != ERROR_OK)
			return retval;
		if (cortex_m->dcb_dhcsr & S_REGRDY)
			break;
		if (timeval_ms() > then + DHCSR_S_REGRDY_TIMEOUT) {
			LOG_TARGET_ERROR(target, "Timeout waiting for DCRDR transfer ready");
			return ERROR_TIMEOUT_REACHED;
		}
		keep_alive();
	}

	if (target->dbg_msg_enabled) {
		/* restore DCB_DCRDR - this needs to be in a separate
		 * transaction otherwise the emulated DCC channel breaks */
		if (retval == ERROR_OK)
			retval = mem_ap_write_atomic_u32(armv7m->debug_ap, DCB_DCRDR, dcrdr);
	}

	return retval;
}
418
419 static int cortex_m_write_debug_halt_mask(struct target *target,
420 uint32_t mask_on, uint32_t mask_off)
421 {
422 struct cortex_m_common *cortex_m = target_to_cm(target);
423 struct armv7m_common *armv7m = &cortex_m->armv7m;
424
425 /* mask off status bits */
426 cortex_m->dcb_dhcsr &= ~((0xFFFFul << 16) | mask_off);
427 /* create new register mask */
428 cortex_m->dcb_dhcsr |= DBGKEY | C_DEBUGEN | mask_on;
429
430 return mem_ap_write_atomic_u32(armv7m->debug_ap, DCB_DHCSR, cortex_m->dcb_dhcsr);
431 }
432
433 static int cortex_m_set_maskints(struct target *target, bool mask)
434 {
435 struct cortex_m_common *cortex_m = target_to_cm(target);
436 if (!!(cortex_m->dcb_dhcsr & C_MASKINTS) != mask)
437 return cortex_m_write_debug_halt_mask(target, mask ? C_MASKINTS : 0, mask ? 0 : C_MASKINTS);
438 else
439 return ERROR_OK;
440 }
441
442 static int cortex_m_set_maskints_for_halt(struct target *target)
443 {
444 struct cortex_m_common *cortex_m = target_to_cm(target);
445 switch (cortex_m->isrmasking_mode) {
446 case CORTEX_M_ISRMASK_AUTO:
447 /* interrupts taken at resume, whether for step or run -> no mask */
448 return cortex_m_set_maskints(target, false);
449
450 case CORTEX_M_ISRMASK_OFF:
451 /* interrupts never masked */
452 return cortex_m_set_maskints(target, false);
453
454 case CORTEX_M_ISRMASK_ON:
455 /* interrupts always masked */
456 return cortex_m_set_maskints(target, true);
457
458 case CORTEX_M_ISRMASK_STEPONLY:
459 /* interrupts masked for single step only -> mask now if MASKINTS
460 * erratum, otherwise only mask before stepping */
461 return cortex_m_set_maskints(target, cortex_m->maskints_erratum);
462 }
463 return ERROR_OK;
464 }
465
466 static int cortex_m_set_maskints_for_run(struct target *target)
467 {
468 switch (target_to_cm(target)->isrmasking_mode) {
469 case CORTEX_M_ISRMASK_AUTO:
470 /* interrupts taken at resume, whether for step or run -> no mask */
471 return cortex_m_set_maskints(target, false);
472
473 case CORTEX_M_ISRMASK_OFF:
474 /* interrupts never masked */
475 return cortex_m_set_maskints(target, false);
476
477 case CORTEX_M_ISRMASK_ON:
478 /* interrupts always masked */
479 return cortex_m_set_maskints(target, true);
480
481 case CORTEX_M_ISRMASK_STEPONLY:
482 /* interrupts masked for single step only -> no mask */
483 return cortex_m_set_maskints(target, false);
484 }
485 return ERROR_OK;
486 }
487
488 static int cortex_m_set_maskints_for_step(struct target *target)
489 {
490 switch (target_to_cm(target)->isrmasking_mode) {
491 case CORTEX_M_ISRMASK_AUTO:
492 /* the auto-interrupt should already be done -> mask */
493 return cortex_m_set_maskints(target, true);
494
495 case CORTEX_M_ISRMASK_OFF:
496 /* interrupts never masked */
497 return cortex_m_set_maskints(target, false);
498
499 case CORTEX_M_ISRMASK_ON:
500 /* interrupts always masked */
501 return cortex_m_set_maskints(target, true);
502
503 case CORTEX_M_ISRMASK_STEPONLY:
504 /* interrupts masked for single step only -> mask */
505 return cortex_m_set_maskints(target, true);
506 }
507 return ERROR_OK;
508 }
509
510 static int cortex_m_clear_halt(struct target *target)
511 {
512 struct cortex_m_common *cortex_m = target_to_cm(target);
513 struct armv7m_common *armv7m = &cortex_m->armv7m;
514 int retval;
515
516 /* clear step if any */
517 cortex_m_write_debug_halt_mask(target, C_HALT, C_STEP);
518
519 /* Read Debug Fault Status Register */
520 retval = mem_ap_read_atomic_u32(armv7m->debug_ap, NVIC_DFSR, &cortex_m->nvic_dfsr);
521 if (retval != ERROR_OK)
522 return retval;
523
524 /* Clear Debug Fault Status */
525 retval = mem_ap_write_atomic_u32(armv7m->debug_ap, NVIC_DFSR, cortex_m->nvic_dfsr);
526 if (retval != ERROR_OK)
527 return retval;
528 LOG_TARGET_DEBUG(target, "NVIC_DFSR 0x%" PRIx32 "", cortex_m->nvic_dfsr);
529
530 return ERROR_OK;
531 }
532
533 static int cortex_m_single_step_core(struct target *target)
534 {
535 struct cortex_m_common *cortex_m = target_to_cm(target);
536 int retval;
537
538 /* Mask interrupts before clearing halt, if not done already. This avoids
539 * Erratum 377497 (fixed in r1p0) where setting MASKINTS while clearing
540 * HALT can put the core into an unknown state.
541 */
542 if (!(cortex_m->dcb_dhcsr & C_MASKINTS)) {
543 retval = cortex_m_write_debug_halt_mask(target, C_MASKINTS, 0);
544 if (retval != ERROR_OK)
545 return retval;
546 }
547 retval = cortex_m_write_debug_halt_mask(target, C_STEP, C_HALT);
548 if (retval != ERROR_OK)
549 return retval;
550 LOG_TARGET_DEBUG(target, "single step");
551
552 /* restore dhcsr reg */
553 cortex_m_clear_halt(target);
554
555 return ERROR_OK;
556 }
557
558 static int cortex_m_enable_fpb(struct target *target)
559 {
560 int retval = target_write_u32(target, FP_CTRL, 3);
561 if (retval != ERROR_OK)
562 return retval;
563
564 /* check the fpb is actually enabled */
565 uint32_t fpctrl;
566 retval = target_read_u32(target, FP_CTRL, &fpctrl);
567 if (retval != ERROR_OK)
568 return retval;
569
570 if (fpctrl & 1)
571 return ERROR_OK;
572
573 return ERROR_FAIL;
574 }
575
/* Restore the debug configuration after a reset event was detected.
 *
 * Re-enables C_DEBUGEN if lost, restores interrupt masking and DEMCR
 * (TRCENA plus the configured vector catches), re-enables the FPB and
 * replays all cached FPB/DWT comparator settings, then invalidates the
 * register cache since the core state is no longer known.
 */
static int cortex_m_endreset_event(struct target *target)
{
	int retval;
	uint32_t dcb_demcr;
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = &cortex_m->armv7m;
	struct adiv5_dap *swjdp = cortex_m->armv7m.arm.dap;
	struct cortex_m_fp_comparator *fp_list = cortex_m->fp_comparator_list;
	struct cortex_m_dwt_comparator *dwt_list = cortex_m->dwt_comparator_list;

	/* REVISIT The four debug monitor bits are currently ignored... */
	retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DEMCR, &dcb_demcr);
	if (retval != ERROR_OK)
		return retval;
	LOG_TARGET_DEBUG(target, "DCB_DEMCR = 0x%8.8" PRIx32 "", dcb_demcr);

	/* this register is used for emulated dcc channel */
	retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DCRDR, 0);
	if (retval != ERROR_OK)
		return retval;

	retval = cortex_m_read_dhcsr_atomic_sticky(target);
	if (retval != ERROR_OK)
		return retval;

	if (!(cortex_m->dcb_dhcsr & C_DEBUGEN)) {
		/* Enable debug requests */
		retval = cortex_m_write_debug_halt_mask(target, 0, C_HALT | C_STEP | C_MASKINTS);
		if (retval != ERROR_OK)
			return retval;
	}

	/* Restore proper interrupt masking setting for running CPU. */
	cortex_m_set_maskints_for_run(target);

	/* Enable features controlled by ITM and DWT blocks, and catch only
	 * the vectors we were told to pay attention to.
	 *
	 * Target firmware is responsible for all fault handling policy
	 * choices *EXCEPT* explicitly scripted overrides like "vector_catch"
	 * or manual updates to the NVIC SHCSR and CCR registers.
	 */
	retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DEMCR, TRCENA | armv7m->demcr);
	if (retval != ERROR_OK)
		return retval;

	/* Paranoia: evidently some (early?) chips don't preserve all the
	 * debug state (including FPB, DWT, etc) across reset...
	 */

	/* Enable FPB */
	retval = cortex_m_enable_fpb(target);
	if (retval != ERROR_OK) {
		LOG_TARGET_ERROR(target, "Failed to enable the FPB");
		return retval;
	}

	cortex_m->fpb_enabled = true;

	/* Restore FPB registers */
	for (unsigned int i = 0; i < cortex_m->fp_num_code + cortex_m->fp_num_lit; i++) {
		retval = target_write_u32(target, fp_list[i].fpcr_address, fp_list[i].fpcr_value);
		if (retval != ERROR_OK)
			return retval;
	}

	/* Restore DWT registers: comparator, mask and function word of each unit */
	for (unsigned int i = 0; i < cortex_m->dwt_num_comp; i++) {
		retval = target_write_u32(target, dwt_list[i].dwt_comparator_address + 0,
				dwt_list[i].comp);
		if (retval != ERROR_OK)
			return retval;
		retval = target_write_u32(target, dwt_list[i].dwt_comparator_address + 4,
				dwt_list[i].mask);
		if (retval != ERROR_OK)
			return retval;
		retval = target_write_u32(target, dwt_list[i].dwt_comparator_address + 8,
				dwt_list[i].function);
		if (retval != ERROR_OK)
			return retval;
	}
	retval = dap_run(swjdp);
	if (retval != ERROR_OK)
		return retval;

	register_cache_invalidate(armv7m->arm.core_cache);

	/* TODO: invalidate also working areas (needed in the case of detected reset).
	 * Doing so will require flash drivers to test if working area
	 * is still valid in all target algo calling loops.
	 */

	/* make sure we have latest dhcsr flags */
	retval = cortex_m_read_dhcsr_atomic_sticky(target);
	if (retval != ERROR_OK)
		return retval;

	return retval;
}
675
676 static int cortex_m_examine_debug_reason(struct target *target)
677 {
678 struct cortex_m_common *cortex_m = target_to_cm(target);
679
680 /* THIS IS NOT GOOD, TODO - better logic for detection of debug state reason
681 * only check the debug reason if we don't know it already */
682
683 if ((target->debug_reason != DBG_REASON_DBGRQ)
684 && (target->debug_reason != DBG_REASON_SINGLESTEP)) {
685 if (cortex_m->nvic_dfsr & DFSR_BKPT) {
686 target->debug_reason = DBG_REASON_BREAKPOINT;
687 if (cortex_m->nvic_dfsr & DFSR_DWTTRAP)
688 target->debug_reason = DBG_REASON_WPTANDBKPT;
689 } else if (cortex_m->nvic_dfsr & DFSR_DWTTRAP)
690 target->debug_reason = DBG_REASON_WATCHPOINT;
691 else if (cortex_m->nvic_dfsr & DFSR_VCATCH)
692 target->debug_reason = DBG_REASON_BREAKPOINT;
693 else if (cortex_m->nvic_dfsr & DFSR_EXTERNAL)
694 target->debug_reason = DBG_REASON_DBGRQ;
695 else /* HALTED */
696 target->debug_reason = DBG_REASON_UNDEFINED;
697 }
698
699 return ERROR_OK;
700 }
701
/* Read and log the fault status/address registers relevant to the active
 * exception number so the cause of the fault can be diagnosed.
 * Reads are queued and flushed by a single dap_run(); results are only
 * logged, never acted upon.
 */
static int cortex_m_examine_exception_reason(struct target *target)
{
	uint32_t shcsr = 0, except_sr = 0, cfsr = -1, except_ar = -1;
	struct armv7m_common *armv7m = target_to_armv7m(target);
	struct adiv5_dap *swjdp = armv7m->arm.dap;
	int retval;

	retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_SHCSR, &shcsr);
	if (retval != ERROR_OK)
		return retval;
	switch (armv7m->exception_number) {
		case 2:	/* NMI */
			break;
		case 3:	/* Hard Fault */
			retval = mem_ap_read_atomic_u32(armv7m->debug_ap, NVIC_HFSR, &except_sr);
			if (retval != ERROR_OK)
				return retval;
			/* HFSR.FORCED set: fault escalated, fetch the underlying CFSR */
			if (except_sr & 0x40000000) {
				retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_CFSR, &cfsr);
				if (retval != ERROR_OK)
					return retval;
			}
			break;
		case 4:	/* Memory Management */
			retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_CFSR, &except_sr);
			if (retval != ERROR_OK)
				return retval;
			retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_MMFAR, &except_ar);
			if (retval != ERROR_OK)
				return retval;
			break;
		case 5:	/* Bus Fault */
			retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_CFSR, &except_sr);
			if (retval != ERROR_OK)
				return retval;
			retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_BFAR, &except_ar);
			if (retval != ERROR_OK)
				return retval;
			break;
		case 6:	/* Usage Fault */
			retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_CFSR, &except_sr);
			if (retval != ERROR_OK)
				return retval;
			break;
		case 7:	/* Secure Fault */
			retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_SFSR, &except_sr);
			if (retval != ERROR_OK)
				return retval;
			retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_SFAR, &except_ar);
			if (retval != ERROR_OK)
				return retval;
			break;
		case 11:	/* SVCall */
			break;
		case 12:	/* Debug Monitor */
			retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_DFSR, &except_sr);
			if (retval != ERROR_OK)
				return retval;
			break;
		case 14:	/* PendSV */
			break;
		case 15:	/* SysTick */
			break;
		default:
			except_sr = 0;
			break;
	}
	retval = dap_run(swjdp);
	if (retval == ERROR_OK)
		LOG_TARGET_DEBUG(target, "%s SHCSR 0x%" PRIx32 ", SR 0x%" PRIx32
			", CFSR 0x%" PRIx32 ", AR 0x%" PRIx32,
			armv7m_exception_string(armv7m->exception_number),
			shcsr, except_sr, cfsr, except_ar);
	return retval;
}
777
/* Bring OpenOCD's view of the core up to date after the core halted.
 *
 * Clears the halt/step condition, determines the debug reason, reads the
 * security state (ARMv8-M), loads all core registers into the cache, and
 * derives core mode / active exception / stack pointer mapping from xPSR
 * and CONTROL. Calls the optional post_debug_entry hook at the end.
 */
static int cortex_m_debug_entry(struct target *target)
{
	uint32_t xpsr;
	int retval;
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = &cortex_m->armv7m;
	struct arm *arm = &armv7m->arm;
	struct reg *r;

	LOG_TARGET_DEBUG(target, " ");

	/* Do this really early to minimize the window where the MASKINTS erratum
	 * can pile up pending interrupts. */
	cortex_m_set_maskints_for_halt(target);

	cortex_m_clear_halt(target);

	retval = cortex_m_read_dhcsr_atomic_sticky(target);
	if (retval != ERROR_OK)
		return retval;

	retval = armv7m->examine_debug_reason(target);
	if (retval != ERROR_OK)
		return retval;

	/* examine PE security state */
	uint32_t dscsr = 0;
	if (armv7m->arm.arch == ARM_ARCH_V8M) {
		retval = mem_ap_read_u32(armv7m->debug_ap, DCB_DSCSR, &dscsr);
		if (retval != ERROR_OK)
			return retval;
	}

	/* Load all registers to arm.core_cache */
	if (!cortex_m->slow_register_read) {
		retval = cortex_m_fast_read_all_regs(target);
		if (retval == ERROR_TIMEOUT_REACHED) {
			/* batch read saw a not-ready register: fall back below */
			cortex_m->slow_register_read = true;
			LOG_TARGET_DEBUG(target, "Switched to slow register read");
		}
	}

	if (cortex_m->slow_register_read)
		retval = cortex_m_slow_read_all_regs(target);

	if (retval != ERROR_OK)
		return retval;

	r = arm->cpsr;
	xpsr = buf_get_u32(r->value, 0, 32);

	/* Are we in an exception handler */
	if (xpsr & 0x1FF) {
		armv7m->exception_number = (xpsr & 0x1FF);

		arm->core_mode = ARM_MODE_HANDLER;
		arm->map = armv7m_msp_reg_map;
	} else {
		unsigned control = buf_get_u32(arm->core_cache
				->reg_list[ARMV7M_CONTROL].value, 0, 3);

		/* is this thread privileged? */
		arm->core_mode = control & 1
			? ARM_MODE_USER_THREAD
			: ARM_MODE_THREAD;

		/* which stack is it using? */
		if (control & 2)
			arm->map = armv7m_psp_reg_map;
		else
			arm->map = armv7m_msp_reg_map;

		armv7m->exception_number = 0;
	}

	if (armv7m->exception_number)
		cortex_m_examine_exception_reason(target);

	bool secure_state = (dscsr & DSCSR_CDS) == DSCSR_CDS;
	LOG_TARGET_DEBUG(target, "entered debug state in core mode: %s at PC 0x%" PRIx32
		", cpu in %s state, target->state: %s",
		arm_mode_name(arm->core_mode),
		buf_get_u32(arm->pc->value, 0, 32),
		secure_state ? "Secure" : "Non-Secure",
		target_state_name(target));

	if (armv7m->post_debug_entry) {
		retval = armv7m->post_debug_entry(target);
		if (retval != ERROR_OK)
			return retval;
	}

	return ERROR_OK;
}
872
/* Poll one core: read DHCSR and translate it into target state changes.
 *
 * Handles lockup recovery, external reset detection (sticky S_RESET_ST),
 * the post-reset configuration restore, halt entry (debug_entry plus
 * events), and detection of an externally resumed core.
 */
static int cortex_m_poll_one(struct target *target)
{
	int detected_failure = ERROR_OK;
	int retval = ERROR_OK;
	enum target_state prev_target_state = target->state;
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = &cortex_m->armv7m;

	/* Read from Debug Halting Control and Status Register */
	retval = cortex_m_read_dhcsr_atomic_sticky(target);
	if (retval != ERROR_OK) {
		target->state = TARGET_UNKNOWN;
		return retval;
	}

	/* Recover from lockup.  See ARMv7-M architecture spec,
	 * section B1.5.15 "Unrecoverable exception cases".
	 */
	if (cortex_m->dcb_dhcsr & S_LOCKUP) {
		LOG_TARGET_ERROR(target, "clearing lockup after double fault");
		cortex_m_write_debug_halt_mask(target, C_HALT, 0);
		target->debug_reason = DBG_REASON_DBGRQ;

		/* We have to execute the rest (the "finally" equivalent, but
		 * still throw this exception again).
		 */
		detected_failure = ERROR_FAIL;

		/* refresh status bits */
		retval = cortex_m_read_dhcsr_atomic_sticky(target);
		if (retval != ERROR_OK)
			return retval;
	}

	/* S_RESET_ST accumulated since the last poll: a reset happened */
	if (cortex_m->dcb_dhcsr_cumulated_sticky & S_RESET_ST) {
		cortex_m->dcb_dhcsr_cumulated_sticky &= ~S_RESET_ST;
		if (target->state != TARGET_RESET) {
			target->state = TARGET_RESET;
			LOG_TARGET_INFO(target, "external reset detected");
		}
		return ERROR_OK;
	}

	if (target->state == TARGET_RESET) {
		/* Cannot switch context while running so endreset is
		 * called with target->state == TARGET_RESET
		 */
		LOG_TARGET_DEBUG(target, "Exit from reset with dcb_dhcsr 0x%" PRIx32,
			cortex_m->dcb_dhcsr);
		retval = cortex_m_endreset_event(target);
		if (retval != ERROR_OK) {
			target->state = TARGET_UNKNOWN;
			return retval;
		}
		target->state = TARGET_RUNNING;
		prev_target_state = TARGET_RUNNING;
	}

	if (cortex_m->dcb_dhcsr & S_HALT) {
		target->state = TARGET_HALTED;

		if ((prev_target_state == TARGET_RUNNING) || (prev_target_state == TARGET_RESET)) {
			retval = cortex_m_debug_entry(target);

			/* arm_semihosting needs to know registers, don't run if debug entry returned error */
			if (retval == ERROR_OK && arm_semihosting(target, &retval) != 0)
				return retval;

			if (target->smp) {
				/* SMP: event is delivered later by cortex_m_poll_smp() */
				LOG_TARGET_DEBUG(target, "postpone target event 'halted'");
				target->smp_halt_event_postponed = true;
			} else {
				/* regardless of errors returned in previous code update state */
				target_call_event_callbacks(target, TARGET_EVENT_HALTED);
			}
		}
		if (prev_target_state == TARGET_DEBUG_RUNNING) {
			retval = cortex_m_debug_entry(target);

			target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
		}
		if (retval != ERROR_OK)
			return retval;
	}

	if (target->state == TARGET_UNKNOWN) {
		/* Check if processor is retiring instructions or sleeping.
		 * Unlike S_RESET_ST here we test if the target *is* running now,
		 * not if it has been running (possibly in the past). Instructions are
		 * typically processed much faster than OpenOCD polls DHCSR so S_RETIRE_ST
		 * is read always 1. That's the reason not to use dcb_dhcsr_cumulated_sticky.
		 */
		if (cortex_m->dcb_dhcsr & S_RETIRE_ST || cortex_m->dcb_dhcsr & S_SLEEP) {
			target->state = TARGET_RUNNING;
			retval = ERROR_OK;
		}
	}

	/* Check that target is truly halted, since the target could be resumed externally */
	if ((prev_target_state == TARGET_HALTED) && !(cortex_m->dcb_dhcsr & S_HALT)) {
		/* registers are now invalid */
		register_cache_invalidate(armv7m->arm.core_cache);

		target->state = TARGET_RUNNING;
		LOG_TARGET_WARNING(target, "external resume detected");
		target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
		retval = ERROR_OK;
	}

	/* Did we detect a failure condition that we cleared? */
	if (detected_failure != ERROR_OK)
		retval = detected_failure;
	return retval;
}
987
988 static int cortex_m_halt_one(struct target *target);
989
990 static int cortex_m_smp_halt_all(struct list_head *smp_targets)
991 {
992 int retval = ERROR_OK;
993 struct target_list *head;
994
995 foreach_smp_target(head, smp_targets) {
996 struct target *curr = head->target;
997 if (!target_was_examined(curr))
998 continue;
999 if (curr->state == TARGET_HALTED)
1000 continue;
1001
1002 int ret2 = cortex_m_halt_one(curr);
1003 if (retval == ERROR_OK)
1004 retval = ret2; /* store the first error code ignore others */
1005 }
1006 return retval;
1007 }
1008
1009 static int cortex_m_smp_post_halt_poll(struct list_head *smp_targets)
1010 {
1011 int retval = ERROR_OK;
1012 struct target_list *head;
1013
1014 foreach_smp_target(head, smp_targets) {
1015 struct target *curr = head->target;
1016 if (!target_was_examined(curr))
1017 continue;
1018 /* skip targets that were already halted */
1019 if (curr->state == TARGET_HALTED)
1020 continue;
1021
1022 int ret2 = cortex_m_poll_one(curr);
1023 if (retval == ERROR_OK)
1024 retval = ret2; /* store the first error code ignore others */
1025 }
1026 return retval;
1027 }
1028
1029 static int cortex_m_poll_smp(struct list_head *smp_targets)
1030 {
1031 int retval = ERROR_OK;
1032 struct target_list *head;
1033 bool halted = false;
1034
1035 foreach_smp_target(head, smp_targets) {
1036 struct target *curr = head->target;
1037 if (curr->smp_halt_event_postponed) {
1038 halted = true;
1039 break;
1040 }
1041 }
1042
1043 if (halted) {
1044 retval = cortex_m_smp_halt_all(smp_targets);
1045
1046 int ret2 = cortex_m_smp_post_halt_poll(smp_targets);
1047 if (retval == ERROR_OK)
1048 retval = ret2; /* store the first error code ignore others */
1049
1050 foreach_smp_target(head, smp_targets) {
1051 struct target *curr = head->target;
1052 if (!curr->smp_halt_event_postponed)
1053 continue;
1054
1055 curr->smp_halt_event_postponed = false;
1056 if (curr->state == TARGET_HALTED) {
1057 LOG_TARGET_DEBUG(curr, "sending postponed target event 'halted'");
1058 target_call_event_callbacks(curr, TARGET_EVENT_HALTED);
1059 }
1060 }
1061 /* There is no need to set gdb_service->target
1062 * as hwthread_update_threads() selects an interesting thread
1063 * by its own
1064 */
1065 }
1066 return retval;
1067 }
1068
1069 static int cortex_m_poll(struct target *target)
1070 {
1071 int retval = cortex_m_poll_one(target);
1072
1073 if (target->smp) {
1074 struct target_list *last;
1075 last = list_last_entry(target->smp_targets, struct target_list, lh);
1076 if (target == last->target)
1077 /* After the last target in SMP group has been polled
1078 * check for postponed halted events and eventually halt and re-poll
1079 * other targets */
1080 cortex_m_poll_smp(target->smp_targets);
1081 }
1082 return retval;
1083 }
1084
1085 static int cortex_m_halt_one(struct target *target)
1086 {
1087 LOG_TARGET_DEBUG(target, "target->state: %s", target_state_name(target));
1088
1089 if (target->state == TARGET_HALTED) {
1090 LOG_TARGET_DEBUG(target, "target was already halted");
1091 return ERROR_OK;
1092 }
1093
1094 if (target->state == TARGET_UNKNOWN)
1095 LOG_TARGET_WARNING(target, "target was in unknown state when halt was requested");
1096
1097 if (target->state == TARGET_RESET) {
1098 if ((jtag_get_reset_config() & RESET_SRST_PULLS_TRST) && jtag_get_srst()) {
1099 LOG_TARGET_ERROR(target, "can't request a halt while in reset if nSRST pulls nTRST");
1100 return ERROR_TARGET_FAILURE;
1101 } else {
1102 /* we came here in a reset_halt or reset_init sequence
1103 * debug entry was already prepared in cortex_m3_assert_reset()
1104 */
1105 target->debug_reason = DBG_REASON_DBGRQ;
1106
1107 return ERROR_OK;
1108 }
1109 }
1110
1111 /* Write to Debug Halting Control and Status Register */
1112 cortex_m_write_debug_halt_mask(target, C_HALT, 0);
1113
1114 /* Do this really early to minimize the window where the MASKINTS erratum
1115 * can pile up pending interrupts. */
1116 cortex_m_set_maskints_for_halt(target);
1117
1118 target->debug_reason = DBG_REASON_DBGRQ;
1119
1120 return ERROR_OK;
1121 }
1122
1123 static int cortex_m_halt(struct target *target)
1124 {
1125 if (target->smp)
1126 return cortex_m_smp_halt_all(target->smp_targets);
1127 else
1128 return cortex_m_halt_one(target);
1129 }
1130
/* Soft-reset the core via AIRCR.VECTRESET with reset vector catch armed
 * (DEMCR.VC_CORERESET), so the core halts before executing any instruction.
 * Only the Cortex-M core is reset, not the peripherals. Requires a core with
 * VECTRESET support (checked via cortex_m->vectreset_supported). */
static int cortex_m_soft_reset_halt(struct target *target)
{
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = &cortex_m->armv7m;
	int retval, timeout = 0;

	/* on single cortex_m MCU soft_reset_halt should be avoided as same functionality
	 * can be obtained by using 'reset halt' and 'cortex_m reset_config vectreset'.
	 * As this reset only uses VC_CORERESET it would only ever reset the cortex_m
	 * core, not the peripherals */
	LOG_TARGET_DEBUG(target, "soft_reset_halt is discouraged, please use 'reset halt' instead.");

	if (!cortex_m->vectreset_supported) {
		LOG_TARGET_ERROR(target, "VECTRESET is not supported on this Cortex-M core");
		return ERROR_FAIL;
	}

	/* Set C_DEBUGEN */
	retval = cortex_m_write_debug_halt_mask(target, 0, C_STEP | C_MASKINTS);
	if (retval != ERROR_OK)
		return retval;

	/* Enter debug state on reset; restore DEMCR in endreset_event() */
	retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DEMCR,
		TRCENA | VC_HARDERR | VC_BUSERR | VC_CORERESET);
	if (retval != ERROR_OK)
		return retval;

	/* Request a core-only reset */
	retval = mem_ap_write_atomic_u32(armv7m->debug_ap, NVIC_AIRCR,
		AIRCR_VECTKEY | AIRCR_VECTRESET);
	if (retval != ERROR_OK)
		return retval;
	target->state = TARGET_RESET;

	/* registers are now invalid */
	register_cache_invalidate(cortex_m->armv7m.arm.core_cache);

	/* Poll up to ~100 ms (100 x 1 ms) for DHCSR.S_HALT plus DFSR.VCATCH,
	 * which together confirm the core halted on the reset vector catch. */
	while (timeout < 100) {
		retval = cortex_m_read_dhcsr_atomic_sticky(target);
		if (retval == ERROR_OK) {
			retval = mem_ap_read_atomic_u32(armv7m->debug_ap, NVIC_DFSR,
				&cortex_m->nvic_dfsr);
			if (retval != ERROR_OK)
				return retval;
			if ((cortex_m->dcb_dhcsr & S_HALT)
				&& (cortex_m->nvic_dfsr & DFSR_VCATCH)) {
				LOG_TARGET_DEBUG(target, "system reset-halted, DHCSR 0x%08" PRIx32 ", DFSR 0x%08" PRIx32,
					cortex_m->dcb_dhcsr, cortex_m->nvic_dfsr);
				cortex_m_poll(target);
				/* FIXME restore user's vector catch config */
				return ERROR_OK;
			} else {
				LOG_TARGET_DEBUG(target, "waiting for system reset-halt, "
					"DHCSR 0x%08" PRIx32 ", %d ms",
					cortex_m->dcb_dhcsr, timeout);
			}
		}
		timeout++;
		alive_sleep(1);
	}

	/* NOTE(review): returns ERROR_OK even when the vector-catch halt was
	 * never observed within the timeout. */
	return ERROR_OK;
}
1195
1196 void cortex_m_enable_breakpoints(struct target *target)
1197 {
1198 struct breakpoint *breakpoint = target->breakpoints;
1199
1200 /* set any pending breakpoints */
1201 while (breakpoint) {
1202 if (!breakpoint->is_set)
1203 cortex_m_set_breakpoint(target, breakpoint);
1204 breakpoint = breakpoint->next;
1205 }
1206 }
1207
/* Prepare a halted core for resume without actually restarting it
 * (see cortex_m_restart_one() for that):
 *  - reinstall working areas, breakpoints and watchpoints (normal resume only);
 *  - for algorithm runs (debug_execution), mask interrupts via PRIMASK and
 *    force the xPSR.T (Thumb) bit;
 *  - redirect the PC when 'current' is false, otherwise report the resume PC
 *    back through *address;
 *  - skip over a BKPT instruction the core halted on, and single-step past a
 *    breakpoint placed at the resume address;
 *  - write all dirty registers back to the core.
 * Returns ERROR_TARGET_NOT_HALTED when the core is not halted, otherwise an
 * error from the register/breakpoint operations or ERROR_OK. */
static int cortex_m_restore_one(struct target *target, bool current,
	target_addr_t *address, bool handle_breakpoints, bool debug_execution)
{
	struct armv7m_common *armv7m = target_to_armv7m(target);
	struct breakpoint *breakpoint = NULL;
	uint32_t resume_pc;
	struct reg *r;

	if (target->state != TARGET_HALTED) {
		LOG_TARGET_ERROR(target, "not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	if (!debug_execution) {
		target_free_all_working_areas(target);
		cortex_m_enable_breakpoints(target);
		cortex_m_enable_watchpoints(target);
	}

	if (debug_execution) {
		r = armv7m->arm.core_cache->reg_list + ARMV7M_PRIMASK;

		/* Disable interrupts */
		/* We disable interrupts in the PRIMASK register instead of
		 * masking with C_MASKINTS. This is probably the same issue
		 * as Cortex-M3 Erratum 377493 (fixed in r1p0): C_MASKINTS
		 * in parallel with disabled interrupts can cause local faults
		 * to not be taken.
		 *
		 * This breaks non-debug (application) execution if not
		 * called from armv7m_start_algorithm() which saves registers.
		 */
		buf_set_u32(r->value, 0, 1, 1);
		r->dirty = true;
		r->valid = true;

		/* Make sure we are in Thumb mode, set xPSR.T bit */
		/* armv7m_start_algorithm() initializes entire xPSR register.
		 * This duplicity handles the case when cortex_m_resume()
		 * is used with the debug_execution flag directly,
		 * not called through armv7m_start_algorithm().
		 */
		r = armv7m->arm.cpsr;
		buf_set_u32(r->value, 24, 1, 1);
		r->dirty = true;
		r->valid = true;
	}

	/* current = 1: continue on current pc, otherwise continue at <address> */
	r = armv7m->arm.pc;
	if (!current) {
		buf_set_u32(r->value, 0, 32, *address);
		r->dirty = true;
		r->valid = true;
	}

	/* if we halted last time due to a bkpt instruction
	 * then we have to manually step over it, otherwise
	 * the core will break again */

	if (!breakpoint_find(target, buf_get_u32(r->value, 0, 32))
		&& !debug_execution)
		armv7m_maybe_skip_bkpt_inst(target, NULL);

	resume_pc = buf_get_u32(r->value, 0, 32);
	if (current)
		*address = resume_pc;

	/* flush all dirty registers (including the PC set above) to the core */
	int retval = armv7m_restore_context(target);
	if (retval != ERROR_OK)
		return retval;

	/* the front-end may request us not to handle breakpoints */
	if (handle_breakpoints) {
		/* Single step past breakpoint at current address */
		breakpoint = breakpoint_find(target, resume_pc);
		if (breakpoint) {
			LOG_TARGET_DEBUG(target, "unset breakpoint at " TARGET_ADDR_FMT " (ID: %" PRIu32 ")",
				breakpoint->address,
				breakpoint->unique_id);
			retval = cortex_m_unset_breakpoint(target, breakpoint);
			if (retval == ERROR_OK)
				retval = cortex_m_single_step_core(target);
			/* always try to re-arm the breakpoint, even if the step failed */
			int ret2 = cortex_m_set_breakpoint(target, breakpoint);
			if (retval != ERROR_OK)
				return retval;
			if (ret2 != ERROR_OK)
				return ret2;
		}
	}

	return ERROR_OK;
}
1301
1302 static int cortex_m_restart_one(struct target *target, bool debug_execution)
1303 {
1304 struct armv7m_common *armv7m = target_to_armv7m(target);
1305
1306 /* Restart core */
1307 cortex_m_set_maskints_for_run(target);
1308 cortex_m_write_debug_halt_mask(target, 0, C_HALT);
1309
1310 target->debug_reason = DBG_REASON_NOTHALTED;
1311 /* registers are now invalid */
1312 register_cache_invalidate(armv7m->arm.core_cache);
1313
1314 if (!debug_execution) {
1315 target->state = TARGET_RUNNING;
1316 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1317 } else {
1318 target->state = TARGET_DEBUG_RUNNING;
1319 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
1320 }
1321
1322 return ERROR_OK;
1323 }
1324
1325 static int cortex_m_restore_smp(struct target *target, bool handle_breakpoints)
1326 {
1327 struct target_list *head;
1328 target_addr_t address;
1329 foreach_smp_target(head, target->smp_targets) {
1330 struct target *curr = head->target;
1331 /* skip calling target */
1332 if (curr == target)
1333 continue;
1334 if (!target_was_examined(curr))
1335 continue;
1336 /* skip running targets */
1337 if (curr->state == TARGET_RUNNING)
1338 continue;
1339
1340 int retval = cortex_m_restore_one(curr, true, &address,
1341 handle_breakpoints, false);
1342 if (retval != ERROR_OK)
1343 return retval;
1344
1345 retval = cortex_m_restart_one(curr, false);
1346 if (retval != ERROR_OK)
1347 return retval;
1348
1349 LOG_TARGET_DEBUG(curr, "SMP resumed at " TARGET_ADDR_FMT, address);
1350 }
1351 return ERROR_OK;
1352 }
1353
1354 static int cortex_m_resume(struct target *target, int current,
1355 target_addr_t address, int handle_breakpoints, int debug_execution)
1356 {
1357 int retval = cortex_m_restore_one(target, !!current, &address, !!handle_breakpoints, !!debug_execution);
1358 if (retval != ERROR_OK) {
1359 LOG_TARGET_ERROR(target, "context restore failed, aborting resume");
1360 return retval;
1361 }
1362
1363 if (target->smp && !debug_execution) {
1364 retval = cortex_m_restore_smp(target, !!handle_breakpoints);
1365 if (retval != ERROR_OK)
1366 LOG_WARNING("resume of a SMP target failed, trying to resume current one");
1367 }
1368
1369 cortex_m_restart_one(target, !!debug_execution);
1370 if (retval != ERROR_OK) {
1371 LOG_TARGET_ERROR(target, "resume failed");
1372 return retval;
1373 }
1374
1375 LOG_TARGET_DEBUG(target, "%sresumed at " TARGET_ADDR_FMT,
1376 debug_execution ? "debug " : "", address);
1377
1378 return ERROR_OK;
1379 }
1380
1381 /* int irqstepcount = 0; */
/* Target-API single step.
 * With ISR masking mode other than AUTO this is a plain C_STEP with the
 * configured interrupt masking. In AUTO mode, pending interrupts are first
 * allowed to run to completion behind a temporary breakpoint at the current
 * PC, then the instruction is stepped with interrupts masked; if the handlers
 * do not finish within 500 ms the core is left running.
 * A BKPT instruction at the PC is "stepped" by skipping it in software. */
static int cortex_m_step(struct target *target, int current,
	target_addr_t address, int handle_breakpoints)
{
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = &cortex_m->armv7m;
	struct breakpoint *breakpoint = NULL;
	struct reg *pc = armv7m->arm.pc;
	bool bkpt_inst_found = false;
	int retval;
	bool isr_timed_out = false;

	if (target->state != TARGET_HALTED) {
		LOG_TARGET_ERROR(target, "not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* Just one of SMP cores will step. Set the gdb control
	 * target to current one or gdb miss gdb-end event */
	if (target->smp && target->gdb_service)
		target->gdb_service->target = target;

	/* current = 1: continue on current pc, otherwise continue at <address> */
	if (!current) {
		buf_set_u32(pc->value, 0, 32, address);
		pc->dirty = true;
		pc->valid = true;
	}

	uint32_t pc_value = buf_get_u32(pc->value, 0, 32);

	/* the front-end may request us not to handle breakpoints */
	if (handle_breakpoints) {
		/* temporarily remove a breakpoint sitting at the step address */
		breakpoint = breakpoint_find(target, pc_value);
		if (breakpoint)
			cortex_m_unset_breakpoint(target, breakpoint);
	}

	armv7m_maybe_skip_bkpt_inst(target, &bkpt_inst_found);

	target->debug_reason = DBG_REASON_SINGLESTEP;

	armv7m_restore_context(target);

	target_call_event_callbacks(target, TARGET_EVENT_RESUMED);

	/* if no bkpt instruction is found at pc then we can perform
	 * a normal step, otherwise we have to manually step over the bkpt
	 * instruction - as such simulate a step */
	if (bkpt_inst_found == false) {
		if (cortex_m->isrmasking_mode != CORTEX_M_ISRMASK_AUTO) {
			/* Automatic ISR masking mode off: Just step over the next
			 * instruction, with interrupts on or off as appropriate. */
			cortex_m_set_maskints_for_step(target);
			cortex_m_write_debug_halt_mask(target, C_STEP, C_HALT);
		} else {
			/* Process interrupts during stepping in a way they don't interfere
			 * debugging.
			 *
			 * Principle:
			 *
			 * Set a temporary break point at the current pc and let the core run
			 * with interrupts enabled. Pending interrupts get served and we run
			 * into the breakpoint again afterwards. Then we step over the next
			 * instruction with interrupts disabled.
			 *
			 * If the pending interrupts don't complete within time, we leave the
			 * core running. This may happen if the interrupts trigger faster
			 * than the core can process them or the handler doesn't return.
			 *
			 * If no more breakpoints are available we simply do a step with
			 * interrupts enabled.
			 *
			 */

			/* 2012-09-29 ph
			 *
			 * If a break point is already set on the lower half word then a break point on
			 * the upper half word will not break again when the core is restarted. So we
			 * just step over the instruction with interrupts disabled.
			 *
			 * The documentation has no information about this, it was found by observation
			 * on STM32F1 and STM32F2. Proper explanation welcome. STM32F0 doesn't seem to
			 * suffer from this problem.
			 *
			 * To add some confusion: pc_value has bit 0 always set, while the breakpoint
			 * address has it always cleared. The former is done to indicate thumb mode
			 * to gdb.
			 *
			 */
			if ((pc_value & 0x02) && breakpoint_find(target, pc_value & ~0x03)) {
				LOG_TARGET_DEBUG(target, "Stepping over next instruction with interrupts disabled");
				cortex_m_write_debug_halt_mask(target, C_HALT | C_MASKINTS, 0);
				cortex_m_write_debug_halt_mask(target, C_STEP, C_HALT);
				/* Re-enable interrupts if appropriate */
				cortex_m_write_debug_halt_mask(target, C_HALT, 0);
				cortex_m_set_maskints_for_halt(target);
			} else {

				/* Set a temporary break point */
				if (breakpoint) {
					retval = cortex_m_set_breakpoint(target, breakpoint);
				} else {
					enum breakpoint_type type = BKPT_HARD;
					if (cortex_m->fp_rev == 0 && pc_value > 0x1FFFFFFF) {
						/* FPB rev.1 cannot handle such addr, try BKPT instr */
						type = BKPT_SOFT;
					}
					retval = breakpoint_add(target, pc_value, 2, type);
				}

				bool tmp_bp_set = (retval == ERROR_OK);

				/* No more breakpoints left, just do a step */
				if (!tmp_bp_set) {
					cortex_m_set_maskints_for_step(target);
					cortex_m_write_debug_halt_mask(target, C_STEP, C_HALT);
					/* Re-enable interrupts if appropriate */
					cortex_m_write_debug_halt_mask(target, C_HALT, 0);
					cortex_m_set_maskints_for_halt(target);
				} else {
					/* Start the core */
					LOG_TARGET_DEBUG(target, "Starting core to serve pending interrupts");
					int64_t t_start = timeval_ms();
					cortex_m_set_maskints_for_run(target);
					cortex_m_write_debug_halt_mask(target, 0, C_HALT | C_STEP);

					/* Wait for pending handlers to complete or timeout */
					do {
						retval = cortex_m_read_dhcsr_atomic_sticky(target);
						if (retval != ERROR_OK) {
							target->state = TARGET_UNKNOWN;
							return retval;
						}
						/* 500 ms budget for all pending handlers */
						isr_timed_out = ((timeval_ms() - t_start) > 500);
					} while (!((cortex_m->dcb_dhcsr & S_HALT) || isr_timed_out));

					/* only remove breakpoint if we created it */
					if (breakpoint)
						cortex_m_unset_breakpoint(target, breakpoint);
					else {
						/* Remove the temporary breakpoint */
						breakpoint_remove(target, pc_value);
					}

					if (isr_timed_out) {
						LOG_TARGET_DEBUG(target, "Interrupt handlers didn't complete within time, "
							"leaving target running");
					} else {
						/* Step over next instruction with interrupts disabled */
						cortex_m_set_maskints_for_step(target);
						cortex_m_write_debug_halt_mask(target,
							C_HALT | C_MASKINTS,
							0);
						cortex_m_write_debug_halt_mask(target, C_STEP, C_HALT);
						/* Re-enable interrupts if appropriate */
						cortex_m_write_debug_halt_mask(target, C_HALT, 0);
						cortex_m_set_maskints_for_halt(target);
					}
				}
			}
		}
	}

	/* refresh the cached DHCSR after the step activity above */
	retval = cortex_m_read_dhcsr_atomic_sticky(target);
	if (retval != ERROR_OK)
		return retval;

	/* registers are now invalid */
	register_cache_invalidate(armv7m->arm.core_cache);

	/* re-arm the breakpoint we removed at the step address */
	if (breakpoint)
		cortex_m_set_breakpoint(target, breakpoint);

	if (isr_timed_out) {
		/* Leave the core running. The user has to stop execution manually. */
		target->debug_reason = DBG_REASON_NOTHALTED;
		target->state = TARGET_RUNNING;
		return ERROR_OK;
	}

	LOG_TARGET_DEBUG(target, "target stepped dcb_dhcsr = 0x%" PRIx32
		" nvic_icsr = 0x%" PRIx32,
		cortex_m->dcb_dhcsr, cortex_m->nvic_icsr);

	retval = cortex_m_debug_entry(target);
	if (retval != ERROR_OK)
		return retval;
	target_call_event_callbacks(target, TARGET_EVENT_HALTED);

	LOG_TARGET_DEBUG(target, "target stepped dcb_dhcsr = 0x%" PRIx32
		" nvic_icsr = 0x%" PRIx32,
		cortex_m->dcb_dhcsr, cortex_m->nvic_icsr);

	return ERROR_OK;
}
1577
/* Target-API reset assertion.
 * Honors a script-provided TARGET_EVENT_RESET_ASSERT handler, otherwise
 * prepares debug state (C_DEBUGEN, wake from WFI/WFE, vector catch for
 * reset_halt) and asserts reset either via SRST or via a software reset
 * through AIRCR (SYSRESETREQ or VECTRESET, per cortex_m->soft_reset_config).
 * AP access errors are stored in 'retval' and reported at the end instead of
 * aborting mid-sequence, so the reset itself still proceeds. */
static int cortex_m_assert_reset(struct target *target)
{
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = &cortex_m->armv7m;
	enum cortex_m_soft_reset_config reset_config = cortex_m->soft_reset_config;

	LOG_TARGET_DEBUG(target, "target->state: %s,%s examined",
		target_state_name(target),
		target_was_examined(target) ? "" : " not");

	enum reset_types jtag_reset_config = jtag_get_reset_config();

	if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT)) {
		/* allow scripts to override the reset event */

		target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
		register_cache_invalidate(cortex_m->armv7m.arm.core_cache);
		target->state = TARGET_RESET;

		return ERROR_OK;
	}

	/* some cores support connecting while srst is asserted
	 * use that mode if it has been configured */

	bool srst_asserted = false;

	if ((jtag_reset_config & RESET_HAS_SRST) &&
		((jtag_reset_config & RESET_SRST_NO_GATING) || !armv7m->debug_ap)) {
		/* If we have no debug_ap, asserting SRST is the only thing
		 * we can do now */
		adapter_assert_reset();
		srst_asserted = true;
	}

	/* TODO: replace the hack calling target_examine_one()
	 * as soon as a better reset framework is available */
	if (!target_was_examined(target) && !target->defer_examine
		&& srst_asserted && (jtag_reset_config & RESET_SRST_NO_GATING)) {
		LOG_TARGET_DEBUG(target, "Trying to re-examine under reset");
		target_examine_one(target);
	}

	/* We need at least debug_ap to go further.
	 * Inform user and bail out if we don't have one. */
	if (!armv7m->debug_ap) {
		if (srst_asserted) {
			if (target->reset_halt)
				LOG_TARGET_ERROR(target, "Debug AP not available, will not halt after reset!");

			/* Do not propagate error: reset was asserted, proceed to deassert! */
			target->state = TARGET_RESET;
			register_cache_invalidate(cortex_m->armv7m.arm.core_cache);
			return ERROR_OK;

		} else {
			LOG_TARGET_ERROR(target, "Debug AP not available, reset NOT asserted!");
			return ERROR_FAIL;
		}
	}

	/* Enable debug requests */
	int retval = cortex_m_read_dhcsr_atomic_sticky(target);

	/* Store important errors instead of failing and proceed to reset assert */

	if (retval != ERROR_OK || !(cortex_m->dcb_dhcsr & C_DEBUGEN))
		retval = cortex_m_write_debug_halt_mask(target, 0, C_HALT | C_STEP | C_MASKINTS);

	/* If the processor is sleeping in a WFI or WFE instruction, the
	 * C_HALT bit must be asserted to regain control */
	if (retval == ERROR_OK && (cortex_m->dcb_dhcsr & S_SLEEP))
		retval = cortex_m_write_debug_halt_mask(target, C_HALT, 0);

	mem_ap_write_u32(armv7m->debug_ap, DCB_DCRDR, 0);
	/* Ignore less important errors */

	if (!target->reset_halt) {
		/* Set/Clear C_MASKINTS in a separate operation */
		cortex_m_set_maskints_for_run(target);

		/* clear any debug flags before resuming */
		cortex_m_clear_halt(target);

		/* clear C_HALT in dhcsr reg */
		cortex_m_write_debug_halt_mask(target, 0, C_HALT);
	} else {
		/* Halt in debug on reset; endreset_event() restores DEMCR.
		 *
		 * REVISIT catching BUSERR presumably helps to defend against
		 * bad vector table entries. Should this include MMERR or
		 * other flags too?
		 */
		int retval2;
		retval2 = mem_ap_write_atomic_u32(armv7m->debug_ap, DCB_DEMCR,
			TRCENA | VC_HARDERR | VC_BUSERR | VC_CORERESET);
		if (retval != ERROR_OK || retval2 != ERROR_OK)
			LOG_TARGET_INFO(target, "AP write error, reset will not halt");
	}

	if (jtag_reset_config & RESET_HAS_SRST) {
		/* default to asserting srst */
		if (!srst_asserted)
			adapter_assert_reset();

		/* srst is asserted, ignore AP access errors */
		retval = ERROR_OK;
	} else {
		/* Use a standard Cortex-M3 software reset mechanism.
		 * We default to using VECTRESET as it is supported on all current cores
		 * (except Cortex-M0, M0+ and M1 which support SYSRESETREQ only!)
		 * This has the disadvantage of not resetting the peripherals, so a
		 * reset-init event handler is needed to perform any peripheral resets.
		 */
		if (!cortex_m->vectreset_supported
			&& reset_config == CORTEX_M_RESET_VECTRESET) {
			reset_config = CORTEX_M_RESET_SYSRESETREQ;
			LOG_TARGET_WARNING(target, "VECTRESET is not supported on this Cortex-M core, using SYSRESETREQ instead.");
			LOG_TARGET_WARNING(target, "Set 'cortex_m reset_config sysresetreq'.");
		}

		LOG_TARGET_DEBUG(target, "Using Cortex-M %s", (reset_config == CORTEX_M_RESET_SYSRESETREQ)
			? "SYSRESETREQ" : "VECTRESET");

		if (reset_config == CORTEX_M_RESET_VECTRESET) {
			LOG_TARGET_WARNING(target, "Only resetting the Cortex-M core, use a reset-init event "
				"handler to reset any peripherals or configure hardware srst support.");
		}

		int retval3;
		retval3 = mem_ap_write_atomic_u32(armv7m->debug_ap, NVIC_AIRCR,
			AIRCR_VECTKEY | ((reset_config == CORTEX_M_RESET_SYSRESETREQ)
			? AIRCR_SYSRESETREQ : AIRCR_VECTRESET));
		if (retval3 != ERROR_OK)
			LOG_TARGET_DEBUG(target, "Ignoring AP write error right after reset");

		retval3 = dap_dp_init_or_reconnect(armv7m->debug_ap->dap);
		if (retval3 != ERROR_OK) {
			LOG_TARGET_ERROR(target, "DP initialisation failed");
			/* The error return value must not be propagated in this case.
			 * SYSRESETREQ or VECTRESET have been possibly triggered
			 * so reset processing should continue */
		} else {
			/* I do not know why this is necessary, but it
			 * fixes strange effects (step/resume cause NMI
			 * after reset) on LM3S6918 -- Michael Schwingen
			 */
			uint32_t tmp;
			mem_ap_read_atomic_u32(armv7m->debug_ap, NVIC_AIRCR, &tmp);
		}
	}

	target->state = TARGET_RESET;
	jtag_sleep(50000);	/* 50 ms for the reset to take effect */

	register_cache_invalidate(cortex_m->armv7m.arm.core_cache);

	/* now return stored error code if any */
	if (retval != ERROR_OK)
		return retval;

	if (target->reset_halt && target_was_examined(target)) {
		retval = target_halt(target);
		if (retval != ERROR_OK)
			return retval;
	}

	return ERROR_OK;
}
1747
1748 static int cortex_m_deassert_reset(struct target *target)
1749 {
1750 struct armv7m_common *armv7m = &target_to_cm(target)->armv7m;
1751
1752 LOG_TARGET_DEBUG(target, "target->state: %s,%s examined",
1753 target_state_name(target),
1754 target_was_examined(target) ? "" : " not");
1755
1756 /* deassert reset lines */
1757 adapter_deassert_reset();
1758
1759 enum reset_types jtag_reset_config = jtag_get_reset_config();
1760
1761 if ((jtag_reset_config & RESET_HAS_SRST) &&
1762 !(jtag_reset_config & RESET_SRST_NO_GATING) &&
1763 armv7m->debug_ap) {
1764
1765 int retval = dap_dp_init_or_reconnect(armv7m->debug_ap->dap);
1766 if (retval != ERROR_OK) {
1767 LOG_TARGET_ERROR(target, "DP initialisation failed");
1768 return retval;
1769 }
1770 }
1771
1772 return ERROR_OK;
1773 }
1774
1775 int cortex_m_set_breakpoint(struct target *target, struct breakpoint *breakpoint)
1776 {
1777 int retval;
1778 unsigned int fp_num = 0;
1779 struct cortex_m_common *cortex_m = target_to_cm(target);
1780 struct cortex_m_fp_comparator *comparator_list = cortex_m->fp_comparator_list;
1781
1782 if (breakpoint->is_set) {
1783 LOG_TARGET_WARNING(target, "breakpoint (BPID: %" PRIu32 ") already set", breakpoint->unique_id);
1784 return ERROR_OK;
1785 }
1786
1787 if (breakpoint->type == BKPT_HARD) {
1788 uint32_t fpcr_value;
1789 while (comparator_list[fp_num].used && (fp_num < cortex_m->fp_num_code))
1790 fp_num++;
1791 if (fp_num >= cortex_m->fp_num_code) {
1792 LOG_TARGET_ERROR(target, "Can not find free FPB Comparator!");
1793 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1794 }
1795 breakpoint_hw_set(breakpoint, fp_num);
1796 fpcr_value = breakpoint->address | 1;
1797 if (cortex_m->fp_rev == 0) {
1798 if (breakpoint->address > 0x1FFFFFFF) {
1799 LOG_TARGET_ERROR(target, "Cortex-M Flash Patch Breakpoint rev.1 "
1800 "cannot handle HW breakpoint above address 0x1FFFFFFE");
1801 return ERROR_FAIL;
1802 }
1803 uint32_t hilo;
1804 hilo = (breakpoint->address & 0x2) ? FPCR_REPLACE_BKPT_HIGH : FPCR_REPLACE_BKPT_LOW;
1805 fpcr_value = (fpcr_value & 0x1FFFFFFC) | hilo | 1;
1806 } else if (cortex_m->fp_rev > 1) {
1807 LOG_TARGET_ERROR(target, "Unhandled Cortex-M Flash Patch Breakpoint architecture revision");
1808 return ERROR_FAIL;
1809 }
1810 comparator_list[fp_num].used = true;
1811 comparator_list[fp_num].fpcr_value = fpcr_value;
1812 target_write_u32(target, comparator_list[fp_num].fpcr_address,
1813 comparator_list[fp_num].fpcr_value);
1814 LOG_TARGET_DEBUG(target, "fpc_num %i fpcr_value 0x%" PRIx32 "",
1815 fp_num,
1816 comparator_list[fp_num].fpcr_value);
1817 if (!cortex_m->fpb_enabled) {
1818 LOG_TARGET_DEBUG(target, "FPB wasn't enabled, do it now");
1819 retval = cortex_m_enable_fpb(target);
1820 if (retval != ERROR_OK) {
1821 LOG_TARGET_ERROR(target, "Failed to enable the FPB");
1822 return retval;
1823 }
1824
1825 cortex_m->fpb_enabled = true;
1826 }
1827 } else if (breakpoint->type == BKPT_SOFT) {
1828 uint8_t code[4];
1829
1830 /* NOTE: on ARMv6-M and ARMv7-M, BKPT(0xab) is used for
1831 * semihosting; don't use that. Otherwise the BKPT
1832 * parameter is arbitrary.
1833 */
1834 buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1835 retval = target_read_memory(target,
1836 breakpoint->address & 0xFFFFFFFE,
1837 breakpoint->length, 1,
1838 breakpoint->orig_instr);
1839 if (retval != ERROR_OK)
1840 return retval;
1841 retval = target_write_memory(target,
1842 breakpoint->address & 0xFFFFFFFE,
1843 breakpoint->length, 1,
1844 code);
1845 if (retval != ERROR_OK)
1846 return retval;
1847 breakpoint->is_set = true;
1848 }
1849
1850 LOG_TARGET_DEBUG(target, "BPID: %" PRIu32 ", Type: %d, Address: " TARGET_ADDR_FMT " Length: %d (n=%u)",
1851 breakpoint->unique_id,
1852 (int)(breakpoint->type),
1853 breakpoint->address,
1854 breakpoint->length,
1855 (breakpoint->type == BKPT_SOFT) ? 0 : breakpoint->number);
1856
1857 return ERROR_OK;
1858 }
1859
1860 int cortex_m_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
1861 {
1862 int retval;
1863 struct cortex_m_common *cortex_m = target_to_cm(target);
1864 struct cortex_m_fp_comparator *comparator_list = cortex_m->fp_comparator_list;
1865
1866 if (!breakpoint->is_set) {
1867 LOG_TARGET_WARNING(target, "breakpoint not set");
1868 return ERROR_OK;
1869 }
1870
1871 LOG_TARGET_DEBUG(target, "BPID: %" PRIu32 ", Type: %d, Address: " TARGET_ADDR_FMT " Length: %d (n=%u)",
1872 breakpoint->unique_id,
1873 (int)(breakpoint->type),
1874 breakpoint->address,
1875 breakpoint->length,
1876 (breakpoint->type == BKPT_SOFT) ? 0 : breakpoint->number);
1877
1878 if (breakpoint->type == BKPT_HARD) {
1879 unsigned int fp_num = breakpoint->number;
1880 if (fp_num >= cortex_m->fp_num_code) {
1881 LOG_TARGET_DEBUG(target, "Invalid FP Comparator number in breakpoint");
1882 return ERROR_OK;
1883 }
1884 comparator_list[fp_num].used = false;
1885 comparator_list[fp_num].fpcr_value = 0;
1886 target_write_u32(target, comparator_list[fp_num].fpcr_address,
1887 comparator_list[fp_num].fpcr_value);
1888 } else {
1889 /* restore original instruction (kept in target endianness) */
1890 retval = target_write_memory(target, breakpoint->address & 0xFFFFFFFE,
1891 breakpoint->length, 1,
1892 breakpoint->orig_instr);
1893 if (retval != ERROR_OK)
1894 return retval;
1895 }
1896 breakpoint->is_set = false;
1897
1898 return ERROR_OK;
1899 }
1900
1901 int cortex_m_add_breakpoint(struct target *target, struct breakpoint *breakpoint)
1902 {
1903 if (breakpoint->length == 3) {
1904 LOG_TARGET_DEBUG(target, "Using a two byte breakpoint for 32bit Thumb-2 request");
1905 breakpoint->length = 2;
1906 }
1907
1908 if ((breakpoint->length != 2)) {
1909 LOG_TARGET_INFO(target, "only breakpoints of two bytes length supported");
1910 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1911 }
1912
1913 return cortex_m_set_breakpoint(target, breakpoint);
1914 }
1915
1916 int cortex_m_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1917 {
1918 if (!breakpoint->is_set)
1919 return ERROR_OK;
1920
1921 return cortex_m_unset_breakpoint(target, breakpoint);
1922 }
1923
/* Program a watchpoint into a free DWT comparator.
 * Claims the first comparator not marked as used, then writes its COMP,
 * MASK and FUNCTION registers according to the DWT flavor (pre-ARMv8-M
 * vs ARMv8-M encodings differ). */
static int cortex_m_set_watchpoint(struct target *target, struct watchpoint *watchpoint)
{
	unsigned int dwt_num = 0;
	struct cortex_m_common *cortex_m = target_to_cm(target);

	/* REVISIT Don't fully trust these "not used" records ... users
	 * may set up breakpoints by hand, e.g. dual-address data value
	 * watchpoint using comparator #1; comparator #0 matching cycle
	 * count; send data trace info through ITM and TPIU; etc
	 */
	struct cortex_m_dwt_comparator *comparator;

	/* Linear scan for the first comparator whose 'used' flag is clear */
	for (comparator = cortex_m->dwt_comparator_list;
		comparator->used && dwt_num < cortex_m->dwt_num_comp;
		comparator++, dwt_num++)
		continue;
	if (dwt_num >= cortex_m->dwt_num_comp) {
		LOG_TARGET_ERROR(target, "Can not find free DWT Comparator");
		return ERROR_FAIL;
	}
	comparator->used = true;
	watchpoint_set(watchpoint, dwt_num);

	/* Program the address to match */
	comparator->comp = watchpoint->address;
	target_write_u32(target, comparator->dwt_comparator_address + 0,
		comparator->comp);

	if ((cortex_m->dwt_devarch & 0x1FFFFF) != DWT_DEVARCH_ARMV8M) {
		/* Pre-ARMv8-M encoding: MASK holds log2(length), FUNCTION
		 * selects read (5), write (6) or read/write (7) matching */
		uint32_t mask = 0, temp;

		/* watchpoint params were validated earlier */
		temp = watchpoint->length;
		while (temp) {
			temp >>= 1;
			mask++;
		}
		mask--;

		comparator->mask = mask;
		target_write_u32(target, comparator->dwt_comparator_address + 4,
			comparator->mask);

		switch (watchpoint->rw) {
		case WPT_READ:
			comparator->function = 5;
			break;
		case WPT_WRITE:
			comparator->function = 6;
			break;
		case WPT_ACCESS:
			comparator->function = 7;
			break;
		}
	} else {
		/* ARMv8-M encoding: access size and match action are folded
		 * into FUNCTION; no separate MASK register write is done.
		 * NOTE(review): 'mask' here doubles as the DATAVSIZE-style
		 * length encoding — confirm against the v8-M DWT spec. */
		uint32_t data_size = watchpoint->length >> 1;
		comparator->mask = (watchpoint->length >> 1) | 1;

		switch (watchpoint->rw) {
		case WPT_ACCESS:
			comparator->function = 4;
			break;
		case WPT_WRITE:
			comparator->function = 5;
			break;
		case WPT_READ:
			comparator->function = 6;
			break;
		}
		comparator->function = comparator->function | (1 << 4) |
				(data_size << 10);
	}

	/* Writing FUNCTION activates the comparator */
	target_write_u32(target, comparator->dwt_comparator_address + 8,
		comparator->function);

	LOG_TARGET_DEBUG(target, "Watchpoint (ID %d) DWT%d 0x%08x 0x%x 0x%05x",
		watchpoint->unique_id, dwt_num,
		(unsigned) comparator->comp,
		(unsigned) comparator->mask,
		(unsigned) comparator->function);
	return ERROR_OK;
}
2006
2007 static int cortex_m_unset_watchpoint(struct target *target, struct watchpoint *watchpoint)
2008 {
2009 struct cortex_m_common *cortex_m = target_to_cm(target);
2010 struct cortex_m_dwt_comparator *comparator;
2011
2012 if (!watchpoint->is_set) {
2013 LOG_TARGET_WARNING(target, "watchpoint (wpid: %d) not set",
2014 watchpoint->unique_id);
2015 return ERROR_OK;
2016 }
2017
2018 unsigned int dwt_num = watchpoint->number;
2019
2020 LOG_TARGET_DEBUG(target, "Watchpoint (ID %d) DWT%u address: 0x%08x clear",
2021 watchpoint->unique_id, dwt_num,
2022 (unsigned) watchpoint->address);
2023
2024 if (dwt_num >= cortex_m->dwt_num_comp) {
2025 LOG_TARGET_DEBUG(target, "Invalid DWT Comparator number in watchpoint");
2026 return ERROR_OK;
2027 }
2028
2029 comparator = cortex_m->dwt_comparator_list + dwt_num;
2030 comparator->used = false;
2031 comparator->function = 0;
2032 target_write_u32(target, comparator->dwt_comparator_address + 8,
2033 comparator->function);
2034
2035 watchpoint->is_set = false;
2036
2037 return ERROR_OK;
2038 }
2039
2040 int cortex_m_add_watchpoint(struct target *target, struct watchpoint *watchpoint)
2041 {
2042 struct cortex_m_common *cortex_m = target_to_cm(target);
2043
2044 if (cortex_m->dwt_comp_available < 1) {
2045 LOG_TARGET_DEBUG(target, "no comparators?");
2046 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2047 }
2048
2049 /* REVISIT This DWT may well be able to watch for specific data
2050 * values. Requires comparator #1 to set DATAVMATCH and match
2051 * the data, and another comparator (DATAVADDR0) matching addr.
2052 *
2053 * NOTE: hardware doesn't support data value masking, so we'll need
2054 * to check that mask is zero
2055 */
2056 if (watchpoint->mask != WATCHPOINT_IGNORE_DATA_VALUE_MASK) {
2057 LOG_TARGET_DEBUG(target, "watchpoint value masks not supported");
2058 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2059 }
2060
2061 /* hardware allows address masks of up to 32K */
2062 unsigned mask;
2063
2064 for (mask = 0; mask < 16; mask++) {
2065 if ((1u << mask) == watchpoint->length)
2066 break;
2067 }
2068 if (mask == 16) {
2069 LOG_TARGET_DEBUG(target, "unsupported watchpoint length");
2070 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2071 }
2072 if (watchpoint->address & ((1 << mask) - 1)) {
2073 LOG_TARGET_DEBUG(target, "watchpoint address is unaligned");
2074 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2075 }
2076
2077 cortex_m->dwt_comp_available--;
2078 LOG_TARGET_DEBUG(target, "dwt_comp_available: %d", cortex_m->dwt_comp_available);
2079
2080 return ERROR_OK;
2081 }
2082
2083 int cortex_m_remove_watchpoint(struct target *target, struct watchpoint *watchpoint)
2084 {
2085 struct cortex_m_common *cortex_m = target_to_cm(target);
2086
2087 /* REVISIT why check? DWT can be updated with core running ... */
2088 if (target->state != TARGET_HALTED) {
2089 LOG_TARGET_ERROR(target, "not halted");
2090 return ERROR_TARGET_NOT_HALTED;
2091 }
2092
2093 if (watchpoint->is_set)
2094 cortex_m_unset_watchpoint(target, watchpoint);
2095
2096 cortex_m->dwt_comp_available++;
2097 LOG_TARGET_DEBUG(target, "dwt_comp_available: %d", cortex_m->dwt_comp_available);
2098
2099 return ERROR_OK;
2100 }
2101
2102 static int cortex_m_hit_watchpoint(struct target *target, struct watchpoint **hit_watchpoint)
2103 {
2104 if (target->debug_reason != DBG_REASON_WATCHPOINT)
2105 return ERROR_FAIL;
2106
2107 struct cortex_m_common *cortex_m = target_to_cm(target);
2108
2109 for (struct watchpoint *wp = target->watchpoints; wp; wp = wp->next) {
2110 if (!wp->is_set)
2111 continue;
2112
2113 unsigned int dwt_num = wp->number;
2114 struct cortex_m_dwt_comparator *comparator = cortex_m->dwt_comparator_list + dwt_num;
2115
2116 uint32_t dwt_function;
2117 int retval = target_read_u32(target, comparator->dwt_comparator_address + 8, &dwt_function);
2118 if (retval != ERROR_OK)
2119 return ERROR_FAIL;
2120
2121 /* check the MATCHED bit */
2122 if (dwt_function & BIT(24)) {
2123 *hit_watchpoint = wp;
2124 return ERROR_OK;
2125 }
2126 }
2127
2128 return ERROR_FAIL;
2129 }
2130
2131 void cortex_m_enable_watchpoints(struct target *target)
2132 {
2133 struct watchpoint *watchpoint = target->watchpoints;
2134
2135 /* set any pending watchpoints */
2136 while (watchpoint) {
2137 if (!watchpoint->is_set)
2138 cortex_m_set_watchpoint(target, watchpoint);
2139 watchpoint = watchpoint->next;
2140 }
2141 }
2142
2143 static int cortex_m_read_memory(struct target *target, target_addr_t address,
2144 uint32_t size, uint32_t count, uint8_t *buffer)
2145 {
2146 struct armv7m_common *armv7m = target_to_armv7m(target);
2147
2148 if (armv7m->arm.arch == ARM_ARCH_V6M) {
2149 /* armv6m does not handle unaligned memory access */
2150 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
2151 return ERROR_TARGET_UNALIGNED_ACCESS;
2152 }
2153
2154 return mem_ap_read_buf(armv7m->debug_ap, buffer, size, count, address);
2155 }
2156
2157 static int cortex_m_write_memory(struct target *target, target_addr_t address,
2158 uint32_t size, uint32_t count, const uint8_t *buffer)
2159 {
2160 struct armv7m_common *armv7m = target_to_armv7m(target);
2161
2162 if (armv7m->arm.arch == ARM_ARCH_V6M) {
2163 /* armv6m does not handle unaligned memory access */
2164 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
2165 return ERROR_TARGET_UNALIGNED_ACCESS;
2166 }
2167
2168 return mem_ap_write_buf(armv7m->debug_ap, buffer, size, count, address);
2169 }
2170
/* Target init hook: build the ARMv7-M register cache and set up
 * semihosting support. */
static int cortex_m_init_target(struct command_context *cmd_ctx,
	struct target *target)
{
	armv7m_build_reg_cache(target);
	arm_semihosting_init(target);
	return ERROR_OK;
}
2178
2179 void cortex_m_deinit_target(struct target *target)
2180 {
2181 struct cortex_m_common *cortex_m = target_to_cm(target);
2182 struct armv7m_common *armv7m = target_to_armv7m(target);
2183
2184 if (!armv7m->is_hla_target && armv7m->debug_ap)
2185 dap_put_ap(armv7m->debug_ap);
2186
2187 free(cortex_m->fp_comparator_list);
2188
2189 cortex_m_dwt_free(target);
2190 armv7m_free_reg_cache(target);
2191
2192 free(target->private_config);
2193 free(cortex_m);
2194 }
2195
2196 int cortex_m_profiling(struct target *target, uint32_t *samples,
2197 uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
2198 {
2199 struct timeval timeout, now;
2200 struct armv7m_common *armv7m = target_to_armv7m(target);
2201 uint32_t reg_value;
2202 int retval;
2203
2204 retval = target_read_u32(target, DWT_PCSR, &reg_value);
2205 if (retval != ERROR_OK) {
2206 LOG_TARGET_ERROR(target, "Error while reading PCSR");
2207 return retval;
2208 }
2209 if (reg_value == 0) {
2210 LOG_TARGET_INFO(target, "PCSR sampling not supported on this processor.");
2211 return target_profiling_default(target, samples, max_num_samples, num_samples, seconds);
2212 }
2213
2214 gettimeofday(&timeout, NULL);
2215 timeval_add_time(&timeout, seconds, 0);
2216
2217 LOG_TARGET_INFO(target, "Starting Cortex-M profiling. Sampling DWT_PCSR as fast as we can...");
2218
2219 /* Make sure the target is running */
2220 target_poll(target);
2221 if (target->state == TARGET_HALTED)
2222 retval = target_resume(target, 1, 0, 0, 0);
2223
2224 if (retval != ERROR_OK) {
2225 LOG_TARGET_ERROR(target, "Error while resuming target");
2226 return retval;
2227 }
2228
2229 uint32_t sample_count = 0;
2230
2231 for (;;) {
2232 if (armv7m && armv7m->debug_ap) {
2233 uint32_t read_count = max_num_samples - sample_count;
2234 if (read_count > 1024)
2235 read_count = 1024;
2236
2237 retval = mem_ap_read_buf_noincr(armv7m->debug_ap,
2238 (void *)&samples[sample_count],
2239 4, read_count, DWT_PCSR);
2240 sample_count += read_count;
2241 } else {
2242 target_read_u32(target, DWT_PCSR, &samples[sample_count++]);
2243 }
2244
2245 if (retval != ERROR_OK) {
2246 LOG_TARGET_ERROR(target, "Error while reading PCSR");
2247 return retval;
2248 }
2249
2250
2251 gettimeofday(&now, NULL);
2252 if (sample_count >= max_num_samples || timeval_compare(&now, &timeout) > 0) {
2253 LOG_TARGET_INFO(target, "Profiling completed. %" PRIu32 " samples.", sample_count);
2254 break;
2255 }
2256 }
2257
2258 *num_samples = sample_count;
2259 return retval;
2260 }
2261
2262
2263 /* REVISIT cache valid/dirty bits are unmaintained. We could set "valid"
2264 * on r/w if the core is not running, and clear on resume or reset ... or
2265 * at least, in a post_restore_context() method.
2266 */
2267
/* Backing store for one cached DWT register (see dwt_reg_type below). */
struct dwt_reg_state {
	struct target *target;	/* target whose DWT this register lives in */
	uint32_t addr;		/* memory-mapped address of the register */
	uint8_t value[4];	/* scratch/cache */
};
2273
2274 static int cortex_m_dwt_get_reg(struct reg *reg)
2275 {
2276 struct dwt_reg_state *state = reg->arch_info;
2277
2278 uint32_t tmp;
2279 int retval = target_read_u32(state->target, state->addr, &tmp);
2280 if (retval != ERROR_OK)
2281 return retval;
2282
2283 buf_set_u32(state->value, 0, 32, tmp);
2284 return ERROR_OK;
2285 }
2286
/* reg "set" handler: write reg->size bits from buf to the DWT register
 * on the target. */
static int cortex_m_dwt_set_reg(struct reg *reg, uint8_t *buf)
{
	struct dwt_reg_state *state = reg->arch_info;

	return target_write_u32(state->target, state->addr,
			buf_get_u32(buf, 0, reg->size));
}
2294
/* Static description of one DWT register: address, name and width. */
struct dwt_reg {
	uint32_t addr;
	const char *name;
	unsigned size;		/* width in bits */
};
2300
/* DWT registers exposed in the cache for every DWT-equipped core. */
static const struct dwt_reg dwt_base_regs[] = {
	{ DWT_CTRL, "dwt_ctrl", 32, },
	/* NOTE that Erratum 532314 (fixed r2p0) affects CYCCNT: it wrongly
	 * increments while the core is asleep.
	 */
	{ DWT_CYCCNT, "dwt_cyccnt", 32, },
	/* plus some 8 bit counters, useful for profiling with TPIU */
};
2309
/* Per-comparator register triples (COMP/MASK/FUNCTION), one set every
 * 0x10 bytes. Note the MASK register is described as only 4 bits wide. */
static const struct dwt_reg dwt_comp[] = {
#define DWT_COMPARATOR(i) \
		{ DWT_COMP0 + 0x10 * (i), "dwt_" #i "_comp", 32, }, \
		{ DWT_MASK0 + 0x10 * (i), "dwt_" #i "_mask", 4, }, \
		{ DWT_FUNCTION0 + 0x10 * (i), "dwt_" #i "_function", 32, }
	DWT_COMPARATOR(0),
	DWT_COMPARATOR(1),
	DWT_COMPARATOR(2),
	DWT_COMPARATOR(3),
	DWT_COMPARATOR(4),
	DWT_COMPARATOR(5),
	DWT_COMPARATOR(6),
	DWT_COMPARATOR(7),
	DWT_COMPARATOR(8),
	DWT_COMPARATOR(9),
	DWT_COMPARATOR(10),
	DWT_COMPARATOR(11),
	DWT_COMPARATOR(12),
	DWT_COMPARATOR(13),
	DWT_COMPARATOR(14),
	DWT_COMPARATOR(15),
#undef DWT_COMPARATOR
};
2333
/* Register-cache glue: route get/set of cached DWT registers through
 * target memory accesses. */
static const struct reg_arch_type dwt_reg_type = {
	.get = cortex_m_dwt_get_reg,
	.set = cortex_m_dwt_set_reg,
};
2338
2339 static void cortex_m_dwt_addreg(struct target *t, struct reg *r, const struct dwt_reg *d)
2340 {
2341 struct dwt_reg_state *state;
2342
2343 state = calloc(1, sizeof(*state));
2344 if (!state)
2345 return;
2346 state->addr = d->addr;
2347 state->target = t;
2348
2349 r->name = d->name;
2350 r->size = d->size;
2351 r->value = state->value;
2352 r->arch_info = state;
2353 r->type = &dwt_reg_type;
2354 }
2355
/* Discover the DWT unit, allocate comparator bookkeeping and build its
 * register cache. Does nothing if DWT_CTRL reads as zero (no DWT).
 * Allocation failures unwind through the fail0/fail1 labels, leaving
 * dwt_num_comp at zero so the rest of the driver treats DWT as absent. */
static void cortex_m_dwt_setup(struct cortex_m_common *cm, struct target *target)
{
	uint32_t dwtcr;
	struct reg_cache *cache;
	struct cortex_m_dwt_comparator *comparator;
	int reg;

	target_read_u32(target, DWT_CTRL, &dwtcr);
	LOG_TARGET_DEBUG(target, "DWT_CTRL: 0x%" PRIx32, dwtcr);
	if (!dwtcr) {
		LOG_TARGET_DEBUG(target, "no DWT");
		return;
	}

	/* DEVARCH is cached; cortex_m_set_watchpoint() uses it to pick the
	 * pre-v8-M vs v8-M FUNCTION encoding */
	target_read_u32(target, DWT_DEVARCH, &cm->dwt_devarch);
	LOG_TARGET_DEBUG(target, "DWT_DEVARCH: 0x%" PRIx32, cm->dwt_devarch);

	/* DWT_CTRL[31:28] holds the number of implemented comparators */
	cm->dwt_num_comp = (dwtcr >> 28) & 0xF;
	cm->dwt_comp_available = cm->dwt_num_comp;
	cm->dwt_comparator_list = calloc(cm->dwt_num_comp,
			sizeof(struct cortex_m_dwt_comparator));
	if (!cm->dwt_comparator_list) {
fail0:
		cm->dwt_num_comp = 0;
		LOG_TARGET_ERROR(target, "out of mem");
		return;
	}

	cache = calloc(1, sizeof(*cache));
	if (!cache) {
fail1:
		free(cm->dwt_comparator_list);
		goto fail0;
	}
	cache->name = "Cortex-M DWT registers";
	/* dwt_ctrl + dwt_cyccnt, plus comp/mask/function per comparator */
	cache->num_regs = 2 + cm->dwt_num_comp * 3;
	cache->reg_list = calloc(cache->num_regs, sizeof(*cache->reg_list));
	if (!cache->reg_list) {
		free(cache);
		goto fail1;
	}

	for (reg = 0; reg < 2; reg++)
		cortex_m_dwt_addreg(target, cache->reg_list + reg,
			dwt_base_regs + reg);

	comparator = cm->dwt_comparator_list;
	for (unsigned int i = 0; i < cm->dwt_num_comp; i++, comparator++) {
		int j;

		/* comparators are spaced 0x10 apart starting at DWT_COMP0 */
		comparator->dwt_comparator_address = DWT_COMP0 + 0x10 * i;
		for (j = 0; j < 3; j++, reg++)
			cortex_m_dwt_addreg(target, cache->reg_list + reg,
				dwt_comp + 3 * i + j);

		/* make sure we clear any watchpoints enabled on the target */
		target_write_u32(target, comparator->dwt_comparator_address + 8, 0);
	}

	*register_get_last_cache_p(&target->reg_cache) = cache;
	cm->dwt_cache = cache;

	LOG_TARGET_DEBUG(target, "DWT dwtcr 0x%" PRIx32 ", comp %d, watch%s",
		dwtcr, cm->dwt_num_comp,
		(dwtcr & (0xf << 24)) ? " only" : "/trigger");

	/* REVISIT: if num_comp > 1, check whether comparator #1 can
	 * implement single-address data value watchpoints ... so we
	 * won't need to check it later, when asked to set one up.
	 */
}
2427
2428 static void cortex_m_dwt_free(struct target *target)
2429 {
2430 struct cortex_m_common *cm = target_to_cm(target);
2431 struct reg_cache *cache = cm->dwt_cache;
2432
2433 free(cm->dwt_comparator_list);
2434 cm->dwt_comparator_list = NULL;
2435 cm->dwt_num_comp = 0;
2436
2437 if (cache) {
2438 register_unlink_cache(&target->reg_cache, cache);
2439
2440 if (cache->reg_list) {
2441 for (size_t i = 0; i < cache->num_regs; i++)
2442 free(cache->reg_list[i].arch_info);
2443 free(cache->reg_list);
2444 }
2445 free(cache);
2446 }
2447 cm->dwt_cache = NULL;
2448 }
2449
2450 static bool cortex_m_has_tz(struct target *target)
2451 {
2452 struct armv7m_common *armv7m = target_to_armv7m(target);
2453 uint32_t dauthstatus;
2454
2455 if (armv7m->arm.arch != ARM_ARCH_V8M)
2456 return false;
2457
2458 int retval = target_read_u32(target, DAUTHSTATUS, &dauthstatus);
2459 if (retval != ERROR_OK) {
2460 LOG_WARNING("Error reading DAUTHSTATUS register");
2461 return false;
2462 }
2463 return (dauthstatus & DAUTHSTATUS_SID_MASK) != 0;
2464 }
2465
2466 #define MVFR0 0xe000ef40
2467 #define MVFR1 0xe000ef44
2468
2469 #define MVFR0_DEFAULT_M4 0x10110021
2470 #define MVFR1_DEFAULT_M4 0x11000011
2471
2472 #define MVFR0_DEFAULT_M7_SP 0x10110021
2473 #define MVFR0_DEFAULT_M7_DP 0x10110221
2474 #define MVFR1_DEFAULT_M7_SP 0x11000011
2475 #define MVFR1_DEFAULT_M7_DP 0x12000011
2476
2477 static int cortex_m_find_mem_ap(struct adiv5_dap *swjdp,
2478 struct adiv5_ap **debug_ap)
2479 {
2480 if (dap_find_get_ap(swjdp, AP_TYPE_AHB3_AP, debug_ap) == ERROR_OK)
2481 return ERROR_OK;
2482
2483 return dap_find_get_ap(swjdp, AP_TYPE_AHB5_AP, debug_ap);
2484 }
2485
2486 int cortex_m_examine(struct target *target)
2487 {
2488 int retval;
2489 uint32_t cpuid, fpcr, mvfr0, mvfr1;
2490 struct cortex_m_common *cortex_m = target_to_cm(target);
2491 struct adiv5_dap *swjdp = cortex_m->armv7m.arm.dap;
2492 struct armv7m_common *armv7m = target_to_armv7m(target);
2493
2494 /* hla_target shares the examine handler but does not support
2495 * all its calls */
2496 if (!armv7m->is_hla_target) {
2497 if (!armv7m->debug_ap) {
2498 if (cortex_m->apsel == DP_APSEL_INVALID) {
2499 /* Search for the MEM-AP */
2500 retval = cortex_m_find_mem_ap(swjdp, &armv7m->debug_ap);
2501 if (retval != ERROR_OK) {
2502 LOG_TARGET_ERROR(target, "Could not find MEM-AP to control the core");
2503 return retval;
2504 }
2505 } else {
2506 armv7m->debug_ap = dap_get_ap(swjdp, cortex_m->apsel);
2507 if (!armv7m->debug_ap) {
2508 LOG_ERROR("Cannot get AP");
2509 return ERROR_FAIL;
2510 }
2511 }
2512 }
2513
2514 armv7m->debug_ap->memaccess_tck = 8;
2515
2516 retval = mem_ap_init(armv7m->debug_ap);
2517 if (retval != ERROR_OK)
2518 return retval;
2519 }
2520
2521 if (!target_was_examined(target)) {
2522 target_set_examined(target);
2523
2524 /* Read from Device Identification Registers */
2525 retval = target_read_u32(target, CPUID, &cpuid);
2526 if (retval != ERROR_OK)
2527 return retval;
2528
2529 /* Inspect implementor/part to look for recognized cores */
2530 unsigned int impl_part = cpuid & (ARM_CPUID_IMPLEMENTOR_MASK | ARM_CPUID_PARTNO_MASK);
2531
2532 for (unsigned int n = 0; n < ARRAY_SIZE(cortex_m_parts); n++) {
2533 if (impl_part == cortex_m_parts[n].impl_part) {
2534 cortex_m->core_info = &cortex_m_parts[n];
2535 break;
2536 }
2537 }
2538
2539 if (!cortex_m->core_info) {
2540 LOG_TARGET_ERROR(target, "Cortex-M CPUID: 0x%x is unrecognized", cpuid);
2541 return ERROR_FAIL;
2542 }
2543
2544 armv7m->arm.arch = cortex_m->core_info->arch;
2545
2546 LOG_TARGET_INFO(target, "%s r%" PRId8 "p%" PRId8 " processor detected",
2547 cortex_m->core_info->name,
2548 (uint8_t)((cpuid >> 20) & 0xf),
2549 (uint8_t)((cpuid >> 0) & 0xf));
2550
2551 cortex_m->maskints_erratum = false;
2552 if (impl_part == CORTEX_M7_PARTNO) {
2553 uint8_t rev, patch;
2554 rev = (cpuid >> 20) & 0xf;
2555 patch = (cpuid >> 0) & 0xf;
2556 if ((rev == 0) && (patch < 2)) {
2557 LOG_TARGET_WARNING(target, "Silicon bug: single stepping may enter pending exception handler!");
2558 cortex_m->maskints_erratum = true;
2559 }
2560 }
2561 LOG_TARGET_DEBUG(target, "cpuid: 0x%8.8" PRIx32 "", cpuid);
2562
2563 if (cortex_m->core_info->flags & CORTEX_M_F_HAS_FPV4) {
2564 target_read_u32(target, MVFR0, &mvfr0);
2565 target_read_u32(target, MVFR1, &mvfr1);
2566
2567 /* test for floating point feature on Cortex-M4 */
2568 if ((mvfr0 == MVFR0_DEFAULT_M4) && (mvfr1 == MVFR1_DEFAULT_M4)) {
2569 LOG_TARGET_DEBUG(target, "%s floating point feature FPv4_SP found", cortex_m->core_info->name);
2570 armv7m->fp_feature = FPV4_SP;
2571 }
2572 } else if (cortex_m->core_info->flags & CORTEX_M_F_HAS_FPV5) {
2573 target_read_u32(target, MVFR0, &mvfr0);
2574 target_read_u32(target, MVFR1, &mvfr1);
2575
2576 /* test for floating point features on Cortex-M7 */
2577 if ((mvfr0 == MVFR0_DEFAULT_M7_SP) && (mvfr1 == MVFR1_DEFAULT_M7_SP)) {
2578 LOG_TARGET_DEBUG(target, "%s floating point feature FPv5_SP found", cortex_m->core_info->name);
2579 armv7m->fp_feature = FPV5_SP;
2580 } else if ((mvfr0 == MVFR0_DEFAULT_M7_DP) && (mvfr1 == MVFR1_DEFAULT_M7_DP)) {
2581 LOG_TARGET_DEBUG(target, "%s floating point feature FPv5_DP found", cortex_m->core_info->name);
2582 armv7m->fp_feature = FPV5_DP;
2583 }
2584 }
2585
2586 /* VECTRESET is supported only on ARMv7-M cores */
2587 cortex_m->vectreset_supported = armv7m->arm.arch == ARM_ARCH_V7M;
2588
2589 /* Check for FPU, otherwise mark FPU register as non-existent */
2590 if (armv7m->fp_feature == FP_NONE)
2591 for (size_t idx = ARMV7M_FPU_FIRST_REG; idx <= ARMV7M_FPU_LAST_REG; idx++)
2592 armv7m->arm.core_cache->reg_list[idx].exist = false;
2593
2594 if (!cortex_m_has_tz(target))
2595 for (size_t idx = ARMV8M_FIRST_REG; idx <= ARMV8M_LAST_REG; idx++)
2596 armv7m->arm.core_cache->reg_list[idx].exist = false;
2597
2598 if (!armv7m->is_hla_target) {
2599 if (cortex_m->core_info->flags & CORTEX_M_F_TAR_AUTOINCR_BLOCK_4K)
2600 /* Cortex-M3/M4 have 4096 bytes autoincrement range,
2601 * s. ARM IHI 0031C: MEM-AP 7.2.2 */
2602 armv7m->debug_ap->tar_autoincr_block = (1 << 12);
2603 }
2604
2605 retval = target_read_u32(target, DCB_DHCSR, &cortex_m->dcb_dhcsr);
2606 if (retval != ERROR_OK)
2607 return retval;
2608
2609 /* Don't cumulate sticky S_RESET_ST at the very first read of DHCSR
2610 * as S_RESET_ST may indicate a reset that happened long time ago
2611 * (most probably the power-on reset before OpenOCD was started).
2612 * As we are just initializing the debug system we do not need
2613 * to call cortex_m_endreset_event() in the following poll.
2614 */
2615 if (!cortex_m->dcb_dhcsr_sticky_is_recent) {
2616 cortex_m->dcb_dhcsr_sticky_is_recent = true;
2617 if (cortex_m->dcb_dhcsr & S_RESET_ST) {
2618 LOG_TARGET_DEBUG(target, "reset happened some time ago, ignore");
2619 cortex_m->dcb_dhcsr &= ~S_RESET_ST;
2620 }
2621 }
2622 cortex_m_cumulate_dhcsr_sticky(cortex_m, cortex_m->dcb_dhcsr);
2623
2624 if (!(cortex_m->dcb_dhcsr & C_DEBUGEN)) {
2625 /* Enable debug requests */
2626 uint32_t dhcsr = (cortex_m->dcb_dhcsr | C_DEBUGEN) & ~(C_HALT | C_STEP | C_MASKINTS);
2627
2628 retval = target_write_u32(target, DCB_DHCSR, DBGKEY | (dhcsr & 0x0000FFFFUL));
2629 if (retval != ERROR_OK)
2630 return retval;
2631 cortex_m->dcb_dhcsr = dhcsr;
2632 }
2633
2634 /* Configure trace modules */
2635 retval = target_write_u32(target, DCB_DEMCR, TRCENA | armv7m->demcr);
2636 if (retval != ERROR_OK)
2637 return retval;
2638
2639 if (armv7m->trace_config.itm_deferred_config)
2640 armv7m_trace_itm_config(target);
2641
2642 /* NOTE: FPB and DWT are both optional. */
2643
2644 /* Setup FPB */
2645 target_read_u32(target, FP_CTRL, &fpcr);
2646 /* bits [14:12] and [7:4] */
2647 cortex_m->fp_num_code = ((fpcr >> 8) & 0x70) | ((fpcr >> 4) & 0xF);
2648 cortex_m->fp_num_lit = (fpcr >> 8) & 0xF;
2649 /* Detect flash patch revision, see RM DDI 0403E.b page C1-817.
2650 Revision is zero base, fp_rev == 1 means Rev.2 ! */
2651 cortex_m->fp_rev = (fpcr >> 28) & 0xf;
2652 free(cortex_m->fp_comparator_list);
2653 cortex_m->fp_comparator_list = calloc(
2654 cortex_m->fp_num_code + cortex_m->fp_num_lit,
2655 sizeof(struct cortex_m_fp_comparator));
2656 cortex_m->fpb_enabled = fpcr & 1;
2657 for (unsigned int i = 0; i < cortex_m->fp_num_code + cortex_m->fp_num_lit; i++) {
2658 cortex_m->fp_comparator_list[i].type =
2659 (i < cortex_m->fp_num_code) ? FPCR_CODE : FPCR_LITERAL;
2660 cortex_m->fp_comparator_list[i].fpcr_address = FP_COMP0 + 4 * i;
2661
2662 /* make sure we clear any breakpoints enabled on the target */
2663 target_write_u32(target, cortex_m->fp_comparator_list[i].fpcr_address, 0);
2664 }
2665 LOG_TARGET_DEBUG(target, "FPB fpcr 0x%" PRIx32 ", numcode %i, numlit %i",
2666 fpcr,
2667 cortex_m->fp_num_code,
2668 cortex_m->fp_num_lit);
2669
2670 /* Setup DWT */
2671 cortex_m_dwt_free(target);
2672 cortex_m_dwt_setup(cortex_m, target);
2673
2674 /* These hardware breakpoints only work for code in flash! */
2675 LOG_TARGET_INFO(target, "target has %d breakpoints, %d watchpoints",
2676 cortex_m->fp_num_code,
2677 cortex_m->dwt_num_comp);
2678 }
2679
2680 return ERROR_OK;
2681 }
2682
2683 static int cortex_m_dcc_read(struct target *target, uint8_t *value, uint8_t *ctrl)
2684 {
2685 struct armv7m_common *armv7m = target_to_armv7m(target);
2686 uint16_t dcrdr;
2687 uint8_t buf[2];
2688 int retval;
2689
2690 retval = mem_ap_read_buf_noincr(armv7m->debug_ap, buf, 2, 1, DCB_DCRDR);
2691 if (retval != ERROR_OK)
2692 return retval;
2693
2694 dcrdr = target_buffer_get_u16(target, buf);
2695 *ctrl = (uint8_t)dcrdr;
2696 *value = (uint8_t)(dcrdr >> 8);
2697
2698 LOG_TARGET_DEBUG(target, "data 0x%x ctrl 0x%x", *value, *ctrl);
2699
2700 /* write ack back to software dcc register
2701 * signify we have read data */
2702 if (dcrdr & (1 << 0)) {
2703 target_buffer_set_u16(target, buf, 0);
2704 retval = mem_ap_write_buf_noincr(armv7m->debug_ap, buf, 2, 1, DCB_DCRDR);
2705 if (retval != ERROR_OK)
2706 return retval;
2707 }
2708
2709 return ERROR_OK;
2710 }
2711
2712 static int cortex_m_target_request_data(struct target *target,
2713 uint32_t size, uint8_t *buffer)
2714 {
2715 uint8_t data;
2716 uint8_t ctrl;
2717 uint32_t i;
2718
2719 for (i = 0; i < (size * 4); i++) {
2720 int retval = cortex_m_dcc_read(target, &data, &ctrl);
2721 if (retval != ERROR_OK)
2722 return retval;
2723 buffer[i] = data;
2724 }
2725
2726 return ERROR_OK;
2727 }
2728
2729 static int cortex_m_handle_target_request(void *priv)
2730 {
2731 struct target *target = priv;
2732 if (!target_was_examined(target))
2733 return ERROR_OK;
2734
2735 if (!target->dbg_msg_enabled)
2736 return ERROR_OK;
2737
2738 if (target->state == TARGET_RUNNING) {
2739 uint8_t data;
2740 uint8_t ctrl;
2741 int retval;
2742
2743 retval = cortex_m_dcc_read(target, &data, &ctrl);
2744 if (retval != ERROR_OK)
2745 return retval;
2746
2747 /* check if we have data */
2748 if (ctrl & (1 << 0)) {
2749 uint32_t request;
2750
2751 /* we assume target is quick enough */
2752 request = data;
2753 for (int i = 1; i <= 3; i++) {
2754 retval = cortex_m_dcc_read(target, &data, &ctrl);
2755 if (retval != ERROR_OK)
2756 return retval;
2757 request |= ((uint32_t)data << (i * 8));
2758 }
2759 target_request(target, request);
2760 }
2761 }
2762
2763 return ERROR_OK;
2764 }
2765
2766 static int cortex_m_init_arch_info(struct target *target,
2767 struct cortex_m_common *cortex_m, struct adiv5_dap *dap)
2768 {
2769 struct armv7m_common *armv7m = &cortex_m->armv7m;
2770
2771 armv7m_init_arch_info(target, armv7m);
2772
2773 /* default reset mode is to use srst if fitted
2774 * if not it will use CORTEX_M3_RESET_VECTRESET */
2775 cortex_m->soft_reset_config = CORTEX_M_RESET_VECTRESET;
2776
2777 armv7m->arm.dap = dap;
2778
2779 /* register arch-specific functions */
2780 armv7m->examine_debug_reason = cortex_m_examine_debug_reason;
2781
2782 armv7m->post_debug_entry = NULL;
2783
2784 armv7m->pre_restore_context = NULL;
2785
2786 armv7m->load_core_reg_u32 = cortex_m_load_core_reg_u32;
2787 armv7m->store_core_reg_u32 = cortex_m_store_core_reg_u32;
2788
2789 target_register_timer_callback(cortex_m_handle_target_request, 1,
2790 TARGET_TIMER_TYPE_PERIODIC, target);
2791
2792 return ERROR_OK;
2793 }
2794
2795 static int cortex_m_target_create(struct target *target, Jim_Interp *interp)
2796 {
2797 struct adiv5_private_config *pc;
2798
2799 pc = (struct adiv5_private_config *)target->private_config;
2800 if (adiv5_verify_config(pc) != ERROR_OK)
2801 return ERROR_FAIL;
2802
2803 struct cortex_m_common *cortex_m = calloc(1, sizeof(struct cortex_m_common));
2804 if (!cortex_m) {
2805 LOG_TARGET_ERROR(target, "No memory creating target");
2806 return ERROR_FAIL;
2807 }
2808
2809 cortex_m->common_magic = CORTEX_M_COMMON_MAGIC;
2810 cortex_m->apsel = pc->ap_num;
2811
2812 cortex_m_init_arch_info(target, cortex_m, pc->dap);
2813
2814 return ERROR_OK;
2815 }
2816
2817 /*--------------------------------------------------------------------------*/
2818
2819 static int cortex_m_verify_pointer(struct command_invocation *cmd,
2820 struct cortex_m_common *cm)
2821 {
2822 if (!is_cortex_m_with_dap_access(cm)) {
2823 command_print(cmd, "target is not a Cortex-M");
2824 return ERROR_TARGET_INVALID;
2825 }
2826 return ERROR_OK;
2827 }
2828
2829 /*
2830 * Only stuff below this line should need to verify that its target
2831 * is a Cortex-M3. Everything else should have indirected through the
2832 * cortexm3_target structure, which is only used with CM3 targets.
2833 */
2834
/* 'cortex_m vector_catch' command: with arguments, update which fault
 * vectors halt the core (via DEMCR); without, print the current state.
 * Accepts 'all', 'none', or any list of the names in vec_ids[]. */
COMMAND_HANDLER(handle_cortex_m_vector_catch_command)
{
	struct target *target = get_current_target(CMD_CTX);
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = &cortex_m->armv7m;
	uint32_t demcr = 0;
	int retval;

	/* name -> DEMCR vector-catch bit mapping */
	static const struct {
		char name[10];
		unsigned mask;
	} vec_ids[] = {
		{ "hard_err",   VC_HARDERR, },
		{ "int_err",    VC_INTERR, },
		{ "bus_err",    VC_BUSERR, },
		{ "state_err",  VC_STATERR, },
		{ "chk_err",    VC_CHKERR, },
		{ "nocp_err",   VC_NOCPERR, },
		{ "mm_err",     VC_MMERR, },
		{ "reset",      VC_CORERESET, },
	};

	retval = cortex_m_verify_pointer(CMD, cortex_m);
	if (retval != ERROR_OK)
		return retval;

	if (!target_was_examined(target)) {
		LOG_TARGET_ERROR(target, "Target not examined yet");
		return ERROR_FAIL;
	}

	retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DEMCR, &demcr);
	if (retval != ERROR_OK)
		return retval;

	if (CMD_ARGC > 0) {
		unsigned catch = 0;

		/* 'all'/'none' shortcuts jump straight to the write */
		if (CMD_ARGC == 1) {
			if (strcmp(CMD_ARGV[0], "all") == 0) {
				catch = VC_HARDERR | VC_INTERR | VC_BUSERR
					| VC_STATERR | VC_CHKERR | VC_NOCPERR
					| VC_MMERR | VC_CORERESET;
				goto write;
			} else if (strcmp(CMD_ARGV[0], "none") == 0)
				goto write;
		}
		/* otherwise OR together the named vector-catch bits */
		while (CMD_ARGC-- > 0) {
			unsigned i;
			for (i = 0; i < ARRAY_SIZE(vec_ids); i++) {
				if (strcmp(CMD_ARGV[CMD_ARGC], vec_ids[i].name) != 0)
					continue;
				catch |= vec_ids[i].mask;
				break;
			}
			if (i == ARRAY_SIZE(vec_ids)) {
				LOG_TARGET_ERROR(target, "No CM3 vector '%s'", CMD_ARGV[CMD_ARGC]);
				return ERROR_COMMAND_SYNTAX_ERROR;
			}
		}
write:
		/* For now, armv7m->demcr only stores vector catch flags. */
		armv7m->demcr = catch;

		demcr &= ~0xffff;
		demcr |= catch;

		/* write, but don't assume it stuck (why not??) */
		retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DEMCR, demcr);
		if (retval != ERROR_OK)
			return retval;
		retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DEMCR, &demcr);
		if (retval != ERROR_OK)
			return retval;

		/* FIXME be sure to clear DEMCR on clean server shutdown.
		 * Otherwise the vector catch hardware could fire when there's
		 * no debugger hooked up, causing much confusion...
		 */
	}

	/* Report the state as read back from the hardware */
	for (unsigned i = 0; i < ARRAY_SIZE(vec_ids); i++) {
		command_print(CMD, "%9s: %s", vec_ids[i].name,
			(demcr & vec_ids[i].mask) ? "catch" : "ignore");
	}

	return ERROR_OK;
}
2923
/* 'cortex_m maskisr' command: select or display the interrupt masking
 * policy used while stepping/halting (auto/off/on/steponly). */
COMMAND_HANDLER(handle_cortex_m_mask_interrupts_command)
{
	struct target *target = get_current_target(CMD_CTX);
	struct cortex_m_common *cortex_m = target_to_cm(target);
	int retval;

	static const struct nvp nvp_maskisr_modes[] = {
		{ .name = "auto", .value = CORTEX_M_ISRMASK_AUTO },
		{ .name = "off", .value = CORTEX_M_ISRMASK_OFF },
		{ .name = "on", .value = CORTEX_M_ISRMASK_ON },
		{ .name = "steponly", .value = CORTEX_M_ISRMASK_STEPONLY },
		{ .name = NULL, .value = -1 },
	};
	const struct nvp *n;


	retval = cortex_m_verify_pointer(CMD, cortex_m);
	if (retval != ERROR_OK)
		return retval;

	if (target->state != TARGET_HALTED) {
		command_print(CMD, "Error: target must be stopped for \"%s\" command", CMD_NAME);
		return ERROR_TARGET_NOT_HALTED;
	}

	if (CMD_ARGC > 0) {
		n = nvp_name2value(nvp_maskisr_modes, CMD_ARGV[0]);
		if (!n->name)
			return ERROR_COMMAND_SYNTAX_ERROR;
		cortex_m->isrmasking_mode = n->value;
		/* apply the new policy to the currently halted core */
		cortex_m_set_maskints_for_halt(target);
	}

	/* always echo the (possibly updated) current mode */
	n = nvp_value2name(nvp_maskisr_modes, cortex_m->isrmasking_mode);
	command_print(CMD, "cortex_m interrupt mask %s", n->name);

	return ERROR_OK;
}
2962
2963 COMMAND_HANDLER(handle_cortex_m_reset_config_command)
2964 {
2965 struct target *target = get_current_target(CMD_CTX);
2966 struct cortex_m_common *cortex_m = target_to_cm(target);
2967 int retval;
2968 char *reset_config;
2969
2970 retval = cortex_m_verify_pointer(CMD, cortex_m);
2971 if (retval != ERROR_OK)
2972 return retval;
2973
2974 if (CMD_ARGC > 0) {
2975 if (strcmp(*CMD_ARGV, "sysresetreq") == 0)
2976 cortex_m->soft_reset_config = CORTEX_M_RESET_SYSRESETREQ;
2977
2978 else if (strcmp(*CMD_ARGV, "vectreset") == 0) {
2979 if (target_was_examined(target)
2980 && !cortex_m->vectreset_supported)
2981 LOG_TARGET_WARNING(target, "VECTRESET is not supported on your Cortex-M core!");
2982 else
2983 cortex_m->soft_reset_config = CORTEX_M_RESET_VECTRESET;
2984
2985 } else
2986 return ERROR_COMMAND_SYNTAX_ERROR;
2987 }
2988
2989 switch (cortex_m->soft_reset_config) {
2990 case CORTEX_M_RESET_SYSRESETREQ:
2991 reset_config = "sysresetreq";
2992 break;
2993
2994 case CORTEX_M_RESET_VECTRESET:
2995 reset_config = "vectreset";
2996 break;
2997
2998 default:
2999 reset_config = "unknown";
3000 break;
3001 }
3002
3003 command_print(CMD, "cortex_m reset_config %s", reset_config);
3004
3005 return ERROR_OK;
3006 }
3007
/* Subcommands of the "cortex_m" command group (registered below). */
static const struct command_registration cortex_m_exec_command_handlers[] = {
	{
		.name = "maskisr",
		.handler = handle_cortex_m_mask_interrupts_command,
		.mode = COMMAND_EXEC,
		.help = "mask cortex_m interrupts",
		.usage = "['auto'|'on'|'off'|'steponly']",
	},
	{
		.name = "vector_catch",
		.handler = handle_cortex_m_vector_catch_command,
		.mode = COMMAND_EXEC,
		.help = "configure hardware vectors to trigger debug entry",
		.usage = "['all'|'none'|('bus_err'|'chk_err'|...)*]",
	},
	{
		.name = "reset_config",
		.handler = handle_cortex_m_reset_config_command,
		/* COMMAND_ANY: usable from config scripts before init as well. */
		.mode = COMMAND_ANY,
		.help = "configure software reset handling",
		.usage = "['sysresetreq'|'vectreset']",
	},
	{
		/* Pull in the generic SMP commands (smp on/off/gdb etc.). */
		.chain = smp_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
/* Top-level command registrations for the cortex_m target: shared ARMv7-M
 * command groups plus the "cortex_m" group defined just above. */
static const struct command_registration cortex_m_command_handlers[] = {
	{
		.chain = armv7m_command_handlers,
	},
	{
		.chain = armv7m_trace_command_handlers,
	},
	/* START_DEPRECATED_TPIU */
	{
		.chain = arm_tpiu_deprecated_command_handlers,
	},
	/* END_DEPRECATED_TPIU */
	{
		.name = "cortex_m",
		.mode = COMMAND_EXEC,
		.help = "Cortex-M command group",
		.usage = "",
		.chain = cortex_m_exec_command_handlers,
	},
	{
		.chain = rtt_target_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
3059
/* Target type vtable binding the generic target framework to the Cortex-M
 * implementations in this file (and to shared ARMv7-M helpers where the
 * behavior is architecture-generic). */
struct target_type cortexm_target = {
	.name = "cortex_m",

	/* State polling and human-readable state reporting. */
	.poll = cortex_m_poll,
	.arch_state = armv7m_arch_state,

	.target_request_data = cortex_m_target_request_data,

	/* Run control. */
	.halt = cortex_m_halt,
	.resume = cortex_m_resume,
	.step = cortex_m_step,

	/* Reset handling. */
	.assert_reset = cortex_m_assert_reset,
	.deassert_reset = cortex_m_deassert_reset,
	.soft_reset_halt = cortex_m_soft_reset_halt,

	/* GDB remote protocol support. */
	.get_gdb_arch = arm_get_gdb_arch,
	.get_gdb_reg_list = armv7m_get_gdb_reg_list,

	/* Memory access; checksum/blank-check use generic ARMv7-M algorithms. */
	.read_memory = cortex_m_read_memory,
	.write_memory = cortex_m_write_memory,
	.checksum_memory = armv7m_checksum_memory,
	.blank_check_memory = armv7m_blank_check_memory,

	/* On-target algorithm execution (flash drivers etc.). */
	.run_algorithm = armv7m_run_algorithm,
	.start_algorithm = armv7m_start_algorithm,
	.wait_algorithm = armv7m_wait_algorithm,

	/* Breakpoints (FPB) and watchpoints (DWT). */
	.add_breakpoint = cortex_m_add_breakpoint,
	.remove_breakpoint = cortex_m_remove_breakpoint,
	.add_watchpoint = cortex_m_add_watchpoint,
	.remove_watchpoint = cortex_m_remove_watchpoint,
	.hit_watchpoint = cortex_m_hit_watchpoint,

	/* Lifecycle: creation, configuration, examination, teardown. */
	.commands = cortex_m_command_handlers,
	.target_create = cortex_m_target_create,
	.target_jim_configure = adiv5_jim_configure,
	.init_target = cortex_m_init_target,
	.examine = cortex_m_examine,
	.deinit_target = cortex_m_deinit_target,

	.profiling = cortex_m_profiling,
};

Linking to existing account procedure

If you already have an account and want to add another login method you MUST first sign in with your existing account and then change URL to read https://review.openocd.org/login/?link to get to this page again but this time it'll work for linking. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)