3b4629730f27d3535a4da800bcfd45c32f70f5a9
[openocd.git] / src / target / cortex_m.c
1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2006 by Magnus Lundin *
6 * lundin@mlu.mine.nu *
7 * *
8 * Copyright (C) 2008 by Spencer Oliver *
9 * spen@spen-soft.co.uk *
10 * *
11 * This program is free software; you can redistribute it and/or modify *
12 * it under the terms of the GNU General Public License as published by *
13 * the Free Software Foundation; either version 2 of the License, or *
14 * (at your option) any later version. *
15 * *
16 * This program is distributed in the hope that it will be useful, *
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
19 * GNU General Public License for more details. *
20 * *
21 * You should have received a copy of the GNU General Public License *
22 * along with this program; if not, write to the *
23 * Free Software Foundation, Inc., *
24 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. *
25 * *
26 * *
27 * Cortex-M3(tm) TRM, ARM DDI 0337E (r1p1) and 0337G (r2p0) *
28 * *
29 ***************************************************************************/
30 #ifdef HAVE_CONFIG_H
31 #include "config.h"
32 #endif
33
34 #include "jtag/interface.h"
35 #include "breakpoints.h"
36 #include "cortex_m.h"
37 #include "target_request.h"
38 #include "target_type.h"
39 #include "arm_disassembler.h"
40 #include "register.h"
41 #include "arm_opcodes.h"
42 #include "arm_semihosting.h"
43 #include <helper/time_support.h>
44
45 /* NOTE: most of this should work fine for the Cortex-M1 and
46 * Cortex-M0 cores too, although they're ARMv6-M not ARMv7-M.
47 * Some differences: M0/M1 doesn't have FBP remapping or the
48 * DWT tracing/profiling support. (So the cycle counter will
49 * not be usable; the other stuff isn't currently used here.)
50 *
51 * Although there are some workarounds for errata seen only in r0p0
52 * silicon, such old parts are hard to find and thus not much tested
53 * any longer.
54 */
55
56 /**
57 * Returns the type of a break point required by address location
58 */
59 #define BKPT_TYPE_BY_ADDR(addr) ((addr) < 0x20000000 ? BKPT_HARD : BKPT_SOFT)
60
61 /* forward declarations */
62 static int cortex_m_store_core_reg_u32(struct target *target,
63 uint32_t num, uint32_t value);
64
65 static int cortexm_dap_read_coreregister_u32(struct adiv5_dap *swjdp,
66 uint32_t *value, int regnum)
67 {
68 int retval;
69 uint32_t dcrdr;
70
71 /* because the DCB_DCRDR is used for the emulated dcc channel
72 * we have to save/restore the DCB_DCRDR when used */
73
74 retval = mem_ap_read_u32(swjdp, DCB_DCRDR, &dcrdr);
75 if (retval != ERROR_OK)
76 return retval;
77
78 /* mem_ap_write_u32(swjdp, DCB_DCRSR, regnum); */
79 retval = dap_setup_accessport(swjdp, CSW_32BIT | CSW_ADDRINC_OFF, DCB_DCRSR & 0xFFFFFFF0);
80 if (retval != ERROR_OK)
81 return retval;
82 retval = dap_queue_ap_write(swjdp, AP_REG_BD0 | (DCB_DCRSR & 0xC), regnum);
83 if (retval != ERROR_OK)
84 return retval;
85
86 /* mem_ap_read_u32(swjdp, DCB_DCRDR, value); */
87 retval = dap_setup_accessport(swjdp, CSW_32BIT | CSW_ADDRINC_OFF, DCB_DCRDR & 0xFFFFFFF0);
88 if (retval != ERROR_OK)
89 return retval;
90 retval = dap_queue_ap_read(swjdp, AP_REG_BD0 | (DCB_DCRDR & 0xC), value);
91 if (retval != ERROR_OK)
92 return retval;
93
94 retval = dap_run(swjdp);
95 if (retval != ERROR_OK)
96 return retval;
97
98 /* restore DCB_DCRDR - this needs to be in a seperate
99 * transaction otherwise the emulated DCC channel breaks */
100 if (retval == ERROR_OK)
101 retval = mem_ap_write_atomic_u32(swjdp, DCB_DCRDR, dcrdr);
102
103 return retval;
104 }
105
106 static int cortexm_dap_write_coreregister_u32(struct adiv5_dap *swjdp,
107 uint32_t value, int regnum)
108 {
109 int retval;
110 uint32_t dcrdr;
111
112 /* because the DCB_DCRDR is used for the emulated dcc channel
113 * we have to save/restore the DCB_DCRDR when used */
114
115 retval = mem_ap_read_u32(swjdp, DCB_DCRDR, &dcrdr);
116 if (retval != ERROR_OK)
117 return retval;
118
119 /* mem_ap_write_u32(swjdp, DCB_DCRDR, core_regs[i]); */
120 retval = dap_setup_accessport(swjdp, CSW_32BIT | CSW_ADDRINC_OFF, DCB_DCRDR & 0xFFFFFFF0);
121 if (retval != ERROR_OK)
122 return retval;
123 retval = dap_queue_ap_write(swjdp, AP_REG_BD0 | (DCB_DCRDR & 0xC), value);
124 if (retval != ERROR_OK)
125 return retval;
126
127 /* mem_ap_write_u32(swjdp, DCB_DCRSR, i | DCRSR_WnR); */
128 retval = dap_setup_accessport(swjdp, CSW_32BIT | CSW_ADDRINC_OFF, DCB_DCRSR & 0xFFFFFFF0);
129 if (retval != ERROR_OK)
130 return retval;
131 retval = dap_queue_ap_write(swjdp, AP_REG_BD0 | (DCB_DCRSR & 0xC), regnum | DCRSR_WnR);
132 if (retval != ERROR_OK)
133 return retval;
134
135 retval = dap_run(swjdp);
136 if (retval != ERROR_OK)
137 return retval;
138
139 /* restore DCB_DCRDR - this needs to be in a seperate
140 * transaction otherwise the emulated DCC channel breaks */
141 if (retval == ERROR_OK)
142 retval = mem_ap_write_atomic_u32(swjdp, DCB_DCRDR, dcrdr);
143
144 return retval;
145 }
146
147 static int cortex_m_write_debug_halt_mask(struct target *target,
148 uint32_t mask_on, uint32_t mask_off)
149 {
150 struct cortex_m_common *cortex_m = target_to_cm(target);
151 struct adiv5_dap *swjdp = cortex_m->armv7m.arm.dap;
152
153 /* mask off status bits */
154 cortex_m->dcb_dhcsr &= ~((0xFFFF << 16) | mask_off);
155 /* create new register mask */
156 cortex_m->dcb_dhcsr |= DBGKEY | C_DEBUGEN | mask_on;
157
158 return mem_ap_write_atomic_u32(swjdp, DCB_DHCSR, cortex_m->dcb_dhcsr);
159 }
160
161 static int cortex_m_clear_halt(struct target *target)
162 {
163 struct cortex_m_common *cortex_m = target_to_cm(target);
164 struct adiv5_dap *swjdp = cortex_m->armv7m.arm.dap;
165 int retval;
166
167 /* clear step if any */
168 cortex_m_write_debug_halt_mask(target, C_HALT, C_STEP);
169
170 /* Read Debug Fault Status Register */
171 retval = mem_ap_read_atomic_u32(swjdp, NVIC_DFSR, &cortex_m->nvic_dfsr);
172 if (retval != ERROR_OK)
173 return retval;
174
175 /* Clear Debug Fault Status */
176 retval = mem_ap_write_atomic_u32(swjdp, NVIC_DFSR, cortex_m->nvic_dfsr);
177 if (retval != ERROR_OK)
178 return retval;
179 LOG_DEBUG(" NVIC_DFSR 0x%" PRIx32 "", cortex_m->nvic_dfsr);
180
181 return ERROR_OK;
182 }
183
184 static int cortex_m_single_step_core(struct target *target)
185 {
186 struct cortex_m_common *cortex_m = target_to_cm(target);
187 struct adiv5_dap *swjdp = cortex_m->armv7m.arm.dap;
188 uint32_t dhcsr_save;
189 int retval;
190
191 /* backup dhcsr reg */
192 dhcsr_save = cortex_m->dcb_dhcsr;
193
194 /* Mask interrupts before clearing halt, if done already. This avoids
195 * Erratum 377497 (fixed in r1p0) where setting MASKINTS while clearing
196 * HALT can put the core into an unknown state.
197 */
198 if (!(cortex_m->dcb_dhcsr & C_MASKINTS)) {
199 retval = mem_ap_write_atomic_u32(swjdp, DCB_DHCSR,
200 DBGKEY | C_MASKINTS | C_HALT | C_DEBUGEN);
201 if (retval != ERROR_OK)
202 return retval;
203 }
204 retval = mem_ap_write_atomic_u32(swjdp, DCB_DHCSR,
205 DBGKEY | C_MASKINTS | C_STEP | C_DEBUGEN);
206 if (retval != ERROR_OK)
207 return retval;
208 LOG_DEBUG(" ");
209
210 /* restore dhcsr reg */
211 cortex_m->dcb_dhcsr = dhcsr_save;
212 cortex_m_clear_halt(target);
213
214 return ERROR_OK;
215 }
216
/**
 * Runs once the target has come out of reset: re-enables debug requests,
 * restores the DEMCR vector-catch configuration, and re-programs the FPB
 * and DWT comparators, since some silicon does not preserve that debug
 * state across reset.
 *
 * @return ERROR_OK on success, else a DAP/target access error code
 */
static int cortex_m_endreset_event(struct target *target)
{
	int i;
	int retval;
	uint32_t dcb_demcr;
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = &cortex_m->armv7m;
	struct adiv5_dap *swjdp = cortex_m->armv7m.arm.dap;
	struct cortex_m_fp_comparator *fp_list = cortex_m->fp_comparator_list;
	struct cortex_m_dwt_comparator *dwt_list = cortex_m->dwt_comparator_list;

	/* REVISIT The four debug monitor bits are currently ignored... */
	retval = mem_ap_read_atomic_u32(swjdp, DCB_DEMCR, &dcb_demcr);
	if (retval != ERROR_OK)
		return retval;
	LOG_DEBUG("DCB_DEMCR = 0x%8.8" PRIx32 "", dcb_demcr);

	/* this register is used for emulated dcc channel */
	retval = mem_ap_write_u32(swjdp, DCB_DCRDR, 0);
	if (retval != ERROR_OK)
		return retval;

	/* Enable debug requests (C_DEBUGEN) if not already on */
	retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &cortex_m->dcb_dhcsr);
	if (retval != ERROR_OK)
		return retval;
	if (!(cortex_m->dcb_dhcsr & C_DEBUGEN)) {
		retval = mem_ap_write_u32(swjdp, DCB_DHCSR, DBGKEY | C_DEBUGEN);
		if (retval != ERROR_OK)
			return retval;
	}

	/* clear any interrupt masking */
	cortex_m_write_debug_halt_mask(target, 0, C_MASKINTS);

	/* Enable features controlled by ITM and DWT blocks, and catch only
	 * the vectors we were told to pay attention to.
	 *
	 * Target firmware is responsible for all fault handling policy
	 * choices *EXCEPT* explicitly scripted overrides like "vector_catch"
	 * or manual updates to the NVIC SHCSR and CCR registers.
	 */
	retval = mem_ap_write_u32(swjdp, DCB_DEMCR, TRCENA | armv7m->demcr);
	if (retval != ERROR_OK)
		return retval;

	/* Paranoia: evidently some (early?) chips don't preserve all the
	 * debug state (including FBP, DWT, etc) across reset...
	 */

	/* Enable FPB */
	retval = target_write_u32(target, FP_CTRL, 3);
	if (retval != ERROR_OK)
		return retval;

	cortex_m->fpb_enabled = 1;

	/* Restore FPB comparator registers from the cached copies */
	for (i = 0; i < cortex_m->fp_num_code + cortex_m->fp_num_lit; i++) {
		retval = target_write_u32(target, fp_list[i].fpcr_address, fp_list[i].fpcr_value);
		if (retval != ERROR_OK)
			return retval;
	}

	/* Restore DWT registers: COMP, MASK and FUNCTION per comparator */
	for (i = 0; i < cortex_m->dwt_num_comp; i++) {
		retval = target_write_u32(target, dwt_list[i].dwt_comparator_address + 0,
				dwt_list[i].comp);
		if (retval != ERROR_OK)
			return retval;
		retval = target_write_u32(target, dwt_list[i].dwt_comparator_address + 4,
				dwt_list[i].mask);
		if (retval != ERROR_OK)
			return retval;
		retval = target_write_u32(target, dwt_list[i].dwt_comparator_address + 8,
				dwt_list[i].function);
		if (retval != ERROR_OK)
			return retval;
	}
	retval = dap_run(swjdp);
	if (retval != ERROR_OK)
		return retval;

	/* cached register values are stale after reset */
	register_cache_invalidate(armv7m->arm.core_cache);

	/* make sure we have latest dhcsr flags */
	retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &cortex_m->dcb_dhcsr);

	return retval;
}
307
308 static int cortex_m_examine_debug_reason(struct target *target)
309 {
310 struct cortex_m_common *cortex_m = target_to_cm(target);
311
312 /* THIS IS NOT GOOD, TODO - better logic for detection of debug state reason
313 * only check the debug reason if we don't know it already */
314
315 if ((target->debug_reason != DBG_REASON_DBGRQ)
316 && (target->debug_reason != DBG_REASON_SINGLESTEP)) {
317 if (cortex_m->nvic_dfsr & DFSR_BKPT) {
318 target->debug_reason = DBG_REASON_BREAKPOINT;
319 if (cortex_m->nvic_dfsr & DFSR_DWTTRAP)
320 target->debug_reason = DBG_REASON_WPTANDBKPT;
321 } else if (cortex_m->nvic_dfsr & DFSR_DWTTRAP)
322 target->debug_reason = DBG_REASON_WATCHPOINT;
323 else if (cortex_m->nvic_dfsr & DFSR_VCATCH)
324 target->debug_reason = DBG_REASON_BREAKPOINT;
325 else /* EXTERNAL, HALTED */
326 target->debug_reason = DBG_REASON_UNDEFINED;
327 }
328
329 return ERROR_OK;
330 }
331
/**
 * Read the NVIC fault-status (and, where defined, fault-address)
 * registers that correspond to the active exception number, then log
 * them. The case labels are the architectural ARMv7-M exception numbers
 * (2 = NMI ... 15 = SysTick).
 *
 * @return result of the final dap_run() flushing the queued reads
 */
static int cortex_m_examine_exception_reason(struct target *target)
{
	uint32_t shcsr = 0, except_sr = 0, cfsr = -1, except_ar = -1;
	struct armv7m_common *armv7m = target_to_armv7m(target);
	struct adiv5_dap *swjdp = armv7m->arm.dap;
	int retval;

	retval = mem_ap_read_u32(swjdp, NVIC_SHCSR, &shcsr);
	if (retval != ERROR_OK)
		return retval;
	switch (armv7m->exception_number) {
		case 2:	/* NMI */
			break;
		case 3:	/* Hard Fault */
			retval = mem_ap_read_atomic_u32(swjdp, NVIC_HFSR, &except_sr);
			if (retval != ERROR_OK)
				return retval;
			/* HFSR.FORCED: fault was escalated; fetch CFSR for the cause */
			if (except_sr & 0x40000000) {
				retval = mem_ap_read_u32(swjdp, NVIC_CFSR, &cfsr);
				if (retval != ERROR_OK)
					return retval;
			}
			break;
		case 4:	/* Memory Management */
			retval = mem_ap_read_u32(swjdp, NVIC_CFSR, &except_sr);
			if (retval != ERROR_OK)
				return retval;
			retval = mem_ap_read_u32(swjdp, NVIC_MMFAR, &except_ar);
			if (retval != ERROR_OK)
				return retval;
			break;
		case 5:	/* Bus Fault */
			retval = mem_ap_read_u32(swjdp, NVIC_CFSR, &except_sr);
			if (retval != ERROR_OK)
				return retval;
			retval = mem_ap_read_u32(swjdp, NVIC_BFAR, &except_ar);
			if (retval != ERROR_OK)
				return retval;
			break;
		case 6:	/* Usage Fault */
			retval = mem_ap_read_u32(swjdp, NVIC_CFSR, &except_sr);
			if (retval != ERROR_OK)
				return retval;
			break;
		case 11:	/* SVCall */
			break;
		case 12:	/* Debug Monitor */
			retval = mem_ap_read_u32(swjdp, NVIC_DFSR, &except_sr);
			if (retval != ERROR_OK)
				return retval;
			break;
		case 14:	/* PendSV */
			break;
		case 15:	/* SysTick */
			break;
		default:
			except_sr = 0;
			break;
	}
	/* flush any reads still queued above */
	retval = dap_run(swjdp);
	if (retval == ERROR_OK)
		LOG_DEBUG("%s SHCSR 0x%" PRIx32 ", SR 0x%" PRIx32
			", CFSR 0x%" PRIx32 ", AR 0x%" PRIx32,
			armv7m_exception_string(armv7m->exception_number),
			shcsr, except_sr, cfsr, except_ar);
	return retval;
}
399
/**
 * Called when the core has just halted: acknowledges the halt, refreshes
 * the cached DHCSR, determines the debug reason, reloads all core
 * registers accessible through the debug port, and derives the core mode
 * (handler vs. thread, MSP vs. PSP) from xPSR and CONTROL.
 *
 * @return ERROR_OK on success, else a DAP/hook error code
 */
static int cortex_m_debug_entry(struct target *target)
{
	int i;
	uint32_t xPSR;
	int retval;
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = &cortex_m->armv7m;
	struct arm *arm = &armv7m->arm;
	struct adiv5_dap *swjdp = armv7m->arm.dap;
	struct reg *r;

	LOG_DEBUG(" ");

	/* NOTE(review): clear_halt's return value is ignored here; a DFSR
	 * access failure would go unnoticed until the next DAP access */
	cortex_m_clear_halt(target);
	retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &cortex_m->dcb_dhcsr);
	if (retval != ERROR_OK)
		return retval;

	retval = armv7m->examine_debug_reason(target);
	if (retval != ERROR_OK)
		return retval;

	/* Examine target state and mode
	 * First load register accessible through core debug port */
	int num_regs = arm->core_cache->num_regs;

	for (i = 0; i < num_regs; i++) {
		r = &armv7m->arm.core_cache->reg_list[i];
		if (!r->valid)
			arm->read_core_reg(target, r, i, ARM_MODE_ANY);
	}

	r = arm->cpsr;
	xPSR = buf_get_u32(r->value, 0, 32);

	/* For IT instructions xPSR must be reloaded on resume and clear on debug exec */
	if (xPSR & 0xf00) {
		r->dirty = r->valid;
		/* write back xPSR with the IT/ICI bits cleared (reg 16 = xPSR) */
		cortex_m_store_core_reg_u32(target, 16, xPSR & ~0xff);
	}

	/* Are we in an exception handler? (IPSR field is non-zero) */
	if (xPSR & 0x1FF) {
		armv7m->exception_number = (xPSR & 0x1FF);

		arm->core_mode = ARM_MODE_HANDLER;
		arm->map = armv7m_msp_reg_map;
	} else {
		unsigned control = buf_get_u32(arm->core_cache
				->reg_list[ARMV7M_CONTROL].value, 0, 2);

		/* is this thread privileged? (CONTROL.nPRIV) */
		arm->core_mode = control & 1
			? ARM_MODE_USER_THREAD
			: ARM_MODE_THREAD;

		/* which stack is it using? (CONTROL.SPSEL) */
		if (control & 2)
			arm->map = armv7m_psp_reg_map;
		else
			arm->map = armv7m_msp_reg_map;

		armv7m->exception_number = 0;
	}

	if (armv7m->exception_number)
		cortex_m_examine_exception_reason(target);

	LOG_DEBUG("entered debug state in core mode: %s at PC 0x%" PRIx32 ", target->state: %s",
		arm_mode_name(arm->core_mode),
		*(uint32_t *)(arm->pc->value),
		target_state_name(target));

	/* optional per-core hook (e.g. extra state capture) */
	if (armv7m->post_debug_entry) {
		retval = armv7m->post_debug_entry(target);
		if (retval != ERROR_OK)
			return retval;
	}

	return ERROR_OK;
}
481
/**
 * Periodic poll handler: reads DHCSR and updates target->state, handling
 * lockup recovery, reset detection/exit, halt entry (including
 * semihosting), and recovery from an unknown state.
 *
 * @return ERROR_OK, a DAP error, or ERROR_FAIL if a lockup was cleared
 */
static int cortex_m_poll(struct target *target)
{
	int detected_failure = ERROR_OK;
	int retval = ERROR_OK;
	enum target_state prev_target_state = target->state;
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct adiv5_dap *swjdp = cortex_m->armv7m.arm.dap;

	/* Read from Debug Halting Control and Status Register */
	retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &cortex_m->dcb_dhcsr);
	if (retval != ERROR_OK) {
		target->state = TARGET_UNKNOWN;
		return retval;
	}

	/* Recover from lockup. See ARMv7-M architecture spec,
	 * section B1.5.15 "Unrecoverable exception cases".
	 */
	if (cortex_m->dcb_dhcsr & S_LOCKUP) {
		LOG_ERROR("%s -- clearing lockup after double fault",
			target_name(target));
		cortex_m_write_debug_halt_mask(target, C_HALT, 0);
		target->debug_reason = DBG_REASON_DBGRQ;

		/* We have to execute the rest (the "finally" equivalent, but
		 * still throw this exception again).
		 */
		detected_failure = ERROR_FAIL;

		/* refresh status bits */
		retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &cortex_m->dcb_dhcsr);
		if (retval != ERROR_OK)
			return retval;
	}

	if (cortex_m->dcb_dhcsr & S_RESET_ST) {
		/* S_RESET_ST is sticky; re-read to check if still in reset */
		retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &cortex_m->dcb_dhcsr);
		if (retval != ERROR_OK)
			return retval;

		if (cortex_m->dcb_dhcsr & S_RESET_ST) {
			target->state = TARGET_RESET;
			return ERROR_OK;
		}
	}

	if (target->state == TARGET_RESET) {
		/* Cannot switch context while running so endreset is
		 * called with target->state == TARGET_RESET
		 */
		LOG_DEBUG("Exit from reset with dcb_dhcsr 0x%" PRIx32,
			cortex_m->dcb_dhcsr);
		cortex_m_endreset_event(target);
		target->state = TARGET_RUNNING;
		prev_target_state = TARGET_RUNNING;
	}

	if (cortex_m->dcb_dhcsr & S_HALT) {
		target->state = TARGET_HALTED;

		if ((prev_target_state == TARGET_RUNNING) || (prev_target_state == TARGET_RESET)) {
			retval = cortex_m_debug_entry(target);
			if (retval != ERROR_OK)
				return retval;

			/* non-zero means a semihosting request was serviced
			 * and the target resumed; skip the HALTED event */
			if (arm_semihosting(target, &retval) != 0)
				return retval;

			target_call_event_callbacks(target, TARGET_EVENT_HALTED);
		}
		if (prev_target_state == TARGET_DEBUG_RUNNING) {
			LOG_DEBUG(" ");
			retval = cortex_m_debug_entry(target);
			if (retval != ERROR_OK)
				return retval;

			target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
		}
	}

	/* REVISIT when S_SLEEP is set, it's in a Sleep or DeepSleep state.
	 * How best to model low power modes?
	 */

	if (target->state == TARGET_UNKNOWN) {
		/* check if processor is retiring instructions */
		if (cortex_m->dcb_dhcsr & S_RETIRE_ST) {
			target->state = TARGET_RUNNING;
			retval = ERROR_OK;
		}
	}

	/* Did we detect a failure condition that we cleared? */
	if (detected_failure != ERROR_OK)
		retval = detected_failure;
	return retval;
}
580
581 static int cortex_m_halt(struct target *target)
582 {
583 LOG_DEBUG("target->state: %s",
584 target_state_name(target));
585
586 if (target->state == TARGET_HALTED) {
587 LOG_DEBUG("target was already halted");
588 return ERROR_OK;
589 }
590
591 if (target->state == TARGET_UNKNOWN)
592 LOG_WARNING("target was in unknown state when halt was requested");
593
594 if (target->state == TARGET_RESET) {
595 if ((jtag_get_reset_config() & RESET_SRST_PULLS_TRST) && jtag_get_srst()) {
596 LOG_ERROR("can't request a halt while in reset if nSRST pulls nTRST");
597 return ERROR_TARGET_FAILURE;
598 } else {
599 /* we came here in a reset_halt or reset_init sequence
600 * debug entry was already prepared in cortex_m3_assert_reset()
601 */
602 target->debug_reason = DBG_REASON_DBGRQ;
603
604 return ERROR_OK;
605 }
606 }
607
608 /* Write to Debug Halting Control and Status Register */
609 cortex_m_write_debug_halt_mask(target, C_HALT, 0);
610
611 target->debug_reason = DBG_REASON_DBGRQ;
612
613 return ERROR_OK;
614 }
615
/**
 * Deprecated soft reset: arms VC_CORERESET vector catch, triggers a
 * core-only reset via AIRCR.VECTRESET, then polls up to ~100 ms for the
 * core to halt on the reset vector.
 *
 * NOTE(review): if the poll loop times out, this still returns ERROR_OK
 * with the target possibly not halted — callers get no error. Confirm
 * whether that is intentional for this deprecated path.
 */
static int cortex_m_soft_reset_halt(struct target *target)
{
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct adiv5_dap *swjdp = cortex_m->armv7m.arm.dap;
	uint32_t dcb_dhcsr = 0;
	int retval, timeout = 0;

	/* soft_reset_halt is deprecated on cortex_m as the same functionality
	 * can be obtained by using 'reset halt' and 'cortex_m reset_config vectreset'
	 * As this reset only used VC_CORERESET it would only ever reset the cortex_m
	 * core, not the peripherals */
	LOG_WARNING("soft_reset_halt is deprecated, please use 'reset halt' instead.");

	/* Enter debug state on reset; restore DEMCR in endreset_event() */
	retval = mem_ap_write_u32(swjdp, DCB_DEMCR,
			TRCENA | VC_HARDERR | VC_BUSERR | VC_CORERESET);
	if (retval != ERROR_OK)
		return retval;

	/* Request a core-only reset */
	retval = mem_ap_write_atomic_u32(swjdp, NVIC_AIRCR,
			AIRCR_VECTKEY | AIRCR_VECTRESET);
	if (retval != ERROR_OK)
		return retval;
	target->state = TARGET_RESET;

	/* registers are now invalid */
	register_cache_invalidate(cortex_m->armv7m.arm.core_cache);

	/* poll for halt-on-reset-vector, roughly 1 ms per iteration */
	while (timeout < 100) {
		retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &dcb_dhcsr);
		if (retval == ERROR_OK) {
			retval = mem_ap_read_atomic_u32(swjdp, NVIC_DFSR,
					&cortex_m->nvic_dfsr);
			if (retval != ERROR_OK)
				return retval;
			/* halted AND the vector catch fired: reset-halt done */
			if ((dcb_dhcsr & S_HALT)
				&& (cortex_m->nvic_dfsr & DFSR_VCATCH)) {
				LOG_DEBUG("system reset-halted, DHCSR 0x%08x, "
					"DFSR 0x%08x",
					(unsigned) dcb_dhcsr,
					(unsigned) cortex_m->nvic_dfsr);
				cortex_m_poll(target);
				/* FIXME restore user's vector catch config */
				return ERROR_OK;
			} else
				LOG_DEBUG("waiting for system reset-halt, "
					"DHCSR 0x%08x, %d ms",
					(unsigned) dcb_dhcsr, timeout);
		}
		timeout++;
		alive_sleep(1);
	}

	return ERROR_OK;
}
672
673 void cortex_m_enable_breakpoints(struct target *target)
674 {
675 struct breakpoint *breakpoint = target->breakpoints;
676
677 /* set any pending breakpoints */
678 while (breakpoint) {
679 if (!breakpoint->set)
680 cortex_m_set_breakpoint(target, breakpoint);
681 breakpoint = breakpoint->next;
682 }
683 }
684
/**
 * Resume execution, either at the current PC or at @a address.
 *
 * @param current           non-zero: resume at current PC; zero: at @a address
 * @param address           resume address used when @a current is zero
 * @param handle_breakpoints non-zero: single-step over a breakpoint at the
 *                          resume PC before restarting
 * @param debug_execution   non-zero: run with interrupts disabled via
 *                          PRIMASK and report DEBUG_RUNNING instead of
 *                          RUNNING
 * @return ERROR_OK or ERROR_TARGET_NOT_HALTED
 */
static int cortex_m_resume(struct target *target, int current,
	uint32_t address, int handle_breakpoints, int debug_execution)
{
	struct armv7m_common *armv7m = target_to_armv7m(target);
	struct breakpoint *breakpoint = NULL;
	uint32_t resume_pc;
	struct reg *r;

	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	if (!debug_execution) {
		target_free_all_working_areas(target);
		cortex_m_enable_breakpoints(target);
		cortex_m_enable_watchpoints(target);
	}

	if (debug_execution) {
		r = armv7m->arm.core_cache->reg_list + ARMV7M_PRIMASK;

		/* Disable interrupts */
		/* We disable interrupts in the PRIMASK register instead of
		 * masking with C_MASKINTS. This is probably the same issue
		 * as Cortex-M3 Erratum 377493 (fixed in r1p0): C_MASKINTS
		 * in parallel with disabled interrupts can cause local faults
		 * to not be taken.
		 *
		 * REVISIT this clearly breaks non-debug execution, since the
		 * PRIMASK register state isn't saved/restored... workaround
		 * by never resuming app code after debug execution.
		 */
		buf_set_u32(r->value, 0, 1, 1);
		r->dirty = true;
		r->valid = true;

		/* Make sure we are in Thumb mode (xPSR.T bit) */
		r = armv7m->arm.cpsr;
		buf_set_u32(r->value, 24, 1, 1);
		r->dirty = true;
		r->valid = true;
	}

	/* current = 1: continue on current pc, otherwise continue at <address> */
	r = armv7m->arm.pc;
	if (!current) {
		buf_set_u32(r->value, 0, 32, address);
		r->dirty = true;
		r->valid = true;
	}

	/* if we halted last time due to a bkpt instruction
	 * then we have to manually step over it, otherwise
	 * the core will break again */

	if (!breakpoint_find(target, buf_get_u32(r->value, 0, 32))
		&& !debug_execution)
		armv7m_maybe_skip_bkpt_inst(target, NULL);

	resume_pc = buf_get_u32(r->value, 0, 32);

	/* flush dirty cached registers back to the core */
	armv7m_restore_context(target);

	/* the front-end may request us not to handle breakpoints */
	if (handle_breakpoints) {
		/* Single step past breakpoint at current address */
		breakpoint = breakpoint_find(target, resume_pc);
		if (breakpoint) {
			LOG_DEBUG("unset breakpoint at 0x%8.8" PRIx32 " (ID: %d)",
				breakpoint->address,
				breakpoint->unique_id);
			cortex_m_unset_breakpoint(target, breakpoint);
			cortex_m_single_step_core(target);
			cortex_m_set_breakpoint(target, breakpoint);
		}
	}

	/* Restart core by clearing C_HALT */
	cortex_m_write_debug_halt_mask(target, 0, C_HALT);

	target->debug_reason = DBG_REASON_NOTHALTED;

	/* registers are now invalid */
	register_cache_invalidate(armv7m->arm.core_cache);

	if (!debug_execution) {
		target->state = TARGET_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
		LOG_DEBUG("target resumed at 0x%" PRIx32 "", resume_pc);
	} else {
		target->state = TARGET_DEBUG_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
		LOG_DEBUG("target debug resumed at 0x%" PRIx32 "", resume_pc);
	}

	return ERROR_OK;
}
783
/**
 * Single-step one instruction.
 *
 * @param current            non-zero: step at current PC; zero: at @a address
 * @param address            step address used when @a current is zero
 * @param handle_breakpoints non-zero: temporarily remove a breakpoint at PC
 * @return ERROR_OK, ERROR_TARGET_NOT_HALTED, or a DAP error code
 *
 * In CORTEX_M_ISRMASK_AUTO mode, pending interrupts are first serviced
 * (by running to a temporary breakpoint at PC) before stepping the
 * instruction itself with interrupts masked; see inline comments.
 */
static int cortex_m_step(struct target *target, int current,
	uint32_t address, int handle_breakpoints)
{
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = &cortex_m->armv7m;
	struct adiv5_dap *swjdp = armv7m->arm.dap;
	struct breakpoint *breakpoint = NULL;
	struct reg *pc = armv7m->arm.pc;
	bool bkpt_inst_found = false;
	int retval;
	bool isr_timed_out = false;

	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* current = 1: continue on current pc, otherwise continue at <address> */
	if (!current)
		buf_set_u32(pc->value, 0, 32, address);

	uint32_t pc_value = buf_get_u32(pc->value, 0, 32);

	/* the front-end may request us not to handle breakpoints */
	if (handle_breakpoints) {
		breakpoint = breakpoint_find(target, pc_value);
		if (breakpoint)
			cortex_m_unset_breakpoint(target, breakpoint);
	}

	armv7m_maybe_skip_bkpt_inst(target, &bkpt_inst_found);

	target->debug_reason = DBG_REASON_SINGLESTEP;

	armv7m_restore_context(target);

	target_call_event_callbacks(target, TARGET_EVENT_RESUMED);

	/* if no bkpt instruction is found at pc then we can perform
	 * a normal step, otherwise we have to manually step over the bkpt
	 * instruction - as such simulate a step */
	if (bkpt_inst_found == false) {
		/* Automatic ISR masking mode off: Just step over the next instruction */
		if ((cortex_m->isrmasking_mode != CORTEX_M_ISRMASK_AUTO))
			cortex_m_write_debug_halt_mask(target, C_STEP, C_HALT);
		else {
			/* Process interrupts during stepping in a way they don't
			 * interfere with debugging.
			 *
			 * Principle:
			 *
			 * Set a temporary break point at the current pc and let the core run
			 * with interrupts enabled. Pending interrupts get served and we run
			 * into the breakpoint again afterwards. Then we step over the next
			 * instruction with interrupts disabled.
			 *
			 * If the pending interrupts don't complete within time, we leave the
			 * core running. This may happen if the interrupts trigger faster
			 * than the core can process them or the handler doesn't return.
			 *
			 * If no more breakpoints are available we simply do a step with
			 * interrupts enabled.
			 *
			 */

			/* 2012-09-29 ph
			 *
			 * If a break point is already set on the lower half word then a break point on
			 * the upper half word will not break again when the core is restarted. So we
			 * just step over the instruction with interrupts disabled.
			 *
			 * The documentation has no information about this, it was found by observation
			 * on STM32F1 and STM32F2. Proper explanation welcome. STM32F0 doesn't seem to
			 * suffer from this problem.
			 *
			 * To add some confusion: pc_value has bit 0 always set, while the breakpoint
			 * address has it always cleared. The former is done to indicate thumb mode
			 * to gdb.
			 *
			 */
			if ((pc_value & 0x02) && breakpoint_find(target, pc_value & ~0x03)) {
				LOG_DEBUG("Stepping over next instruction with interrupts disabled");
				cortex_m_write_debug_halt_mask(target, C_HALT | C_MASKINTS, 0);
				cortex_m_write_debug_halt_mask(target, C_STEP, C_HALT);
				/* Re-enable interrupts */
				cortex_m_write_debug_halt_mask(target, C_HALT, C_MASKINTS);
			}
			else {

				/* Set a temporary break point */
				if (breakpoint)
					retval = cortex_m_set_breakpoint(target, breakpoint);
				else
					retval = breakpoint_add(target, pc_value, 2, BKPT_TYPE_BY_ADDR(pc_value));
				bool tmp_bp_set = (retval == ERROR_OK);

				/* No more breakpoints left, just do a step */
				if (!tmp_bp_set)
					cortex_m_write_debug_halt_mask(target, C_STEP, C_HALT);
				else {
					/* Start the core */
					LOG_DEBUG("Starting core to serve pending interrupts");
					int64_t t_start = timeval_ms();
					cortex_m_write_debug_halt_mask(target, 0, C_HALT | C_STEP);

					/* Wait for pending handlers to complete or timeout */
					do {
						retval = mem_ap_read_atomic_u32(swjdp,
								DCB_DHCSR,
								&cortex_m->dcb_dhcsr);
						if (retval != ERROR_OK) {
							target->state = TARGET_UNKNOWN;
							return retval;
						}
						isr_timed_out = ((timeval_ms() - t_start) > 500);
					} while (!((cortex_m->dcb_dhcsr & S_HALT) || isr_timed_out));

					/* only remove breakpoint if we created it */
					if (breakpoint)
						cortex_m_unset_breakpoint(target, breakpoint);
					else {
						/* Remove the temporary breakpoint */
						breakpoint_remove(target, pc_value);
					}

					if (isr_timed_out) {
						LOG_DEBUG("Interrupt handlers didn't complete within time, "
							"leaving target running");
					} else {
						/* Step over next instruction with interrupts disabled */
						cortex_m_write_debug_halt_mask(target,
							C_HALT | C_MASKINTS,
							0);
						cortex_m_write_debug_halt_mask(target, C_STEP, C_HALT);
						/* Re-enable interrupts */
						cortex_m_write_debug_halt_mask(target, C_HALT, C_MASKINTS);
					}
				}
			}
		}
	}

	retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &cortex_m->dcb_dhcsr);
	if (retval != ERROR_OK)
		return retval;

	/* registers are now invalid */
	register_cache_invalidate(armv7m->arm.core_cache);

	/* re-install the breakpoint we removed at entry, if any */
	if (breakpoint)
		cortex_m_set_breakpoint(target, breakpoint);

	if (isr_timed_out) {
		/* Leave the core running. The user has to stop execution manually. */
		target->debug_reason = DBG_REASON_NOTHALTED;
		target->state = TARGET_RUNNING;
		return ERROR_OK;
	}

	LOG_DEBUG("target stepped dcb_dhcsr = 0x%" PRIx32
		" nvic_icsr = 0x%" PRIx32,
		cortex_m->dcb_dhcsr, cortex_m->nvic_icsr);

	retval = cortex_m_debug_entry(target);
	if (retval != ERROR_OK)
		return retval;
	target_call_event_callbacks(target, TARGET_EVENT_HALTED);

	LOG_DEBUG("target stepped dcb_dhcsr = 0x%" PRIx32
		" nvic_icsr = 0x%" PRIx32,
		cortex_m->dcb_dhcsr, cortex_m->nvic_icsr);

	return ERROR_OK;
}
959
/* Assert reset on a Cortex-M target.
 *
 * Order of operations matters here: debug is enabled (and C_HALT set if
 * the core sleeps in WFI/WFE) *before* reset is asserted, and the method
 * of reset depends on adapter capabilities (hardware SRST) versus the
 * configured software fallback (AIRCR SYSRESETREQ or VECTRESET).
 * Returns ERROR_OK or a DAP access error.
 */
static int cortex_m_assert_reset(struct target *target)
{
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct adiv5_dap *swjdp = cortex_m->armv7m.arm.dap;
	enum cortex_m_soft_reset_config reset_config = cortex_m->soft_reset_config;

	LOG_DEBUG("target->state: %s",
		target_state_name(target));

	enum reset_types jtag_reset_config = jtag_get_reset_config();

	if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT)) {
		/* allow scripts to override the reset event; the script is
		 * then fully responsible for the reset sequence */

		target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
		register_cache_invalidate(cortex_m->armv7m.arm.core_cache);
		target->state = TARGET_RESET;

		return ERROR_OK;
	}

	/* some cores support connecting while srst is asserted
	 * use that mode if it has been configured */

	bool srst_asserted = false;

	if ((jtag_reset_config & RESET_HAS_SRST) &&
	    (jtag_reset_config & RESET_SRST_NO_GATING)) {
		adapter_assert_reset();
		srst_asserted = true;
	}

	/* Enable debug requests */
	int retval;
	retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &cortex_m->dcb_dhcsr);
	if (retval != ERROR_OK)
		return retval;
	if (!(cortex_m->dcb_dhcsr & C_DEBUGEN)) {
		retval = mem_ap_write_u32(swjdp, DCB_DHCSR, DBGKEY | C_DEBUGEN);
		if (retval != ERROR_OK)
			return retval;
	}

	/* If the processor is sleeping in a WFI or WFE instruction, the
	 * C_HALT bit must be asserted to regain control */
	if (cortex_m->dcb_dhcsr & S_SLEEP) {
		retval = mem_ap_write_u32(swjdp, DCB_DHCSR, DBGKEY | C_HALT | C_DEBUGEN);
		if (retval != ERROR_OK)
			return retval;
	}

	/* clear the debug core register data register */
	retval = mem_ap_write_u32(swjdp, DCB_DCRDR, 0);
	if (retval != ERROR_OK)
		return retval;

	if (!target->reset_halt) {
		/* Set/Clear C_MASKINTS in a separate operation */
		if (cortex_m->dcb_dhcsr & C_MASKINTS) {
			retval = mem_ap_write_atomic_u32(swjdp, DCB_DHCSR,
					DBGKEY | C_DEBUGEN | C_HALT);
			if (retval != ERROR_OK)
				return retval;
		}

		/* clear any debug flags before resuming */
		cortex_m_clear_halt(target);

		/* clear C_HALT in dhcsr reg */
		cortex_m_write_debug_halt_mask(target, 0, C_HALT);
	} else {
		/* Halt in debug on reset; endreset_event() restores DEMCR.
		 *
		 * REVISIT catching BUSERR presumably helps to defend against
		 * bad vector table entries.  Should this include MMERR or
		 * other flags too?
		 */
		retval = mem_ap_write_atomic_u32(swjdp, DCB_DEMCR,
				TRCENA | VC_HARDERR | VC_BUSERR | VC_CORERESET);
		if (retval != ERROR_OK)
			return retval;
	}

	if (jtag_reset_config & RESET_HAS_SRST) {
		/* default to asserting srst */
		if (!srst_asserted)
			adapter_assert_reset();
	} else {
		/* Use a standard Cortex-M3 software reset mechanism.
		 * We default to using VECRESET as it is supported on all current cores.
		 * This has the disadvantage of not resetting the peripherals, so a
		 * reset-init event handler is needed to perform any peripheral resets.
		 */
		retval = mem_ap_write_atomic_u32(swjdp, NVIC_AIRCR,
				AIRCR_VECTKEY | ((reset_config == CORTEX_M_RESET_SYSRESETREQ)
				? AIRCR_SYSRESETREQ : AIRCR_VECTRESET));
		if (retval != ERROR_OK)
			return retval;

		LOG_DEBUG("Using Cortex-M %s", (reset_config == CORTEX_M_RESET_SYSRESETREQ)
			? "SYSRESETREQ" : "VECTRESET");

		if (reset_config == CORTEX_M_RESET_VECTRESET) {
			LOG_WARNING("Only resetting the Cortex-M core, use a reset-init event "
				"handler to reset any peripherals or configure hardware srst support.");
		}

		{
			/* I do not know why this is necessary, but it
			 * fixes strange effects (step/resume cause NMI
			 * after reset) on LM3S6918 -- Michael Schwingen
			 */
			uint32_t tmp;
			retval = mem_ap_read_atomic_u32(swjdp, NVIC_AIRCR, &tmp);
			if (retval != ERROR_OK)
				return retval;
		}
	}

	target->state = TARGET_RESET;
	/* queue a 50 ms settle delay after asserting reset */
	jtag_add_sleep(50000);

	/* core register contents are unknown after reset */
	register_cache_invalidate(cortex_m->armv7m.arm.core_cache);

	if (target->reset_halt) {
		retval = target_halt(target);
		if (retval != ERROR_OK)
			return retval;
	}

	return ERROR_OK;
}
1091
1092 static int cortex_m_deassert_reset(struct target *target)
1093 {
1094 LOG_DEBUG("target->state: %s",
1095 target_state_name(target));
1096
1097 /* deassert reset lines */
1098 adapter_deassert_reset();
1099
1100 return ERROR_OK;
1101 }
1102
1103 int cortex_m_set_breakpoint(struct target *target, struct breakpoint *breakpoint)
1104 {
1105 int retval;
1106 int fp_num = 0;
1107 uint32_t hilo;
1108 struct cortex_m_common *cortex_m = target_to_cm(target);
1109 struct cortex_m_fp_comparator *comparator_list = cortex_m->fp_comparator_list;
1110
1111 if (breakpoint->set) {
1112 LOG_WARNING("breakpoint (BPID: %d) already set", breakpoint->unique_id);
1113 return ERROR_OK;
1114 }
1115
1116 if (cortex_m->auto_bp_type)
1117 breakpoint->type = BKPT_TYPE_BY_ADDR(breakpoint->address);
1118
1119 if (breakpoint->type == BKPT_HARD) {
1120 while (comparator_list[fp_num].used && (fp_num < cortex_m->fp_num_code))
1121 fp_num++;
1122 if (fp_num >= cortex_m->fp_num_code) {
1123 LOG_ERROR("Can not find free FPB Comparator!");
1124 return ERROR_FAIL;
1125 }
1126 breakpoint->set = fp_num + 1;
1127 hilo = (breakpoint->address & 0x2) ? FPCR_REPLACE_BKPT_HIGH : FPCR_REPLACE_BKPT_LOW;
1128 comparator_list[fp_num].used = 1;
1129 comparator_list[fp_num].fpcr_value = (breakpoint->address & 0x1FFFFFFC) | hilo | 1;
1130 target_write_u32(target, comparator_list[fp_num].fpcr_address,
1131 comparator_list[fp_num].fpcr_value);
1132 LOG_DEBUG("fpc_num %i fpcr_value 0x%" PRIx32 "",
1133 fp_num,
1134 comparator_list[fp_num].fpcr_value);
1135 if (!cortex_m->fpb_enabled) {
1136 LOG_DEBUG("FPB wasn't enabled, do it now");
1137 target_write_u32(target, FP_CTRL, 3);
1138 }
1139 } else if (breakpoint->type == BKPT_SOFT) {
1140 uint8_t code[4];
1141
1142 /* NOTE: on ARMv6-M and ARMv7-M, BKPT(0xab) is used for
1143 * semihosting; don't use that. Otherwise the BKPT
1144 * parameter is arbitrary.
1145 */
1146 buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1147 retval = target_read_memory(target,
1148 breakpoint->address & 0xFFFFFFFE,
1149 breakpoint->length, 1,
1150 breakpoint->orig_instr);
1151 if (retval != ERROR_OK)
1152 return retval;
1153 retval = target_write_memory(target,
1154 breakpoint->address & 0xFFFFFFFE,
1155 breakpoint->length, 1,
1156 code);
1157 if (retval != ERROR_OK)
1158 return retval;
1159 breakpoint->set = true;
1160 }
1161
1162 LOG_DEBUG("BPID: %d, Type: %d, Address: 0x%08" PRIx32 " Length: %d (set=%d)",
1163 breakpoint->unique_id,
1164 (int)(breakpoint->type),
1165 breakpoint->address,
1166 breakpoint->length,
1167 breakpoint->set);
1168
1169 return ERROR_OK;
1170 }
1171
1172 int cortex_m_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
1173 {
1174 int retval;
1175 struct cortex_m_common *cortex_m = target_to_cm(target);
1176 struct cortex_m_fp_comparator *comparator_list = cortex_m->fp_comparator_list;
1177
1178 if (!breakpoint->set) {
1179 LOG_WARNING("breakpoint not set");
1180 return ERROR_OK;
1181 }
1182
1183 LOG_DEBUG("BPID: %d, Type: %d, Address: 0x%08" PRIx32 " Length: %d (set=%d)",
1184 breakpoint->unique_id,
1185 (int)(breakpoint->type),
1186 breakpoint->address,
1187 breakpoint->length,
1188 breakpoint->set);
1189
1190 if (breakpoint->type == BKPT_HARD) {
1191 int fp_num = breakpoint->set - 1;
1192 if ((fp_num < 0) || (fp_num >= cortex_m->fp_num_code)) {
1193 LOG_DEBUG("Invalid FP Comparator number in breakpoint");
1194 return ERROR_OK;
1195 }
1196 comparator_list[fp_num].used = 0;
1197 comparator_list[fp_num].fpcr_value = 0;
1198 target_write_u32(target, comparator_list[fp_num].fpcr_address,
1199 comparator_list[fp_num].fpcr_value);
1200 } else {
1201 /* restore original instruction (kept in target endianness) */
1202 if (breakpoint->length == 4) {
1203 retval = target_write_memory(target, breakpoint->address & 0xFFFFFFFE, 4, 1,
1204 breakpoint->orig_instr);
1205 if (retval != ERROR_OK)
1206 return retval;
1207 } else {
1208 retval = target_write_memory(target, breakpoint->address & 0xFFFFFFFE, 2, 1,
1209 breakpoint->orig_instr);
1210 if (retval != ERROR_OK)
1211 return retval;
1212 }
1213 }
1214 breakpoint->set = false;
1215
1216 return ERROR_OK;
1217 }
1218
1219 int cortex_m_add_breakpoint(struct target *target, struct breakpoint *breakpoint)
1220 {
1221 struct cortex_m_common *cortex_m = target_to_cm(target);
1222
1223 if (cortex_m->auto_bp_type)
1224 breakpoint->type = BKPT_TYPE_BY_ADDR(breakpoint->address);
1225
1226 if (breakpoint->type != BKPT_TYPE_BY_ADDR(breakpoint->address)) {
1227 if (breakpoint->type == BKPT_HARD) {
1228 LOG_INFO("flash patch comparator requested outside code memory region");
1229 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1230 }
1231
1232 if (breakpoint->type == BKPT_SOFT) {
1233 LOG_INFO("soft breakpoint requested in code (flash) memory region");
1234 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1235 }
1236 }
1237
1238 if ((breakpoint->type == BKPT_HARD) && (cortex_m->fp_code_available < 1)) {
1239 LOG_INFO("no flash patch comparator unit available for hardware breakpoint");
1240 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1241 }
1242
1243 if ((breakpoint->length != 2)) {
1244 LOG_INFO("only breakpoints of two bytes length supported");
1245 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1246 }
1247
1248 if (breakpoint->type == BKPT_HARD)
1249 cortex_m->fp_code_available--;
1250
1251 return cortex_m_set_breakpoint(target, breakpoint);
1252 }
1253
1254 int cortex_m_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1255 {
1256 struct cortex_m_common *cortex_m = target_to_cm(target);
1257
1258 /* REVISIT why check? FBP can be updated with core running ... */
1259 if (target->state != TARGET_HALTED) {
1260 LOG_WARNING("target not halted");
1261 return ERROR_TARGET_NOT_HALTED;
1262 }
1263
1264 if (cortex_m->auto_bp_type)
1265 breakpoint->type = BKPT_TYPE_BY_ADDR(breakpoint->address);
1266
1267 if (breakpoint->set)
1268 cortex_m_unset_breakpoint(target, breakpoint);
1269
1270 if (breakpoint->type == BKPT_HARD)
1271 cortex_m->fp_code_available++;
1272
1273 return ERROR_OK;
1274 }
1275
1276 int cortex_m_set_watchpoint(struct target *target, struct watchpoint *watchpoint)
1277 {
1278 int dwt_num = 0;
1279 uint32_t mask, temp;
1280 struct cortex_m_common *cortex_m = target_to_cm(target);
1281
1282 /* watchpoint params were validated earlier */
1283 mask = 0;
1284 temp = watchpoint->length;
1285 while (temp) {
1286 temp >>= 1;
1287 mask++;
1288 }
1289 mask--;
1290
1291 /* REVISIT Don't fully trust these "not used" records ... users
1292 * may set up breakpoints by hand, e.g. dual-address data value
1293 * watchpoint using comparator #1; comparator #0 matching cycle
1294 * count; send data trace info through ITM and TPIU; etc
1295 */
1296 struct cortex_m_dwt_comparator *comparator;
1297
1298 for (comparator = cortex_m->dwt_comparator_list;
1299 comparator->used && dwt_num < cortex_m->dwt_num_comp;
1300 comparator++, dwt_num++)
1301 continue;
1302 if (dwt_num >= cortex_m->dwt_num_comp) {
1303 LOG_ERROR("Can not find free DWT Comparator");
1304 return ERROR_FAIL;
1305 }
1306 comparator->used = 1;
1307 watchpoint->set = dwt_num + 1;
1308
1309 comparator->comp = watchpoint->address;
1310 target_write_u32(target, comparator->dwt_comparator_address + 0,
1311 comparator->comp);
1312
1313 comparator->mask = mask;
1314 target_write_u32(target, comparator->dwt_comparator_address + 4,
1315 comparator->mask);
1316
1317 switch (watchpoint->rw) {
1318 case WPT_READ:
1319 comparator->function = 5;
1320 break;
1321 case WPT_WRITE:
1322 comparator->function = 6;
1323 break;
1324 case WPT_ACCESS:
1325 comparator->function = 7;
1326 break;
1327 }
1328 target_write_u32(target, comparator->dwt_comparator_address + 8,
1329 comparator->function);
1330
1331 LOG_DEBUG("Watchpoint (ID %d) DWT%d 0x%08x 0x%x 0x%05x",
1332 watchpoint->unique_id, dwt_num,
1333 (unsigned) comparator->comp,
1334 (unsigned) comparator->mask,
1335 (unsigned) comparator->function);
1336 return ERROR_OK;
1337 }
1338
1339 int cortex_m_unset_watchpoint(struct target *target, struct watchpoint *watchpoint)
1340 {
1341 struct cortex_m_common *cortex_m = target_to_cm(target);
1342 struct cortex_m_dwt_comparator *comparator;
1343 int dwt_num;
1344
1345 if (!watchpoint->set) {
1346 LOG_WARNING("watchpoint (wpid: %d) not set",
1347 watchpoint->unique_id);
1348 return ERROR_OK;
1349 }
1350
1351 dwt_num = watchpoint->set - 1;
1352
1353 LOG_DEBUG("Watchpoint (ID %d) DWT%d address: 0x%08x clear",
1354 watchpoint->unique_id, dwt_num,
1355 (unsigned) watchpoint->address);
1356
1357 if ((dwt_num < 0) || (dwt_num >= cortex_m->dwt_num_comp)) {
1358 LOG_DEBUG("Invalid DWT Comparator number in watchpoint");
1359 return ERROR_OK;
1360 }
1361
1362 comparator = cortex_m->dwt_comparator_list + dwt_num;
1363 comparator->used = 0;
1364 comparator->function = 0;
1365 target_write_u32(target, comparator->dwt_comparator_address + 8,
1366 comparator->function);
1367
1368 watchpoint->set = false;
1369
1370 return ERROR_OK;
1371 }
1372
1373 int cortex_m_add_watchpoint(struct target *target, struct watchpoint *watchpoint)
1374 {
1375 struct cortex_m_common *cortex_m = target_to_cm(target);
1376
1377 if (cortex_m->dwt_comp_available < 1) {
1378 LOG_DEBUG("no comparators?");
1379 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1380 }
1381
1382 /* hardware doesn't support data value masking */
1383 if (watchpoint->mask != ~(uint32_t)0) {
1384 LOG_DEBUG("watchpoint value masks not supported");
1385 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1386 }
1387
1388 /* hardware allows address masks of up to 32K */
1389 unsigned mask;
1390
1391 for (mask = 0; mask < 16; mask++) {
1392 if ((1u << mask) == watchpoint->length)
1393 break;
1394 }
1395 if (mask == 16) {
1396 LOG_DEBUG("unsupported watchpoint length");
1397 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1398 }
1399 if (watchpoint->address & ((1 << mask) - 1)) {
1400 LOG_DEBUG("watchpoint address is unaligned");
1401 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1402 }
1403
1404 /* Caller doesn't seem to be able to describe watching for data
1405 * values of zero; that flags "no value".
1406 *
1407 * REVISIT This DWT may well be able to watch for specific data
1408 * values. Requires comparator #1 to set DATAVMATCH and match
1409 * the data, and another comparator (DATAVADDR0) matching addr.
1410 */
1411 if (watchpoint->value) {
1412 LOG_DEBUG("data value watchpoint not YET supported");
1413 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1414 }
1415
1416 cortex_m->dwt_comp_available--;
1417 LOG_DEBUG("dwt_comp_available: %d", cortex_m->dwt_comp_available);
1418
1419 return ERROR_OK;
1420 }
1421
1422 int cortex_m_remove_watchpoint(struct target *target, struct watchpoint *watchpoint)
1423 {
1424 struct cortex_m_common *cortex_m = target_to_cm(target);
1425
1426 /* REVISIT why check? DWT can be updated with core running ... */
1427 if (target->state != TARGET_HALTED) {
1428 LOG_WARNING("target not halted");
1429 return ERROR_TARGET_NOT_HALTED;
1430 }
1431
1432 if (watchpoint->set)
1433 cortex_m_unset_watchpoint(target, watchpoint);
1434
1435 cortex_m->dwt_comp_available++;
1436 LOG_DEBUG("dwt_comp_available: %d", cortex_m->dwt_comp_available);
1437
1438 return ERROR_OK;
1439 }
1440
1441 void cortex_m_enable_watchpoints(struct target *target)
1442 {
1443 struct watchpoint *watchpoint = target->watchpoints;
1444
1445 /* set any pending watchpoints */
1446 while (watchpoint) {
1447 if (!watchpoint->set)
1448 cortex_m_set_watchpoint(target, watchpoint);
1449 watchpoint = watchpoint->next;
1450 }
1451 }
1452
1453 static int cortex_m_load_core_reg_u32(struct target *target,
1454 uint32_t num, uint32_t *value)
1455 {
1456 int retval;
1457 struct armv7m_common *armv7m = target_to_armv7m(target);
1458 struct adiv5_dap *swjdp = armv7m->arm.dap;
1459
1460 /* NOTE: we "know" here that the register identifiers used
1461 * in the v7m header match the Cortex-M3 Debug Core Register
1462 * Selector values for R0..R15, xPSR, MSP, and PSP.
1463 */
1464 switch (num) {
1465 case 0 ... 18:
1466 /* read a normal core register */
1467 retval = cortexm_dap_read_coreregister_u32(swjdp, value, num);
1468
1469 if (retval != ERROR_OK) {
1470 LOG_ERROR("JTAG failure %i", retval);
1471 return ERROR_JTAG_DEVICE_ERROR;
1472 }
1473 LOG_DEBUG("load from core reg %i value 0x%" PRIx32 "", (int)num, *value);
1474 break;
1475
1476 case ARMV7M_PRIMASK:
1477 case ARMV7M_BASEPRI:
1478 case ARMV7M_FAULTMASK:
1479 case ARMV7M_CONTROL:
1480 /* Cortex-M3 packages these four registers as bitfields
1481 * in one Debug Core register. So say r0 and r2 docs;
1482 * it was removed from r1 docs, but still works.
1483 */
1484 cortexm_dap_read_coreregister_u32(swjdp, value, 20);
1485
1486 switch (num) {
1487 case ARMV7M_PRIMASK:
1488 *value = buf_get_u32((uint8_t *)value, 0, 1);
1489 break;
1490
1491 case ARMV7M_BASEPRI:
1492 *value = buf_get_u32((uint8_t *)value, 8, 8);
1493 break;
1494
1495 case ARMV7M_FAULTMASK:
1496 *value = buf_get_u32((uint8_t *)value, 16, 1);
1497 break;
1498
1499 case ARMV7M_CONTROL:
1500 *value = buf_get_u32((uint8_t *)value, 24, 2);
1501 break;
1502 }
1503
1504 LOG_DEBUG("load from special reg %i value 0x%" PRIx32 "", (int)num, *value);
1505 break;
1506
1507 default:
1508 return ERROR_COMMAND_SYNTAX_ERROR;
1509 }
1510
1511 return ERROR_OK;
1512 }
1513
1514 static int cortex_m_store_core_reg_u32(struct target *target,
1515 uint32_t num, uint32_t value)
1516 {
1517 int retval;
1518 uint32_t reg;
1519 struct armv7m_common *armv7m = target_to_armv7m(target);
1520 struct adiv5_dap *swjdp = armv7m->arm.dap;
1521
1522 /* NOTE: we "know" here that the register identifiers used
1523 * in the v7m header match the Cortex-M3 Debug Core Register
1524 * Selector values for R0..R15, xPSR, MSP, and PSP.
1525 */
1526 switch (num) {
1527 case 0 ... 18:
1528 retval = cortexm_dap_write_coreregister_u32(swjdp, value, num);
1529 if (retval != ERROR_OK) {
1530 struct reg *r;
1531
1532 LOG_ERROR("JTAG failure");
1533 r = armv7m->arm.core_cache->reg_list + num;
1534 r->dirty = r->valid;
1535 return ERROR_JTAG_DEVICE_ERROR;
1536 }
1537 LOG_DEBUG("write core reg %i value 0x%" PRIx32 "", (int)num, value);
1538 break;
1539
1540 case ARMV7M_PRIMASK:
1541 case ARMV7M_BASEPRI:
1542 case ARMV7M_FAULTMASK:
1543 case ARMV7M_CONTROL:
1544 /* Cortex-M3 packages these four registers as bitfields
1545 * in one Debug Core register. So say r0 and r2 docs;
1546 * it was removed from r1 docs, but still works.
1547 */
1548 cortexm_dap_read_coreregister_u32(swjdp, &reg, 20);
1549
1550 switch (num) {
1551 case ARMV7M_PRIMASK:
1552 buf_set_u32((uint8_t *)&reg, 0, 1, value);
1553 break;
1554
1555 case ARMV7M_BASEPRI:
1556 buf_set_u32((uint8_t *)&reg, 8, 8, value);
1557 break;
1558
1559 case ARMV7M_FAULTMASK:
1560 buf_set_u32((uint8_t *)&reg, 16, 1, value);
1561 break;
1562
1563 case ARMV7M_CONTROL:
1564 buf_set_u32((uint8_t *)&reg, 24, 2, value);
1565 break;
1566 }
1567
1568 cortexm_dap_write_coreregister_u32(swjdp, reg, 20);
1569
1570 LOG_DEBUG("write special reg %i value 0x%" PRIx32 " ", (int)num, value);
1571 break;
1572
1573 default:
1574 return ERROR_COMMAND_SYNTAX_ERROR;
1575 }
1576
1577 return ERROR_OK;
1578 }
1579
1580 static int cortex_m_read_memory(struct target *target, uint32_t address,
1581 uint32_t size, uint32_t count, uint8_t *buffer)
1582 {
1583 struct armv7m_common *armv7m = target_to_armv7m(target);
1584 struct adiv5_dap *swjdp = armv7m->arm.dap;
1585
1586 if (armv7m->arm.is_armv6m) {
1587 /* armv6m does not handle unaligned memory access */
1588 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1589 return ERROR_TARGET_UNALIGNED_ACCESS;
1590 }
1591
1592 return mem_ap_read(swjdp, buffer, size, count, address, true);
1593 }
1594
1595 static int cortex_m_write_memory(struct target *target, uint32_t address,
1596 uint32_t size, uint32_t count, const uint8_t *buffer)
1597 {
1598 struct armv7m_common *armv7m = target_to_armv7m(target);
1599 struct adiv5_dap *swjdp = armv7m->arm.dap;
1600
1601 if (armv7m->arm.is_armv6m) {
1602 /* armv6m does not handle unaligned memory access */
1603 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1604 return ERROR_TARGET_UNALIGNED_ACCESS;
1605 }
1606
1607 return mem_ap_write(swjdp, buffer, size, count, address, true);
1608 }
1609
/* Target init hook: build the ARMv7-M register cache for this target. */
static int cortex_m_init_target(struct command_context *cmd_ctx,
	struct target *target)
{
	armv7m_build_reg_cache(target);
	return ERROR_OK;
}
1616
1617 /* REVISIT cache valid/dirty bits are unmaintained. We could set "valid"
1618 * on r/w if the core is not running, and clear on resume or reset ... or
1619 * at least, in a post_restore_context() method.
1620 */
1621
/* Per-register state for one exported DWT register. */
struct dwt_reg_state {
	struct target *target;	/* target this register belongs to */
	uint32_t addr;		/* memory-mapped address of the DWT register */
	uint32_t value;		/* scratch/cache */
};
1627
1628 static int cortex_m_dwt_get_reg(struct reg *reg)
1629 {
1630 struct dwt_reg_state *state = reg->arch_info;
1631
1632 return target_read_u32(state->target, state->addr, &state->value);
1633 }
1634
1635 static int cortex_m_dwt_set_reg(struct reg *reg, uint8_t *buf)
1636 {
1637 struct dwt_reg_state *state = reg->arch_info;
1638
1639 return target_write_u32(state->target, state->addr,
1640 buf_get_u32(buf, 0, reg->size));
1641 }
1642
/* Static description of one DWT register exposed in the register cache. */
struct dwt_reg {
	uint32_t addr;	/* memory-mapped address */
	char *name;	/* name shown in the register cache */
	unsigned size;	/* width in bits */
};
1648
/* DWT registers common to all units, independent of comparator count. */
static struct dwt_reg dwt_base_regs[] = {
	{ DWT_CTRL, "dwt_ctrl", 32, },
	/* NOTE that Erratum 532314 (fixed r2p0) affects CYCCNT:  it wrongly
	 * increments while the core is asleep.
	 */
	{ DWT_CYCCNT, "dwt_cyccnt", 32, },
	/* plus some 8 bit counters, useful for profiling with TPIU */
};
1657
/* COMP/MASK/FUNCTION register triple for each of up to four DWT
 * comparators; the comparator blocks are 0x10 bytes apart. */
static struct dwt_reg dwt_comp[] = {
#define DWT_COMPARATOR(i) \
		{ DWT_COMP0 + 0x10 * (i), "dwt_" #i "_comp", 32, }, \
		{ DWT_MASK0 + 0x10 * (i), "dwt_" #i "_mask", 4, }, \
		{ DWT_FUNCTION0 + 0x10 * (i), "dwt_" #i "_function", 32, }
	DWT_COMPARATOR(0),
	DWT_COMPARATOR(1),
	DWT_COMPARATOR(2),
	DWT_COMPARATOR(3),
#undef DWT_COMPARATOR
};
1669
/* Accessors used for every register in the DWT register cache. */
static const struct reg_arch_type dwt_reg_type = {
	.get = cortex_m_dwt_get_reg,
	.set = cortex_m_dwt_set_reg,
};
1674
1675 static void cortex_m_dwt_addreg(struct target *t, struct reg *r, struct dwt_reg *d)
1676 {
1677 struct dwt_reg_state *state;
1678
1679 state = calloc(1, sizeof *state);
1680 if (!state)
1681 return;
1682 state->addr = d->addr;
1683 state->target = t;
1684
1685 r->name = d->name;
1686 r->size = d->size;
1687 r->value = &state->value;
1688 r->arch_info = state;
1689 r->type = &dwt_reg_type;
1690 }
1691
/* Probe the DWT unit and, when present, build its register cache and
 * comparator bookkeeping.
 *
 * Reads DWT_CTRL to find the number of comparators (bits [31:28]),
 * allocates cm->dwt_comparator_list and a reg_cache exposing CTRL,
 * CYCCNT and each comparator's COMP/MASK/FUNCTION registers, and
 * disables any comparators left enabled on the target.  Failure to
 * allocate unwinds through the goto chain below and leaves
 * cm->dwt_num_comp == 0 (i.e. "no DWT").
 */
void cortex_m_dwt_setup(struct cortex_m_common *cm, struct target *target)
{
	uint32_t dwtcr;
	struct reg_cache *cache;
	struct cortex_m_dwt_comparator *comparator;
	int reg, i;

	target_read_u32(target, DWT_CTRL, &dwtcr);
	if (!dwtcr) {
		LOG_DEBUG("no DWT");
		return;
	}

	cm->dwt_num_comp = (dwtcr >> 28) & 0xF;
	cm->dwt_comp_available = cm->dwt_num_comp;
	cm->dwt_comparator_list = calloc(cm->dwt_num_comp,
			sizeof(struct cortex_m_dwt_comparator));
	if (!cm->dwt_comparator_list) {
		/* NOTE: the labels below form a reverse-order cleanup chain;
		 * later allocation failures jump back through them. */
fail0:
		cm->dwt_num_comp = 0;
		LOG_ERROR("out of mem");
		return;
	}

	cache = calloc(1, sizeof *cache);
	if (!cache) {
fail1:
		free(cm->dwt_comparator_list);
		goto fail0;
	}
	cache->name = "cortex-m3 dwt registers";
	cache->num_regs = 2 + cm->dwt_num_comp * 3;
	cache->reg_list = calloc(cache->num_regs, sizeof *cache->reg_list);
	if (!cache->reg_list) {
		free(cache);
		goto fail1;
	}

	/* the two base registers (DWT_CTRL, DWT_CYCCNT) ... */
	for (reg = 0; reg < 2; reg++)
		cortex_m_dwt_addreg(target, cache->reg_list + reg,
			dwt_base_regs + reg);

	/* ... then three registers per comparator */
	comparator = cm->dwt_comparator_list;
	for (i = 0; i < cm->dwt_num_comp; i++, comparator++) {
		int j;

		comparator->dwt_comparator_address = DWT_COMP0 + 0x10 * i;
		for (j = 0; j < 3; j++, reg++)
			cortex_m_dwt_addreg(target, cache->reg_list + reg,
				dwt_comp + 3 * i + j);

		/* make sure we clear any watchpoints enabled on the target */
		target_write_u32(target, comparator->dwt_comparator_address + 8, 0);
	}

	*register_get_last_cache_p(&target->reg_cache) = cache;
	cm->dwt_cache = cache;

	LOG_DEBUG("DWT dwtcr 0x%" PRIx32 ", comp %d, watch%s",
		dwtcr, cm->dwt_num_comp,
		(dwtcr & (0xf << 24)) ? " only" : "/trigger");

	/* REVISIT:  if num_comp > 1, check whether comparator #1 can
	 * implement single-address data value watchpoints ... so we
	 * won't need to check it later, when asked to set one up.
	 */
}
1759
/* Media and FP Feature registers, read in cortex_m_examine() to detect
 * an FPU on Cortex-M4 parts */
#define MVFR0 0xe000ef40
#define MVFR1 0xe000ef44

/* MVFR0/MVFR1 values identifying the Cortex-M4 FPv4-SP extension */
#define MVFR0_DEFAULT_M4 0x10110021
#define MVFR1_DEFAULT_M4 0x11000011
1765
/* Examine handler: identify the core from CPUID, detect optional
 * features (FPU on M4, ARMv6-M restrictions on M0, TAR autoincrement
 * range on M3/M4), then enumerate and clear the optional FPB and DWT
 * debug units.  Runs its probing only once per target.
 */
int cortex_m_examine(struct target *target)
{
	int retval;
	uint32_t cpuid, fpcr, mvfr0, mvfr1;
	int i;
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct adiv5_dap *swjdp = cortex_m->armv7m.arm.dap;
	struct armv7m_common *armv7m = target_to_armv7m(target);

	/* stlink shares the examine handler but does not support
	 * all its calls */
	if (!armv7m->stlink) {
		retval = ahbap_debugport_init(swjdp);
		if (retval != ERROR_OK)
			return retval;
	}

	if (!target_was_examined(target)) {
		target_set_examined(target);

		/* Read from Device Identification Registers */
		retval = target_read_u32(target, CPUID, &cpuid);
		if (retval != ERROR_OK)
			return retval;

		/* Get CPU Type: CPUID[7:4] is the Cortex-M part number digit */
		i = (cpuid >> 4) & 0xf;

		LOG_DEBUG("Cortex-M%d r%" PRId8 "p%" PRId8 " processor detected",
			i, (uint8_t)((cpuid >> 20) & 0xf), (uint8_t)((cpuid >> 0) & 0xf));
		LOG_DEBUG("cpuid: 0x%8.8" PRIx32 "", cpuid);

		/* test for floating point feature on cortex-m4 */
		if (i == 4) {
			/* NOTE(review): these two reads are unchecked; on a
			 * DAP error mvfr0/mvfr1 may be uninitialized */
			target_read_u32(target, MVFR0, &mvfr0);
			target_read_u32(target, MVFR1, &mvfr1);

			if ((mvfr0 == MVFR0_DEFAULT_M4) && (mvfr1 == MVFR1_DEFAULT_M4)) {
				LOG_DEBUG("Cortex-M%d floating point feature FPv4_SP found", i);
				armv7m->fp_feature = FPv4_SP;
			}
		} else if (i == 0) {
			/* Cortex-M0 does not support unaligned memory access */
			armv7m->arm.is_armv6m = true;
		}

		if (i == 4 || i == 3) {
			/* Cortex-M3/M4 has 4096 bytes autoincrement range */
			armv7m->dap.tar_autoincr_block = (1 << 12);
		}

		/* NOTE: FPB and DWT are both optional. */

		/* Setup FPB */
		target_read_u32(target, FP_CTRL, &fpcr);
		cortex_m->auto_bp_type = 1;
		/* bits [14:12] and [7:4] */
		cortex_m->fp_num_code = ((fpcr >> 8) & 0x70) | ((fpcr >> 4) & 0xF);
		cortex_m->fp_num_lit = (fpcr >> 8) & 0xF;
		cortex_m->fp_code_available = cortex_m->fp_num_code;
		/* NOTE(review): calloc result is not checked; a failure here
		 * would crash the clearing loop below */
		cortex_m->fp_comparator_list = calloc(
				cortex_m->fp_num_code + cortex_m->fp_num_lit,
				sizeof(struct cortex_m_fp_comparator));
		cortex_m->fpb_enabled = fpcr & 1;
		for (i = 0; i < cortex_m->fp_num_code + cortex_m->fp_num_lit; i++) {
			cortex_m->fp_comparator_list[i].type =
				(i < cortex_m->fp_num_code) ? FPCR_CODE : FPCR_LITERAL;
			cortex_m->fp_comparator_list[i].fpcr_address = FP_COMP0 + 4 * i;

			/* make sure we clear any breakpoints enabled on the target */
			target_write_u32(target, cortex_m->fp_comparator_list[i].fpcr_address, 0);
		}
		LOG_DEBUG("FPB fpcr 0x%" PRIx32 ", numcode %i, numlit %i",
			fpcr,
			cortex_m->fp_num_code,
			cortex_m->fp_num_lit);

		/* Setup DWT */
		cortex_m_dwt_setup(cortex_m, target);

		/* These hardware breakpoints only work for code in flash! */
		LOG_INFO("%s: hardware has %d breakpoints, %d watchpoints",
			target_name(target),
			cortex_m->fp_num_code,
			cortex_m->dwt_num_comp);
	}

	return ERROR_OK;
}
1855
1856 static int cortex_m_dcc_read(struct adiv5_dap *swjdp, uint8_t *value, uint8_t *ctrl)
1857 {
1858 uint16_t dcrdr;
1859 int retval;
1860
1861 mem_ap_read_buf_u16(swjdp, (uint8_t *)&dcrdr, 2, DCB_DCRDR);
1862 *ctrl = (uint8_t)dcrdr;
1863 *value = (uint8_t)(dcrdr >> 8);
1864
1865 LOG_DEBUG("data 0x%x ctrl 0x%x", *value, *ctrl);
1866
1867 /* write ack back to software dcc register
1868 * signify we have read data */
1869 if (dcrdr & (1 << 0)) {
1870 dcrdr = 0;
1871 retval = mem_ap_write_buf_u16(swjdp, (uint8_t *)&dcrdr, 2, DCB_DCRDR);
1872 if (retval != ERROR_OK)
1873 return retval;
1874 }
1875
1876 return ERROR_OK;
1877 }
1878
1879 static int cortex_m_target_request_data(struct target *target,
1880 uint32_t size, uint8_t *buffer)
1881 {
1882 struct armv7m_common *armv7m = target_to_armv7m(target);
1883 struct adiv5_dap *swjdp = armv7m->arm.dap;
1884 uint8_t data;
1885 uint8_t ctrl;
1886 uint32_t i;
1887
1888 for (i = 0; i < (size * 4); i++) {
1889 cortex_m_dcc_read(swjdp, &data, &ctrl);
1890 buffer[i] = data;
1891 }
1892
1893 return ERROR_OK;
1894 }
1895
1896 static int cortex_m_handle_target_request(void *priv)
1897 {
1898 struct target *target = priv;
1899 if (!target_was_examined(target))
1900 return ERROR_OK;
1901 struct armv7m_common *armv7m = target_to_armv7m(target);
1902 struct adiv5_dap *swjdp = armv7m->arm.dap;
1903
1904 if (!target->dbg_msg_enabled)
1905 return ERROR_OK;
1906
1907 if (target->state == TARGET_RUNNING) {
1908 uint8_t data;
1909 uint8_t ctrl;
1910
1911 cortex_m_dcc_read(swjdp, &data, &ctrl);
1912
1913 /* check if we have data */
1914 if (ctrl & (1 << 0)) {
1915 uint32_t request;
1916
1917 /* we assume target is quick enough */
1918 request = data;
1919 cortex_m_dcc_read(swjdp, &data, &ctrl);
1920 request |= (data << 8);
1921 cortex_m_dcc_read(swjdp, &data, &ctrl);
1922 request |= (data << 16);
1923 cortex_m_dcc_read(swjdp, &data, &ctrl);
1924 request |= (data << 24);
1925 target_request(target, request);
1926 }
1927 }
1928
1929 return ERROR_OK;
1930 }
1931
/* Initialize the Cortex-M specific parts of a freshly created target:
 * the embedded ARMv7-M state, JTAG scan-chain info, default soft-reset
 * mode, DAP defaults, the arch-specific register access hooks, and the
 * DCC polling timer.  Returns ERROR_OK or the JTAG setup error.
 */
static int cortex_m_init_arch_info(struct target *target,
	struct cortex_m_common *cortex_m, struct jtag_tap *tap)
{
	int retval;
	struct armv7m_common *armv7m = &cortex_m->armv7m;

	armv7m_init_arch_info(target, armv7m);

	/* prepare JTAG information for the new target */
	cortex_m->jtag_info.tap = tap;
	cortex_m->jtag_info.scann_size = 4;

	/* default reset mode is to use srst if fitted
	 * if not it will use CORTEX_M3_RESET_VECTRESET */
	cortex_m->soft_reset_config = CORTEX_M_RESET_VECTRESET;

	armv7m->arm.dap = &armv7m->dap;

	/* Leave (only) generic DAP stuff for debugport_init(); */
	armv7m->dap.jtag_info = &cortex_m->jtag_info;
	armv7m->dap.memaccess_tck = 8;

	/* Cortex-M3/M4 has 4096 bytes autoincrement range
	 * but set a safe default to 1024 to support Cortex-M0
	 * this will be changed in cortex_m3_examine if a M3/M4 is detected */
	armv7m->dap.tar_autoincr_block = (1 << 10);

	/* register arch-specific functions */
	armv7m->examine_debug_reason = cortex_m_examine_debug_reason;

	armv7m->post_debug_entry = NULL;

	armv7m->pre_restore_context = NULL;

	armv7m->load_core_reg_u32 = cortex_m_load_core_reg_u32;
	armv7m->store_core_reg_u32 = cortex_m_store_core_reg_u32;

	/* poll the DCC channel roughly every ms while the target runs */
	target_register_timer_callback(cortex_m_handle_target_request, 1, 1, target);

	retval = arm_jtag_setup_connection(&cortex_m->jtag_info);
	if (retval != ERROR_OK)
		return retval;

	return ERROR_OK;
}
1977
1978 static int cortex_m_target_create(struct target *target, Jim_Interp *interp)
1979 {
1980 struct cortex_m_common *cortex_m = calloc(1, sizeof(struct cortex_m_common));
1981
1982 cortex_m->common_magic = CORTEX_M_COMMON_MAGIC;
1983 cortex_m_init_arch_info(target, cortex_m, target->tap);
1984
1985 return ERROR_OK;
1986 }
1987
1988 /*--------------------------------------------------------------------------*/
1989
1990 static int cortex_m_verify_pointer(struct command_context *cmd_ctx,
1991 struct cortex_m_common *cm)
1992 {
1993 if (cm->common_magic != CORTEX_M_COMMON_MAGIC) {
1994 command_print(cmd_ctx, "target is not a Cortex-M");
1995 return ERROR_TARGET_INVALID;
1996 }
1997 return ERROR_OK;
1998 }
1999
2000 /*
2001 * Only stuff below this line should need to verify that its target
2002 * is a Cortex-M3. Everything else should have indirected through the
2003 * cortexm3_target structure, which is only used with CM3 targets.
2004 */
2005
/* Name/mask pairs for the DEMCR vector-catch enable bits, used by the
 * "cortex_m vector_catch" command to parse and report settings. */
static const struct {
	char name[10];
	unsigned mask;
} vec_ids[] = {
	{ "hard_err",   VC_HARDERR, },
	{ "int_err",    VC_INTERR, },
	{ "bus_err",    VC_BUSERR, },
	{ "state_err",  VC_STATERR, },
	{ "chk_err",    VC_CHKERR, },
	{ "nocp_err",   VC_NOCPERR, },
	{ "mm_err",     VC_MMERR, },
	{ "reset",      VC_CORERESET, },
};
2019
/* "vector_catch" command: configure which fault/reset vectors halt the
 * core by programming the VC_* bits in DEMCR.  With no arguments, just
 * prints the current catch/ignore state of every known vector. */
COMMAND_HANDLER(handle_cortex_m_vector_catch_command)
{
	struct target *target = get_current_target(CMD_CTX);
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = &cortex_m->armv7m;
	struct adiv5_dap *swjdp = armv7m->arm.dap;
	uint32_t demcr = 0;
	int retval;

	retval = cortex_m_verify_pointer(CMD_CTX, cortex_m);
	if (retval != ERROR_OK)
		return retval;

	/* Read the live DEMCR so unrelated bits are preserved on write. */
	retval = mem_ap_read_atomic_u32(swjdp, DCB_DEMCR, &demcr);
	if (retval != ERROR_OK)
		return retval;

	if (CMD_ARGC > 0) {
		unsigned catch = 0;

		/* "all" and "none" are shortcuts; "none" leaves catch == 0. */
		if (CMD_ARGC == 1) {
			if (strcmp(CMD_ARGV[0], "all") == 0) {
				catch = VC_HARDERR | VC_INTERR | VC_BUSERR
					| VC_STATERR | VC_CHKERR | VC_NOCPERR
					| VC_MMERR | VC_CORERESET;
				goto write;
			} else if (strcmp(CMD_ARGV[0], "none") == 0)
				goto write;
		}
		/* Consume the argument list back-to-front, OR-ing in the
		 * mask for each recognized vector name. */
		while (CMD_ARGC-- > 0) {
			unsigned i;
			for (i = 0; i < ARRAY_SIZE(vec_ids); i++) {
				if (strcmp(CMD_ARGV[CMD_ARGC], vec_ids[i].name) != 0)
					continue;
				catch |= vec_ids[i].mask;
				break;
			}
			/* Loop ran off the table: the name was not found. */
			if (i == ARRAY_SIZE(vec_ids)) {
				LOG_ERROR("No CM3 vector '%s'", CMD_ARGV[CMD_ARGC]);
				return ERROR_COMMAND_SYNTAX_ERROR;
			}
		}
write:
		/* For now, armv7m->demcr only stores vector catch flags. */
		armv7m->demcr = catch;

		/* Replace only the low half of DEMCR (which holds all VC_*
		 * bits) with the newly requested catch set. */
		demcr &= ~0xffff;
		demcr |= catch;

		/* write, but don't assume it stuck (why not??) */
		retval = mem_ap_write_u32(swjdp, DCB_DEMCR, demcr);
		if (retval != ERROR_OK)
			return retval;
		/* Read back so the report below reflects hardware reality. */
		retval = mem_ap_read_atomic_u32(swjdp, DCB_DEMCR, &demcr);
		if (retval != ERROR_OK)
			return retval;

		/* FIXME be sure to clear DEMCR on clean server shutdown.
		 * Otherwise the vector catch hardware could fire when there's
		 * no debugger hooked up, causing much confusion...
		 */
	}

	/* Report the effective state of every vector, per the read-back. */
	for (unsigned i = 0; i < ARRAY_SIZE(vec_ids); i++) {
		command_print(CMD_CTX, "%9s: %s", vec_ids[i].name,
			(demcr & vec_ids[i].mask) ? "catch" : "ignore");
	}

	return ERROR_OK;
}
2090
2091 COMMAND_HANDLER(handle_cortex_m_mask_interrupts_command)
2092 {
2093 struct target *target = get_current_target(CMD_CTX);
2094 struct cortex_m_common *cortex_m = target_to_cm(target);
2095 int retval;
2096
2097 static const Jim_Nvp nvp_maskisr_modes[] = {
2098 { .name = "auto", .value = CORTEX_M_ISRMASK_AUTO },
2099 { .name = "off", .value = CORTEX_M_ISRMASK_OFF },
2100 { .name = "on", .value = CORTEX_M_ISRMASK_ON },
2101 { .name = NULL, .value = -1 },
2102 };
2103 const Jim_Nvp *n;
2104
2105
2106 retval = cortex_m_verify_pointer(CMD_CTX, cortex_m);
2107 if (retval != ERROR_OK)
2108 return retval;
2109
2110 if (target->state != TARGET_HALTED) {
2111 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
2112 return ERROR_OK;
2113 }
2114
2115 if (CMD_ARGC > 0) {
2116 n = Jim_Nvp_name2value_simple(nvp_maskisr_modes, CMD_ARGV[0]);
2117 if (n->name == NULL)
2118 return ERROR_COMMAND_SYNTAX_ERROR;
2119 cortex_m->isrmasking_mode = n->value;
2120
2121
2122 if (cortex_m->isrmasking_mode == CORTEX_M_ISRMASK_ON)
2123 cortex_m_write_debug_halt_mask(target, C_HALT | C_MASKINTS, 0);
2124 else
2125 cortex_m_write_debug_halt_mask(target, C_HALT, C_MASKINTS);
2126 }
2127
2128 n = Jim_Nvp_value2name_simple(nvp_maskisr_modes, cortex_m->isrmasking_mode);
2129 command_print(CMD_CTX, "cortex_m interrupt mask %s", n->name);
2130
2131 return ERROR_OK;
2132 }
2133
2134 COMMAND_HANDLER(handle_cortex_m_reset_config_command)
2135 {
2136 struct target *target = get_current_target(CMD_CTX);
2137 struct cortex_m_common *cortex_m = target_to_cm(target);
2138 int retval;
2139 char *reset_config;
2140
2141 retval = cortex_m_verify_pointer(CMD_CTX, cortex_m);
2142 if (retval != ERROR_OK)
2143 return retval;
2144
2145 if (CMD_ARGC > 0) {
2146 if (strcmp(*CMD_ARGV, "sysresetreq") == 0)
2147 cortex_m->soft_reset_config = CORTEX_M_RESET_SYSRESETREQ;
2148 else if (strcmp(*CMD_ARGV, "vectreset") == 0)
2149 cortex_m->soft_reset_config = CORTEX_M_RESET_VECTRESET;
2150 }
2151
2152 switch (cortex_m->soft_reset_config) {
2153 case CORTEX_M_RESET_SYSRESETREQ:
2154 reset_config = "sysresetreq";
2155 break;
2156
2157 case CORTEX_M_RESET_VECTRESET:
2158 reset_config = "vectreset";
2159 break;
2160
2161 default:
2162 reset_config = "unknown";
2163 break;
2164 }
2165
2166 command_print(CMD_CTX, "cortex_m reset_config %s", reset_config);
2167
2168 return ERROR_OK;
2169 }
2170
2171 static const struct command_registration cortex_m_exec_command_handlers[] = {
2172 {
2173 .name = "maskisr",
2174 .handler = handle_cortex_m_mask_interrupts_command,
2175 .mode = COMMAND_EXEC,
2176 .help = "mask cortex_m interrupts",
2177 .usage = "['auto'|'on'|'off']",
2178 },
2179 {
2180 .name = "vector_catch",
2181 .handler = handle_cortex_m_vector_catch_command,
2182 .mode = COMMAND_EXEC,
2183 .help = "configure hardware vectors to trigger debug entry",
2184 .usage = "['all'|'none'|('bus_err'|'chk_err'|...)*]",
2185 },
2186 {
2187 .name = "reset_config",
2188 .handler = handle_cortex_m_reset_config_command,
2189 .mode = COMMAND_ANY,
2190 .help = "configure software reset handling",
2191 .usage = "['srst'|'sysresetreq'|'vectreset']",
2192 },
2193 COMMAND_REGISTRATION_DONE
2194 };
/* Top-level command registration: pulls in the shared ARMv7-M command
 * set and adds the Cortex-M-specific "cortex_m" group defined above. */
static const struct command_registration cortex_m_command_handlers[] = {
	{
		/* Inherit the generic ARMv7-M commands. */
		.chain = armv7m_command_handlers,
	},
	{
		.name = "cortex_m",
		.mode = COMMAND_EXEC,
		.help = "Cortex-M command group",
		.usage = "",
		.chain = cortex_m_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
2208
/* Target driver vtable for Cortex-M cores.  Core-specific operations
 * (halt/resume/step, reset, breakpoints, memory access) are handled by
 * the cortex_m_* functions in this file; generic ARMv7-M behavior
 * (register lists, algorithms, checksums) is delegated to armv7m_*. */
struct target_type cortexm_target = {
	.name = "cortex_m",
	/* Old configs may still say "cortex_m3". */
	.deprecated_name = "cortex_m3",

	.poll = cortex_m_poll,
	.arch_state = armv7m_arch_state,

	.target_request_data = cortex_m_target_request_data,

	.halt = cortex_m_halt,
	.resume = cortex_m_resume,
	.step = cortex_m_step,

	.assert_reset = cortex_m_assert_reset,
	.deassert_reset = cortex_m_deassert_reset,
	.soft_reset_halt = cortex_m_soft_reset_halt,

	.get_gdb_reg_list = armv7m_get_gdb_reg_list,

	.read_memory = cortex_m_read_memory,
	.write_memory = cortex_m_write_memory,
	.checksum_memory = armv7m_checksum_memory,
	.blank_check_memory = armv7m_blank_check_memory,

	.run_algorithm = armv7m_run_algorithm,
	.start_algorithm = armv7m_start_algorithm,
	.wait_algorithm = armv7m_wait_algorithm,

	.add_breakpoint = cortex_m_add_breakpoint,
	.remove_breakpoint = cortex_m_remove_breakpoint,
	.add_watchpoint = cortex_m_add_watchpoint,
	.remove_watchpoint = cortex_m_remove_watchpoint,

	.commands = cortex_m_command_handlers,
	.target_create = cortex_m_target_create,
	.init_target = cortex_m_init_target,
	.examine = cortex_m_examine,
};