cortex_m: set fpb_enabled on enabling fpb
[openocd.git] / src / target / cortex_m.c
1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2006 by Magnus Lundin *
6 * lundin@mlu.mine.nu *
7 * *
8 * Copyright (C) 2008 by Spencer Oliver *
9 * spen@spen-soft.co.uk *
10 * *
11 * This program is free software; you can redistribute it and/or modify *
12 * it under the terms of the GNU General Public License as published by *
13 * the Free Software Foundation; either version 2 of the License, or *
14 * (at your option) any later version. *
15 * *
16 * This program is distributed in the hope that it will be useful, *
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
19 * GNU General Public License for more details. *
20 * *
21 * You should have received a copy of the GNU General Public License *
22 * along with this program; if not, write to the *
23 * Free Software Foundation, Inc., *
24 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. *
25 * *
26 * *
27 * Cortex-M3(tm) TRM, ARM DDI 0337E (r1p1) and 0337G (r2p0) *
28 * *
29 ***************************************************************************/
30 #ifdef HAVE_CONFIG_H
31 #include "config.h"
32 #endif
33
34 #include "jtag/interface.h"
35 #include "breakpoints.h"
36 #include "cortex_m.h"
37 #include "target_request.h"
38 #include "target_type.h"
39 #include "arm_disassembler.h"
40 #include "register.h"
41 #include "arm_opcodes.h"
42 #include "arm_semihosting.h"
43 #include <helper/time_support.h>
44
45 /* NOTE: most of this should work fine for the Cortex-M1 and
46 * Cortex-M0 cores too, although they're ARMv6-M not ARMv7-M.
47  * Some differences: M0/M1 don't have FPB remapping or the
48 * DWT tracing/profiling support. (So the cycle counter will
49 * not be usable; the other stuff isn't currently used here.)
50 *
51 * Although there are some workarounds for errata seen only in r0p0
52 * silicon, such old parts are hard to find and thus not much tested
53 * any longer.
54 */
55
56 /**
57  * Returns the type of breakpoint required by the address location
58 */
59 #define BKPT_TYPE_BY_ADDR(addr) ((addr) < 0x20000000 ? BKPT_HARD : BKPT_SOFT)
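/* Addresses below 0x20000000 lie in the ARMv7-M Code region, where only the
 * FPB (flash patch) comparators can break; from 0x20000000 up is SRAM, where
 * a BKPT instruction can simply be patched in, hence BKPT_SOFT. */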
60
61 /* forward declarations */
62 static int cortex_m_store_core_reg_u32(struct target *target,
63 uint32_t num, uint32_t value);
64
65 static int cortexm_dap_read_coreregister_u32(struct adiv5_dap *swjdp,
66 uint32_t *value, int regnum)
67 {
68 int retval;
69 uint32_t dcrdr;
70
71 /* because the DCB_DCRDR is used for the emulated dcc channel
72 * we have to save/restore the DCB_DCRDR when used */
73
74 retval = mem_ap_read_u32(swjdp, DCB_DCRDR, &dcrdr);
75 if (retval != ERROR_OK)
76 return retval;
77
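	/* Access DCRSR and DCRDR via the MEM-AP banked data registers: the TAR is
	 * set to the 16-byte-aligned base and BD0..BD3 select the word within that
	 * window, so both accesses can be queued before a single dap_run(). */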
78 /* mem_ap_write_u32(swjdp, DCB_DCRSR, regnum); */
79 retval = dap_setup_accessport(swjdp, CSW_32BIT | CSW_ADDRINC_OFF, DCB_DCRSR & 0xFFFFFFF0);
80 if (retval != ERROR_OK)
81 return retval;
82 retval = dap_queue_ap_write(swjdp, AP_REG_BD0 | (DCB_DCRSR & 0xC), regnum);
83 if (retval != ERROR_OK)
84 return retval;
85
86 /* mem_ap_read_u32(swjdp, DCB_DCRDR, value); */
87 retval = dap_setup_accessport(swjdp, CSW_32BIT | CSW_ADDRINC_OFF, DCB_DCRDR & 0xFFFFFFF0);
88 if (retval != ERROR_OK)
89 return retval;
90 retval = dap_queue_ap_read(swjdp, AP_REG_BD0 | (DCB_DCRDR & 0xC), value);
91 if (retval != ERROR_OK)
92 return retval;
93
94 retval = dap_run(swjdp);
95 if (retval != ERROR_OK)
96 return retval;
97
98 	/* restore DCB_DCRDR - this needs to be in a separate
99 * transaction otherwise the emulated DCC channel breaks */
100 if (retval == ERROR_OK)
101 retval = mem_ap_write_atomic_u32(swjdp, DCB_DCRDR, dcrdr);
102
103 return retval;
104 }
105
106 static int cortexm_dap_write_coreregister_u32(struct adiv5_dap *swjdp,
107 uint32_t value, int regnum)
108 {
109 int retval;
110 uint32_t dcrdr;
111
112 /* because the DCB_DCRDR is used for the emulated dcc channel
113 * we have to save/restore the DCB_DCRDR when used */
114
115 retval = mem_ap_read_u32(swjdp, DCB_DCRDR, &dcrdr);
116 if (retval != ERROR_OK)
117 return retval;
118
119 /* mem_ap_write_u32(swjdp, DCB_DCRDR, core_regs[i]); */
120 retval = dap_setup_accessport(swjdp, CSW_32BIT | CSW_ADDRINC_OFF, DCB_DCRDR & 0xFFFFFFF0);
121 if (retval != ERROR_OK)
122 return retval;
123 retval = dap_queue_ap_write(swjdp, AP_REG_BD0 | (DCB_DCRDR & 0xC), value);
124 if (retval != ERROR_OK)
125 return retval;
126
127 /* mem_ap_write_u32(swjdp, DCB_DCRSR, i | DCRSR_WnR); */
128 retval = dap_setup_accessport(swjdp, CSW_32BIT | CSW_ADDRINC_OFF, DCB_DCRSR & 0xFFFFFFF0);
129 if (retval != ERROR_OK)
130 return retval;
131 retval = dap_queue_ap_write(swjdp, AP_REG_BD0 | (DCB_DCRSR & 0xC), regnum | DCRSR_WnR);
132 if (retval != ERROR_OK)
133 return retval;
134
135 retval = dap_run(swjdp);
136 if (retval != ERROR_OK)
137 return retval;
138
139 	/* restore DCB_DCRDR - this needs to be in a separate
140 * transaction otherwise the emulated DCC channel breaks */
141 if (retval == ERROR_OK)
142 retval = mem_ap_write_atomic_u32(swjdp, DCB_DCRDR, dcrdr);
143
144 return retval;
145 }
146
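/* Update the cached DHCSR value and write it back: bits in mask_off are cleared,
 * bits in mask_on are set, and C_DEBUGEN is always kept. The top halfword is
 * replaced by DBGKEY, which writes to DHCSR must carry in bits [31:16]. */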
147 static int cortex_m_write_debug_halt_mask(struct target *target,
148 uint32_t mask_on, uint32_t mask_off)
149 {
150 struct cortex_m_common *cortex_m = target_to_cm(target);
151 struct adiv5_dap *swjdp = cortex_m->armv7m.arm.dap;
152
153 /* mask off status bits */
154 cortex_m->dcb_dhcsr &= ~((0xFFFF << 16) | mask_off);
155 /* create new register mask */
156 cortex_m->dcb_dhcsr |= DBGKEY | C_DEBUGEN | mask_on;
157
158 return mem_ap_write_atomic_u32(swjdp, DCB_DHCSR, cortex_m->dcb_dhcsr);
159 }
160
161 static int cortex_m_clear_halt(struct target *target)
162 {
163 struct cortex_m_common *cortex_m = target_to_cm(target);
164 struct adiv5_dap *swjdp = cortex_m->armv7m.arm.dap;
165 int retval;
166
167 /* clear step if any */
168 cortex_m_write_debug_halt_mask(target, C_HALT, C_STEP);
169
170 /* Read Debug Fault Status Register */
171 retval = mem_ap_read_atomic_u32(swjdp, NVIC_DFSR, &cortex_m->nvic_dfsr);
172 if (retval != ERROR_OK)
173 return retval;
174
175 /* Clear Debug Fault Status */
176 retval = mem_ap_write_atomic_u32(swjdp, NVIC_DFSR, cortex_m->nvic_dfsr);
177 if (retval != ERROR_OK)
178 return retval;
179 LOG_DEBUG(" NVIC_DFSR 0x%" PRIx32 "", cortex_m->nvic_dfsr);
180
181 return ERROR_OK;
182 }
183
184 static int cortex_m_single_step_core(struct target *target)
185 {
186 struct cortex_m_common *cortex_m = target_to_cm(target);
187 struct adiv5_dap *swjdp = cortex_m->armv7m.arm.dap;
188 uint32_t dhcsr_save;
189 int retval;
190
191 /* backup dhcsr reg */
192 dhcsr_save = cortex_m->dcb_dhcsr;
193
194 	/* Mask interrupts before clearing halt, if not done already. This avoids
195 * Erratum 377497 (fixed in r1p0) where setting MASKINTS while clearing
196 * HALT can put the core into an unknown state.
197 */
198 if (!(cortex_m->dcb_dhcsr & C_MASKINTS)) {
199 retval = mem_ap_write_atomic_u32(swjdp, DCB_DHCSR,
200 DBGKEY | C_MASKINTS | C_HALT | C_DEBUGEN);
201 if (retval != ERROR_OK)
202 return retval;
203 }
204 retval = mem_ap_write_atomic_u32(swjdp, DCB_DHCSR,
205 DBGKEY | C_MASKINTS | C_STEP | C_DEBUGEN);
206 if (retval != ERROR_OK)
207 return retval;
208 LOG_DEBUG(" ");
209
210 /* restore dhcsr reg */
211 cortex_m->dcb_dhcsr = dhcsr_save;
212 cortex_m_clear_halt(target);
213
214 return ERROR_OK;
215 }
216
217 static int cortex_m_enable_fpb(struct target *target)
218 {
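	/* FP_CTRL bit 1 is the KEY bit (it must be written as 1 for the write to
	 * take effect) and bit 0 is ENABLE, so writing 3 switches the unit on. */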
219 int retval = target_write_u32(target, FP_CTRL, 3);
220 if (retval != ERROR_OK)
221 return retval;
222
223 /* check the fpb is actually enabled */
224 uint32_t fpctrl;
225 retval = target_read_u32(target, FP_CTRL, &fpctrl);
226 if (retval != ERROR_OK)
227 return retval;
228
229 if (fpctrl & 1)
230 return ERROR_OK;
231
232 return ERROR_FAIL;
233 }
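/* Note: callers are expected to set cortex_m->fpb_enabled themselves once this
 * returns ERROR_OK; see cortex_m_endreset_event() and cortex_m_set_breakpoint(). */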
234
235 static int cortex_m_endreset_event(struct target *target)
236 {
237 int i;
238 int retval;
239 uint32_t dcb_demcr;
240 struct cortex_m_common *cortex_m = target_to_cm(target);
241 struct armv7m_common *armv7m = &cortex_m->armv7m;
242 struct adiv5_dap *swjdp = cortex_m->armv7m.arm.dap;
243 struct cortex_m_fp_comparator *fp_list = cortex_m->fp_comparator_list;
244 struct cortex_m_dwt_comparator *dwt_list = cortex_m->dwt_comparator_list;
245
246 /* REVISIT The four debug monitor bits are currently ignored... */
247 retval = mem_ap_read_atomic_u32(swjdp, DCB_DEMCR, &dcb_demcr);
248 if (retval != ERROR_OK)
249 return retval;
250 LOG_DEBUG("DCB_DEMCR = 0x%8.8" PRIx32 "", dcb_demcr);
251
252 /* this register is used for emulated dcc channel */
253 retval = mem_ap_write_u32(swjdp, DCB_DCRDR, 0);
254 if (retval != ERROR_OK)
255 return retval;
256
257 /* Enable debug requests */
258 retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &cortex_m->dcb_dhcsr);
259 if (retval != ERROR_OK)
260 return retval;
261 if (!(cortex_m->dcb_dhcsr & C_DEBUGEN)) {
262 retval = mem_ap_write_u32(swjdp, DCB_DHCSR, DBGKEY | C_DEBUGEN);
263 if (retval != ERROR_OK)
264 return retval;
265 }
266
267 /* clear any interrupt masking */
268 cortex_m_write_debug_halt_mask(target, 0, C_MASKINTS);
269
270 /* Enable features controlled by ITM and DWT blocks, and catch only
271 * the vectors we were told to pay attention to.
272 *
273 * Target firmware is responsible for all fault handling policy
274 * choices *EXCEPT* explicitly scripted overrides like "vector_catch"
275 * or manual updates to the NVIC SHCSR and CCR registers.
276 */
277 retval = mem_ap_write_u32(swjdp, DCB_DEMCR, TRCENA | armv7m->demcr);
278 if (retval != ERROR_OK)
279 return retval;
280
281 /* Paranoia: evidently some (early?) chips don't preserve all the
282 	 * debug state (including FPB, DWT, etc) across reset...
283 */
284
285 /* Enable FPB */
286 retval = cortex_m_enable_fpb(target);
287 if (retval != ERROR_OK) {
288 LOG_ERROR("Failed to enable the FPB");
289 return retval;
290 }
291
292 cortex_m->fpb_enabled = 1;
293
294 /* Restore FPB registers */
295 for (i = 0; i < cortex_m->fp_num_code + cortex_m->fp_num_lit; i++) {
296 retval = target_write_u32(target, fp_list[i].fpcr_address, fp_list[i].fpcr_value);
297 if (retval != ERROR_OK)
298 return retval;
299 }
300
301 /* Restore DWT registers */
302 for (i = 0; i < cortex_m->dwt_num_comp; i++) {
303 retval = target_write_u32(target, dwt_list[i].dwt_comparator_address + 0,
304 dwt_list[i].comp);
305 if (retval != ERROR_OK)
306 return retval;
307 retval = target_write_u32(target, dwt_list[i].dwt_comparator_address + 4,
308 dwt_list[i].mask);
309 if (retval != ERROR_OK)
310 return retval;
311 retval = target_write_u32(target, dwt_list[i].dwt_comparator_address + 8,
312 dwt_list[i].function);
313 if (retval != ERROR_OK)
314 return retval;
315 }
316 retval = dap_run(swjdp);
317 if (retval != ERROR_OK)
318 return retval;
319
320 register_cache_invalidate(armv7m->arm.core_cache);
321
322 /* make sure we have latest dhcsr flags */
323 retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &cortex_m->dcb_dhcsr);
324
325 return retval;
326 }
327
328 static int cortex_m_examine_debug_reason(struct target *target)
329 {
330 struct cortex_m_common *cortex_m = target_to_cm(target);
331
332 	/* THIS IS NOT GOOD, TODO - better logic for detection of debug state reason.
333 	 * Only check the debug reason if we don't know it already. */
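	/* DFSR flags: BKPT = breakpoint unit or BKPT instruction, DWTTRAP = DWT
	 * watchpoint match, VCATCH = vector catch, HALTED = C_HALT/C_STEP request,
	 * EXTERNAL = external debug request (EDBGRQ). */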
334
335 if ((target->debug_reason != DBG_REASON_DBGRQ)
336 && (target->debug_reason != DBG_REASON_SINGLESTEP)) {
337 if (cortex_m->nvic_dfsr & DFSR_BKPT) {
338 target->debug_reason = DBG_REASON_BREAKPOINT;
339 if (cortex_m->nvic_dfsr & DFSR_DWTTRAP)
340 target->debug_reason = DBG_REASON_WPTANDBKPT;
341 } else if (cortex_m->nvic_dfsr & DFSR_DWTTRAP)
342 target->debug_reason = DBG_REASON_WATCHPOINT;
343 else if (cortex_m->nvic_dfsr & DFSR_VCATCH)
344 target->debug_reason = DBG_REASON_BREAKPOINT;
345 else /* EXTERNAL, HALTED */
346 target->debug_reason = DBG_REASON_UNDEFINED;
347 }
348
349 return ERROR_OK;
350 }
351
352 static int cortex_m_examine_exception_reason(struct target *target)
353 {
354 uint32_t shcsr = 0, except_sr = 0, cfsr = -1, except_ar = -1;
355 struct armv7m_common *armv7m = target_to_armv7m(target);
356 struct adiv5_dap *swjdp = armv7m->arm.dap;
357 int retval;
358
359 retval = mem_ap_read_u32(swjdp, NVIC_SHCSR, &shcsr);
360 if (retval != ERROR_OK)
361 return retval;
362 switch (armv7m->exception_number) {
363 case 2: /* NMI */
364 break;
365 case 3: /* Hard Fault */
366 retval = mem_ap_read_atomic_u32(swjdp, NVIC_HFSR, &except_sr);
367 if (retval != ERROR_OK)
368 return retval;
369 if (except_sr & 0x40000000) {
370 retval = mem_ap_read_u32(swjdp, NVIC_CFSR, &cfsr);
371 if (retval != ERROR_OK)
372 return retval;
373 }
374 break;
375 case 4: /* Memory Management */
376 retval = mem_ap_read_u32(swjdp, NVIC_CFSR, &except_sr);
377 if (retval != ERROR_OK)
378 return retval;
379 retval = mem_ap_read_u32(swjdp, NVIC_MMFAR, &except_ar);
380 if (retval != ERROR_OK)
381 return retval;
382 break;
383 case 5: /* Bus Fault */
384 retval = mem_ap_read_u32(swjdp, NVIC_CFSR, &except_sr);
385 if (retval != ERROR_OK)
386 return retval;
387 retval = mem_ap_read_u32(swjdp, NVIC_BFAR, &except_ar);
388 if (retval != ERROR_OK)
389 return retval;
390 break;
391 case 6: /* Usage Fault */
392 retval = mem_ap_read_u32(swjdp, NVIC_CFSR, &except_sr);
393 if (retval != ERROR_OK)
394 return retval;
395 break;
396 case 11: /* SVCall */
397 break;
398 case 12: /* Debug Monitor */
399 retval = mem_ap_read_u32(swjdp, NVIC_DFSR, &except_sr);
400 if (retval != ERROR_OK)
401 return retval;
402 break;
403 case 14: /* PendSV */
404 break;
405 case 15: /* SysTick */
406 break;
407 default:
408 except_sr = 0;
409 break;
410 }
411 retval = dap_run(swjdp);
412 if (retval == ERROR_OK)
413 LOG_DEBUG("%s SHCSR 0x%" PRIx32 ", SR 0x%" PRIx32
414 ", CFSR 0x%" PRIx32 ", AR 0x%" PRIx32,
415 armv7m_exception_string(armv7m->exception_number),
416 shcsr, except_sr, cfsr, except_ar);
417 return retval;
418 }
419
420 static int cortex_m_debug_entry(struct target *target)
421 {
422 int i;
423 uint32_t xPSR;
424 int retval;
425 struct cortex_m_common *cortex_m = target_to_cm(target);
426 struct armv7m_common *armv7m = &cortex_m->armv7m;
427 struct arm *arm = &armv7m->arm;
428 struct adiv5_dap *swjdp = armv7m->arm.dap;
429 struct reg *r;
430
431 LOG_DEBUG(" ");
432
433 cortex_m_clear_halt(target);
434 retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &cortex_m->dcb_dhcsr);
435 if (retval != ERROR_OK)
436 return retval;
437
438 retval = armv7m->examine_debug_reason(target);
439 if (retval != ERROR_OK)
440 return retval;
441
442 /* Examine target state and mode
443 	 * First load registers accessible through the core debug port */
444 int num_regs = arm->core_cache->num_regs;
445
446 for (i = 0; i < num_regs; i++) {
447 r = &armv7m->arm.core_cache->reg_list[i];
448 if (!r->valid)
449 arm->read_core_reg(target, r, i, ARM_MODE_ANY);
450 }
451
452 r = arm->cpsr;
453 xPSR = buf_get_u32(r->value, 0, 32);
454
455 	/* For IT instructions, xPSR must be reloaded on resume and cleared for debug execution */
456 if (xPSR & 0xf00) {
457 r->dirty = r->valid;
458 cortex_m_store_core_reg_u32(target, 16, xPSR & ~0xff);
459 }
460
461 /* Are we in an exception handler */
462 if (xPSR & 0x1FF) {
463 armv7m->exception_number = (xPSR & 0x1FF);
464
465 arm->core_mode = ARM_MODE_HANDLER;
466 arm->map = armv7m_msp_reg_map;
467 } else {
468 unsigned control = buf_get_u32(arm->core_cache
469 ->reg_list[ARMV7M_CONTROL].value, 0, 2);
470
471 /* is this thread privileged? */
472 arm->core_mode = control & 1
473 ? ARM_MODE_USER_THREAD
474 : ARM_MODE_THREAD;
475
476 /* which stack is it using? */
477 if (control & 2)
478 arm->map = armv7m_psp_reg_map;
479 else
480 arm->map = armv7m_msp_reg_map;
481
482 armv7m->exception_number = 0;
483 }
484
485 if (armv7m->exception_number)
486 cortex_m_examine_exception_reason(target);
487
488 LOG_DEBUG("entered debug state in core mode: %s at PC 0x%" PRIx32 ", target->state: %s",
489 arm_mode_name(arm->core_mode),
490 *(uint32_t *)(arm->pc->value),
491 target_state_name(target));
492
493 if (armv7m->post_debug_entry) {
494 retval = armv7m->post_debug_entry(target);
495 if (retval != ERROR_OK)
496 return retval;
497 }
498
499 return ERROR_OK;
500 }
501
502 static int cortex_m_poll(struct target *target)
503 {
504 int detected_failure = ERROR_OK;
505 int retval = ERROR_OK;
506 enum target_state prev_target_state = target->state;
507 struct cortex_m_common *cortex_m = target_to_cm(target);
508 struct adiv5_dap *swjdp = cortex_m->armv7m.arm.dap;
509
510 /* Read from Debug Halting Control and Status Register */
511 retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &cortex_m->dcb_dhcsr);
512 if (retval != ERROR_OK) {
513 target->state = TARGET_UNKNOWN;
514 return retval;
515 }
516
517 /* Recover from lockup. See ARMv7-M architecture spec,
518 * section B1.5.15 "Unrecoverable exception cases".
519 */
520 if (cortex_m->dcb_dhcsr & S_LOCKUP) {
521 LOG_ERROR("%s -- clearing lockup after double fault",
522 target_name(target));
523 cortex_m_write_debug_halt_mask(target, C_HALT, 0);
524 target->debug_reason = DBG_REASON_DBGRQ;
525
526 /* We have to execute the rest (the "finally" equivalent, but
527 * still throw this exception again).
528 */
529 detected_failure = ERROR_FAIL;
530
531 /* refresh status bits */
532 retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &cortex_m->dcb_dhcsr);
533 if (retval != ERROR_OK)
534 return retval;
535 }
536
537 if (cortex_m->dcb_dhcsr & S_RESET_ST) {
538 /* check if still in reset */
539 retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &cortex_m->dcb_dhcsr);
540 if (retval != ERROR_OK)
541 return retval;
542
543 if (cortex_m->dcb_dhcsr & S_RESET_ST) {
544 target->state = TARGET_RESET;
545 return ERROR_OK;
546 }
547 }
548
549 if (target->state == TARGET_RESET) {
550 		/* Cannot switch context while running, so endreset is
551 * called with target->state == TARGET_RESET
552 */
553 LOG_DEBUG("Exit from reset with dcb_dhcsr 0x%" PRIx32,
554 cortex_m->dcb_dhcsr);
555 cortex_m_endreset_event(target);
556 target->state = TARGET_RUNNING;
557 prev_target_state = TARGET_RUNNING;
558 }
559
560 if (cortex_m->dcb_dhcsr & S_HALT) {
561 target->state = TARGET_HALTED;
562
563 if ((prev_target_state == TARGET_RUNNING) || (prev_target_state == TARGET_RESET)) {
564 retval = cortex_m_debug_entry(target);
565 if (retval != ERROR_OK)
566 return retval;
567
568 if (arm_semihosting(target, &retval) != 0)
569 return retval;
570
571 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
572 }
573 if (prev_target_state == TARGET_DEBUG_RUNNING) {
574 LOG_DEBUG(" ");
575 retval = cortex_m_debug_entry(target);
576 if (retval != ERROR_OK)
577 return retval;
578
579 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
580 }
581 }
582
583 /* REVISIT when S_SLEEP is set, it's in a Sleep or DeepSleep state.
584 * How best to model low power modes?
585 */
586
587 if (target->state == TARGET_UNKNOWN) {
588 /* check if processor is retiring instructions */
589 if (cortex_m->dcb_dhcsr & S_RETIRE_ST) {
590 target->state = TARGET_RUNNING;
591 retval = ERROR_OK;
592 }
593 }
594
595 /* Did we detect a failure condition that we cleared? */
596 if (detected_failure != ERROR_OK)
597 retval = detected_failure;
598 return retval;
599 }
600
601 static int cortex_m_halt(struct target *target)
602 {
603 LOG_DEBUG("target->state: %s",
604 target_state_name(target));
605
606 if (target->state == TARGET_HALTED) {
607 LOG_DEBUG("target was already halted");
608 return ERROR_OK;
609 }
610
611 if (target->state == TARGET_UNKNOWN)
612 LOG_WARNING("target was in unknown state when halt was requested");
613
614 if (target->state == TARGET_RESET) {
615 if ((jtag_get_reset_config() & RESET_SRST_PULLS_TRST) && jtag_get_srst()) {
616 LOG_ERROR("can't request a halt while in reset if nSRST pulls nTRST");
617 return ERROR_TARGET_FAILURE;
618 } else {
619 /* we came here in a reset_halt or reset_init sequence
620 			 * debug entry was already prepared in cortex_m_assert_reset()
621 */
622 target->debug_reason = DBG_REASON_DBGRQ;
623
624 return ERROR_OK;
625 }
626 }
627
628 /* Write to Debug Halting Control and Status Register */
629 cortex_m_write_debug_halt_mask(target, C_HALT, 0);
630
631 target->debug_reason = DBG_REASON_DBGRQ;
632
633 return ERROR_OK;
634 }
635
636 static int cortex_m_soft_reset_halt(struct target *target)
637 {
638 struct cortex_m_common *cortex_m = target_to_cm(target);
639 struct adiv5_dap *swjdp = cortex_m->armv7m.arm.dap;
640 uint32_t dcb_dhcsr = 0;
641 int retval, timeout = 0;
642
643 	/* soft_reset_halt is deprecated on cortex_m as the same functionality
644 	 * can be obtained by using 'reset halt' and 'cortex_m reset_config vectreset'.
645 	 * As this reset only uses VC_CORERESET it would only ever reset the cortex_m
646 	 * core, not the peripherals */
647 LOG_WARNING("soft_reset_halt is deprecated, please use 'reset halt' instead.");
648
649 /* Enter debug state on reset; restore DEMCR in endreset_event() */
650 retval = mem_ap_write_u32(swjdp, DCB_DEMCR,
651 TRCENA | VC_HARDERR | VC_BUSERR | VC_CORERESET);
652 if (retval != ERROR_OK)
653 return retval;
654
655 /* Request a core-only reset */
656 retval = mem_ap_write_atomic_u32(swjdp, NVIC_AIRCR,
657 AIRCR_VECTKEY | AIRCR_VECTRESET);
658 if (retval != ERROR_OK)
659 return retval;
660 target->state = TARGET_RESET;
661
662 /* registers are now invalid */
663 register_cache_invalidate(cortex_m->armv7m.arm.core_cache);
664
665 while (timeout < 100) {
666 retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &dcb_dhcsr);
667 if (retval == ERROR_OK) {
668 retval = mem_ap_read_atomic_u32(swjdp, NVIC_DFSR,
669 &cortex_m->nvic_dfsr);
670 if (retval != ERROR_OK)
671 return retval;
672 if ((dcb_dhcsr & S_HALT)
673 && (cortex_m->nvic_dfsr & DFSR_VCATCH)) {
674 LOG_DEBUG("system reset-halted, DHCSR 0x%08x, "
675 "DFSR 0x%08x",
676 (unsigned) dcb_dhcsr,
677 (unsigned) cortex_m->nvic_dfsr);
678 cortex_m_poll(target);
679 /* FIXME restore user's vector catch config */
680 return ERROR_OK;
681 } else
682 LOG_DEBUG("waiting for system reset-halt, "
683 "DHCSR 0x%08x, %d ms",
684 (unsigned) dcb_dhcsr, timeout);
685 }
686 timeout++;
687 alive_sleep(1);
688 }
689
690 return ERROR_OK;
691 }
692
693 void cortex_m_enable_breakpoints(struct target *target)
694 {
695 struct breakpoint *breakpoint = target->breakpoints;
696
697 /* set any pending breakpoints */
698 while (breakpoint) {
699 if (!breakpoint->set)
700 cortex_m_set_breakpoint(target, breakpoint);
701 breakpoint = breakpoint->next;
702 }
703 }
704
705 static int cortex_m_resume(struct target *target, int current,
706 uint32_t address, int handle_breakpoints, int debug_execution)
707 {
708 struct armv7m_common *armv7m = target_to_armv7m(target);
709 struct breakpoint *breakpoint = NULL;
710 uint32_t resume_pc;
711 struct reg *r;
712
713 if (target->state != TARGET_HALTED) {
714 LOG_WARNING("target not halted");
715 return ERROR_TARGET_NOT_HALTED;
716 }
717
718 if (!debug_execution) {
719 target_free_all_working_areas(target);
720 cortex_m_enable_breakpoints(target);
721 cortex_m_enable_watchpoints(target);
722 }
723
724 if (debug_execution) {
725 r = armv7m->arm.core_cache->reg_list + ARMV7M_PRIMASK;
726
727 /* Disable interrupts */
728 /* We disable interrupts in the PRIMASK register instead of
729 * masking with C_MASKINTS. This is probably the same issue
730 * as Cortex-M3 Erratum 377493 (fixed in r1p0): C_MASKINTS
731 * in parallel with disabled interrupts can cause local faults
732 * to not be taken.
733 *
734 * REVISIT this clearly breaks non-debug execution, since the
735 * PRIMASK register state isn't saved/restored... workaround
736 * by never resuming app code after debug execution.
737 */
738 buf_set_u32(r->value, 0, 1, 1);
739 r->dirty = true;
740 r->valid = true;
741
742 /* Make sure we are in Thumb mode */
743 r = armv7m->arm.cpsr;
744 buf_set_u32(r->value, 24, 1, 1);
745 r->dirty = true;
746 r->valid = true;
747 }
748
749 /* current = 1: continue on current pc, otherwise continue at <address> */
750 r = armv7m->arm.pc;
751 if (!current) {
752 buf_set_u32(r->value, 0, 32, address);
753 r->dirty = true;
754 r->valid = true;
755 }
756
757 /* if we halted last time due to a bkpt instruction
758 * then we have to manually step over it, otherwise
759 * the core will break again */
760
761 if (!breakpoint_find(target, buf_get_u32(r->value, 0, 32))
762 && !debug_execution)
763 armv7m_maybe_skip_bkpt_inst(target, NULL);
764
765 resume_pc = buf_get_u32(r->value, 0, 32);
766
767 armv7m_restore_context(target);
768
769 /* the front-end may request us not to handle breakpoints */
770 if (handle_breakpoints) {
771 /* Single step past breakpoint at current address */
772 breakpoint = breakpoint_find(target, resume_pc);
773 if (breakpoint) {
774 LOG_DEBUG("unset breakpoint at 0x%8.8" PRIx32 " (ID: %d)",
775 breakpoint->address,
776 breakpoint->unique_id);
777 cortex_m_unset_breakpoint(target, breakpoint);
778 cortex_m_single_step_core(target);
779 cortex_m_set_breakpoint(target, breakpoint);
780 }
781 }
782
783 /* Restart core */
784 cortex_m_write_debug_halt_mask(target, 0, C_HALT);
785
786 target->debug_reason = DBG_REASON_NOTHALTED;
787
788 /* registers are now invalid */
789 register_cache_invalidate(armv7m->arm.core_cache);
790
791 if (!debug_execution) {
792 target->state = TARGET_RUNNING;
793 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
794 LOG_DEBUG("target resumed at 0x%" PRIx32 "", resume_pc);
795 } else {
796 target->state = TARGET_DEBUG_RUNNING;
797 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
798 LOG_DEBUG("target debug resumed at 0x%" PRIx32 "", resume_pc);
799 }
800
801 return ERROR_OK;
802 }
803
804 /* int irqstepcount = 0; */
805 static int cortex_m_step(struct target *target, int current,
806 uint32_t address, int handle_breakpoints)
807 {
808 struct cortex_m_common *cortex_m = target_to_cm(target);
809 struct armv7m_common *armv7m = &cortex_m->armv7m;
810 struct adiv5_dap *swjdp = armv7m->arm.dap;
811 struct breakpoint *breakpoint = NULL;
812 struct reg *pc = armv7m->arm.pc;
813 bool bkpt_inst_found = false;
814 int retval;
815 bool isr_timed_out = false;
816
817 if (target->state != TARGET_HALTED) {
818 LOG_WARNING("target not halted");
819 return ERROR_TARGET_NOT_HALTED;
820 }
821
822 /* current = 1: continue on current pc, otherwise continue at <address> */
823 if (!current)
824 buf_set_u32(pc->value, 0, 32, address);
825
826 uint32_t pc_value = buf_get_u32(pc->value, 0, 32);
827
828 /* the front-end may request us not to handle breakpoints */
829 if (handle_breakpoints) {
830 breakpoint = breakpoint_find(target, pc_value);
831 if (breakpoint)
832 cortex_m_unset_breakpoint(target, breakpoint);
833 }
834
835 armv7m_maybe_skip_bkpt_inst(target, &bkpt_inst_found);
836
837 target->debug_reason = DBG_REASON_SINGLESTEP;
838
839 armv7m_restore_context(target);
840
841 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
842
843 /* if no bkpt instruction is found at pc then we can perform
844 * a normal step, otherwise we have to manually step over the bkpt
845 * instruction - as such simulate a step */
846 if (bkpt_inst_found == false) {
847 /* Automatic ISR masking mode off: Just step over the next instruction */
848 if ((cortex_m->isrmasking_mode != CORTEX_M_ISRMASK_AUTO))
849 cortex_m_write_debug_halt_mask(target, C_STEP, C_HALT);
850 else {
851 			 * Process interrupts during stepping in a way that they don't interfere
852 			 * with debugging.
853 *
854 * Principle:
855 *
856 * Set a temporary break point at the current pc and let the core run
857 * with interrupts enabled. Pending interrupts get served and we run
858 * into the breakpoint again afterwards. Then we step over the next
859 * instruction with interrupts disabled.
860 *
861 * If the pending interrupts don't complete within time, we leave the
862 * core running. This may happen if the interrupts trigger faster
863 * than the core can process them or the handler doesn't return.
864 *
865 * If no more breakpoints are available we simply do a step with
866 * interrupts enabled.
867 *
868 */
869
870 /* 2012-09-29 ph
871 *
872 			 * If a breakpoint is already set on the lower halfword then a breakpoint on
873 			 * the upper halfword will not break again when the core is restarted. So we
874 * just step over the instruction with interrupts disabled.
875 *
876 * The documentation has no information about this, it was found by observation
877 			 * on STM32F1 and STM32F2. Proper explanation welcome. STM32F0 doesn't seem to
878 * suffer from this problem.
879 *
880 * To add some confusion: pc_value has bit 0 always set, while the breakpoint
881 * address has it always cleared. The former is done to indicate thumb mode
882 * to gdb.
883 *
884 */
885 if ((pc_value & 0x02) && breakpoint_find(target, pc_value & ~0x03)) {
886 LOG_DEBUG("Stepping over next instruction with interrupts disabled");
887 cortex_m_write_debug_halt_mask(target, C_HALT | C_MASKINTS, 0);
888 cortex_m_write_debug_halt_mask(target, C_STEP, C_HALT);
889 /* Re-enable interrupts */
890 cortex_m_write_debug_halt_mask(target, C_HALT, C_MASKINTS);
891 }
892 else {
893
894 /* Set a temporary break point */
895 if (breakpoint)
896 retval = cortex_m_set_breakpoint(target, breakpoint);
897 else
898 retval = breakpoint_add(target, pc_value, 2, BKPT_TYPE_BY_ADDR(pc_value));
899 bool tmp_bp_set = (retval == ERROR_OK);
900
901 /* No more breakpoints left, just do a step */
902 if (!tmp_bp_set)
903 cortex_m_write_debug_halt_mask(target, C_STEP, C_HALT);
904 else {
905 /* Start the core */
906 LOG_DEBUG("Starting core to serve pending interrupts");
907 int64_t t_start = timeval_ms();
908 cortex_m_write_debug_halt_mask(target, 0, C_HALT | C_STEP);
909
910 /* Wait for pending handlers to complete or timeout */
911 do {
912 retval = mem_ap_read_atomic_u32(swjdp,
913 DCB_DHCSR,
914 &cortex_m->dcb_dhcsr);
915 if (retval != ERROR_OK) {
916 target->state = TARGET_UNKNOWN;
917 return retval;
918 }
919 isr_timed_out = ((timeval_ms() - t_start) > 500);
920 } while (!((cortex_m->dcb_dhcsr & S_HALT) || isr_timed_out));
921
922 /* only remove breakpoint if we created it */
923 if (breakpoint)
924 cortex_m_unset_breakpoint(target, breakpoint);
925 else {
926 /* Remove the temporary breakpoint */
927 breakpoint_remove(target, pc_value);
928 }
929
930 if (isr_timed_out) {
931 LOG_DEBUG("Interrupt handlers didn't complete within time, "
932 "leaving target running");
933 } else {
934 /* Step over next instruction with interrupts disabled */
935 cortex_m_write_debug_halt_mask(target,
936 C_HALT | C_MASKINTS,
937 0);
938 cortex_m_write_debug_halt_mask(target, C_STEP, C_HALT);
939 /* Re-enable interrupts */
940 cortex_m_write_debug_halt_mask(target, C_HALT, C_MASKINTS);
941 }
942 }
943 }
944 }
945 }
946
947 retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &cortex_m->dcb_dhcsr);
948 if (retval != ERROR_OK)
949 return retval;
950
951 /* registers are now invalid */
952 register_cache_invalidate(armv7m->arm.core_cache);
953
954 if (breakpoint)
955 cortex_m_set_breakpoint(target, breakpoint);
956
957 if (isr_timed_out) {
958 /* Leave the core running. The user has to stop execution manually. */
959 target->debug_reason = DBG_REASON_NOTHALTED;
960 target->state = TARGET_RUNNING;
961 return ERROR_OK;
962 }
963
964 LOG_DEBUG("target stepped dcb_dhcsr = 0x%" PRIx32
965 " nvic_icsr = 0x%" PRIx32,
966 cortex_m->dcb_dhcsr, cortex_m->nvic_icsr);
967
968 retval = cortex_m_debug_entry(target);
969 if (retval != ERROR_OK)
970 return retval;
971 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
972
973 LOG_DEBUG("target stepped dcb_dhcsr = 0x%" PRIx32
974 " nvic_icsr = 0x%" PRIx32,
975 cortex_m->dcb_dhcsr, cortex_m->nvic_icsr);
976
977 return ERROR_OK;
978 }
979
980 static int cortex_m_assert_reset(struct target *target)
981 {
982 struct cortex_m_common *cortex_m = target_to_cm(target);
983 struct adiv5_dap *swjdp = cortex_m->armv7m.arm.dap;
984 enum cortex_m_soft_reset_config reset_config = cortex_m->soft_reset_config;
985
986 LOG_DEBUG("target->state: %s",
987 target_state_name(target));
988
989 enum reset_types jtag_reset_config = jtag_get_reset_config();
990
991 if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT)) {
992 /* allow scripts to override the reset event */
993
994 target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
995 register_cache_invalidate(cortex_m->armv7m.arm.core_cache);
996 target->state = TARGET_RESET;
997
998 return ERROR_OK;
999 }
1000
1001 /* some cores support connecting while srst is asserted
1002 	 * use that mode if it has been configured */
1003
1004 bool srst_asserted = false;
1005
1006 if ((jtag_reset_config & RESET_HAS_SRST) &&
1007 (jtag_reset_config & RESET_SRST_NO_GATING)) {
1008 adapter_assert_reset();
1009 srst_asserted = true;
1010 }
1011
1012 /* Enable debug requests */
1013 int retval;
1014 retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &cortex_m->dcb_dhcsr);
1015 if (retval != ERROR_OK)
1016 return retval;
1017 if (!(cortex_m->dcb_dhcsr & C_DEBUGEN)) {
1018 retval = mem_ap_write_u32(swjdp, DCB_DHCSR, DBGKEY | C_DEBUGEN);
1019 if (retval != ERROR_OK)
1020 return retval;
1021 }
1022
1023 /* If the processor is sleeping in a WFI or WFE instruction, the
1024 * C_HALT bit must be asserted to regain control */
1025 if (cortex_m->dcb_dhcsr & S_SLEEP) {
1026 retval = mem_ap_write_u32(swjdp, DCB_DHCSR, DBGKEY | C_HALT | C_DEBUGEN);
1027 if (retval != ERROR_OK)
1028 return retval;
1029 }
1030
1031 retval = mem_ap_write_u32(swjdp, DCB_DCRDR, 0);
1032 if (retval != ERROR_OK)
1033 return retval;
1034
1035 if (!target->reset_halt) {
1036 /* Set/Clear C_MASKINTS in a separate operation */
1037 if (cortex_m->dcb_dhcsr & C_MASKINTS) {
1038 retval = mem_ap_write_atomic_u32(swjdp, DCB_DHCSR,
1039 DBGKEY | C_DEBUGEN | C_HALT);
1040 if (retval != ERROR_OK)
1041 return retval;
1042 }
1043
1044 /* clear any debug flags before resuming */
1045 cortex_m_clear_halt(target);
1046
1047 /* clear C_HALT in dhcsr reg */
1048 cortex_m_write_debug_halt_mask(target, 0, C_HALT);
1049 } else {
1050 /* Halt in debug on reset; endreset_event() restores DEMCR.
1051 *
1052 * REVISIT catching BUSERR presumably helps to defend against
1053 * bad vector table entries. Should this include MMERR or
1054 * other flags too?
1055 */
1056 retval = mem_ap_write_atomic_u32(swjdp, DCB_DEMCR,
1057 TRCENA | VC_HARDERR | VC_BUSERR | VC_CORERESET);
1058 if (retval != ERROR_OK)
1059 return retval;
1060 }
1061
1062 if (jtag_reset_config & RESET_HAS_SRST) {
1063 /* default to asserting srst */
1064 if (!srst_asserted)
1065 adapter_assert_reset();
1066 } else {
1067 /* Use a standard Cortex-M3 software reset mechanism.
1068 		 * We default to using VECTRESET as it is supported on all current cores.
1069 * This has the disadvantage of not resetting the peripherals, so a
1070 * reset-init event handler is needed to perform any peripheral resets.
1071 */
1072 retval = mem_ap_write_atomic_u32(swjdp, NVIC_AIRCR,
1073 AIRCR_VECTKEY | ((reset_config == CORTEX_M_RESET_SYSRESETREQ)
1074 ? AIRCR_SYSRESETREQ : AIRCR_VECTRESET));
1075 if (retval != ERROR_OK)
1076 return retval;
1077
1078 LOG_DEBUG("Using Cortex-M %s", (reset_config == CORTEX_M_RESET_SYSRESETREQ)
1079 ? "SYSRESETREQ" : "VECTRESET");
1080
1081 if (reset_config == CORTEX_M_RESET_VECTRESET) {
1082 LOG_WARNING("Only resetting the Cortex-M core, use a reset-init event "
1083 "handler to reset any peripherals or configure hardware srst support.");
1084 }
1085
1086 {
1087 /* I do not know why this is necessary, but it
1088 * fixes strange effects (step/resume cause NMI
1089 * after reset) on LM3S6918 -- Michael Schwingen
1090 */
1091 uint32_t tmp;
1092 retval = mem_ap_read_atomic_u32(swjdp, NVIC_AIRCR, &tmp);
1093 if (retval != ERROR_OK)
1094 return retval;
1095 }
1096 }
1097
1098 target->state = TARGET_RESET;
1099 jtag_add_sleep(50000);
1100
1101 register_cache_invalidate(cortex_m->armv7m.arm.core_cache);
1102
1103 if (target->reset_halt) {
1104 retval = target_halt(target);
1105 if (retval != ERROR_OK)
1106 return retval;
1107 }
1108
1109 return ERROR_OK;
1110 }
1111
1112 static int cortex_m_deassert_reset(struct target *target)
1113 {
1114 LOG_DEBUG("target->state: %s",
1115 target_state_name(target));
1116
1117 /* deassert reset lines */
1118 adapter_deassert_reset();
1119
1120 return ERROR_OK;
1121 }
1122
1123 int cortex_m_set_breakpoint(struct target *target, struct breakpoint *breakpoint)
1124 {
1125 int retval;
1126 int fp_num = 0;
1127 uint32_t hilo;
1128 struct cortex_m_common *cortex_m = target_to_cm(target);
1129 struct cortex_m_fp_comparator *comparator_list = cortex_m->fp_comparator_list;
1130
1131 if (breakpoint->set) {
1132 LOG_WARNING("breakpoint (BPID: %d) already set", breakpoint->unique_id);
1133 return ERROR_OK;
1134 }
1135
1136 if (cortex_m->auto_bp_type)
1137 breakpoint->type = BKPT_TYPE_BY_ADDR(breakpoint->address);
1138
1139 if (breakpoint->type == BKPT_HARD) {
1140 		while ((fp_num < cortex_m->fp_num_code) && comparator_list[fp_num].used)
1141 fp_num++;
1142 if (fp_num >= cortex_m->fp_num_code) {
1143 LOG_ERROR("Can not find free FPB Comparator!");
1144 return ERROR_FAIL;
1145 }
1146 breakpoint->set = fp_num + 1;
1147 hilo = (breakpoint->address & 0x2) ? FPCR_REPLACE_BKPT_HIGH : FPCR_REPLACE_BKPT_LOW;
1148 comparator_list[fp_num].used = 1;
1149 comparator_list[fp_num].fpcr_value = (breakpoint->address & 0x1FFFFFFC) | hilo | 1;
1150 target_write_u32(target, comparator_list[fp_num].fpcr_address,
1151 comparator_list[fp_num].fpcr_value);
1152 LOG_DEBUG("fpc_num %i fpcr_value 0x%" PRIx32 "",
1153 fp_num,
1154 comparator_list[fp_num].fpcr_value);
1155 if (!cortex_m->fpb_enabled) {
1156 LOG_DEBUG("FPB wasn't enabled, do it now");
1157 retval = cortex_m_enable_fpb(target);
1158 if (retval != ERROR_OK) {
1159 LOG_ERROR("Failed to enable the FPB");
1160 return retval;
1161 }
1162
1163 cortex_m->fpb_enabled = 1;
1164 }
1165 } else if (breakpoint->type == BKPT_SOFT) {
1166 uint8_t code[4];
1167
1168 /* NOTE: on ARMv6-M and ARMv7-M, BKPT(0xab) is used for
1169 * semihosting; don't use that. Otherwise the BKPT
1170 * parameter is arbitrary.
1171 */
1172 buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1173 retval = target_read_memory(target,
1174 breakpoint->address & 0xFFFFFFFE,
1175 breakpoint->length, 1,
1176 breakpoint->orig_instr);
1177 if (retval != ERROR_OK)
1178 return retval;
1179 retval = target_write_memory(target,
1180 breakpoint->address & 0xFFFFFFFE,
1181 breakpoint->length, 1,
1182 code);
1183 if (retval != ERROR_OK)
1184 return retval;
1185 breakpoint->set = true;
1186 }
1187
1188 LOG_DEBUG("BPID: %d, Type: %d, Address: 0x%08" PRIx32 " Length: %d (set=%d)",
1189 breakpoint->unique_id,
1190 (int)(breakpoint->type),
1191 breakpoint->address,
1192 breakpoint->length,
1193 breakpoint->set);
1194
1195 return ERROR_OK;
1196 }
1197
1198 int cortex_m_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
1199 {
1200 int retval;
1201 struct cortex_m_common *cortex_m = target_to_cm(target);
1202 struct cortex_m_fp_comparator *comparator_list = cortex_m->fp_comparator_list;
1203
1204 if (!breakpoint->set) {
1205 LOG_WARNING("breakpoint not set");
1206 return ERROR_OK;
1207 }
1208
1209 LOG_DEBUG("BPID: %d, Type: %d, Address: 0x%08" PRIx32 " Length: %d (set=%d)",
1210 breakpoint->unique_id,
1211 (int)(breakpoint->type),
1212 breakpoint->address,
1213 breakpoint->length,
1214 breakpoint->set);
1215
1216 if (breakpoint->type == BKPT_HARD) {
1217 int fp_num = breakpoint->set - 1;
1218 if ((fp_num < 0) || (fp_num >= cortex_m->fp_num_code)) {
1219 LOG_DEBUG("Invalid FP Comparator number in breakpoint");
1220 return ERROR_OK;
1221 }
1222 comparator_list[fp_num].used = 0;
1223 comparator_list[fp_num].fpcr_value = 0;
1224 target_write_u32(target, comparator_list[fp_num].fpcr_address,
1225 comparator_list[fp_num].fpcr_value);
1226 } else {
1227 /* restore original instruction (kept in target endianness) */
1228 if (breakpoint->length == 4) {
1229 retval = target_write_memory(target, breakpoint->address & 0xFFFFFFFE, 4, 1,
1230 breakpoint->orig_instr);
1231 if (retval != ERROR_OK)
1232 return retval;
1233 } else {
1234 retval = target_write_memory(target, breakpoint->address & 0xFFFFFFFE, 2, 1,
1235 breakpoint->orig_instr);
1236 if (retval != ERROR_OK)
1237 return retval;
1238 }
1239 }
1240 breakpoint->set = false;
1241
1242 return ERROR_OK;
1243 }
1244
1245 int cortex_m_add_breakpoint(struct target *target, struct breakpoint *breakpoint)
1246 {
1247 struct cortex_m_common *cortex_m = target_to_cm(target);
1248
1249 if (cortex_m->auto_bp_type)
1250 breakpoint->type = BKPT_TYPE_BY_ADDR(breakpoint->address);
1251
1252 if (breakpoint->type != BKPT_TYPE_BY_ADDR(breakpoint->address)) {
1253 if (breakpoint->type == BKPT_HARD) {
1254 LOG_INFO("flash patch comparator requested outside code memory region");
1255 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1256 }
1257
1258 if (breakpoint->type == BKPT_SOFT) {
1259 LOG_INFO("soft breakpoint requested in code (flash) memory region");
1260 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1261 }
1262 }
1263
1264 if ((breakpoint->type == BKPT_HARD) && (cortex_m->fp_code_available < 1)) {
1265 LOG_INFO("no flash patch comparator unit available for hardware breakpoint");
1266 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1267 }
1268
1269 if ((breakpoint->length != 2)) {
1270 LOG_INFO("only breakpoints of two bytes length supported");
1271 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1272 }
1273
1274 if (breakpoint->type == BKPT_HARD)
1275 cortex_m->fp_code_available--;
1276
1277 return cortex_m_set_breakpoint(target, breakpoint);
1278 }
1279
1280 int cortex_m_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1281 {
1282 struct cortex_m_common *cortex_m = target_to_cm(target);
1283
1284 	/* REVISIT why check? FPB can be updated with core running ... */
1285 if (target->state != TARGET_HALTED) {
1286 LOG_WARNING("target not halted");
1287 return ERROR_TARGET_NOT_HALTED;
1288 }
1289
1290 if (cortex_m->auto_bp_type)
1291 breakpoint->type = BKPT_TYPE_BY_ADDR(breakpoint->address);
1292
1293 if (breakpoint->set)
1294 cortex_m_unset_breakpoint(target, breakpoint);
1295
1296 if (breakpoint->type == BKPT_HARD)
1297 cortex_m->fp_code_available++;
1298
1299 return ERROR_OK;
1300 }
1301
1302 int cortex_m_set_watchpoint(struct target *target, struct watchpoint *watchpoint)
1303 {
1304 int dwt_num = 0;
1305 uint32_t mask, temp;
1306 struct cortex_m_common *cortex_m = target_to_cm(target);
1307
1308 /* watchpoint params were validated earlier */
1309 mask = 0;
1310 temp = watchpoint->length;
1311 while (temp) {
1312 temp >>= 1;
1313 mask++;
1314 }
1315 mask--;
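	/* mask now holds log2(length); DWT_MASKn gives the number of low-order
	 * address bits the comparator ignores, so the watch covers 2^mask bytes. */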
1316
1317 /* REVISIT Don't fully trust these "not used" records ... users
1318 * may set up breakpoints by hand, e.g. dual-address data value
1319 * watchpoint using comparator #1; comparator #0 matching cycle
1320 * count; send data trace info through ITM and TPIU; etc
1321 */
1322 struct cortex_m_dwt_comparator *comparator;
1323
1324 for (comparator = cortex_m->dwt_comparator_list;
1325 		dwt_num < cortex_m->dwt_num_comp && comparator->used;
1326 comparator++, dwt_num++)
1327 continue;
1328 if (dwt_num >= cortex_m->dwt_num_comp) {
1329 LOG_ERROR("Can not find free DWT Comparator");
1330 return ERROR_FAIL;
1331 }
1332 comparator->used = 1;
1333 watchpoint->set = dwt_num + 1;
1334
1335 comparator->comp = watchpoint->address;
1336 target_write_u32(target, comparator->dwt_comparator_address + 0,
1337 comparator->comp);
1338
1339 comparator->mask = mask;
1340 target_write_u32(target, comparator->dwt_comparator_address + 4,
1341 comparator->mask);
1342
1343 switch (watchpoint->rw) {
1344 case WPT_READ:
1345 comparator->function = 5;
1346 break;
1347 case WPT_WRITE:
1348 comparator->function = 6;
1349 break;
1350 case WPT_ACCESS:
1351 comparator->function = 7;
1352 break;
1353 }
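	/* DWT_FUNCTIONn values: 5 = watchpoint on read, 6 = watchpoint on write,
	 * 7 = watchpoint on read or write. */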
1354 target_write_u32(target, comparator->dwt_comparator_address + 8,
1355 comparator->function);
1356
1357 LOG_DEBUG("Watchpoint (ID %d) DWT%d 0x%08x 0x%x 0x%05x",
1358 watchpoint->unique_id, dwt_num,
1359 (unsigned) comparator->comp,
1360 (unsigned) comparator->mask,
1361 (unsigned) comparator->function);
1362 return ERROR_OK;
1363 }
1364
1365 int cortex_m_unset_watchpoint(struct target *target, struct watchpoint *watchpoint)
1366 {
1367 struct cortex_m_common *cortex_m = target_to_cm(target);
1368 struct cortex_m_dwt_comparator *comparator;
1369 int dwt_num;
1370
1371 if (!watchpoint->set) {
1372 LOG_WARNING("watchpoint (wpid: %d) not set",
1373 watchpoint->unique_id);
1374 return ERROR_OK;
1375 }
1376
1377 dwt_num = watchpoint->set - 1;
1378
1379 LOG_DEBUG("Watchpoint (ID %d) DWT%d address: 0x%08x clear",
1380 watchpoint->unique_id, dwt_num,
1381 (unsigned) watchpoint->address);
1382
1383 if ((dwt_num < 0) || (dwt_num >= cortex_m->dwt_num_comp)) {
1384 LOG_DEBUG("Invalid DWT Comparator number in watchpoint");
1385 return ERROR_OK;
1386 }
1387
1388 comparator = cortex_m->dwt_comparator_list + dwt_num;
1389 comparator->used = 0;
1390 comparator->function = 0;
1391 target_write_u32(target, comparator->dwt_comparator_address + 8,
1392 comparator->function);
1393
1394 watchpoint->set = false;
1395
1396 return ERROR_OK;
1397 }
1398
1399 int cortex_m_add_watchpoint(struct target *target, struct watchpoint *watchpoint)
1400 {
1401 struct cortex_m_common *cortex_m = target_to_cm(target);
1402
1403 if (cortex_m->dwt_comp_available < 1) {
1404 LOG_DEBUG("no comparators?");
1405 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1406 }
1407
1408 /* hardware doesn't support data value masking */
1409 if (watchpoint->mask != ~(uint32_t)0) {
1410 LOG_DEBUG("watchpoint value masks not supported");
1411 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1412 }
1413
1414 /* hardware allows address masks of up to 32K */
1415 unsigned mask;
1416
1417 for (mask = 0; mask < 16; mask++) {
1418 if ((1u << mask) == watchpoint->length)
1419 break;
1420 }
1421 if (mask == 16) {
1422 LOG_DEBUG("unsupported watchpoint length");
1423 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1424 }
1425 if (watchpoint->address & ((1 << mask) - 1)) {
1426 LOG_DEBUG("watchpoint address is unaligned");
1427 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1428 }
1429
1430 /* Caller doesn't seem to be able to describe watching for data
1431 * values of zero; that flags "no value".
1432 *
1433 * REVISIT This DWT may well be able to watch for specific data
1434 * values. Requires comparator #1 to set DATAVMATCH and match
1435 * the data, and another comparator (DATAVADDR0) matching addr.
1436 */
1437 if (watchpoint->value) {
1438 LOG_DEBUG("data value watchpoint not YET supported");
1439 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1440 }
1441
1442 cortex_m->dwt_comp_available--;
1443 LOG_DEBUG("dwt_comp_available: %d", cortex_m->dwt_comp_available);
1444
1445 return ERROR_OK;
1446 }
1447
1448 int cortex_m_remove_watchpoint(struct target *target, struct watchpoint *watchpoint)
1449 {
1450 struct cortex_m_common *cortex_m = target_to_cm(target);
1451
1452 /* REVISIT why check? DWT can be updated with core running ... */
1453 if (target->state != TARGET_HALTED) {
1454 LOG_WARNING("target not halted");
1455 return ERROR_TARGET_NOT_HALTED;
1456 }
1457
1458 if (watchpoint->set)
1459 cortex_m_unset_watchpoint(target, watchpoint);
1460
1461 cortex_m->dwt_comp_available++;
1462 LOG_DEBUG("dwt_comp_available: %d", cortex_m->dwt_comp_available);
1463
1464 return ERROR_OK;
1465 }
1466
1467 void cortex_m_enable_watchpoints(struct target *target)
1468 {
1469 struct watchpoint *watchpoint = target->watchpoints;
1470
1471 /* set any pending watchpoints */
1472 while (watchpoint) {
1473 if (!watchpoint->set)
1474 cortex_m_set_watchpoint(target, watchpoint);
1475 watchpoint = watchpoint->next;
1476 }
1477 }
1478
1479 static int cortex_m_load_core_reg_u32(struct target *target,
1480 uint32_t num, uint32_t *value)
1481 {
1482 int retval;
1483 struct armv7m_common *armv7m = target_to_armv7m(target);
1484 struct adiv5_dap *swjdp = armv7m->arm.dap;
1485
1486 /* NOTE: we "know" here that the register identifiers used
1487 * in the v7m header match the Cortex-M3 Debug Core Register
1488 * Selector values for R0..R15, xPSR, MSP, and PSP.
1489 */
1490 switch (num) {
1491 case 0 ... 18:
1492 /* read a normal core register */
1493 retval = cortexm_dap_read_coreregister_u32(swjdp, value, num);
1494
1495 if (retval != ERROR_OK) {
1496 LOG_ERROR("JTAG failure %i", retval);
1497 return ERROR_JTAG_DEVICE_ERROR;
1498 }
1499 LOG_DEBUG("load from core reg %i value 0x%" PRIx32 "", (int)num, *value);
1500 break;
1501
1502 case ARMV7M_PRIMASK:
1503 case ARMV7M_BASEPRI:
1504 case ARMV7M_FAULTMASK:
1505 case ARMV7M_CONTROL:
1506 /* Cortex-M3 packages these four registers as bitfields
1507 * in one Debug Core register. So say r0 and r2 docs;
1508 * it was removed from r1 docs, but still works.
1509 */
1510 cortexm_dap_read_coreregister_u32(swjdp, value, 20);
1511
1512 switch (num) {
1513 case ARMV7M_PRIMASK:
1514 *value = buf_get_u32((uint8_t *)value, 0, 1);
1515 break;
1516
1517 case ARMV7M_BASEPRI:
1518 *value = buf_get_u32((uint8_t *)value, 8, 8);
1519 break;
1520
1521 case ARMV7M_FAULTMASK:
1522 *value = buf_get_u32((uint8_t *)value, 16, 1);
1523 break;
1524
1525 case ARMV7M_CONTROL:
1526 *value = buf_get_u32((uint8_t *)value, 24, 2);
1527 break;
1528 }
1529
1530 LOG_DEBUG("load from special reg %i value 0x%" PRIx32 "", (int)num, *value);
1531 break;
1532
1533 default:
1534 return ERROR_COMMAND_SYNTAX_ERROR;
1535 }
1536
1537 return ERROR_OK;
1538 }
1539
1540 static int cortex_m_store_core_reg_u32(struct target *target,
1541 uint32_t num, uint32_t value)
1542 {
1543 int retval;
1544 uint32_t reg;
1545 struct armv7m_common *armv7m = target_to_armv7m(target);
1546 struct adiv5_dap *swjdp = armv7m->arm.dap;
1547
1548 /* NOTE: we "know" here that the register identifiers used
1549 * in the v7m header match the Cortex-M3 Debug Core Register
1550 * Selector values for R0..R15, xPSR, MSP, and PSP.
1551 */
1552 switch (num) {
1553 case 0 ... 18:
1554 retval = cortexm_dap_write_coreregister_u32(swjdp, value, num);
1555 if (retval != ERROR_OK) {
1556 struct reg *r;
1557
1558 LOG_ERROR("JTAG failure");
1559 r = armv7m->arm.core_cache->reg_list + num;
1560 r->dirty = r->valid;
1561 return ERROR_JTAG_DEVICE_ERROR;
1562 }
1563 LOG_DEBUG("write core reg %i value 0x%" PRIx32 "", (int)num, value);
1564 break;
1565
1566 case ARMV7M_PRIMASK:
1567 case ARMV7M_BASEPRI:
1568 case ARMV7M_FAULTMASK:
1569 case ARMV7M_CONTROL:
1570 /* Cortex-M3 packages these four registers as bitfields
1571 * in one Debug Core register. So say r0 and r2 docs;
1572 * it was removed from r1 docs, but still works.
1573 */
1574 cortexm_dap_read_coreregister_u32(swjdp, &reg, 20);
1575
1576 switch (num) {
1577 case ARMV7M_PRIMASK:
1578 buf_set_u32((uint8_t *)&reg, 0, 1, value);
1579 break;
1580
1581 case ARMV7M_BASEPRI:
1582 buf_set_u32((uint8_t *)&reg, 8, 8, value);
1583 break;
1584
1585 case ARMV7M_FAULTMASK:
1586 buf_set_u32((uint8_t *)&reg, 16, 1, value);
1587 break;
1588
1589 case ARMV7M_CONTROL:
1590 buf_set_u32((uint8_t *)&reg, 24, 2, value);
1591 break;
1592 }
1593
1594 cortexm_dap_write_coreregister_u32(swjdp, reg, 20);
1595
1596 LOG_DEBUG("write special reg %i value 0x%" PRIx32 " ", (int)num, value);
1597 break;
1598
1599 default:
1600 return ERROR_COMMAND_SYNTAX_ERROR;
1601 }
1602
1603 return ERROR_OK;
1604 }
1605
1606 static int cortex_m_read_memory(struct target *target, uint32_t address,
1607 uint32_t size, uint32_t count, uint8_t *buffer)
1608 {
1609 struct armv7m_common *armv7m = target_to_armv7m(target);
1610 struct adiv5_dap *swjdp = armv7m->arm.dap;
1611
1612 if (armv7m->arm.is_armv6m) {
1613 /* armv6m does not handle unaligned memory access */
1614 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1615 return ERROR_TARGET_UNALIGNED_ACCESS;
1616 }
1617
1618 return mem_ap_read(swjdp, buffer, size, count, address, true);
1619 }
1620
1621 static int cortex_m_write_memory(struct target *target, uint32_t address,
1622 uint32_t size, uint32_t count, const uint8_t *buffer)
1623 {
1624 struct armv7m_common *armv7m = target_to_armv7m(target);
1625 struct adiv5_dap *swjdp = armv7m->arm.dap;
1626
1627 if (armv7m->arm.is_armv6m) {
1628 /* armv6m does not handle unaligned memory access */
1629 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1630 return ERROR_TARGET_UNALIGNED_ACCESS;
1631 }
1632
1633 return mem_ap_write(swjdp, buffer, size, count, address, true);
1634 }
1635
1636 static int cortex_m_init_target(struct command_context *cmd_ctx,
1637 struct target *target)
1638 {
1639 armv7m_build_reg_cache(target);
1640 return ERROR_OK;
1641 }
1642
1643 /* REVISIT cache valid/dirty bits are unmaintained. We could set "valid"
1644 * on r/w if the core is not running, and clear on resume or reset ... or
1645 * at least, in a post_restore_context() method.
1646 */
1647
1648 struct dwt_reg_state {
1649 struct target *target;
1650 uint32_t addr;
1651 uint32_t value; /* scratch/cache */
1652 };
1653
1654 static int cortex_m_dwt_get_reg(struct reg *reg)
1655 {
1656 struct dwt_reg_state *state = reg->arch_info;
1657
1658 return target_read_u32(state->target, state->addr, &state->value);
1659 }
1660
1661 static int cortex_m_dwt_set_reg(struct reg *reg, uint8_t *buf)
1662 {
1663 struct dwt_reg_state *state = reg->arch_info;
1664
1665 return target_write_u32(state->target, state->addr,
1666 buf_get_u32(buf, 0, reg->size));
1667 }
1668
1669 struct dwt_reg {
1670 uint32_t addr;
1671 char *name;
1672 unsigned size;
1673 };
1674
1675 static struct dwt_reg dwt_base_regs[] = {
1676 { DWT_CTRL, "dwt_ctrl", 32, },
1677 /* NOTE that Erratum 532314 (fixed r2p0) affects CYCCNT: it wrongly
1678 * increments while the core is asleep.
1679 */
1680 { DWT_CYCCNT, "dwt_cyccnt", 32, },
1681 /* plus some 8 bit counters, useful for profiling with TPIU */
1682 };
1683
1684 static struct dwt_reg dwt_comp[] = {
1685 #define DWT_COMPARATOR(i) \
1686 { DWT_COMP0 + 0x10 * (i), "dwt_" #i "_comp", 32, }, \
1687 { DWT_MASK0 + 0x10 * (i), "dwt_" #i "_mask", 4, }, \
1688 { DWT_FUNCTION0 + 0x10 * (i), "dwt_" #i "_function", 32, }
1689 DWT_COMPARATOR(0),
1690 DWT_COMPARATOR(1),
1691 DWT_COMPARATOR(2),
1692 DWT_COMPARATOR(3),
1693 #undef DWT_COMPARATOR
1694 };
1695
1696 static const struct reg_arch_type dwt_reg_type = {
1697 .get = cortex_m_dwt_get_reg,
1698 .set = cortex_m_dwt_set_reg,
1699 };
1700
1701 static void cortex_m_dwt_addreg(struct target *t, struct reg *r, struct dwt_reg *d)
1702 {
1703 struct dwt_reg_state *state;
1704
1705 state = calloc(1, sizeof *state);
1706 if (!state)
1707 return;
1708 state->addr = d->addr;
1709 state->target = t;
1710
1711 r->name = d->name;
1712 r->size = d->size;
1713 r->value = &state->value;
1714 r->arch_info = state;
1715 r->type = &dwt_reg_type;
1716 }
1717
1718 void cortex_m_dwt_setup(struct cortex_m_common *cm, struct target *target)
1719 {
1720 uint32_t dwtcr;
1721 struct reg_cache *cache;
1722 struct cortex_m_dwt_comparator *comparator;
1723 int reg, i;
1724
1725 target_read_u32(target, DWT_CTRL, &dwtcr);
1726 if (!dwtcr) {
1727 LOG_DEBUG("no DWT");
1728 return;
1729 }
1730
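	/* DWT_CTRL[31:28] is NUMCOMP, the number of implemented comparators. */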
1731 cm->dwt_num_comp = (dwtcr >> 28) & 0xF;
1732 cm->dwt_comp_available = cm->dwt_num_comp;
1733 cm->dwt_comparator_list = calloc(cm->dwt_num_comp,
1734 sizeof(struct cortex_m_dwt_comparator));
1735 if (!cm->dwt_comparator_list) {
1736 fail0:
1737 cm->dwt_num_comp = 0;
1738 LOG_ERROR("out of mem");
1739 return;
1740 }
1741
1742 cache = calloc(1, sizeof *cache);
1743 if (!cache) {
1744 fail1:
1745 free(cm->dwt_comparator_list);
1746 goto fail0;
1747 }
1748 cache->name = "cortex-m3 dwt registers";
1749 cache->num_regs = 2 + cm->dwt_num_comp * 3;
1750 cache->reg_list = calloc(cache->num_regs, sizeof *cache->reg_list);
1751 if (!cache->reg_list) {
1752 free(cache);
1753 goto fail1;
1754 }
1755
1756 for (reg = 0; reg < 2; reg++)
1757 cortex_m_dwt_addreg(target, cache->reg_list + reg,
1758 dwt_base_regs + reg);
1759
1760 comparator = cm->dwt_comparator_list;
1761 for (i = 0; i < cm->dwt_num_comp; i++, comparator++) {
1762 int j;
1763
1764 comparator->dwt_comparator_address = DWT_COMP0 + 0x10 * i;
1765 for (j = 0; j < 3; j++, reg++)
1766 cortex_m_dwt_addreg(target, cache->reg_list + reg,
1767 dwt_comp + 3 * i + j);
1768
1769 /* make sure we clear any watchpoints enabled on the target */
1770 target_write_u32(target, comparator->dwt_comparator_address + 8, 0);
1771 }
1772
1773 *register_get_last_cache_p(&target->reg_cache) = cache;
1774 cm->dwt_cache = cache;
1775
1776 LOG_DEBUG("DWT dwtcr 0x%" PRIx32 ", comp %d, watch%s",
1777 dwtcr, cm->dwt_num_comp,
1778 (dwtcr & (0xf << 24)) ? " only" : "/trigger");
1779
1780 /* REVISIT: if num_comp > 1, check whether comparator #1 can
1781 * implement single-address data value watchpoints ... so we
1782 * won't need to check it later, when asked to set one up.
1783 */
1784 }
1785
1786 #define MVFR0 0xe000ef40
1787 #define MVFR1 0xe000ef44
1788
1789 #define MVFR0_DEFAULT_M4 0x10110021
1790 #define MVFR1_DEFAULT_M4 0x11000011
1791
1792 int cortex_m_examine(struct target *target)
1793 {
1794 int retval;
1795 uint32_t cpuid, fpcr, mvfr0, mvfr1;
1796 int i;
1797 struct cortex_m_common *cortex_m = target_to_cm(target);
1798 struct adiv5_dap *swjdp = cortex_m->armv7m.arm.dap;
1799 struct armv7m_common *armv7m = target_to_armv7m(target);
1800
1801 /* stlink shares the examine handler but does not support
1802 * all its calls */
1803 if (!armv7m->stlink) {
1804 retval = ahbap_debugport_init(swjdp);
1805 if (retval != ERROR_OK)
1806 return retval;
1807 }
1808
1809 if (!target_was_examined(target)) {
1810 target_set_examined(target);
1811
1812 /* Read from Device Identification Registers */
1813 retval = target_read_u32(target, CPUID, &cpuid);
1814 if (retval != ERROR_OK)
1815 return retval;
1816
1817 /* Get CPU Type */
1818 i = (cpuid >> 4) & 0xf;
1819
1820 LOG_DEBUG("Cortex-M%d r%" PRId8 "p%" PRId8 " processor detected",
1821 i, (uint8_t)((cpuid >> 20) & 0xf), (uint8_t)((cpuid >> 0) & 0xf));
1822 LOG_DEBUG("cpuid: 0x%8.8" PRIx32 "", cpuid);
1823
1824 /* test for floating point feature on cortex-m4 */
1825 if (i == 4) {
1826 target_read_u32(target, MVFR0, &mvfr0);
1827 target_read_u32(target, MVFR1, &mvfr1);
1828
1829 if ((mvfr0 == MVFR0_DEFAULT_M4) && (mvfr1 == MVFR1_DEFAULT_M4)) {
1830 LOG_DEBUG("Cortex-M%d floating point feature FPv4_SP found", i);
1831 armv7m->fp_feature = FPv4_SP;
1832 }
1833 } else if (i == 0) {
1834 			/* Cortex-M0 is an ARMv6-M core (it does not support unaligned memory access) */
1835 armv7m->arm.is_armv6m = true;
1836 }
1837
1838 if (i == 4 || i == 3) {
1839 			/* Cortex-M3/M4 has a 4096-byte autoincrement range */
1840 armv7m->dap.tar_autoincr_block = (1 << 12);
1841 }
1842
1843 /* NOTE: FPB and DWT are both optional. */
1844
1845 /* Setup FPB */
1846 target_read_u32(target, FP_CTRL, &fpcr);
1847 cortex_m->auto_bp_type = 1;
1848 		/* FP_CTRL: NUM_CODE is split across bits [14:12] and [7:4]; NUM_LIT is bits [11:8] */
1849 cortex_m->fp_num_code = ((fpcr >> 8) & 0x70) | ((fpcr >> 4) & 0xF);
1850 cortex_m->fp_num_lit = (fpcr >> 8) & 0xF;
1851 cortex_m->fp_code_available = cortex_m->fp_num_code;
1852 cortex_m->fp_comparator_list = calloc(
1853 cortex_m->fp_num_code + cortex_m->fp_num_lit,
1854 sizeof(struct cortex_m_fp_comparator));
1855 cortex_m->fpb_enabled = fpcr & 1;
1856 for (i = 0; i < cortex_m->fp_num_code + cortex_m->fp_num_lit; i++) {
1857 cortex_m->fp_comparator_list[i].type =
1858 (i < cortex_m->fp_num_code) ? FPCR_CODE : FPCR_LITERAL;
1859 cortex_m->fp_comparator_list[i].fpcr_address = FP_COMP0 + 4 * i;
1860
1861 /* make sure we clear any breakpoints enabled on the target */
1862 target_write_u32(target, cortex_m->fp_comparator_list[i].fpcr_address, 0);
1863 }
1864 LOG_DEBUG("FPB fpcr 0x%" PRIx32 ", numcode %i, numlit %i",
1865 fpcr,
1866 cortex_m->fp_num_code,
1867 cortex_m->fp_num_lit);
1868
1869 /* Setup DWT */
1870 cortex_m_dwt_setup(cortex_m, target);
1871
1872 /* These hardware breakpoints only work for code in flash! */
1873 LOG_INFO("%s: hardware has %d breakpoints, %d watchpoints",
1874 target_name(target),
1875 cortex_m->fp_num_code,
1876 cortex_m->dwt_num_comp);
1877 }
1878
1879 return ERROR_OK;
1880 }
1881
1882 static int cortex_m_dcc_read(struct adiv5_dap *swjdp, uint8_t *value, uint8_t *ctrl)
1883 {
1884 uint16_t dcrdr;
1885 int retval;
1886
1887 mem_ap_read_buf_u16(swjdp, (uint8_t *)&dcrdr, 2, DCB_DCRDR);
1888 *ctrl = (uint8_t)dcrdr;
1889 *value = (uint8_t)(dcrdr >> 8);
1890
1891 LOG_DEBUG("data 0x%x ctrl 0x%x", *value, *ctrl);
1892
1893 /* write ack back to software dcc register
1894 	 * to signify we have read the data */
1895 if (dcrdr & (1 << 0)) {
1896 dcrdr = 0;
1897 retval = mem_ap_write_buf_u16(swjdp, (uint8_t *)&dcrdr, 2, DCB_DCRDR);
1898 if (retval != ERROR_OK)
1899 return retval;
1900 }
1901
1902 return ERROR_OK;
1903 }
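/* The emulated DCC channel packs a flag byte and a data byte into the low
 * half-word of DCB_DCRDR: bit 0 of the low byte ("ctrl") is set by target
 * firmware once it has placed a byte in bits [15:8] ("value"), and the
 * debugger acknowledges by writing the half-word back as zero, as done
 * above.  A matching target-side writer might look like the sketch below;
 * this is purely illustrative, the firmware helper is not part of this file:
 *
 *   volatile uint32_t *dcrdr = (volatile uint32_t *) 0xE000EDF8;   (DCB_DCRDR)
 *   while (*dcrdr & 1)
 *       ;                                   wait for the debugger to drain it
 *   *dcrdr = ((uint32_t) byte << 8) | 1;    post the byte and raise the flag
 */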
1904
1905 static int cortex_m_target_request_data(struct target *target,
1906 uint32_t size, uint8_t *buffer)
1907 {
1908 struct armv7m_common *armv7m = target_to_armv7m(target);
1909 struct adiv5_dap *swjdp = armv7m->arm.dap;
1910 uint8_t data;
1911 uint8_t ctrl;
1912 uint32_t i;
1913
1914 for (i = 0; i < (size * 4); i++) {
1915 cortex_m_dcc_read(swjdp, &data, &ctrl);
1916 buffer[i] = data;
1917 }
1918
1919 return ERROR_OK;
1920 }
1921
1922 static int cortex_m_handle_target_request(void *priv)
1923 {
1924 struct target *target = priv;
1925 if (!target_was_examined(target))
1926 return ERROR_OK;
1927 struct armv7m_common *armv7m = target_to_armv7m(target);
1928 struct adiv5_dap *swjdp = armv7m->arm.dap;
1929
1930 if (!target->dbg_msg_enabled)
1931 return ERROR_OK;
1932
1933 if (target->state == TARGET_RUNNING) {
1934 uint8_t data;
1935 uint8_t ctrl;
1936
1937 cortex_m_dcc_read(swjdp, &data, &ctrl);
1938
1939 /* check if we have data */
1940 if (ctrl & (1 << 0)) {
1941 uint32_t request;
1942
1943 /* we assume target is quick enough */
1944 request = data;
1945 cortex_m_dcc_read(swjdp, &data, &ctrl);
1946 request |= (data << 8);
1947 cortex_m_dcc_read(swjdp, &data, &ctrl);
1948 request |= (data << 16);
1949 cortex_m_dcc_read(swjdp, &data, &ctrl);
1950 request |= (data << 24);
1951 target_request(target, request);
1952 }
1953 }
1954
1955 return ERROR_OK;
1956 }
1957
1958 static int cortex_m_init_arch_info(struct target *target,
1959 struct cortex_m_common *cortex_m, struct jtag_tap *tap)
1960 {
1961 int retval;
1962 struct armv7m_common *armv7m = &cortex_m->armv7m;
1963
1964 armv7m_init_arch_info(target, armv7m);
1965
1966 /* prepare JTAG information for the new target */
1967 cortex_m->jtag_info.tap = tap;
1968 cortex_m->jtag_info.scann_size = 4;
1969
1970 	/* default reset mode is to use srst if fitted;
1971 	 * if not, it will use CORTEX_M_RESET_VECTRESET */
1972 cortex_m->soft_reset_config = CORTEX_M_RESET_VECTRESET;
1973
1974 armv7m->arm.dap = &armv7m->dap;
1975
1976 /* Leave (only) generic DAP stuff for debugport_init(); */
1977 armv7m->dap.jtag_info = &cortex_m->jtag_info;
1978 armv7m->dap.memaccess_tck = 8;
1979
1980 	/* Cortex-M3/M4 has a 4096-byte autoincrement range,
1981 	 * but set a safe default of 1024 to support Cortex-M0;
1982 	 * this is raised in cortex_m_examine if an M3/M4 is detected */
1983 armv7m->dap.tar_autoincr_block = (1 << 10);
1984
1985 /* register arch-specific functions */
1986 armv7m->examine_debug_reason = cortex_m_examine_debug_reason;
1987
1988 armv7m->post_debug_entry = NULL;
1989
1990 armv7m->pre_restore_context = NULL;
1991
1992 armv7m->load_core_reg_u32 = cortex_m_load_core_reg_u32;
1993 armv7m->store_core_reg_u32 = cortex_m_store_core_reg_u32;
1994
1995 target_register_timer_callback(cortex_m_handle_target_request, 1, 1, target);
1996
1997 retval = arm_jtag_setup_connection(&cortex_m->jtag_info);
1998 if (retval != ERROR_OK)
1999 return retval;
2000
2001 return ERROR_OK;
2002 }
2003
2004 static int cortex_m_target_create(struct target *target, Jim_Interp *interp)
2005 {
2006 struct cortex_m_common *cortex_m = calloc(1, sizeof(struct cortex_m_common));
2007
2008 cortex_m->common_magic = CORTEX_M_COMMON_MAGIC;
2009 cortex_m_init_arch_info(target, cortex_m, target->tap);
2010
2011 return ERROR_OK;
2012 }
2013
2014 /*--------------------------------------------------------------------------*/
2015
2016 static int cortex_m_verify_pointer(struct command_context *cmd_ctx,
2017 struct cortex_m_common *cm)
2018 {
2019 if (cm->common_magic != CORTEX_M_COMMON_MAGIC) {
2020 command_print(cmd_ctx, "target is not a Cortex-M");
2021 return ERROR_TARGET_INVALID;
2022 }
2023 return ERROR_OK;
2024 }
2025
2026 /*
2027 * Only stuff below this line should need to verify that its target
2028  * is a Cortex-M. Everything else should have indirected through the
2029  * cortexm_target structure, which is only used with Cortex-M targets.
2030 */
2031
2032 static const struct {
2033 char name[10];
2034 unsigned mask;
2035 } vec_ids[] = {
2036 { "hard_err", VC_HARDERR, },
2037 { "int_err", VC_INTERR, },
2038 { "bus_err", VC_BUSERR, },
2039 { "state_err", VC_STATERR, },
2040 { "chk_err", VC_CHKERR, },
2041 { "nocp_err", VC_NOCPERR, },
2042 { "mm_err", VC_MMERR, },
2043 { "reset", VC_CORERESET, },
2044 };
2045
2046 COMMAND_HANDLER(handle_cortex_m_vector_catch_command)
2047 {
2048 struct target *target = get_current_target(CMD_CTX);
2049 struct cortex_m_common *cortex_m = target_to_cm(target);
2050 struct armv7m_common *armv7m = &cortex_m->armv7m;
2051 struct adiv5_dap *swjdp = armv7m->arm.dap;
2052 uint32_t demcr = 0;
2053 int retval;
2054
2055 retval = cortex_m_verify_pointer(CMD_CTX, cortex_m);
2056 if (retval != ERROR_OK)
2057 return retval;
2058
2059 retval = mem_ap_read_atomic_u32(swjdp, DCB_DEMCR, &demcr);
2060 if (retval != ERROR_OK)
2061 return retval;
2062
2063 if (CMD_ARGC > 0) {
2064 unsigned catch = 0;
2065
2066 if (CMD_ARGC == 1) {
2067 if (strcmp(CMD_ARGV[0], "all") == 0) {
2068 catch = VC_HARDERR | VC_INTERR | VC_BUSERR
2069 | VC_STATERR | VC_CHKERR | VC_NOCPERR
2070 | VC_MMERR | VC_CORERESET;
2071 goto write;
2072 } else if (strcmp(CMD_ARGV[0], "none") == 0)
2073 goto write;
2074 }
2075 while (CMD_ARGC-- > 0) {
2076 unsigned i;
2077 for (i = 0; i < ARRAY_SIZE(vec_ids); i++) {
2078 if (strcmp(CMD_ARGV[CMD_ARGC], vec_ids[i].name) != 0)
2079 continue;
2080 catch |= vec_ids[i].mask;
2081 break;
2082 }
2083 if (i == ARRAY_SIZE(vec_ids)) {
2084 LOG_ERROR("No CM3 vector '%s'", CMD_ARGV[CMD_ARGC]);
2085 return ERROR_COMMAND_SYNTAX_ERROR;
2086 }
2087 }
2088 write:
2089 /* For now, armv7m->demcr only stores vector catch flags. */
2090 armv7m->demcr = catch;
2091
2092 demcr &= ~0xffff;
2093 demcr |= catch;
2094
2095 		/* write, then read DEMCR back so we report what the hardware actually accepted */
2096 retval = mem_ap_write_u32(swjdp, DCB_DEMCR, demcr);
2097 if (retval != ERROR_OK)
2098 return retval;
2099 retval = mem_ap_read_atomic_u32(swjdp, DCB_DEMCR, &demcr);
2100 if (retval != ERROR_OK)
2101 return retval;
2102
2103 /* FIXME be sure to clear DEMCR on clean server shutdown.
2104 * Otherwise the vector catch hardware could fire when there's
2105 * no debugger hooked up, causing much confusion...
2106 */
2107 }
2108
2109 for (unsigned i = 0; i < ARRAY_SIZE(vec_ids); i++) {
2110 command_print(CMD_CTX, "%9s: %s", vec_ids[i].name,
2111 (demcr & vec_ids[i].mask) ? "catch" : "ignore");
2112 }
2113
2114 return ERROR_OK;
2115 }
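/* Example usage (illustrative; the vector names come from vec_ids[] above):
 *
 *   cortex_m vector_catch all
 *   cortex_m vector_catch hard_err reset
 *   cortex_m vector_catch none
 *
 * With no arguments the handler only prints the current catch/ignore state
 * of each vector, as read back from DCB_DEMCR.
 */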
2116
2117 COMMAND_HANDLER(handle_cortex_m_mask_interrupts_command)
2118 {
2119 struct target *target = get_current_target(CMD_CTX);
2120 struct cortex_m_common *cortex_m = target_to_cm(target);
2121 int retval;
2122
2123 static const Jim_Nvp nvp_maskisr_modes[] = {
2124 { .name = "auto", .value = CORTEX_M_ISRMASK_AUTO },
2125 { .name = "off", .value = CORTEX_M_ISRMASK_OFF },
2126 { .name = "on", .value = CORTEX_M_ISRMASK_ON },
2127 { .name = NULL, .value = -1 },
2128 };
2129 const Jim_Nvp *n;
2130
2131
2132 retval = cortex_m_verify_pointer(CMD_CTX, cortex_m);
2133 if (retval != ERROR_OK)
2134 return retval;
2135
2136 if (target->state != TARGET_HALTED) {
2137 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
2138 return ERROR_OK;
2139 }
2140
2141 if (CMD_ARGC > 0) {
2142 n = Jim_Nvp_name2value_simple(nvp_maskisr_modes, CMD_ARGV[0]);
2143 if (n->name == NULL)
2144 return ERROR_COMMAND_SYNTAX_ERROR;
2145 cortex_m->isrmasking_mode = n->value;
2146
2147
2148 if (cortex_m->isrmasking_mode == CORTEX_M_ISRMASK_ON)
2149 cortex_m_write_debug_halt_mask(target, C_HALT | C_MASKINTS, 0);
2150 else
2151 cortex_m_write_debug_halt_mask(target, C_HALT, C_MASKINTS);
2152 }
2153
2154 n = Jim_Nvp_value2name_simple(nvp_maskisr_modes, cortex_m->isrmasking_mode);
2155 command_print(CMD_CTX, "cortex_m interrupt mask %s", n->name);
2156
2157 return ERROR_OK;
2158 }
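/* Example usage (illustrative): the target must be halted first, e.g.
 *
 *   halt
 *   cortex_m maskisr on      ;# mask interrupts during debug stepping
 *   cortex_m maskisr auto    ;# let OpenOCD manage masking automatically
 *   cortex_m maskisr         ;# query the current mode
 */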
2159
2160 COMMAND_HANDLER(handle_cortex_m_reset_config_command)
2161 {
2162 struct target *target = get_current_target(CMD_CTX);
2163 struct cortex_m_common *cortex_m = target_to_cm(target);
2164 int retval;
2165 char *reset_config;
2166
2167 retval = cortex_m_verify_pointer(CMD_CTX, cortex_m);
2168 if (retval != ERROR_OK)
2169 return retval;
2170
2171 if (CMD_ARGC > 0) {
2172 if (strcmp(*CMD_ARGV, "sysresetreq") == 0)
2173 cortex_m->soft_reset_config = CORTEX_M_RESET_SYSRESETREQ;
2174 else if (strcmp(*CMD_ARGV, "vectreset") == 0)
2175 cortex_m->soft_reset_config = CORTEX_M_RESET_VECTRESET;
2176 }
2177
2178 switch (cortex_m->soft_reset_config) {
2179 case CORTEX_M_RESET_SYSRESETREQ:
2180 reset_config = "sysresetreq";
2181 break;
2182
2183 case CORTEX_M_RESET_VECTRESET:
2184 reset_config = "vectreset";
2185 break;
2186
2187 default:
2188 reset_config = "unknown";
2189 break;
2190 }
2191
2192 command_print(CMD_CTX, "cortex_m reset_config %s", reset_config);
2193
2194 return ERROR_OK;
2195 }
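/* Example usage (illustrative):
 *
 *   cortex_m reset_config sysresetreq   ;# soft reset via SYSRESETREQ
 *   cortex_m reset_config vectreset     ;# soft reset of the core only via VECTRESET
 *   cortex_m reset_config               ;# report the current setting
 */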
2196
2197 static const struct command_registration cortex_m_exec_command_handlers[] = {
2198 {
2199 .name = "maskisr",
2200 .handler = handle_cortex_m_mask_interrupts_command,
2201 .mode = COMMAND_EXEC,
2202 .help = "mask cortex_m interrupts",
2203 .usage = "['auto'|'on'|'off']",
2204 },
2205 {
2206 .name = "vector_catch",
2207 .handler = handle_cortex_m_vector_catch_command,
2208 .mode = COMMAND_EXEC,
2209 .help = "configure hardware vectors to trigger debug entry",
2210 .usage = "['all'|'none'|('bus_err'|'chk_err'|...)*]",
2211 },
2212 {
2213 .name = "reset_config",
2214 .handler = handle_cortex_m_reset_config_command,
2215 .mode = COMMAND_ANY,
2216 .help = "configure software reset handling",
2217 .usage = "['srst'|'sysresetreq'|'vectreset']",
2218 },
2219 COMMAND_REGISTRATION_DONE
2220 };
2221 static const struct command_registration cortex_m_command_handlers[] = {
2222 {
2223 .chain = armv7m_command_handlers,
2224 },
2225 {
2226 .name = "cortex_m",
2227 .mode = COMMAND_EXEC,
2228 .help = "Cortex-M command group",
2229 .usage = "",
2230 .chain = cortex_m_exec_command_handlers,
2231 },
2232 COMMAND_REGISTRATION_DONE
2233 };
2234
2235 struct target_type cortexm_target = {
2236 .name = "cortex_m",
2237 .deprecated_name = "cortex_m3",
2238
2239 .poll = cortex_m_poll,
2240 .arch_state = armv7m_arch_state,
2241
2242 .target_request_data = cortex_m_target_request_data,
2243
2244 .halt = cortex_m_halt,
2245 .resume = cortex_m_resume,
2246 .step = cortex_m_step,
2247
2248 .assert_reset = cortex_m_assert_reset,
2249 .deassert_reset = cortex_m_deassert_reset,
2250 .soft_reset_halt = cortex_m_soft_reset_halt,
2251
2252 .get_gdb_reg_list = armv7m_get_gdb_reg_list,
2253
2254 .read_memory = cortex_m_read_memory,
2255 .write_memory = cortex_m_write_memory,
2256 .checksum_memory = armv7m_checksum_memory,
2257 .blank_check_memory = armv7m_blank_check_memory,
2258
2259 .run_algorithm = armv7m_run_algorithm,
2260 .start_algorithm = armv7m_start_algorithm,
2261 .wait_algorithm = armv7m_wait_algorithm,
2262
2263 .add_breakpoint = cortex_m_add_breakpoint,
2264 .remove_breakpoint = cortex_m_remove_breakpoint,
2265 .add_watchpoint = cortex_m_add_watchpoint,
2266 .remove_watchpoint = cortex_m_remove_watchpoint,
2267
2268 .commands = cortex_m_command_handlers,
2269 .target_create = cortex_m_target_create,
2270 .init_target = cortex_m_init_target,
2271 .examine = cortex_m_examine,
2272 };
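/* A board/target config script typically binds this driver with something
 * like the line below; names such as $_CHIPNAME and the TAP id are
 * placeholders, not defined in this file:
 *
 *   target create $_CHIPNAME.cpu cortex_m -chain-position $_CHIPNAME.cpu
 *
 * after which the "cortex_m ..." commands registered above become available
 * on that target.
 */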
