cortex_m: deprecate soft_reset_halt
[openocd.git] / src / target / cortex_m.c
1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2006 by Magnus Lundin *
6 * lundin@mlu.mine.nu *
7 * *
8 * Copyright (C) 2008 by Spencer Oliver *
9 * spen@spen-soft.co.uk *
10 * *
11 * This program is free software; you can redistribute it and/or modify *
12 * it under the terms of the GNU General Public License as published by *
13 * the Free Software Foundation; either version 2 of the License, or *
14 * (at your option) any later version. *
15 * *
16 * This program is distributed in the hope that it will be useful, *
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
19 * GNU General Public License for more details. *
20 * *
21 * You should have received a copy of the GNU General Public License *
22 * along with this program; if not, write to the *
23 * Free Software Foundation, Inc., *
24 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. *
25 * *
26 * *
27 * Cortex-M3(tm) TRM, ARM DDI 0337E (r1p1) and 0337G (r2p0) *
28 * *
29 ***************************************************************************/
30 #ifdef HAVE_CONFIG_H
31 #include "config.h"
32 #endif
33
34 #include "jtag/interface.h"
35 #include "breakpoints.h"
36 #include "cortex_m.h"
37 #include "target_request.h"
38 #include "target_type.h"
39 #include "arm_disassembler.h"
40 #include "register.h"
41 #include "arm_opcodes.h"
42 #include "arm_semihosting.h"
43 #include <helper/time_support.h>
44
45 /* NOTE: most of this should work fine for the Cortex-M1 and
46 * Cortex-M0 cores too, although they're ARMv6-M not ARMv7-M.
47 * Some differences: M0/M1 don't have FPB remapping or the
48 * DWT tracing/profiling support. (So the cycle counter will
49 * not be usable; the other stuff isn't currently used here.)
50 *
51 * Although there are some workarounds for errata seen only in r0p0
52 * silicon, such old parts are hard to find and thus not much tested
53 * any longer.
54 */
55
56 /**
57 * Returns the type of a break point required by address location
58 */
59 #define BKPT_TYPE_BY_ADDR(addr) ((addr) < 0x20000000 ? BKPT_HARD : BKPT_SOFT)
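/* For example, with the threshold above an address such as 0x08000004 (in the
 * code/flash region) gets a hardware (FPB) breakpoint, while 0x20000100 (in
 * SRAM) gets a software BKPT-instruction breakpoint. */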
60
61
62 /* forward declarations */
63 static int cortex_m3_store_core_reg_u32(struct target *target,
64 uint32_t num, uint32_t value);
65
66 static int cortexm3_dap_read_coreregister_u32(struct adiv5_dap *swjdp,
67 uint32_t *value, int regnum)
68 {
69 int retval;
70 uint32_t dcrdr;
71
72 /* because the DCB_DCRDR is used for the emulated dcc channel
73 * we have to save/restore the DCB_DCRDR when used */
74
75 retval = mem_ap_read_u32(swjdp, DCB_DCRDR, &dcrdr);
76 if (retval != ERROR_OK)
77 return retval;
78
79 /* mem_ap_write_u32(swjdp, DCB_DCRSR, regnum); */
80 retval = dap_setup_accessport(swjdp, CSW_32BIT | CSW_ADDRINC_OFF, DCB_DCRSR & 0xFFFFFFF0);
81 if (retval != ERROR_OK)
82 return retval;
83 retval = dap_queue_ap_write(swjdp, AP_REG_BD0 | (DCB_DCRSR & 0xC), regnum);
84 if (retval != ERROR_OK)
85 return retval;
86
87 /* mem_ap_read_u32(swjdp, DCB_DCRDR, value); */
88 retval = dap_setup_accessport(swjdp, CSW_32BIT | CSW_ADDRINC_OFF, DCB_DCRDR & 0xFFFFFFF0);
89 if (retval != ERROR_OK)
90 return retval;
91 retval = dap_queue_ap_read(swjdp, AP_REG_BD0 | (DCB_DCRDR & 0xC), value);
92 if (retval != ERROR_OK)
93 return retval;
94
95 retval = dap_run(swjdp);
96 if (retval != ERROR_OK)
97 return retval;
98
99 /* restore DCB_DCRDR - this needs to be in a separate
100 * transaction otherwise the emulated DCC channel breaks */
101 if (retval == ERROR_OK)
102 retval = mem_ap_write_atomic_u32(swjdp, DCB_DCRDR, dcrdr);
103
104 return retval;
105 }
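The queued transfers above implement the ARMv7-M DCRSR/DCRDR core register
handshake while preserving DCRDR for the emulated DCC channel. As a rough,
hypothetical sketch (ignoring the queuing and the DCRDR save/restore, and
omitting the S_REGRDY poll a robust version would add), the same read reduces
to the two-step sequence below, using helpers already present in this file:

static int dcrsr_read_sketch(struct adiv5_dap *swjdp,
		uint32_t *value, int regnum)
{
	/* select the core register; REGWnR clear requests a core-to-DCRDR transfer */
	int retval = mem_ap_write_atomic_u32(swjdp, DCB_DCRSR, regnum);
	if (retval != ERROR_OK)
		return retval;
	/* the transferred value is then available in DCRDR */
	return mem_ap_read_atomic_u32(swjdp, DCB_DCRDR, value);
}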
106
107 static int cortexm3_dap_write_coreregister_u32(struct adiv5_dap *swjdp,
108 uint32_t value, int regnum)
109 {
110 int retval;
111 uint32_t dcrdr;
112
113 /* because the DCB_DCRDR is used for the emulated dcc channel
114 * we have to save/restore the DCB_DCRDR when used */
115
116 retval = mem_ap_read_u32(swjdp, DCB_DCRDR, &dcrdr);
117 if (retval != ERROR_OK)
118 return retval;
119
120 /* mem_ap_write_u32(swjdp, DCB_DCRDR, core_regs[i]); */
121 retval = dap_setup_accessport(swjdp, CSW_32BIT | CSW_ADDRINC_OFF, DCB_DCRDR & 0xFFFFFFF0);
122 if (retval != ERROR_OK)
123 return retval;
124 retval = dap_queue_ap_write(swjdp, AP_REG_BD0 | (DCB_DCRDR & 0xC), value);
125 if (retval != ERROR_OK)
126 return retval;
127
128 /* mem_ap_write_u32(swjdp, DCB_DCRSR, i | DCRSR_WnR); */
129 retval = dap_setup_accessport(swjdp, CSW_32BIT | CSW_ADDRINC_OFF, DCB_DCRSR & 0xFFFFFFF0);
130 if (retval != ERROR_OK)
131 return retval;
132 retval = dap_queue_ap_write(swjdp, AP_REG_BD0 | (DCB_DCRSR & 0xC), regnum | DCRSR_WnR);
133 if (retval != ERROR_OK)
134 return retval;
135
136 retval = dap_run(swjdp);
137 if (retval != ERROR_OK)
138 return retval;
139
140 /* restore DCB_DCRDR - this needs to be in a separate
141 * transaction otherwise the emulated DCC channel breaks */
142 if (retval == ERROR_OK)
143 retval = mem_ap_write_atomic_u32(swjdp, DCB_DCRDR, dcrdr);
144
145 return retval;
146 }
147
148 static int cortex_m3_write_debug_halt_mask(struct target *target,
149 uint32_t mask_on, uint32_t mask_off)
150 {
151 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
152 struct adiv5_dap *swjdp = cortex_m3->armv7m.arm.dap;
153
154 /* mask off status bits */
155 cortex_m3->dcb_dhcsr &= ~((0xFFFF << 16) | mask_off);
156 /* create new register mask */
157 cortex_m3->dcb_dhcsr |= DBGKEY | C_DEBUGEN | mask_on;
158
159 return mem_ap_write_atomic_u32(swjdp, DCB_DHCSR, cortex_m3->dcb_dhcsr);
160 }
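/* Callers below use this helper for all run control, e.g.:
 *   cortex_m3_write_debug_halt_mask(target, C_HALT, 0)      -- request a halt
 *   cortex_m3_write_debug_halt_mask(target, 0, C_HALT)      -- let the core run
 *   cortex_m3_write_debug_halt_mask(target, C_STEP, C_HALT) -- single step
 * DBGKEY and C_DEBUGEN are always kept set by the helper itself. */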
161
162 static int cortex_m3_clear_halt(struct target *target)
163 {
164 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
165 struct adiv5_dap *swjdp = cortex_m3->armv7m.arm.dap;
166 int retval;
167
168 /* clear step if any */
169 cortex_m3_write_debug_halt_mask(target, C_HALT, C_STEP);
170
171 /* Read Debug Fault Status Register */
172 retval = mem_ap_read_atomic_u32(swjdp, NVIC_DFSR, &cortex_m3->nvic_dfsr);
173 if (retval != ERROR_OK)
174 return retval;
175
176 /* Clear Debug Fault Status */
177 retval = mem_ap_write_atomic_u32(swjdp, NVIC_DFSR, cortex_m3->nvic_dfsr);
178 if (retval != ERROR_OK)
179 return retval;
180 LOG_DEBUG(" NVIC_DFSR 0x%" PRIx32 "", cortex_m3->nvic_dfsr);
181
182 return ERROR_OK;
183 }
184
185 static int cortex_m3_single_step_core(struct target *target)
186 {
187 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
188 struct adiv5_dap *swjdp = cortex_m3->armv7m.arm.dap;
189 uint32_t dhcsr_save;
190 int retval;
191
192 /* backup dhcsr reg */
193 dhcsr_save = cortex_m3->dcb_dhcsr;
194
195 /* Mask interrupts before clearing halt, if not done already. This avoids
196 * Erratum 377497 (fixed in r1p0) where setting MASKINTS while clearing
197 * HALT can put the core into an unknown state.
198 */
199 if (!(cortex_m3->dcb_dhcsr & C_MASKINTS)) {
200 retval = mem_ap_write_atomic_u32(swjdp, DCB_DHCSR,
201 DBGKEY | C_MASKINTS | C_HALT | C_DEBUGEN);
202 if (retval != ERROR_OK)
203 return retval;
204 }
205 retval = mem_ap_write_atomic_u32(swjdp, DCB_DHCSR,
206 DBGKEY | C_MASKINTS | C_STEP | C_DEBUGEN);
207 if (retval != ERROR_OK)
208 return retval;
209 LOG_DEBUG(" ");
210
211 /* restore dhcsr reg */
212 cortex_m3->dcb_dhcsr = dhcsr_save;
213 cortex_m3_clear_halt(target);
214
215 return ERROR_OK;
216 }
217
218 static int cortex_m3_endreset_event(struct target *target)
219 {
220 int i;
221 int retval;
222 uint32_t dcb_demcr;
223 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
224 struct armv7m_common *armv7m = &cortex_m3->armv7m;
225 struct adiv5_dap *swjdp = cortex_m3->armv7m.arm.dap;
226 struct cortex_m3_fp_comparator *fp_list = cortex_m3->fp_comparator_list;
227 struct cortex_m3_dwt_comparator *dwt_list = cortex_m3->dwt_comparator_list;
228
229 /* REVISIT The four debug monitor bits are currently ignored... */
230 retval = mem_ap_read_atomic_u32(swjdp, DCB_DEMCR, &dcb_demcr);
231 if (retval != ERROR_OK)
232 return retval;
233 LOG_DEBUG("DCB_DEMCR = 0x%8.8" PRIx32 "", dcb_demcr);
234
235 /* this register is used for emulated dcc channel */
236 retval = mem_ap_write_u32(swjdp, DCB_DCRDR, 0);
237 if (retval != ERROR_OK)
238 return retval;
239
240 /* Enable debug requests */
241 retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &cortex_m3->dcb_dhcsr);
242 if (retval != ERROR_OK)
243 return retval;
244 if (!(cortex_m3->dcb_dhcsr & C_DEBUGEN)) {
245 retval = mem_ap_write_u32(swjdp, DCB_DHCSR, DBGKEY | C_DEBUGEN);
246 if (retval != ERROR_OK)
247 return retval;
248 }
249
250 /* clear any interrupt masking */
251 cortex_m3_write_debug_halt_mask(target, 0, C_MASKINTS);
252
253 /* Enable features controlled by ITM and DWT blocks, and catch only
254 * the vectors we were told to pay attention to.
255 *
256 * Target firmware is responsible for all fault handling policy
257 * choices *EXCEPT* explicitly scripted overrides like "vector_catch"
258 * or manual updates to the NVIC SHCSR and CCR registers.
259 */
260 retval = mem_ap_write_u32(swjdp, DCB_DEMCR, TRCENA | armv7m->demcr);
261 if (retval != ERROR_OK)
262 return retval;
263
264 /* Paranoia: evidently some (early?) chips don't preserve all the
265 * debug state (including FPB, DWT, etc) across reset...
266 */
267
268 /* Enable FPB */
269 retval = target_write_u32(target, FP_CTRL, 3);
270 if (retval != ERROR_OK)
271 return retval;
272
273 cortex_m3->fpb_enabled = 1;
274
275 /* Restore FPB registers */
276 for (i = 0; i < cortex_m3->fp_num_code + cortex_m3->fp_num_lit; i++) {
277 retval = target_write_u32(target, fp_list[i].fpcr_address, fp_list[i].fpcr_value);
278 if (retval != ERROR_OK)
279 return retval;
280 }
281
282 /* Restore DWT registers */
283 for (i = 0; i < cortex_m3->dwt_num_comp; i++) {
284 retval = target_write_u32(target, dwt_list[i].dwt_comparator_address + 0,
285 dwt_list[i].comp);
286 if (retval != ERROR_OK)
287 return retval;
288 retval = target_write_u32(target, dwt_list[i].dwt_comparator_address + 4,
289 dwt_list[i].mask);
290 if (retval != ERROR_OK)
291 return retval;
292 retval = target_write_u32(target, dwt_list[i].dwt_comparator_address + 8,
293 dwt_list[i].function);
294 if (retval != ERROR_OK)
295 return retval;
296 }
297 retval = dap_run(swjdp);
298 if (retval != ERROR_OK)
299 return retval;
300
301 register_cache_invalidate(armv7m->arm.core_cache);
302
303 /* make sure we have latest dhcsr flags */
304 retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &cortex_m3->dcb_dhcsr);
305
306 return retval;
307 }
308
309 static int cortex_m3_examine_debug_reason(struct target *target)
310 {
311 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
312
313 /* TODO: improve the logic for detecting the reason for the debug state;
314 * for now, only check the debug reason if we don't already know it */
315
316 if ((target->debug_reason != DBG_REASON_DBGRQ)
317 && (target->debug_reason != DBG_REASON_SINGLESTEP)) {
318 if (cortex_m3->nvic_dfsr & DFSR_BKPT) {
319 target->debug_reason = DBG_REASON_BREAKPOINT;
320 if (cortex_m3->nvic_dfsr & DFSR_DWTTRAP)
321 target->debug_reason = DBG_REASON_WPTANDBKPT;
322 } else if (cortex_m3->nvic_dfsr & DFSR_DWTTRAP)
323 target->debug_reason = DBG_REASON_WATCHPOINT;
324 else if (cortex_m3->nvic_dfsr & DFSR_VCATCH)
325 target->debug_reason = DBG_REASON_BREAKPOINT;
326 else /* EXTERNAL, HALTED */
327 target->debug_reason = DBG_REASON_UNDEFINED;
328 }
329
330 return ERROR_OK;
331 }
332
333 static int cortex_m3_examine_exception_reason(struct target *target)
334 {
335 uint32_t shcsr = 0, except_sr = 0, cfsr = -1, except_ar = -1;
336 struct armv7m_common *armv7m = target_to_armv7m(target);
337 struct adiv5_dap *swjdp = armv7m->arm.dap;
338 int retval;
339
340 retval = mem_ap_read_u32(swjdp, NVIC_SHCSR, &shcsr);
341 if (retval != ERROR_OK)
342 return retval;
343 switch (armv7m->exception_number) {
344 case 2: /* NMI */
345 break;
346 case 3: /* Hard Fault */
347 retval = mem_ap_read_atomic_u32(swjdp, NVIC_HFSR, &except_sr);
348 if (retval != ERROR_OK)
349 return retval;
350 if (except_sr & 0x40000000) {
351 retval = mem_ap_read_u32(swjdp, NVIC_CFSR, &cfsr);
352 if (retval != ERROR_OK)
353 return retval;
354 }
355 break;
356 case 4: /* Memory Management */
357 retval = mem_ap_read_u32(swjdp, NVIC_CFSR, &except_sr);
358 if (retval != ERROR_OK)
359 return retval;
360 retval = mem_ap_read_u32(swjdp, NVIC_MMFAR, &except_ar);
361 if (retval != ERROR_OK)
362 return retval;
363 break;
364 case 5: /* Bus Fault */
365 retval = mem_ap_read_u32(swjdp, NVIC_CFSR, &except_sr);
366 if (retval != ERROR_OK)
367 return retval;
368 retval = mem_ap_read_u32(swjdp, NVIC_BFAR, &except_ar);
369 if (retval != ERROR_OK)
370 return retval;
371 break;
372 case 6: /* Usage Fault */
373 retval = mem_ap_read_u32(swjdp, NVIC_CFSR, &except_sr);
374 if (retval != ERROR_OK)
375 return retval;
376 break;
377 case 11: /* SVCall */
378 break;
379 case 12: /* Debug Monitor */
380 retval = mem_ap_read_u32(swjdp, NVIC_DFSR, &except_sr);
381 if (retval != ERROR_OK)
382 return retval;
383 break;
384 case 14: /* PendSV */
385 break;
386 case 15: /* SysTick */
387 break;
388 default:
389 except_sr = 0;
390 break;
391 }
392 retval = dap_run(swjdp);
393 if (retval == ERROR_OK)
394 LOG_DEBUG("%s SHCSR 0x%" PRIx32 ", SR 0x%" PRIx32
395 ", CFSR 0x%" PRIx32 ", AR 0x%" PRIx32,
396 armv7m_exception_string(armv7m->exception_number),
397 shcsr, except_sr, cfsr, except_ar);
398 return retval;
399 }
400
401 static int cortex_m3_debug_entry(struct target *target)
402 {
403 int i;
404 uint32_t xPSR;
405 int retval;
406 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
407 struct armv7m_common *armv7m = &cortex_m3->armv7m;
408 struct arm *arm = &armv7m->arm;
409 struct adiv5_dap *swjdp = armv7m->arm.dap;
410 struct reg *r;
411
412 LOG_DEBUG(" ");
413
414 cortex_m3_clear_halt(target);
415 retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &cortex_m3->dcb_dhcsr);
416 if (retval != ERROR_OK)
417 return retval;
418
419 retval = armv7m->examine_debug_reason(target);
420 if (retval != ERROR_OK)
421 return retval;
422
423 /* Examine target state and mode
424 * First load the registers accessible through the core debug port */
425 int num_regs = arm->core_cache->num_regs;
426
427 for (i = 0; i < num_regs; i++) {
428 r = &armv7m->arm.core_cache->reg_list[i];
429 if (!r->valid)
430 arm->read_core_reg(target, r, i, ARM_MODE_ANY);
431 }
432
433 r = arm->core_cache->reg_list + ARMV7M_xPSR;
434 xPSR = buf_get_u32(r->value, 0, 32);
435
436 #ifdef ARMV7_GDB_HACKS
437 /* FIXME this breaks on scan chains with more than one Cortex-M3.
438 * Instead, each CM3 should have its own dummy value...
439 */
440 /* copy real xpsr reg for gdb, setting thumb bit */
441 buf_set_u32(armv7m_gdb_dummy_cpsr_value, 0, 32, xPSR);
442 buf_set_u32(armv7m_gdb_dummy_cpsr_value, 5, 1, 1);
443 armv7m_gdb_dummy_cpsr_reg.valid = r->valid;
444 armv7m_gdb_dummy_cpsr_reg.dirty = r->dirty;
445 #endif
446
447 /* For IT instructions xPSR must be reloaded on resume and cleared on debug exec */
448 if (xPSR & 0xf00) {
449 r->dirty = r->valid;
450 cortex_m3_store_core_reg_u32(target, 16, xPSR & ~0xff);
451 }
452
453 /* Are we in an exception handler */
454 if (xPSR & 0x1FF) {
455 armv7m->exception_number = (xPSR & 0x1FF);
456
457 arm->core_mode = ARM_MODE_HANDLER;
458 arm->map = armv7m_msp_reg_map;
459 } else {
460 unsigned control = buf_get_u32(arm->core_cache
461 ->reg_list[ARMV7M_CONTROL].value, 0, 2);
462
463 /* is this thread privileged? */
464 arm->core_mode = control & 1
465 ? ARM_MODE_USER_THREAD
466 : ARM_MODE_THREAD;
467
468 /* which stack is it using? */
469 if (control & 2)
470 arm->map = armv7m_psp_reg_map;
471 else
472 arm->map = armv7m_msp_reg_map;
473
474 armv7m->exception_number = 0;
475 }
476
477 if (armv7m->exception_number)
478 cortex_m3_examine_exception_reason(target);
479
480 LOG_DEBUG("entered debug state in core mode: %s at PC 0x%" PRIx32 ", target->state: %s",
481 arm_mode_name(arm->core_mode),
482 *(uint32_t *)(arm->pc->value),
483 target_state_name(target));
484
485 if (armv7m->post_debug_entry) {
486 retval = armv7m->post_debug_entry(target);
487 if (retval != ERROR_OK)
488 return retval;
489 }
490
491 return ERROR_OK;
492 }
493
494 static int cortex_m3_poll(struct target *target)
495 {
496 int detected_failure = ERROR_OK;
497 int retval = ERROR_OK;
498 enum target_state prev_target_state = target->state;
499 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
500 struct adiv5_dap *swjdp = cortex_m3->armv7m.arm.dap;
501
502 /* Read from Debug Halting Control and Status Register */
503 retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &cortex_m3->dcb_dhcsr);
504 if (retval != ERROR_OK) {
505 target->state = TARGET_UNKNOWN;
506 return retval;
507 }
508
509 /* Recover from lockup. See ARMv7-M architecture spec,
510 * section B1.5.15 "Unrecoverable exception cases".
511 */
512 if (cortex_m3->dcb_dhcsr & S_LOCKUP) {
513 LOG_ERROR("%s -- clearing lockup after double fault",
514 target_name(target));
515 cortex_m3_write_debug_halt_mask(target, C_HALT, 0);
516 target->debug_reason = DBG_REASON_DBGRQ;
517
518 /* We have to execute the remainder of this function (the "finally"
519 * equivalent), but still report this failure when polling completes.
520 */
521 detected_failure = ERROR_FAIL;
522
523 /* refresh status bits */
524 retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &cortex_m3->dcb_dhcsr);
525 if (retval != ERROR_OK)
526 return retval;
527 }
528
529 if (cortex_m3->dcb_dhcsr & S_RESET_ST) {
530 /* check if still in reset */
531 retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &cortex_m3->dcb_dhcsr);
532 if (retval != ERROR_OK)
533 return retval;
534
535 if (cortex_m3->dcb_dhcsr & S_RESET_ST) {
536 target->state = TARGET_RESET;
537 return ERROR_OK;
538 }
539 }
540
541 if (target->state == TARGET_RESET) {
542 /* Cannot switch context while running so endreset is
543 * called with target->state == TARGET_RESET
544 */
545 LOG_DEBUG("Exit from reset with dcb_dhcsr 0x%" PRIx32,
546 cortex_m3->dcb_dhcsr);
547 cortex_m3_endreset_event(target);
548 target->state = TARGET_RUNNING;
549 prev_target_state = TARGET_RUNNING;
550 }
551
552 if (cortex_m3->dcb_dhcsr & S_HALT) {
553 target->state = TARGET_HALTED;
554
555 if ((prev_target_state == TARGET_RUNNING) || (prev_target_state == TARGET_RESET)) {
556 retval = cortex_m3_debug_entry(target);
557 if (retval != ERROR_OK)
558 return retval;
559
560 if (arm_semihosting(target, &retval) != 0)
561 return retval;
562
563 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
564 }
565 if (prev_target_state == TARGET_DEBUG_RUNNING) {
566 LOG_DEBUG(" ");
567 retval = cortex_m3_debug_entry(target);
568 if (retval != ERROR_OK)
569 return retval;
570
571 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
572 }
573 }
574
575 /* REVISIT when S_SLEEP is set, it's in a Sleep or DeepSleep state.
576 * How best to model low power modes?
577 */
578
579 if (target->state == TARGET_UNKNOWN) {
580 /* check if processor is retiring instructions */
581 if (cortex_m3->dcb_dhcsr & S_RETIRE_ST) {
582 target->state = TARGET_RUNNING;
583 retval = ERROR_OK;
584 }
585 }
586
587 /* Did we detect a failure condition that we cleared? */
588 if (detected_failure != ERROR_OK)
589 retval = detected_failure;
590 return retval;
591 }
592
593 static int cortex_m3_halt(struct target *target)
594 {
595 LOG_DEBUG("target->state: %s",
596 target_state_name(target));
597
598 if (target->state == TARGET_HALTED) {
599 LOG_DEBUG("target was already halted");
600 return ERROR_OK;
601 }
602
603 if (target->state == TARGET_UNKNOWN)
604 LOG_WARNING("target was in unknown state when halt was requested");
605
606 if (target->state == TARGET_RESET) {
607 if ((jtag_get_reset_config() & RESET_SRST_PULLS_TRST) && jtag_get_srst()) {
608 LOG_ERROR("can't request a halt while in reset if nSRST pulls nTRST");
609 return ERROR_TARGET_FAILURE;
610 } else {
611 /* we came here in a reset_halt or reset_init sequence
612 * debug entry was already prepared in cortex_m3_assert_reset()
613 */
614 target->debug_reason = DBG_REASON_DBGRQ;
615
616 return ERROR_OK;
617 }
618 }
619
620 /* Write to Debug Halting Control and Status Register */
621 cortex_m3_write_debug_halt_mask(target, C_HALT, 0);
622
623 target->debug_reason = DBG_REASON_DBGRQ;
624
625 return ERROR_OK;
626 }
627
628 static int cortex_m3_soft_reset_halt(struct target *target)
629 {
630 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
631 struct adiv5_dap *swjdp = cortex_m3->armv7m.arm.dap;
632 uint32_t dcb_dhcsr = 0;
633 int retval, timeout = 0;
634
635 /* soft_reset_halt is deprecated on cortex_m: the same functionality
636 * can be obtained by using 'reset halt' with 'cortex_m reset_config vectreset'.
637 * As this reset only uses VC_CORERESET, it only ever resets the cortex_m
638 * core, not the peripherals */
639 LOG_WARNING("soft_reset_halt is deprecated, please use 'reset halt' instead.");
640
641 /* Enter debug state on reset; restore DEMCR in endreset_event() */
642 retval = mem_ap_write_u32(swjdp, DCB_DEMCR,
643 TRCENA | VC_HARDERR | VC_BUSERR | VC_CORERESET);
644 if (retval != ERROR_OK)
645 return retval;
646
647 /* Request a core-only reset */
648 retval = mem_ap_write_atomic_u32(swjdp, NVIC_AIRCR,
649 AIRCR_VECTKEY | AIRCR_VECTRESET);
650 if (retval != ERROR_OK)
651 return retval;
652 target->state = TARGET_RESET;
653
654 /* registers are now invalid */
655 register_cache_invalidate(cortex_m3->armv7m.arm.core_cache);
656
657 while (timeout < 100) {
658 retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &dcb_dhcsr);
659 if (retval == ERROR_OK) {
660 retval = mem_ap_read_atomic_u32(swjdp, NVIC_DFSR,
661 &cortex_m3->nvic_dfsr);
662 if (retval != ERROR_OK)
663 return retval;
664 if ((dcb_dhcsr & S_HALT)
665 && (cortex_m3->nvic_dfsr & DFSR_VCATCH)) {
666 LOG_DEBUG("system reset-halted, DHCSR 0x%08x, "
667 "DFSR 0x%08x",
668 (unsigned) dcb_dhcsr,
669 (unsigned) cortex_m3->nvic_dfsr);
670 cortex_m3_poll(target);
671 /* FIXME restore user's vector catch config */
672 return ERROR_OK;
673 } else
674 LOG_DEBUG("waiting for system reset-halt, "
675 "DHCSR 0x%08x, %d ms",
676 (unsigned) dcb_dhcsr, timeout);
677 }
678 timeout++;
679 alive_sleep(1);
680 }
681
682 return ERROR_OK;
683 }
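/* A minimal sketch of the replacement flow referenced by the deprecation
 * warning above, assuming a typical configuration script (target names and
 * exact syntax depend on the board configuration):
 *
 *     cortex_m reset_config vectreset
 *     reset halt
 *
 * Configurations with working SRST, or cores supporting SYSRESETREQ, can be
 * set up to reset the peripherals as well. */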
684
685 void cortex_m3_enable_breakpoints(struct target *target)
686 {
687 struct breakpoint *breakpoint = target->breakpoints;
688
689 /* set any pending breakpoints */
690 while (breakpoint) {
691 if (!breakpoint->set)
692 cortex_m3_set_breakpoint(target, breakpoint);
693 breakpoint = breakpoint->next;
694 }
695 }
696
697 static int cortex_m3_resume(struct target *target, int current,
698 uint32_t address, int handle_breakpoints, int debug_execution)
699 {
700 struct armv7m_common *armv7m = target_to_armv7m(target);
701 struct breakpoint *breakpoint = NULL;
702 uint32_t resume_pc;
703 struct reg *r;
704
705 if (target->state != TARGET_HALTED) {
706 LOG_WARNING("target not halted");
707 return ERROR_TARGET_NOT_HALTED;
708 }
709
710 if (!debug_execution) {
711 target_free_all_working_areas(target);
712 cortex_m3_enable_breakpoints(target);
713 cortex_m3_enable_watchpoints(target);
714 }
715
716 if (debug_execution) {
717 r = armv7m->arm.core_cache->reg_list + ARMV7M_PRIMASK;
718
719 /* Disable interrupts */
720 /* We disable interrupts in the PRIMASK register instead of
721 * masking with C_MASKINTS. This is probably the same issue
722 * as Cortex-M3 Erratum 377493 (fixed in r1p0): C_MASKINTS
723 * in parallel with disabled interrupts can cause local faults
724 * to not be taken.
725 *
726 * REVISIT this clearly breaks non-debug execution, since the
727 * PRIMASK register state isn't saved/restored... workaround
728 * by never resuming app code after debug execution.
729 */
730 buf_set_u32(r->value, 0, 1, 1);
731 r->dirty = true;
732 r->valid = true;
733
734 /* Make sure we are in Thumb mode */
735 r = armv7m->arm.core_cache->reg_list + ARMV7M_xPSR;
736 buf_set_u32(r->value, 24, 1, 1);
737 r->dirty = true;
738 r->valid = true;
739 }
740
741 /* current = 1: continue on current pc, otherwise continue at <address> */
742 r = armv7m->arm.pc;
743 if (!current) {
744 buf_set_u32(r->value, 0, 32, address);
745 r->dirty = true;
746 r->valid = true;
747 }
748
749 /* if we halted last time due to a bkpt instruction
750 * then we have to manually step over it, otherwise
751 * the core will break again */
752
753 if (!breakpoint_find(target, buf_get_u32(r->value, 0, 32))
754 && !debug_execution)
755 armv7m_maybe_skip_bkpt_inst(target, NULL);
756
757 resume_pc = buf_get_u32(r->value, 0, 32);
758
759 armv7m_restore_context(target);
760
761 /* the front-end may request us not to handle breakpoints */
762 if (handle_breakpoints) {
763 /* Single step past breakpoint at current address */
764 breakpoint = breakpoint_find(target, resume_pc);
765 if (breakpoint) {
766 LOG_DEBUG("unset breakpoint at 0x%8.8" PRIx32 " (ID: %d)",
767 breakpoint->address,
768 breakpoint->unique_id);
769 cortex_m3_unset_breakpoint(target, breakpoint);
770 cortex_m3_single_step_core(target);
771 cortex_m3_set_breakpoint(target, breakpoint);
772 }
773 }
774
775 /* Restart core */
776 cortex_m3_write_debug_halt_mask(target, 0, C_HALT);
777
778 target->debug_reason = DBG_REASON_NOTHALTED;
779
780 /* registers are now invalid */
781 register_cache_invalidate(armv7m->arm.core_cache);
782
783 if (!debug_execution) {
784 target->state = TARGET_RUNNING;
785 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
786 LOG_DEBUG("target resumed at 0x%" PRIx32 "", resume_pc);
787 } else {
788 target->state = TARGET_DEBUG_RUNNING;
789 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
790 LOG_DEBUG("target debug resumed at 0x%" PRIx32 "", resume_pc);
791 }
792
793 return ERROR_OK;
794 }
795
796 /* int irqstepcount = 0; */
797 static int cortex_m3_step(struct target *target, int current,
798 uint32_t address, int handle_breakpoints)
799 {
800 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
801 struct armv7m_common *armv7m = &cortex_m3->armv7m;
802 struct adiv5_dap *swjdp = armv7m->arm.dap;
803 struct breakpoint *breakpoint = NULL;
804 struct reg *pc = armv7m->arm.pc;
805 bool bkpt_inst_found = false;
806 int retval;
807 bool isr_timed_out = false;
808
809 if (target->state != TARGET_HALTED) {
810 LOG_WARNING("target not halted");
811 return ERROR_TARGET_NOT_HALTED;
812 }
813
814 /* current = 1: continue on current pc, otherwise continue at <address> */
815 if (!current)
816 buf_set_u32(pc->value, 0, 32, address);
817
818 uint32_t pc_value = buf_get_u32(pc->value, 0, 32);
819
820 /* the front-end may request us not to handle breakpoints */
821 if (handle_breakpoints) {
822 breakpoint = breakpoint_find(target, pc_value);
823 if (breakpoint)
824 cortex_m3_unset_breakpoint(target, breakpoint);
825 }
826
827 armv7m_maybe_skip_bkpt_inst(target, &bkpt_inst_found);
828
829 target->debug_reason = DBG_REASON_SINGLESTEP;
830
831 armv7m_restore_context(target);
832
833 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
834
835 /* if no bkpt instruction is found at pc then we can perform
836 * a normal step, otherwise we have to manually step over the bkpt
837 * instruction - as such simulate a step */
838 if (bkpt_inst_found == false) {
839 /* Automatic ISR masking mode off: Just step over the next instruction */
840 if ((cortex_m3->isrmasking_mode != CORTEX_M3_ISRMASK_AUTO))
841 cortex_m3_write_debug_halt_mask(target, C_STEP, C_HALT);
842 else {
843 /* Process interrupts during stepping in a way that they don't
844 * interfere with debugging.
845 *
846 * Principle:
847 *
848 * Set a temporary break point at the current pc and let the core run
849 * with interrupts enabled. Pending interrupts get served and we run
850 * into the breakpoint again afterwards. Then we step over the next
851 * instruction with interrupts disabled.
852 *
853 * If the pending interrupts don't complete within the timeout, we leave the
854 * core running. This may happen if the interrupts trigger faster
855 * than the core can process them or the handler doesn't return.
856 *
857 * If no more breakpoints are available we simply do a step with
858 * interrupts enabled.
859 *
860 */
861
862 /* 2012-09-29 ph
863 *
864 * If a break point is already set on the lower half word then a break point on
865 * the upper half word will not break again when the core is restarted. So we
866 * just step over the instruction with interrupts disabled.
867 *
868 * The documentation has no information about this, it was found by observation
869 * on STM32F1 and STM32F2. Proper explanation welcome. STM32F0 doesn't seem to
870 * suffer from this problem.
871 *
872 * To add some confusion: pc_value has bit 0 always set, while the breakpoint
873 * address has it always cleared. The former is done to indicate thumb mode
874 * to gdb.
875 *
876 */
877 if ((pc_value & 0x02) && breakpoint_find(target, pc_value & ~0x03)) {
878 LOG_DEBUG("Stepping over next instruction with interrupts disabled");
879 cortex_m3_write_debug_halt_mask(target, C_HALT | C_MASKINTS, 0);
880 cortex_m3_write_debug_halt_mask(target, C_STEP, C_HALT);
881 /* Re-enable interrupts */
882 cortex_m3_write_debug_halt_mask(target, C_HALT, C_MASKINTS);
883 }
884 else {
885
886 /* Set a temporary break point */
887 if (breakpoint)
888 retval = cortex_m3_set_breakpoint(target, breakpoint);
889 else
890 retval = breakpoint_add(target, pc_value, 2, BKPT_TYPE_BY_ADDR(pc_value));
891 bool tmp_bp_set = (retval == ERROR_OK);
892
893 /* No more breakpoints left, just do a step */
894 if (!tmp_bp_set)
895 cortex_m3_write_debug_halt_mask(target, C_STEP, C_HALT);
896 else {
897 /* Start the core */
898 LOG_DEBUG("Starting core to serve pending interrupts");
899 int64_t t_start = timeval_ms();
900 cortex_m3_write_debug_halt_mask(target, 0, C_HALT | C_STEP);
901
902 /* Wait for pending handlers to complete or timeout */
903 do {
904 retval = mem_ap_read_atomic_u32(swjdp,
905 DCB_DHCSR,
906 &cortex_m3->dcb_dhcsr);
907 if (retval != ERROR_OK) {
908 target->state = TARGET_UNKNOWN;
909 return retval;
910 }
911 isr_timed_out = ((timeval_ms() - t_start) > 500);
912 } while (!((cortex_m3->dcb_dhcsr & S_HALT) || isr_timed_out));
913
914 /* only remove breakpoint if we created it */
915 if (breakpoint)
916 cortex_m3_unset_breakpoint(target, breakpoint);
917 else {
918 /* Remove the temporary breakpoint */
919 breakpoint_remove(target, pc_value);
920 }
921
922 if (isr_timed_out) {
923 LOG_DEBUG("Interrupt handlers didn't complete within time, "
924 "leaving target running");
925 } else {
926 /* Step over next instruction with interrupts disabled */
927 cortex_m3_write_debug_halt_mask(target,
928 C_HALT | C_MASKINTS,
929 0);
930 cortex_m3_write_debug_halt_mask(target, C_STEP, C_HALT);
931 /* Re-enable interrupts */
932 cortex_m3_write_debug_halt_mask(target, C_HALT, C_MASKINTS);
933 }
934 }
935 }
936 }
937 }
938
939 retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &cortex_m3->dcb_dhcsr);
940 if (retval != ERROR_OK)
941 return retval;
942
943 /* registers are now invalid */
944 register_cache_invalidate(armv7m->arm.core_cache);
945
946 if (breakpoint)
947 cortex_m3_set_breakpoint(target, breakpoint);
948
949 if (isr_timed_out) {
950 /* Leave the core running. The user has to stop execution manually. */
951 target->debug_reason = DBG_REASON_NOTHALTED;
952 target->state = TARGET_RUNNING;
953 return ERROR_OK;
954 }
955
956 LOG_DEBUG("target stepped dcb_dhcsr = 0x%" PRIx32
957 " nvic_icsr = 0x%" PRIx32,
958 cortex_m3->dcb_dhcsr, cortex_m3->nvic_icsr);
959
960 retval = cortex_m3_debug_entry(target);
961 if (retval != ERROR_OK)
962 return retval;
963 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
964
965 LOG_DEBUG("target stepped dcb_dhcsr = 0x%" PRIx32
966 " nvic_icsr = 0x%" PRIx32,
967 cortex_m3->dcb_dhcsr, cortex_m3->nvic_icsr);
968
969 return ERROR_OK;
970 }
971
972 static int cortex_m3_assert_reset(struct target *target)
973 {
974 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
975 struct adiv5_dap *swjdp = cortex_m3->armv7m.arm.dap;
976 enum cortex_m3_soft_reset_config reset_config = cortex_m3->soft_reset_config;
977
978 LOG_DEBUG("target->state: %s",
979 target_state_name(target));
980
981 enum reset_types jtag_reset_config = jtag_get_reset_config();
982
983 if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT)) {
984 /* allow scripts to override the reset event */
985
986 target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
987 register_cache_invalidate(cortex_m3->armv7m.arm.core_cache);
988 target->state = TARGET_RESET;
989
990 return ERROR_OK;
991 }
992
993 /* some cores support connecting while srst is asserted
994 * use that mode if it has been configured */
995
996 bool srst_asserted = false;
997
998 if ((jtag_reset_config & RESET_HAS_SRST) &&
999 (jtag_reset_config & RESET_SRST_NO_GATING)) {
1000 adapter_assert_reset();
1001 srst_asserted = true;
1002 }
1003
1004 /* Enable debug requests */
1005 int retval;
1006 retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &cortex_m3->dcb_dhcsr);
1007 if (retval != ERROR_OK)
1008 return retval;
1009 if (!(cortex_m3->dcb_dhcsr & C_DEBUGEN)) {
1010 retval = mem_ap_write_u32(swjdp, DCB_DHCSR, DBGKEY | C_DEBUGEN);
1011 if (retval != ERROR_OK)
1012 return retval;
1013 }
1014
1015 /* If the processor is sleeping in a WFI or WFE instruction, the
1016 * C_HALT bit must be asserted to regain control */
1017 if (cortex_m3->dcb_dhcsr & S_SLEEP) {
1018 retval = mem_ap_write_u32(swjdp, DCB_DHCSR, DBGKEY | C_HALT | C_DEBUGEN);
1019 if (retval != ERROR_OK)
1020 return retval;
1021 }
1022
1023 retval = mem_ap_write_u32(swjdp, DCB_DCRDR, 0);
1024 if (retval != ERROR_OK)
1025 return retval;
1026
1027 if (!target->reset_halt) {
1028 /* Set/Clear C_MASKINTS in a separate operation */
1029 if (cortex_m3->dcb_dhcsr & C_MASKINTS) {
1030 retval = mem_ap_write_atomic_u32(swjdp, DCB_DHCSR,
1031 DBGKEY | C_DEBUGEN | C_HALT);
1032 if (retval != ERROR_OK)
1033 return retval;
1034 }
1035
1036 /* clear any debug flags before resuming */
1037 cortex_m3_clear_halt(target);
1038
1039 /* clear C_HALT in dhcsr reg */
1040 cortex_m3_write_debug_halt_mask(target, 0, C_HALT);
1041 } else {
1042 /* Halt in debug on reset; endreset_event() restores DEMCR.
1043 *
1044 * REVISIT catching BUSERR presumably helps to defend against
1045 * bad vector table entries. Should this include MMERR or
1046 * other flags too?
1047 */
1048 retval = mem_ap_write_atomic_u32(swjdp, DCB_DEMCR,
1049 TRCENA | VC_HARDERR | VC_BUSERR | VC_CORERESET);
1050 if (retval != ERROR_OK)
1051 return retval;
1052 }
1053
1054 if (jtag_reset_config & RESET_HAS_SRST) {
1055 /* default to asserting srst */
1056 if (!srst_asserted)
1057 adapter_assert_reset();
1058 } else {
1059 /* Use a standard Cortex-M3 software reset mechanism.
1060 * We default to using VECTRESET as it is supported on all current cores.
1061 * This has the disadvantage of not resetting the peripherals, so a
1062 * reset-init event handler is needed to perform any peripheral resets.
1063 */
1064 retval = mem_ap_write_atomic_u32(swjdp, NVIC_AIRCR,
1065 AIRCR_VECTKEY | ((reset_config == CORTEX_M3_RESET_SYSRESETREQ)
1066 ? AIRCR_SYSRESETREQ : AIRCR_VECTRESET));
1067 if (retval != ERROR_OK)
1068 return retval;
1069
1070 LOG_DEBUG("Using Cortex-M %s", (reset_config == CORTEX_M3_RESET_SYSRESETREQ)
1071 ? "SYSRESETREQ" : "VECTRESET");
1072
1073 if (reset_config == CORTEX_M3_RESET_VECTRESET) {
1074 LOG_WARNING("Only resetting the Cortex-M core, use a reset-init event "
1075 "handler to reset any peripherals or configure hardware srst support.");
1076 }
1077
1078 {
1079 /* I do not know why this is necessary, but it
1080 * fixes strange effects (step/resume cause NMI
1081 * after reset) on LM3S6918 -- Michael Schwingen
1082 */
1083 uint32_t tmp;
1084 retval = mem_ap_read_atomic_u32(swjdp, NVIC_AIRCR, &tmp);
1085 if (retval != ERROR_OK)
1086 return retval;
1087 }
1088 }
1089
1090 target->state = TARGET_RESET;
1091 jtag_add_sleep(50000);
1092
1093 register_cache_invalidate(cortex_m3->armv7m.arm.core_cache);
1094
1095 if (target->reset_halt) {
1096 retval = target_halt(target);
1097 if (retval != ERROR_OK)
1098 return retval;
1099 }
1100
1101 return ERROR_OK;
1102 }
1103
1104 static int cortex_m3_deassert_reset(struct target *target)
1105 {
1106 LOG_DEBUG("target->state: %s",
1107 target_state_name(target));
1108
1109 /* deassert reset lines */
1110 adapter_deassert_reset();
1111
1112 return ERROR_OK;
1113 }
1114
1115 int cortex_m3_set_breakpoint(struct target *target, struct breakpoint *breakpoint)
1116 {
1117 int retval;
1118 int fp_num = 0;
1119 uint32_t hilo;
1120 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
1121 struct cortex_m3_fp_comparator *comparator_list = cortex_m3->fp_comparator_list;
1122
1123 if (breakpoint->set) {
1124 LOG_WARNING("breakpoint (BPID: %d) already set", breakpoint->unique_id);
1125 return ERROR_OK;
1126 }
1127
1128 if (cortex_m3->auto_bp_type)
1129 breakpoint->type = BKPT_TYPE_BY_ADDR(breakpoint->address);
1130
1131 if (breakpoint->type == BKPT_HARD) {
1132 while ((fp_num < cortex_m3->fp_num_code) && comparator_list[fp_num].used)
1133 fp_num++;
1134 if (fp_num >= cortex_m3->fp_num_code) {
1135 LOG_ERROR("Can not find free FPB Comparator!");
1136 return ERROR_FAIL;
1137 }
1138 breakpoint->set = fp_num + 1;
1139 hilo = (breakpoint->address & 0x2) ? FPCR_REPLACE_BKPT_HIGH : FPCR_REPLACE_BKPT_LOW;
1140 comparator_list[fp_num].used = 1;
1141 comparator_list[fp_num].fpcr_value = (breakpoint->address & 0x1FFFFFFC) | hilo | 1;
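/* For example, a hard breakpoint at 0x00001002 matches the upper halfword,
 * so fpcr_value becomes 0x00001000 | FPCR_REPLACE_BKPT_HIGH | 1
 * (comparator address, replace-upper-halfword, comparator enable). */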
1142 target_write_u32(target, comparator_list[fp_num].fpcr_address,
1143 comparator_list[fp_num].fpcr_value);
1144 LOG_DEBUG("fpc_num %i fpcr_value 0x%" PRIx32 "",
1145 fp_num,
1146 comparator_list[fp_num].fpcr_value);
1147 if (!cortex_m3->fpb_enabled) {
1148 LOG_DEBUG("FPB wasn't enabled, do it now");
1149 target_write_u32(target, FP_CTRL, 3);
1150 }
1151 } else if (breakpoint->type == BKPT_SOFT) {
1152 uint8_t code[4];
1153
1154 /* NOTE: on ARMv6-M and ARMv7-M, BKPT(0xab) is used for
1155 * semihosting; don't use that. Otherwise the BKPT
1156 * parameter is arbitrary.
1157 */
1158 buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1159 retval = target_read_memory(target,
1160 breakpoint->address & 0xFFFFFFFE,
1161 breakpoint->length, 1,
1162 breakpoint->orig_instr);
1163 if (retval != ERROR_OK)
1164 return retval;
1165 retval = target_write_memory(target,
1166 breakpoint->address & 0xFFFFFFFE,
1167 breakpoint->length, 1,
1168 code);
1169 if (retval != ERROR_OK)
1170 return retval;
1171 breakpoint->set = true;
1172 }
1173
1174 LOG_DEBUG("BPID: %d, Type: %d, Address: 0x%08" PRIx32 " Length: %d (set=%d)",
1175 breakpoint->unique_id,
1176 (int)(breakpoint->type),
1177 breakpoint->address,
1178 breakpoint->length,
1179 breakpoint->set);
1180
1181 return ERROR_OK;
1182 }
1183
1184 int cortex_m3_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
1185 {
1186 int retval;
1187 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
1188 struct cortex_m3_fp_comparator *comparator_list = cortex_m3->fp_comparator_list;
1189
1190 if (!breakpoint->set) {
1191 LOG_WARNING("breakpoint not set");
1192 return ERROR_OK;
1193 }
1194
1195 LOG_DEBUG("BPID: %d, Type: %d, Address: 0x%08" PRIx32 " Length: %d (set=%d)",
1196 breakpoint->unique_id,
1197 (int)(breakpoint->type),
1198 breakpoint->address,
1199 breakpoint->length,
1200 breakpoint->set);
1201
1202 if (breakpoint->type == BKPT_HARD) {
1203 int fp_num = breakpoint->set - 1;
1204 if ((fp_num < 0) || (fp_num >= cortex_m3->fp_num_code)) {
1205 LOG_DEBUG("Invalid FP Comparator number in breakpoint");
1206 return ERROR_OK;
1207 }
1208 comparator_list[fp_num].used = 0;
1209 comparator_list[fp_num].fpcr_value = 0;
1210 target_write_u32(target, comparator_list[fp_num].fpcr_address,
1211 comparator_list[fp_num].fpcr_value);
1212 } else {
1213 /* restore original instruction (kept in target endianness) */
1214 if (breakpoint->length == 4) {
1215 retval = target_write_memory(target, breakpoint->address & 0xFFFFFFFE, 4, 1,
1216 breakpoint->orig_instr);
1217 if (retval != ERROR_OK)
1218 return retval;
1219 } else {
1220 retval = target_write_memory(target, breakpoint->address & 0xFFFFFFFE, 2, 1,
1221 breakpoint->orig_instr);
1222 if (retval != ERROR_OK)
1223 return retval;
1224 }
1225 }
1226 breakpoint->set = false;
1227
1228 return ERROR_OK;
1229 }
1230
1231 int cortex_m3_add_breakpoint(struct target *target, struct breakpoint *breakpoint)
1232 {
1233 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
1234
1235 if (cortex_m3->auto_bp_type) {
1236 breakpoint->type = BKPT_TYPE_BY_ADDR(breakpoint->address);
1237 #ifdef ARMV7_GDB_HACKS
1238 if (breakpoint->length != 2) {
1239 /* XXX Hack: Replace all breakpoints with length != 2 with
1240 * a hardware breakpoint. */
1241 breakpoint->type = BKPT_HARD;
1242 breakpoint->length = 2;
1243 }
1244 #endif
1245 }
1246
1247 if (breakpoint->type != BKPT_TYPE_BY_ADDR(breakpoint->address)) {
1248 if (breakpoint->type == BKPT_HARD) {
1249 LOG_INFO("flash patch comparator requested outside code memory region");
1250 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1251 }
1252
1253 if (breakpoint->type == BKPT_SOFT) {
1254 LOG_INFO("soft breakpoint requested in code (flash) memory region");
1255 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1256 }
1257 }
1258
1259 if ((breakpoint->type == BKPT_HARD) && (cortex_m3->fp_code_available < 1)) {
1260 LOG_INFO("no flash patch comparator unit available for hardware breakpoint");
1261 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1262 }
1263
1264 if ((breakpoint->length != 2)) {
1265 LOG_INFO("only breakpoints of two bytes length supported");
1266 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1267 }
1268
1269 if (breakpoint->type == BKPT_HARD)
1270 cortex_m3->fp_code_available--;
1271
1272 return cortex_m3_set_breakpoint(target, breakpoint);
1273 }
1274
1275 int cortex_m3_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1276 {
1277 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
1278
1279 /* REVISIT why check? FPB can be updated with core running ... */
1280 if (target->state != TARGET_HALTED) {
1281 LOG_WARNING("target not halted");
1282 return ERROR_TARGET_NOT_HALTED;
1283 }
1284
1285 if (cortex_m3->auto_bp_type)
1286 breakpoint->type = BKPT_TYPE_BY_ADDR(breakpoint->address);
1287
1288 if (breakpoint->set)
1289 cortex_m3_unset_breakpoint(target, breakpoint);
1290
1291 if (breakpoint->type == BKPT_HARD)
1292 cortex_m3->fp_code_available++;
1293
1294 return ERROR_OK;
1295 }
1296
1297 int cortex_m3_set_watchpoint(struct target *target, struct watchpoint *watchpoint)
1298 {
1299 int dwt_num = 0;
1300 uint32_t mask, temp;
1301 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
1302
1303 /* watchpoint params were validated earlier */
1304 mask = 0;
1305 temp = watchpoint->length;
1306 while (temp) {
1307 temp >>= 1;
1308 mask++;
1309 }
1310 mask--;
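/* e.g. a 4-byte watchpoint yields mask = 2, so the comparator ignores the
 * low two address bits */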
1311
1312 /* REVISIT Don't fully trust these "not used" records ... users
1313 * may set up watchpoints by hand, e.g. dual-address data value
1314 * watchpoint using comparator #1; comparator #0 matching cycle
1315 * count; send data trace info through ITM and TPIU; etc
1316 */
1317 struct cortex_m3_dwt_comparator *comparator;
1318
1319 for (comparator = cortex_m3->dwt_comparator_list;
1320 comparator->used && dwt_num < cortex_m3->dwt_num_comp;
1321 comparator++, dwt_num++)
1322 continue;
1323 if (dwt_num >= cortex_m3->dwt_num_comp) {
1324 LOG_ERROR("Can not find free DWT Comparator");
1325 return ERROR_FAIL;
1326 }
1327 comparator->used = 1;
1328 watchpoint->set = dwt_num + 1;
1329
1330 comparator->comp = watchpoint->address;
1331 target_write_u32(target, comparator->dwt_comparator_address + 0,
1332 comparator->comp);
1333
1334 comparator->mask = mask;
1335 target_write_u32(target, comparator->dwt_comparator_address + 4,
1336 comparator->mask);
1337
1338 switch (watchpoint->rw) {
1339 case WPT_READ:
1340 comparator->function = 5;
1341 break;
1342 case WPT_WRITE:
1343 comparator->function = 6;
1344 break;
1345 case WPT_ACCESS:
1346 comparator->function = 7;
1347 break;
1348 }
1349 target_write_u32(target, comparator->dwt_comparator_address + 8,
1350 comparator->function);
1351
1352 LOG_DEBUG("Watchpoint (ID %d) DWT%d 0x%08x 0x%x 0x%05x",
1353 watchpoint->unique_id, dwt_num,
1354 (unsigned) comparator->comp,
1355 (unsigned) comparator->mask,
1356 (unsigned) comparator->function);
1357 return ERROR_OK;
1358 }
1359
1360 int cortex_m3_unset_watchpoint(struct target *target, struct watchpoint *watchpoint)
1361 {
1362 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
1363 struct cortex_m3_dwt_comparator *comparator;
1364 int dwt_num;
1365
1366 if (!watchpoint->set) {
1367 LOG_WARNING("watchpoint (wpid: %d) not set",
1368 watchpoint->unique_id);
1369 return ERROR_OK;
1370 }
1371
1372 dwt_num = watchpoint->set - 1;
1373
1374 LOG_DEBUG("Watchpoint (ID %d) DWT%d address: 0x%08x clear",
1375 watchpoint->unique_id, dwt_num,
1376 (unsigned) watchpoint->address);
1377
1378 if ((dwt_num < 0) || (dwt_num >= cortex_m3->dwt_num_comp)) {
1379 LOG_DEBUG("Invalid DWT Comparator number in watchpoint");
1380 return ERROR_OK;
1381 }
1382
1383 comparator = cortex_m3->dwt_comparator_list + dwt_num;
1384 comparator->used = 0;
1385 comparator->function = 0;
1386 target_write_u32(target, comparator->dwt_comparator_address + 8,
1387 comparator->function);
1388
1389 watchpoint->set = false;
1390
1391 return ERROR_OK;
1392 }
1393
1394 int cortex_m3_add_watchpoint(struct target *target, struct watchpoint *watchpoint)
1395 {
1396 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
1397
1398 if (cortex_m3->dwt_comp_available < 1) {
1399 LOG_DEBUG("no comparators?");
1400 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1401 }
1402
1403 /* hardware doesn't support data value masking */
1404 if (watchpoint->mask != ~(uint32_t)0) {
1405 LOG_DEBUG("watchpoint value masks not supported");
1406 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1407 }
1408
1409 /* hardware allows address masks of up to 32K */
1410 unsigned mask;
1411
1412 for (mask = 0; mask < 16; mask++) {
1413 if ((1u << mask) == watchpoint->length)
1414 break;
1415 }
1416 if (mask == 16) {
1417 LOG_DEBUG("unsupported watchpoint length");
1418 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1419 }
1420 if (watchpoint->address & ((1 << mask) - 1)) {
1421 LOG_DEBUG("watchpoint address is unaligned");
1422 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1423 }
1424
1425 /* Caller doesn't seem to be able to describe watching for data
1426 * values of zero; that flags "no value".
1427 *
1428 * REVISIT This DWT may well be able to watch for specific data
1429 * values. Requires comparator #1 to set DATAVMATCH and match
1430 * the data, and another comparator (DATAVADDR0) matching addr.
1431 */
1432 if (watchpoint->value) {
1433 LOG_DEBUG("data value watchpoint not YET supported");
1434 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1435 }
1436
1437 cortex_m3->dwt_comp_available--;
1438 LOG_DEBUG("dwt_comp_available: %d", cortex_m3->dwt_comp_available);
1439
1440 return ERROR_OK;
1441 }
1442
1443 int cortex_m3_remove_watchpoint(struct target *target, struct watchpoint *watchpoint)
1444 {
1445 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
1446
1447 /* REVISIT why check? DWT can be updated with core running ... */
1448 if (target->state != TARGET_HALTED) {
1449 LOG_WARNING("target not halted");
1450 return ERROR_TARGET_NOT_HALTED;
1451 }
1452
1453 if (watchpoint->set)
1454 cortex_m3_unset_watchpoint(target, watchpoint);
1455
1456 cortex_m3->dwt_comp_available++;
1457 LOG_DEBUG("dwt_comp_available: %d", cortex_m3->dwt_comp_available);
1458
1459 return ERROR_OK;
1460 }
1461
1462 void cortex_m3_enable_watchpoints(struct target *target)
1463 {
1464 struct watchpoint *watchpoint = target->watchpoints;
1465
1466 /* set any pending watchpoints */
1467 while (watchpoint) {
1468 if (!watchpoint->set)
1469 cortex_m3_set_watchpoint(target, watchpoint);
1470 watchpoint = watchpoint->next;
1471 }
1472 }
1473
1474 static int cortex_m3_load_core_reg_u32(struct target *target,
1475 uint32_t num, uint32_t *value)
1476 {
1477 int retval;
1478 struct armv7m_common *armv7m = target_to_armv7m(target);
1479 struct adiv5_dap *swjdp = armv7m->arm.dap;
1480
1481 /* NOTE: we "know" here that the register identifiers used
1482 * in the v7m header match the Cortex-M3 Debug Core Register
1483 * Selector values for R0..R15, xPSR, MSP, and PSP.
1484 */
1485 switch (num) {
1486 case 0 ... 18:
1487 /* read a normal core register */
1488 retval = cortexm3_dap_read_coreregister_u32(swjdp, value, num);
1489
1490 if (retval != ERROR_OK) {
1491 LOG_ERROR("JTAG failure %i", retval);
1492 return ERROR_JTAG_DEVICE_ERROR;
1493 }
1494 LOG_DEBUG("load from core reg %i value 0x%" PRIx32 "", (int)num, *value);
1495 break;
1496
1497 case ARMV7M_PRIMASK:
1498 case ARMV7M_BASEPRI:
1499 case ARMV7M_FAULTMASK:
1500 case ARMV7M_CONTROL:
1501 /* Cortex-M3 packages these four registers as bitfields
1502 * in one Debug Core register. So say r0 and r2 docs;
1503 * it was removed from r1 docs, but still works.
1504 */
1505 cortexm3_dap_read_coreregister_u32(swjdp, value, 20);
1506
1507 switch (num) {
1508 case ARMV7M_PRIMASK:
1509 *value = buf_get_u32((uint8_t *)value, 0, 1);
1510 break;
1511
1512 case ARMV7M_BASEPRI:
1513 *value = buf_get_u32((uint8_t *)value, 8, 8);
1514 break;
1515
1516 case ARMV7M_FAULTMASK:
1517 *value = buf_get_u32((uint8_t *)value, 16, 1);
1518 break;
1519
1520 case ARMV7M_CONTROL:
1521 *value = buf_get_u32((uint8_t *)value, 24, 2);
1522 break;
1523 }
1524
1525 LOG_DEBUG("load from special reg %i value 0x%" PRIx32 "", (int)num, *value);
1526 break;
1527
1528 default:
1529 return ERROR_COMMAND_SYNTAX_ERROR;
1530 }
1531
1532 return ERROR_OK;
1533 }
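/* In the packed Debug Core register 20 used above, the bit offsets passed to
 * buf_get_u32() place PRIMASK at bit 0, BASEPRI at bits 8..15, FAULTMASK at
 * bit 16 and CONTROL at bits 24..25; the store path below uses the same
 * layout. */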
1534
1535 static int cortex_m3_store_core_reg_u32(struct target *target,
1536 uint32_t num, uint32_t value)
1537 {
1538 int retval;
1539 uint32_t reg;
1540 struct armv7m_common *armv7m = target_to_armv7m(target);
1541 struct adiv5_dap *swjdp = armv7m->arm.dap;
1542
1543 #ifdef ARMV7_GDB_HACKS
1544 /* If the LR register is being modified, make sure it will put us
1545 * in "thumb" mode, or an INVSTATE exception will occur. This is a
1546 * hack to deal with the fact that gdb will sometimes "forge"
1547 * return addresses, and doesn't set the LSB correctly (i.e., when
1548 * printing expressions containing function calls, it sets LR = 0.)
1549 * Valid exception return codes have bit 0 set too.
1550 */
1551 if (num == ARMV7M_R14)
1552 value |= 0x01;
1553 #endif
1554
1555 /* NOTE: we "know" here that the register identifiers used
1556 * in the v7m header match the Cortex-M3 Debug Core Register
1557 * Selector values for R0..R15, xPSR, MSP, and PSP.
1558 */
1559 switch (num) {
1560 case 0 ... 18:
1561 retval = cortexm3_dap_write_coreregister_u32(swjdp, value, num);
1562 if (retval != ERROR_OK) {
1563 struct reg *r;
1564
1565 LOG_ERROR("JTAG failure");
1566 r = armv7m->arm.core_cache->reg_list + num;
1567 r->dirty = r->valid;
1568 return ERROR_JTAG_DEVICE_ERROR;
1569 }
1570 LOG_DEBUG("write core reg %i value 0x%" PRIx32 "", (int)num, value);
1571 break;
1572
1573 case ARMV7M_PRIMASK:
1574 case ARMV7M_BASEPRI:
1575 case ARMV7M_FAULTMASK:
1576 case ARMV7M_CONTROL:
1577 /* Cortex-M3 packages these four registers as bitfields
1578 * in one Debug Core register. So say r0 and r2 docs;
1579 * it was removed from r1 docs, but still works.
1580 */
1581 cortexm3_dap_read_coreregister_u32(swjdp, &reg, 20);
1582
1583 switch (num) {
1584 case ARMV7M_PRIMASK:
1585 buf_set_u32((uint8_t *)&reg, 0, 1, value);
1586 break;
1587
1588 case ARMV7M_BASEPRI:
1589 buf_set_u32((uint8_t *)&reg, 8, 8, value);
1590 break;
1591
1592 case ARMV7M_FAULTMASK:
1593 buf_set_u32((uint8_t *)&reg, 16, 1, value);
1594 break;
1595
1596 case ARMV7M_CONTROL:
1597 buf_set_u32((uint8_t *)&reg, 24, 2, value);
1598 break;
1599 }
1600
1601 cortexm3_dap_write_coreregister_u32(swjdp, reg, 20);
1602
1603 LOG_DEBUG("write special reg %i value 0x%" PRIx32 " ", (int)num, value);
1604 break;
1605
1606 default:
1607 return ERROR_COMMAND_SYNTAX_ERROR;
1608 }
1609
1610 return ERROR_OK;
1611 }
1612
1613 static int cortex_m3_read_memory(struct target *target, uint32_t address,
1614 uint32_t size, uint32_t count, uint8_t *buffer)
1615 {
1616 struct armv7m_common *armv7m = target_to_armv7m(target);
1617 struct adiv5_dap *swjdp = armv7m->arm.dap;
1618 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1619
1620 if (armv7m->arm.is_armv6m) {
1621 /* armv6m does not handle unaligned memory access */
1622 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1623 return ERROR_TARGET_UNALIGNED_ACCESS;
1624 }
1625
1626 /* cortex_m3 handles unaligned memory access */
1627 if (count && buffer) {
1628 switch (size) {
1629 case 4:
1630 retval = mem_ap_read_buf_u32(swjdp, buffer, 4 * count, address, true);
1631 break;
1632 case 2:
1633 retval = mem_ap_read_buf_u16(swjdp, buffer, 2 * count, address);
1634 break;
1635 case 1:
1636 retval = mem_ap_read_buf_u8(swjdp, buffer, count, address);
1637 break;
1638 }
1639 }
1640
1641 return retval;
1642 }
1643
1644 static int cortex_m3_write_memory(struct target *target, uint32_t address,
1645 uint32_t size, uint32_t count, const uint8_t *buffer)
1646 {
1647 struct armv7m_common *armv7m = target_to_armv7m(target);
1648 struct adiv5_dap *swjdp = armv7m->arm.dap;
1649 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1650
1651 if (armv7m->arm.is_armv6m) {
1652 /* armv6m does not handle unaligned memory access */
1653 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1654 return ERROR_TARGET_UNALIGNED_ACCESS;
1655 }
1656
1657 if (count && buffer) {
1658 switch (size) {
1659 case 4:
1660 retval = mem_ap_write_buf_u32(swjdp, buffer, 4 * count, address, true);
1661 break;
1662 case 2:
1663 retval = mem_ap_write_buf_u16(swjdp, buffer, 2 * count, address);
1664 break;
1665 case 1:
1666 retval = mem_ap_write_buf_u8(swjdp, buffer, count, address);
1667 break;
1668 }
1669 }
1670
1671 return retval;
1672 }
1673
1674 static int cortex_m3_init_target(struct command_context *cmd_ctx,
1675 struct target *target)
1676 {
1677 armv7m_build_reg_cache(target);
1678 return ERROR_OK;
1679 }
1680
1681 /* REVISIT cache valid/dirty bits are unmaintained. We could set "valid"
1682 * on r/w if the core is not running, and clear on resume or reset ... or
1683 * at least, in a post_restore_context() method.
1684 */
1685
1686 struct dwt_reg_state {
1687 struct target *target;
1688 uint32_t addr;
1689 uint32_t value; /* scratch/cache */
1690 };
1691
1692 static int cortex_m3_dwt_get_reg(struct reg *reg)
1693 {
1694 struct dwt_reg_state *state = reg->arch_info;
1695
1696 return target_read_u32(state->target, state->addr, &state->value);
1697 }
1698
1699 static int cortex_m3_dwt_set_reg(struct reg *reg, uint8_t *buf)
1700 {
1701 struct dwt_reg_state *state = reg->arch_info;
1702
1703 return target_write_u32(state->target, state->addr,
1704 buf_get_u32(buf, 0, reg->size));
1705 }
1706
1707 struct dwt_reg {
1708 uint32_t addr;
1709 char *name;
1710 unsigned size;
1711 };
1712
1713 static struct dwt_reg dwt_base_regs[] = {
1714 { DWT_CTRL, "dwt_ctrl", 32, },
1715 /* NOTE that Erratum 532314 (fixed r2p0) affects CYCCNT: it wrongly
1716 * increments while the core is asleep.
1717 */
1718 { DWT_CYCCNT, "dwt_cyccnt", 32, },
1719 /* plus some 8 bit counters, useful for profiling with TPIU */
1720 };
1721
1722 static struct dwt_reg dwt_comp[] = {
1723 #define DWT_COMPARATOR(i) \
1724 { DWT_COMP0 + 0x10 * (i), "dwt_" #i "_comp", 32, }, \
1725 { DWT_MASK0 + 0x10 * (i), "dwt_" #i "_mask", 4, }, \
1726 { DWT_FUNCTION0 + 0x10 * (i), "dwt_" #i "_function", 32, }
1727 DWT_COMPARATOR(0),
1728 DWT_COMPARATOR(1),
1729 DWT_COMPARATOR(2),
1730 DWT_COMPARATOR(3),
1731 #undef DWT_COMPARATOR
1732 };
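/* For reference, DWT_COMPARATOR(0) expands to three entries:
 *   { DWT_COMP0,     "dwt_0_comp",     32, },
 *   { DWT_MASK0,     "dwt_0_mask",      4, },
 *   { DWT_FUNCTION0, "dwt_0_function", 32, }
 * i.e. one comparator, mask and function register per DWT comparator slot. */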
1733
1734 static const struct reg_arch_type dwt_reg_type = {
1735 .get = cortex_m3_dwt_get_reg,
1736 .set = cortex_m3_dwt_set_reg,
1737 };
1738
1739 static void cortex_m3_dwt_addreg(struct target *t, struct reg *r, struct dwt_reg *d)
1740 {
1741 struct dwt_reg_state *state;
1742
1743 state = calloc(1, sizeof *state);
1744 if (!state)
1745 return;
1746 state->addr = d->addr;
1747 state->target = t;
1748
1749 r->name = d->name;
1750 r->size = d->size;
1751 r->value = &state->value;
1752 r->arch_info = state;
1753 r->type = &dwt_reg_type;
1754 }
1755
1756 void cortex_m3_dwt_setup(struct cortex_m3_common *cm3, struct target *target)
1757 {
1758 uint32_t dwtcr;
1759 struct reg_cache *cache;
1760 struct cortex_m3_dwt_comparator *comparator;
1761 int reg, i;
1762
1763 target_read_u32(target, DWT_CTRL, &dwtcr);
1764 if (!dwtcr) {
1765 LOG_DEBUG("no DWT");
1766 return;
1767 }
1768
1769 cm3->dwt_num_comp = (dwtcr >> 28) & 0xF;
1770 cm3->dwt_comp_available = cm3->dwt_num_comp;
1771 cm3->dwt_comparator_list = calloc(cm3->dwt_num_comp,
1772 sizeof(struct cortex_m3_dwt_comparator));
1773 if (!cm3->dwt_comparator_list) {
1774 fail0:
1775 cm3->dwt_num_comp = 0;
1776 LOG_ERROR("out of mem");
1777 return;
1778 }
1779
1780 cache = calloc(1, sizeof *cache);
1781 if (!cache) {
1782 fail1:
1783 free(cm3->dwt_comparator_list);
1784 goto fail0;
1785 }
1786 cache->name = "cortex-m3 dwt registers";
1787 cache->num_regs = 2 + cm3->dwt_num_comp * 3;
1788 cache->reg_list = calloc(cache->num_regs, sizeof *cache->reg_list);
1789 if (!cache->reg_list) {
1790 free(cache);
1791 goto fail1;
1792 }
1793
1794 for (reg = 0; reg < 2; reg++)
1795 cortex_m3_dwt_addreg(target, cache->reg_list + reg,
1796 dwt_base_regs + reg);
1797
1798 comparator = cm3->dwt_comparator_list;
1799 for (i = 0; i < cm3->dwt_num_comp; i++, comparator++) {
1800 int j;
1801
1802 comparator->dwt_comparator_address = DWT_COMP0 + 0x10 * i;
1803 for (j = 0; j < 3; j++, reg++)
1804 cortex_m3_dwt_addreg(target, cache->reg_list + reg,
1805 dwt_comp + 3 * i + j);
1806
1807 /* make sure we clear any watchpoints enabled on the target */
1808 target_write_u32(target, comparator->dwt_comparator_address + 8, 0);
1809 }
1810
1811 *register_get_last_cache_p(&target->reg_cache) = cache;
1812 cm3->dwt_cache = cache;
1813
1814 LOG_DEBUG("DWT dwtcr 0x%" PRIx32 ", comp %d, watch%s",
1815 dwtcr, cm3->dwt_num_comp,
1816 (dwtcr & (0xf << 24)) ? " only" : "/trigger");
1817
1818 /* REVISIT: if num_comp > 1, check whether comparator #1 can
1819 * implement single-address data value watchpoints ... so we
1820 * won't need to check it later, when asked to set one up.
1821 */
1822 }
1823
1824 #define MVFR0 0xe000ef40
1825 #define MVFR1 0xe000ef44
1826
1827 #define MVFR0_DEFAULT_M4 0x10110021
1828 #define MVFR1_DEFAULT_M4 0x11000011
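/* MVFR0/MVFR1 are the FP feature registers; the values above are what the
 * code expects a Cortex-M4 with the single-precision FPv4-SP extension to
 * report, so matching both is used below as the "FPU present" test. */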
1829
1830 int cortex_m3_examine(struct target *target)
1831 {
1832 int retval;
1833 uint32_t cpuid, fpcr, mvfr0, mvfr1;
1834 int i;
1835 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
1836 struct adiv5_dap *swjdp = cortex_m3->armv7m.arm.dap;
1837 struct armv7m_common *armv7m = target_to_armv7m(target);
1838
1839 /* stlink shares the examine handler but does not support
1840 * all its calls */
1841 if (!armv7m->stlink) {
1842 retval = ahbap_debugport_init(swjdp);
1843 if (retval != ERROR_OK)
1844 return retval;
1845 }
1846
1847 if (!target_was_examined(target)) {
1848 target_set_examined(target);
1849
1850 /* Read from Device Identification Registers */
1851 retval = target_read_u32(target, CPUID, &cpuid);
1852 if (retval != ERROR_OK)
1853 return retval;
1854
1855 /* Get CPU type: CPUID[15:4] is the part number (0xC2x on Cortex-M); the low nibble selects M0/M1/M3/M4 */
1856 i = (cpuid >> 4) & 0xf;
1857
1858 LOG_DEBUG("Cortex-M%d r%" PRId8 "p%" PRId8 " processor detected",
1859 i, (uint8_t)((cpuid >> 20) & 0xf), (uint8_t)((cpuid >> 0) & 0xf));
1860 LOG_DEBUG("cpuid: 0x%8.8" PRIx32 "", cpuid);
1861
1862 /* test for floating point feature on cortex-m4 */
1863 if (i == 4) {
1864 target_read_u32(target, MVFR0, &mvfr0);
1865 target_read_u32(target, MVFR1, &mvfr1);
1866
1867 if ((mvfr0 == MVFR0_DEFAULT_M4) && (mvfr1 == MVFR1_DEFAULT_M4)) {
1868 LOG_DEBUG("Cortex-M%d floating point feature FPv4_SP found", i);
1869 armv7m->fp_feature = FPv4_SP;
1870 }
1871 } else if (i == 0) {
1872 /* Cortex-M0 does not support unaligned memory access */
1873 armv7m->arm.is_armv6m = true;
1874 }
1875
1876 if (i == 4 || i == 3) {
1877 /* Cortex-M3/M4 has 4096 bytes autoincrement range */
1878 armv7m->dap.tar_autoincr_block = (1 << 12);
1879 }
1880
1881 /* NOTE: FPB and DWT are both optional. */
1882
1883 /* Setup FPB */
1884 target_read_u32(target, FP_CTRL, &fpcr);
1885 cortex_m3->auto_bp_type = 1;
1886 /* NUM_CODE is split across FP_CTRL bits [14:12] and [7:4] */
1887 cortex_m3->fp_num_code = ((fpcr >> 8) & 0x70) | ((fpcr >> 4) & 0xF);
1890 cortex_m3->fp_num_lit = (fpcr >> 8) & 0xF;
1891 cortex_m3->fp_code_available = cortex_m3->fp_num_code;
1892 cortex_m3->fp_comparator_list = calloc(
1893 cortex_m3->fp_num_code + cortex_m3->fp_num_lit,
1894 sizeof(struct cortex_m3_fp_comparator));
1895 cortex_m3->fpb_enabled = fpcr & 1;
1896 for (i = 0; i < cortex_m3->fp_num_code + cortex_m3->fp_num_lit; i++) {
1897 cortex_m3->fp_comparator_list[i].type =
1898 (i < cortex_m3->fp_num_code) ? FPCR_CODE : FPCR_LITERAL;
1899 cortex_m3->fp_comparator_list[i].fpcr_address = FP_COMP0 + 4 * i;
1900
1901 /* make sure we clear any breakpoints enabled on the target */
1902 target_write_u32(target, cortex_m3->fp_comparator_list[i].fpcr_address, 0);
1903 }
1904 LOG_DEBUG("FPB fpcr 0x%" PRIx32 ", numcode %i, numlit %i",
1905 fpcr,
1906 cortex_m3->fp_num_code,
1907 cortex_m3->fp_num_lit);
1908
1909 /* Setup DWT */
1910 cortex_m3_dwt_setup(cortex_m3, target);
1911
1912 /* These hardware breakpoints only work for code in flash! */
1913 LOG_INFO("%s: hardware has %d breakpoints, %d watchpoints",
1914 target_name(target),
1915 cortex_m3->fp_num_code,
1916 cortex_m3->dwt_num_comp);
1917 }
1918
1919 return ERROR_OK;
1920 }
1921
1922 static int cortex_m3_dcc_read(struct adiv5_dap *swjdp, uint8_t *value, uint8_t *ctrl)
1923 {
1924 uint16_t dcrdr;
1925 int retval;
1926
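/* The emulated DCC channel packs a control/busy flag into the low byte of
 * DCRDR and the data byte just above it; both are unpacked below. */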
1927 mem_ap_read_buf_u16(swjdp, (uint8_t *)&dcrdr, 1, DCB_DCRDR);
1928 *ctrl = (uint8_t)dcrdr;
1929 *value = (uint8_t)(dcrdr >> 8);
1930
1931 LOG_DEBUG("data 0x%x ctrl 0x%x", *value, *ctrl);
1932
1933 /* write ack back to software dcc register
1934 * to signify we have read the data */
1935 if (dcrdr & (1 << 0)) {
1936 dcrdr = 0;
1937 retval = mem_ap_write_buf_u16(swjdp, (uint8_t *)&dcrdr, 1, DCB_DCRDR);
1938 if (retval != ERROR_OK)
1939 return retval;
1940 }
1941
1942 return ERROR_OK;
1943 }
1944
1945 static int cortex_m3_target_request_data(struct target *target,
1946 uint32_t size, uint8_t *buffer)
1947 {
1948 struct armv7m_common *armv7m = target_to_armv7m(target);
1949 struct adiv5_dap *swjdp = armv7m->arm.dap;
1950 uint8_t data;
1951 uint8_t ctrl;
1952 uint32_t i;
1953
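/* 'size' counts 32-bit words; the DCC hands them over one byte at a time */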
1954 for (i = 0; i < (size * 4); i++) {
1955 cortex_m3_dcc_read(swjdp, &data, &ctrl);
1956 buffer[i] = data;
1957 }
1958
1959 return ERROR_OK;
1960 }
1961
1962 static int cortex_m3_handle_target_request(void *priv)
1963 {
1964 struct target *target = priv;
1965 if (!target_was_examined(target))
1966 return ERROR_OK;
1967 struct armv7m_common *armv7m = target_to_armv7m(target);
1968 struct adiv5_dap *swjdp = armv7m->arm.dap;
1969
1970 if (!target->dbg_msg_enabled)
1971 return ERROR_OK;
1972
1973 if (target->state == TARGET_RUNNING) {
1974 uint8_t data;
1975 uint8_t ctrl;
1976
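/* a target request arrives over the emulated DCC as four bytes, LSB first */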
1977 cortex_m3_dcc_read(swjdp, &data, &ctrl);
1978
1979 /* check if we have data */
1980 if (ctrl & (1 << 0)) {
1981 uint32_t request;
1982
1983 /* we assume target is quick enough */
1984 request = data;
1985 cortex_m3_dcc_read(swjdp, &data, &ctrl);
1986 request |= (data << 8);
1987 cortex_m3_dcc_read(swjdp, &data, &ctrl);
1988 request |= (data << 16);
1989 cortex_m3_dcc_read(swjdp, &data, &ctrl);
1990 request |= (data << 24);
1991 target_request(target, request);
1992 }
1993 }
1994
1995 return ERROR_OK;
1996 }
1997
1998 static int cortex_m3_init_arch_info(struct target *target,
1999 struct cortex_m3_common *cortex_m3, struct jtag_tap *tap)
2000 {
2001 int retval;
2002 struct armv7m_common *armv7m = &cortex_m3->armv7m;
2003
2004 armv7m_init_arch_info(target, armv7m);
2005
2006 /* prepare JTAG information for the new target */
2007 cortex_m3->jtag_info.tap = tap;
2008 cortex_m3->jtag_info.scann_size = 4;
2009
2010 /* default reset mode is to use SRST if fitted;
2011 * if not, fall back to CORTEX_M3_RESET_VECTRESET */
2012 cortex_m3->soft_reset_config = CORTEX_M3_RESET_VECTRESET;
2013
2014 armv7m->arm.dap = &armv7m->dap;
2015
2016 /* Leave (only) generic DAP stuff for debugport_init(); */
2017 armv7m->dap.jtag_info = &cortex_m3->jtag_info;
2018 armv7m->dap.memaccess_tck = 8;
2019
2020 /* Cortex-M3/M4 has 4096 bytes autoincrement range
2021 * but set a safe default to 1024 to support Cortex-M0
2022 * this will be changed in cortex_m3_examine if a M3/M4 is detected */
2023 armv7m->dap.tar_autoincr_block = (1 << 10);
2024
2025 /* register arch-specific functions */
2026 armv7m->examine_debug_reason = cortex_m3_examine_debug_reason;
2027
2028 armv7m->post_debug_entry = NULL;
2029
2030 armv7m->pre_restore_context = NULL;
2031
2032 armv7m->load_core_reg_u32 = cortex_m3_load_core_reg_u32;
2033 armv7m->store_core_reg_u32 = cortex_m3_store_core_reg_u32;
2034
2035 target_register_timer_callback(cortex_m3_handle_target_request, 1, 1, target);
2036
2037 retval = arm_jtag_setup_connection(&cortex_m3->jtag_info);
2038 if (retval != ERROR_OK)
2039 return retval;
2040
2041 return ERROR_OK;
2042 }
2043
2044 static int cortex_m3_target_create(struct target *target, Jim_Interp *interp)
2045 {
2046 struct cortex_m3_common *cortex_m3 = calloc(1, sizeof(struct cortex_m3_common));
2047
2048 cortex_m3->common_magic = CORTEX_M3_COMMON_MAGIC;
2049 cortex_m3_init_arch_info(target, cortex_m3, target->tap);
2050
2051 return ERROR_OK;
2052 }
2053
2054 /*--------------------------------------------------------------------------*/
2055
2056 static int cortex_m3_verify_pointer(struct command_context *cmd_ctx,
2057 struct cortex_m3_common *cm3)
2058 {
2059 if (cm3->common_magic != CORTEX_M3_COMMON_MAGIC) {
2060 command_print(cmd_ctx, "target is not a Cortex-M");
2061 return ERROR_TARGET_INVALID;
2062 }
2063 return ERROR_OK;
2064 }
2065
2066 /*
2067 * Only stuff below this line should need to verify that its target
2068 * is a Cortex-M3. Everything else should have indirected through the
2069 * cortexm3_target structure, which is only used with CM3 targets.
2070 */
2071
2072 static const struct {
2073 char name[10];
2074 unsigned mask;
2075 } vec_ids[] = {
2076 { "hard_err", VC_HARDERR, },
2077 { "int_err", VC_INTERR, },
2078 { "bus_err", VC_BUSERR, },
2079 { "state_err", VC_STATERR, },
2080 { "chk_err", VC_CHKERR, },
2081 { "nocp_err", VC_NOCPERR, },
2082 { "mm_err", VC_MMERR, },
2083 { "reset", VC_CORERESET, },
2084 };
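/* Each entry maps a user-visible name onto one DCB_DEMCR vector-catch
 * enable bit; e.g. "cortex_m vector_catch hard_err reset" would request
 * debug entry on hard faults and on core reset (VC_HARDERR | VC_CORERESET). */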
2085
2086 COMMAND_HANDLER(handle_cortex_m3_vector_catch_command)
2087 {
2088 struct target *target = get_current_target(CMD_CTX);
2089 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
2090 struct armv7m_common *armv7m = &cortex_m3->armv7m;
2091 struct adiv5_dap *swjdp = armv7m->arm.dap;
2092 uint32_t demcr = 0;
2093 int retval;
2094
2095 retval = cortex_m3_verify_pointer(CMD_CTX, cortex_m3);
2096 if (retval != ERROR_OK)
2097 return retval;
2098
2099 retval = mem_ap_read_atomic_u32(swjdp, DCB_DEMCR, &demcr);
2100 if (retval != ERROR_OK)
2101 return retval;
2102
2103 if (CMD_ARGC > 0) {
2104 unsigned catch = 0;
2105
2106 if (CMD_ARGC == 1) {
2107 if (strcmp(CMD_ARGV[0], "all") == 0) {
2108 catch = VC_HARDERR | VC_INTERR | VC_BUSERR
2109 | VC_STATERR | VC_CHKERR | VC_NOCPERR
2110 | VC_MMERR | VC_CORERESET;
2111 goto write;
2112 } else if (strcmp(CMD_ARGV[0], "none") == 0)
2113 goto write;
2114 }
2115 while (CMD_ARGC-- > 0) {
2116 unsigned i;
2117 for (i = 0; i < ARRAY_SIZE(vec_ids); i++) {
2118 if (strcmp(CMD_ARGV[CMD_ARGC], vec_ids[i].name) != 0)
2119 continue;
2120 catch |= vec_ids[i].mask;
2121 break;
2122 }
2123 if (i == ARRAY_SIZE(vec_ids)) {
2124 LOG_ERROR("No CM3 vector '%s'", CMD_ARGV[CMD_ARGC]);
2125 return ERROR_COMMAND_SYNTAX_ERROR;
2126 }
2127 }
2128 write:
2129 /* For now, armv7m->demcr only stores vector catch flags. */
2130 armv7m->demcr = catch;
2131
2132 demcr &= ~0xffff;
2133 demcr |= catch;
2134
2135 /* write, but don't assume it stuck (why not??) */
2136 retval = mem_ap_write_u32(swjdp, DCB_DEMCR, demcr);
2137 if (retval != ERROR_OK)
2138 return retval;
2139 retval = mem_ap_read_atomic_u32(swjdp, DCB_DEMCR, &demcr);
2140 if (retval != ERROR_OK)
2141 return retval;
2142
2143 /* FIXME be sure to clear DEMCR on clean server shutdown.
2144 * Otherwise the vector catch hardware could fire when there's
2145 * no debugger hooked up, causing much confusion...
2146 */
2147 }
2148
2149 for (unsigned i = 0; i < ARRAY_SIZE(vec_ids); i++) {
2150 command_print(CMD_CTX, "%9s: %s", vec_ids[i].name,
2151 (demcr & vec_ids[i].mask) ? "catch" : "ignore");
2152 }
2153
2154 return ERROR_OK;
2155 }
2156
2157 COMMAND_HANDLER(handle_cortex_m3_mask_interrupts_command)
2158 {
2159 struct target *target = get_current_target(CMD_CTX);
2160 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
2161 int retval;
2162
2163 static const Jim_Nvp nvp_maskisr_modes[] = {
2164 { .name = "auto", .value = CORTEX_M3_ISRMASK_AUTO },
2165 { .name = "off", .value = CORTEX_M3_ISRMASK_OFF },
2166 { .name = "on", .value = CORTEX_M3_ISRMASK_ON },
2167 { .name = NULL, .value = -1 },
2168 };
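/* 'on' sets C_MASKINTS in DHCSR so interrupts stay masked while the core is
 * stepped; 'off' clears it; 'auto' leaves the decision to the stepping code. */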
2169 const Jim_Nvp *n;
2170
2171
2172 retval = cortex_m3_verify_pointer(CMD_CTX, cortex_m3);
2173 if (retval != ERROR_OK)
2174 return retval;
2175
2176 if (target->state != TARGET_HALTED) {
2177 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
2178 return ERROR_OK;
2179 }
2180
2181 if (CMD_ARGC > 0) {
2182 n = Jim_Nvp_name2value_simple(nvp_maskisr_modes, CMD_ARGV[0]);
2183 if (n->name == NULL)
2184 return ERROR_COMMAND_SYNTAX_ERROR;
2185 cortex_m3->isrmasking_mode = n->value;
2186
2187
2188 if (cortex_m3->isrmasking_mode == CORTEX_M3_ISRMASK_ON)
2189 cortex_m3_write_debug_halt_mask(target, C_HALT | C_MASKINTS, 0);
2190 else
2191 cortex_m3_write_debug_halt_mask(target, C_HALT, C_MASKINTS);
2192 }
2193
2194 n = Jim_Nvp_value2name_simple(nvp_maskisr_modes, cortex_m3->isrmasking_mode);
2195 command_print(CMD_CTX, "cortex_m3 interrupt mask %s", n->name);
2196
2197 return ERROR_OK;
2198 }
2199
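/* 'sysresetreq' requests a whole-system reset through AIRCR.SYSRESETREQ,
 * while 'vectreset' (AIRCR.VECTRESET) resets only the processor core; the
 * latter is specific to Cortex-M3/M4 and is not available on ARMv6-M parts. */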
2200 COMMAND_HANDLER(handle_cortex_m3_reset_config_command)
2201 {
2202 struct target *target = get_current_target(CMD_CTX);
2203 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
2204 int retval;
2205 char *reset_config;
2206
2207 retval = cortex_m3_verify_pointer(CMD_CTX, cortex_m3);
2208 if (retval != ERROR_OK)
2209 return retval;
2210
2211 if (CMD_ARGC > 0) {
2212 if (strcmp(*CMD_ARGV, "sysresetreq") == 0)
2213 cortex_m3->soft_reset_config = CORTEX_M3_RESET_SYSRESETREQ;
2214 else if (strcmp(*CMD_ARGV, "vectreset") == 0)
2215 cortex_m3->soft_reset_config = CORTEX_M3_RESET_VECTRESET;
2216 }
2217
2218 switch (cortex_m3->soft_reset_config) {
2219 case CORTEX_M3_RESET_SYSRESETREQ:
2220 reset_config = "sysresetreq";
2221 break;
2222
2223 case CORTEX_M3_RESET_VECTRESET:
2224 reset_config = "vectreset";
2225 break;
2226
2227 default:
2228 reset_config = "unknown";
2229 break;
2230 }
2231
2232 command_print(CMD_CTX, "cortex_m3 reset_config %s", reset_config);
2233
2234 return ERROR_OK;
2235 }
2236
2237 static const struct command_registration cortex_m3_exec_command_handlers[] = {
2238 {
2239 .name = "maskisr",
2240 .handler = handle_cortex_m3_mask_interrupts_command,
2241 .mode = COMMAND_EXEC,
2242 .help = "mask cortex_m3 interrupts",
2243 .usage = "['auto'|'on'|'off']",
2244 },
2245 {
2246 .name = "vector_catch",
2247 .handler = handle_cortex_m3_vector_catch_command,
2248 .mode = COMMAND_EXEC,
2249 .help = "configure hardware vectors to trigger debug entry",
2250 .usage = "['all'|'none'|('bus_err'|'chk_err'|...)*]",
2251 },
2252 {
2253 .name = "reset_config",
2254 .handler = handle_cortex_m3_reset_config_command,
2255 .mode = COMMAND_ANY,
2256 .help = "configure software reset handling",
2257 .usage = "['srst'|'sysresetreq'|'vectreset']",
2258 },
2259 COMMAND_REGISTRATION_DONE
2260 };
2261 static const struct command_registration cortex_m3_command_handlers[] = {
2262 {
2263 .chain = armv7m_command_handlers,
2264 },
2265 {
2266 .name = "cortex_m",
2267 .mode = COMMAND_EXEC,
2268 .help = "Cortex-M command group",
2269 .usage = "",
2270 .chain = cortex_m3_exec_command_handlers,
2271 },
2272 COMMAND_REGISTRATION_DONE
2273 };
2274
2275 struct target_type cortexm3_target = {
2276 .name = "cortex_m",
2277 .deprecated_name = "cortex_m3",
2278
2279 .poll = cortex_m3_poll,
2280 .arch_state = armv7m_arch_state,
2281
2282 .target_request_data = cortex_m3_target_request_data,
2283
2284 .halt = cortex_m3_halt,
2285 .resume = cortex_m3_resume,
2286 .step = cortex_m3_step,
2287
2288 .assert_reset = cortex_m3_assert_reset,
2289 .deassert_reset = cortex_m3_deassert_reset,
2290 .soft_reset_halt = cortex_m3_soft_reset_halt,
2291
2292 .get_gdb_reg_list = armv7m_get_gdb_reg_list,
2293
2294 .read_memory = cortex_m3_read_memory,
2295 .write_memory = cortex_m3_write_memory,
2296 .checksum_memory = armv7m_checksum_memory,
2297 .blank_check_memory = armv7m_blank_check_memory,
2298
2299 .run_algorithm = armv7m_run_algorithm,
2300 .start_algorithm = armv7m_start_algorithm,
2301 .wait_algorithm = armv7m_wait_algorithm,
2302
2303 .add_breakpoint = cortex_m3_add_breakpoint,
2304 .remove_breakpoint = cortex_m3_remove_breakpoint,
2305 .add_watchpoint = cortex_m3_add_watchpoint,
2306 .remove_watchpoint = cortex_m3_remove_watchpoint,
2307
2308 .commands = cortex_m3_command_handlers,
2309 .target_create = cortex_m3_target_create,
2310 .init_target = cortex_m3_init_target,
2311 .examine = cortex_m3_examine,
2312 };
