1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2006 by Magnus Lundin *
6 * lundin@mlu.mine.nu *
7 * *
8 * Copyright (C) 2008 by Spencer Oliver *
9 * spen@spen-soft.co.uk *
10 * *
11 * This program is free software; you can redistribute it and/or modify *
12 * it under the terms of the GNU General Public License as published by *
13 * the Free Software Foundation; either version 2 of the License, or *
14 * (at your option) any later version. *
15 * *
16 * This program is distributed in the hope that it will be useful, *
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
19 * GNU General Public License for more details. *
20 * *
21 * You should have received a copy of the GNU General Public License *
22 * along with this program; if not, write to the *
23 * Free Software Foundation, Inc., *
24 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. *
25 * *
26 * *
27 * Cortex-M3(tm) TRM, ARM DDI 0337E (r1p1) and 0337G (r2p0) *
28 * *
29 ***************************************************************************/
30 #ifdef HAVE_CONFIG_H
31 #include "config.h"
32 #endif
33
34 #include "jtag/interface.h"
35 #include "breakpoints.h"
36 #include "cortex_m.h"
37 #include "target_request.h"
38 #include "target_type.h"
39 #include "arm_disassembler.h"
40 #include "register.h"
41 #include "arm_opcodes.h"
42 #include "arm_semihosting.h"
43 #include <helper/time_support.h>
44
45 /* NOTE: most of this should work fine for the Cortex-M1 and
46 * Cortex-M0 cores too, although they're ARMv6-M not ARMv7-M.
 47  *  Some differences:  M0/M1 don't have FPB remapping or the
48 * DWT tracing/profiling support. (So the cycle counter will
49 * not be usable; the other stuff isn't currently used here.)
50 *
51 * Although there are some workarounds for errata seen only in r0p0
52 * silicon, such old parts are hard to find and thus not much tested
53 * any longer.
54 */
55
56 /**
 57  * Returns the type of breakpoint required by the address location
58 */
59 #define BKPT_TYPE_BY_ADDR(addr) ((addr) < 0x20000000 ? BKPT_HARD : BKPT_SOFT)
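/* E.g. an address in the ARMv7-M code region (below 0x20000000, typically
 * on-chip flash) gets a hardware FPB breakpoint, while an address at or above
 * 0x20000000 (SRAM) gets a software BKPT instruction patched into memory.
 */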
60
61
62 /* forward declarations */
63 static int cortex_m3_store_core_reg_u32(struct target *target,
64 uint32_t num, uint32_t value);
65
66 static int cortexm3_dap_read_coreregister_u32(struct adiv5_dap *swjdp,
67 uint32_t *value, int regnum)
68 {
69 int retval;
70 uint32_t dcrdr;
71
72 /* because the DCB_DCRDR is used for the emulated dcc channel
73 * we have to save/restore the DCB_DCRDR when used */
74
75 retval = mem_ap_read_u32(swjdp, DCB_DCRDR, &dcrdr);
76 if (retval != ERROR_OK)
77 return retval;
78
79 /* mem_ap_write_u32(swjdp, DCB_DCRSR, regnum); */
80 retval = dap_setup_accessport(swjdp, CSW_32BIT | CSW_ADDRINC_OFF, DCB_DCRSR & 0xFFFFFFF0);
81 if (retval != ERROR_OK)
82 return retval;
83 retval = dap_queue_ap_write(swjdp, AP_REG_BD0 | (DCB_DCRSR & 0xC), regnum);
84 if (retval != ERROR_OK)
85 return retval;
86
87 /* mem_ap_read_u32(swjdp, DCB_DCRDR, value); */
88 retval = dap_setup_accessport(swjdp, CSW_32BIT | CSW_ADDRINC_OFF, DCB_DCRDR & 0xFFFFFFF0);
89 if (retval != ERROR_OK)
90 return retval;
91 retval = dap_queue_ap_read(swjdp, AP_REG_BD0 | (DCB_DCRDR & 0xC), value);
92 if (retval != ERROR_OK)
93 return retval;
94
95 retval = dap_run(swjdp);
96 if (retval != ERROR_OK)
97 return retval;
98
 99 	/* restore DCB_DCRDR - this needs to be in a separate
100 * transaction otherwise the emulated DCC channel breaks */
101 if (retval == ERROR_OK)
102 retval = mem_ap_write_atomic_u32(swjdp, DCB_DCRDR, dcrdr);
103
104 return retval;
105 }
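/* DCB_DCRSR and DCB_DCRDR live in the same 16-byte-aligned window, so the
 * accesses above go through the MEM-AP banked data registers BD0..BD3
 * (TAR + 0x0..0xC), selected by the low address bits (DCB_DCRxR & 0xC).
 * A typical call, e.g. to fetch R0, is simply:
 *
 *	uint32_t r0;
 *	retval = cortexm3_dap_read_coreregister_u32(swjdp, &r0, 0);
 */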
106
107 static int cortexm3_dap_write_coreregister_u32(struct adiv5_dap *swjdp,
108 uint32_t value, int regnum)
109 {
110 int retval;
111 uint32_t dcrdr;
112
113 /* because the DCB_DCRDR is used for the emulated dcc channel
114 * we have to save/restore the DCB_DCRDR when used */
115
116 retval = mem_ap_read_u32(swjdp, DCB_DCRDR, &dcrdr);
117 if (retval != ERROR_OK)
118 return retval;
119
120 /* mem_ap_write_u32(swjdp, DCB_DCRDR, core_regs[i]); */
121 retval = dap_setup_accessport(swjdp, CSW_32BIT | CSW_ADDRINC_OFF, DCB_DCRDR & 0xFFFFFFF0);
122 if (retval != ERROR_OK)
123 return retval;
124 retval = dap_queue_ap_write(swjdp, AP_REG_BD0 | (DCB_DCRDR & 0xC), value);
125 if (retval != ERROR_OK)
126 return retval;
127
128 /* mem_ap_write_u32(swjdp, DCB_DCRSR, i | DCRSR_WnR); */
129 retval = dap_setup_accessport(swjdp, CSW_32BIT | CSW_ADDRINC_OFF, DCB_DCRSR & 0xFFFFFFF0);
130 if (retval != ERROR_OK)
131 return retval;
132 retval = dap_queue_ap_write(swjdp, AP_REG_BD0 | (DCB_DCRSR & 0xC), regnum | DCRSR_WnR);
133 if (retval != ERROR_OK)
134 return retval;
135
136 retval = dap_run(swjdp);
137 if (retval != ERROR_OK)
138 return retval;
139
 140 	/* restore DCB_DCRDR - this needs to be in a separate
141 * transaction otherwise the emulated DCC channel breaks */
142 if (retval == ERROR_OK)
143 retval = mem_ap_write_atomic_u32(swjdp, DCB_DCRDR, dcrdr);
144
145 return retval;
146 }
147
148 static int cortex_m3_write_debug_halt_mask(struct target *target,
149 uint32_t mask_on, uint32_t mask_off)
150 {
151 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
152 struct adiv5_dap *swjdp = cortex_m3->armv7m.arm.dap;
153
154 /* mask off status bits */
155 cortex_m3->dcb_dhcsr &= ~((0xFFFF << 16) | mask_off);
156 /* create new register mask */
157 cortex_m3->dcb_dhcsr |= DBGKEY | C_DEBUGEN | mask_on;
158
159 return mem_ap_write_atomic_u32(swjdp, DCB_DHCSR, cortex_m3->dcb_dhcsr);
160 }
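/* Callers below use this helper e.g. as:
 *
 *	cortex_m3_write_debug_halt_mask(target, C_HALT, 0);       halt the core
 *	cortex_m3_write_debug_halt_mask(target, 0, C_HALT);       restart it
 *	cortex_m3_write_debug_halt_mask(target, C_HALT, C_STEP);  clear single-step
 *
 * DBGKEY and C_DEBUGEN are always OR-ed in so the DHCSR write takes effect.
 */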
161
162 static int cortex_m3_clear_halt(struct target *target)
163 {
164 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
165 struct adiv5_dap *swjdp = cortex_m3->armv7m.arm.dap;
166 int retval;
167
168 /* clear step if any */
169 cortex_m3_write_debug_halt_mask(target, C_HALT, C_STEP);
170
171 /* Read Debug Fault Status Register */
172 retval = mem_ap_read_atomic_u32(swjdp, NVIC_DFSR, &cortex_m3->nvic_dfsr);
173 if (retval != ERROR_OK)
174 return retval;
175
176 /* Clear Debug Fault Status */
177 retval = mem_ap_write_atomic_u32(swjdp, NVIC_DFSR, cortex_m3->nvic_dfsr);
178 if (retval != ERROR_OK)
179 return retval;
180 LOG_DEBUG(" NVIC_DFSR 0x%" PRIx32 "", cortex_m3->nvic_dfsr);
181
182 return ERROR_OK;
183 }
184
185 static int cortex_m3_single_step_core(struct target *target)
186 {
187 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
188 struct adiv5_dap *swjdp = cortex_m3->armv7m.arm.dap;
189 uint32_t dhcsr_save;
190 int retval;
191
192 /* backup dhcsr reg */
193 dhcsr_save = cortex_m3->dcb_dhcsr;
194
 195 	/* Mask interrupts before clearing halt, if not done already.  This avoids
196 * Erratum 377497 (fixed in r1p0) where setting MASKINTS while clearing
197 * HALT can put the core into an unknown state.
198 */
199 if (!(cortex_m3->dcb_dhcsr & C_MASKINTS)) {
200 retval = mem_ap_write_atomic_u32(swjdp, DCB_DHCSR,
201 DBGKEY | C_MASKINTS | C_HALT | C_DEBUGEN);
202 if (retval != ERROR_OK)
203 return retval;
204 }
205 retval = mem_ap_write_atomic_u32(swjdp, DCB_DHCSR,
206 DBGKEY | C_MASKINTS | C_STEP | C_DEBUGEN);
207 if (retval != ERROR_OK)
208 return retval;
209 LOG_DEBUG(" ");
210
211 /* restore dhcsr reg */
212 cortex_m3->dcb_dhcsr = dhcsr_save;
213 cortex_m3_clear_halt(target);
214
215 return ERROR_OK;
216 }
217
218 static int cortex_m3_endreset_event(struct target *target)
219 {
220 int i;
221 int retval;
222 uint32_t dcb_demcr;
223 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
224 struct armv7m_common *armv7m = &cortex_m3->armv7m;
225 struct adiv5_dap *swjdp = cortex_m3->armv7m.arm.dap;
226 struct cortex_m3_fp_comparator *fp_list = cortex_m3->fp_comparator_list;
227 struct cortex_m3_dwt_comparator *dwt_list = cortex_m3->dwt_comparator_list;
228
229 /* REVISIT The four debug monitor bits are currently ignored... */
230 retval = mem_ap_read_atomic_u32(swjdp, DCB_DEMCR, &dcb_demcr);
231 if (retval != ERROR_OK)
232 return retval;
233 LOG_DEBUG("DCB_DEMCR = 0x%8.8" PRIx32 "", dcb_demcr);
234
235 /* this register is used for emulated dcc channel */
236 retval = mem_ap_write_u32(swjdp, DCB_DCRDR, 0);
237 if (retval != ERROR_OK)
238 return retval;
239
240 /* Enable debug requests */
241 retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &cortex_m3->dcb_dhcsr);
242 if (retval != ERROR_OK)
243 return retval;
244 if (!(cortex_m3->dcb_dhcsr & C_DEBUGEN)) {
245 retval = mem_ap_write_u32(swjdp, DCB_DHCSR, DBGKEY | C_DEBUGEN);
246 if (retval != ERROR_OK)
247 return retval;
248 }
249
250 /* clear any interrupt masking */
251 cortex_m3_write_debug_halt_mask(target, 0, C_MASKINTS);
252
253 /* Enable features controlled by ITM and DWT blocks, and catch only
254 * the vectors we were told to pay attention to.
255 *
256 * Target firmware is responsible for all fault handling policy
257 * choices *EXCEPT* explicitly scripted overrides like "vector_catch"
258 * or manual updates to the NVIC SHCSR and CCR registers.
259 */
260 retval = mem_ap_write_u32(swjdp, DCB_DEMCR, TRCENA | armv7m->demcr);
261 if (retval != ERROR_OK)
262 return retval;
263
264 /* Paranoia: evidently some (early?) chips don't preserve all the
 265 	 * debug state (including FPB, DWT, etc) across reset...
266 */
267
268 /* Enable FPB */
269 retval = target_write_u32(target, FP_CTRL, 3);
270 if (retval != ERROR_OK)
271 return retval;
272
273 cortex_m3->fpb_enabled = 1;
274
275 /* Restore FPB registers */
276 for (i = 0; i < cortex_m3->fp_num_code + cortex_m3->fp_num_lit; i++) {
277 retval = target_write_u32(target, fp_list[i].fpcr_address, fp_list[i].fpcr_value);
278 if (retval != ERROR_OK)
279 return retval;
280 }
281
282 /* Restore DWT registers */
283 for (i = 0; i < cortex_m3->dwt_num_comp; i++) {
284 retval = target_write_u32(target, dwt_list[i].dwt_comparator_address + 0,
285 dwt_list[i].comp);
286 if (retval != ERROR_OK)
287 return retval;
288 retval = target_write_u32(target, dwt_list[i].dwt_comparator_address + 4,
289 dwt_list[i].mask);
290 if (retval != ERROR_OK)
291 return retval;
292 retval = target_write_u32(target, dwt_list[i].dwt_comparator_address + 8,
293 dwt_list[i].function);
294 if (retval != ERROR_OK)
295 return retval;
296 }
297 retval = dap_run(swjdp);
298 if (retval != ERROR_OK)
299 return retval;
300
301 register_cache_invalidate(armv7m->arm.core_cache);
302
303 /* make sure we have latest dhcsr flags */
304 retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &cortex_m3->dcb_dhcsr);
305
306 return retval;
307 }
308
309 static int cortex_m3_examine_debug_reason(struct target *target)
310 {
311 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
312
 313 	/* TODO: this needs better logic for detecting the reason for the debug state;
 314 	 * only check the debug reason if we don't know it already */
315
316 if ((target->debug_reason != DBG_REASON_DBGRQ)
317 && (target->debug_reason != DBG_REASON_SINGLESTEP)) {
318 if (cortex_m3->nvic_dfsr & DFSR_BKPT) {
319 target->debug_reason = DBG_REASON_BREAKPOINT;
320 if (cortex_m3->nvic_dfsr & DFSR_DWTTRAP)
321 target->debug_reason = DBG_REASON_WPTANDBKPT;
322 } else if (cortex_m3->nvic_dfsr & DFSR_DWTTRAP)
323 target->debug_reason = DBG_REASON_WATCHPOINT;
324 else if (cortex_m3->nvic_dfsr & DFSR_VCATCH)
325 target->debug_reason = DBG_REASON_BREAKPOINT;
326 else /* EXTERNAL, HALTED */
327 target->debug_reason = DBG_REASON_UNDEFINED;
328 }
329
330 return ERROR_OK;
331 }
332
333 static int cortex_m3_examine_exception_reason(struct target *target)
334 {
335 uint32_t shcsr = 0, except_sr = 0, cfsr = -1, except_ar = -1;
336 struct armv7m_common *armv7m = target_to_armv7m(target);
337 struct adiv5_dap *swjdp = armv7m->arm.dap;
338 int retval;
339
340 retval = mem_ap_read_u32(swjdp, NVIC_SHCSR, &shcsr);
341 if (retval != ERROR_OK)
342 return retval;
343 switch (armv7m->exception_number) {
344 case 2: /* NMI */
345 break;
346 case 3: /* Hard Fault */
347 retval = mem_ap_read_atomic_u32(swjdp, NVIC_HFSR, &except_sr);
348 if (retval != ERROR_OK)
349 return retval;
350 if (except_sr & 0x40000000) {
351 retval = mem_ap_read_u32(swjdp, NVIC_CFSR, &cfsr);
352 if (retval != ERROR_OK)
353 return retval;
354 }
355 break;
356 case 4: /* Memory Management */
357 retval = mem_ap_read_u32(swjdp, NVIC_CFSR, &except_sr);
358 if (retval != ERROR_OK)
359 return retval;
360 retval = mem_ap_read_u32(swjdp, NVIC_MMFAR, &except_ar);
361 if (retval != ERROR_OK)
362 return retval;
363 break;
364 case 5: /* Bus Fault */
365 retval = mem_ap_read_u32(swjdp, NVIC_CFSR, &except_sr);
366 if (retval != ERROR_OK)
367 return retval;
368 retval = mem_ap_read_u32(swjdp, NVIC_BFAR, &except_ar);
369 if (retval != ERROR_OK)
370 return retval;
371 break;
372 case 6: /* Usage Fault */
373 retval = mem_ap_read_u32(swjdp, NVIC_CFSR, &except_sr);
374 if (retval != ERROR_OK)
375 return retval;
376 break;
377 case 11: /* SVCall */
378 break;
379 case 12: /* Debug Monitor */
380 retval = mem_ap_read_u32(swjdp, NVIC_DFSR, &except_sr);
381 if (retval != ERROR_OK)
382 return retval;
383 break;
384 case 14: /* PendSV */
385 break;
386 case 15: /* SysTick */
387 break;
388 default:
389 except_sr = 0;
390 break;
391 }
392 retval = dap_run(swjdp);
393 if (retval == ERROR_OK)
394 LOG_DEBUG("%s SHCSR 0x%" PRIx32 ", SR 0x%" PRIx32
395 ", CFSR 0x%" PRIx32 ", AR 0x%" PRIx32,
396 armv7m_exception_string(armv7m->exception_number),
397 shcsr, except_sr, cfsr, except_ar);
398 return retval;
399 }
400
401 static int cortex_m3_debug_entry(struct target *target)
402 {
403 int i;
404 uint32_t xPSR;
405 int retval;
406 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
407 struct armv7m_common *armv7m = &cortex_m3->armv7m;
408 struct arm *arm = &armv7m->arm;
409 struct adiv5_dap *swjdp = armv7m->arm.dap;
410 struct reg *r;
411
412 LOG_DEBUG(" ");
413
414 cortex_m3_clear_halt(target);
415 retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &cortex_m3->dcb_dhcsr);
416 if (retval != ERROR_OK)
417 return retval;
418
419 retval = armv7m->examine_debug_reason(target);
420 if (retval != ERROR_OK)
421 return retval;
422
423 /* Examine target state and mode
 424 	 * First load registers accessible through the core debug port */
425 int num_regs = arm->core_cache->num_regs;
426
427 for (i = 0; i < num_regs; i++) {
428 r = &armv7m->arm.core_cache->reg_list[i];
429 if (!r->valid)
430 arm->read_core_reg(target, r, i, ARM_MODE_ANY);
431 }
432
433 r = arm->core_cache->reg_list + ARMV7M_xPSR;
434 xPSR = buf_get_u32(r->value, 0, 32);
435
436 #ifdef ARMV7_GDB_HACKS
437 /* FIXME this breaks on scan chains with more than one Cortex-M3.
438 * Instead, each CM3 should have its own dummy value...
439 */
440 /* copy real xpsr reg for gdb, setting thumb bit */
441 buf_set_u32(armv7m_gdb_dummy_cpsr_value, 0, 32, xPSR);
442 buf_set_u32(armv7m_gdb_dummy_cpsr_value, 5, 1, 1);
443 armv7m_gdb_dummy_cpsr_reg.valid = r->valid;
444 armv7m_gdb_dummy_cpsr_reg.dirty = r->dirty;
445 #endif
446
 447 	/* For IT instructions xPSR must be reloaded on resume and cleared on debug exec */
448 if (xPSR & 0xf00) {
449 r->dirty = r->valid;
450 cortex_m3_store_core_reg_u32(target, 16, xPSR & ~0xff);
451 }
452
453 /* Are we in an exception handler */
454 if (xPSR & 0x1FF) {
455 armv7m->exception_number = (xPSR & 0x1FF);
456
457 arm->core_mode = ARM_MODE_HANDLER;
458 arm->map = armv7m_msp_reg_map;
459 } else {
460 unsigned control = buf_get_u32(arm->core_cache
461 ->reg_list[ARMV7M_CONTROL].value, 0, 2);
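/* On ARMv7-M, CONTROL bit 0 is nPRIV and bit 1 is SPSEL; e.g. CONTROL == 0x2
 * means a privileged thread running on the process stack. */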
462
463 /* is this thread privileged? */
464 arm->core_mode = control & 1
465 ? ARM_MODE_USER_THREAD
466 : ARM_MODE_THREAD;
467
468 /* which stack is it using? */
469 if (control & 2)
470 arm->map = armv7m_psp_reg_map;
471 else
472 arm->map = armv7m_msp_reg_map;
473
474 armv7m->exception_number = 0;
475 }
476
477 if (armv7m->exception_number)
478 cortex_m3_examine_exception_reason(target);
479
480 LOG_DEBUG("entered debug state in core mode: %s at PC 0x%" PRIx32 ", target->state: %s",
481 arm_mode_name(arm->core_mode),
482 *(uint32_t *)(arm->pc->value),
483 target_state_name(target));
484
485 if (armv7m->post_debug_entry) {
486 retval = armv7m->post_debug_entry(target);
487 if (retval != ERROR_OK)
488 return retval;
489 }
490
491 return ERROR_OK;
492 }
493
494 static int cortex_m3_poll(struct target *target)
495 {
496 int detected_failure = ERROR_OK;
497 int retval = ERROR_OK;
498 enum target_state prev_target_state = target->state;
499 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
500 struct adiv5_dap *swjdp = cortex_m3->armv7m.arm.dap;
501
502 /* Read from Debug Halting Control and Status Register */
503 retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &cortex_m3->dcb_dhcsr);
504 if (retval != ERROR_OK) {
505 target->state = TARGET_UNKNOWN;
506 return retval;
507 }
508
509 /* Recover from lockup. See ARMv7-M architecture spec,
510 * section B1.5.15 "Unrecoverable exception cases".
511 */
512 if (cortex_m3->dcb_dhcsr & S_LOCKUP) {
513 LOG_ERROR("%s -- clearing lockup after double fault",
514 target_name(target));
515 cortex_m3_write_debug_halt_mask(target, C_HALT, 0);
516 target->debug_reason = DBG_REASON_DBGRQ;
517
 518 		/* We have to execute the rest (the "finally" equivalent), but
 519 		 * still throw this exception again.
520 */
521 detected_failure = ERROR_FAIL;
522
523 /* refresh status bits */
524 retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &cortex_m3->dcb_dhcsr);
525 if (retval != ERROR_OK)
526 return retval;
527 }
528
529 if (cortex_m3->dcb_dhcsr & S_RESET_ST) {
530 /* check if still in reset */
531 retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &cortex_m3->dcb_dhcsr);
532 if (retval != ERROR_OK)
533 return retval;
534
535 if (cortex_m3->dcb_dhcsr & S_RESET_ST) {
536 target->state = TARGET_RESET;
537 return ERROR_OK;
538 }
539 }
540
541 if (target->state == TARGET_RESET) {
542 /* Cannot switch context while running so endreset is
543 * called with target->state == TARGET_RESET
544 */
545 LOG_DEBUG("Exit from reset with dcb_dhcsr 0x%" PRIx32,
546 cortex_m3->dcb_dhcsr);
547 cortex_m3_endreset_event(target);
548 target->state = TARGET_RUNNING;
549 prev_target_state = TARGET_RUNNING;
550 }
551
552 if (cortex_m3->dcb_dhcsr & S_HALT) {
553 target->state = TARGET_HALTED;
554
555 if ((prev_target_state == TARGET_RUNNING) || (prev_target_state == TARGET_RESET)) {
556 retval = cortex_m3_debug_entry(target);
557 if (retval != ERROR_OK)
558 return retval;
559
560 if (arm_semihosting(target, &retval) != 0)
561 return retval;
562
563 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
564 }
565 if (prev_target_state == TARGET_DEBUG_RUNNING) {
566 LOG_DEBUG(" ");
567 retval = cortex_m3_debug_entry(target);
568 if (retval != ERROR_OK)
569 return retval;
570
571 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
572 }
573 }
574
575 /* REVISIT when S_SLEEP is set, it's in a Sleep or DeepSleep state.
576 * How best to model low power modes?
577 */
578
579 if (target->state == TARGET_UNKNOWN) {
580 /* check if processor is retiring instructions */
581 if (cortex_m3->dcb_dhcsr & S_RETIRE_ST) {
582 target->state = TARGET_RUNNING;
583 retval = ERROR_OK;
584 }
585 }
586
587 /* Did we detect a failure condition that we cleared? */
588 if (detected_failure != ERROR_OK)
589 retval = detected_failure;
590 return retval;
591 }
592
593 static int cortex_m3_halt(struct target *target)
594 {
595 LOG_DEBUG("target->state: %s",
596 target_state_name(target));
597
598 if (target->state == TARGET_HALTED) {
599 LOG_DEBUG("target was already halted");
600 return ERROR_OK;
601 }
602
603 if (target->state == TARGET_UNKNOWN)
604 LOG_WARNING("target was in unknown state when halt was requested");
605
606 if (target->state == TARGET_RESET) {
607 if ((jtag_get_reset_config() & RESET_SRST_PULLS_TRST) && jtag_get_srst()) {
608 LOG_ERROR("can't request a halt while in reset if nSRST pulls nTRST");
609 return ERROR_TARGET_FAILURE;
610 } else {
611 /* we came here in a reset_halt or reset_init sequence
612 * debug entry was already prepared in cortex_m3_assert_reset()
613 */
614 target->debug_reason = DBG_REASON_DBGRQ;
615
616 return ERROR_OK;
617 }
618 }
619
620 /* Write to Debug Halting Control and Status Register */
621 cortex_m3_write_debug_halt_mask(target, C_HALT, 0);
622
623 target->debug_reason = DBG_REASON_DBGRQ;
624
625 return ERROR_OK;
626 }
627
628 static int cortex_m3_soft_reset_halt(struct target *target)
629 {
630 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
631 struct adiv5_dap *swjdp = cortex_m3->armv7m.arm.dap;
632 uint32_t dcb_dhcsr = 0;
633 int retval, timeout = 0;
634
635 /* Enter debug state on reset; restore DEMCR in endreset_event() */
636 retval = mem_ap_write_u32(swjdp, DCB_DEMCR,
637 TRCENA | VC_HARDERR | VC_BUSERR | VC_CORERESET);
638 if (retval != ERROR_OK)
639 return retval;
640
641 /* Request a core-only reset */
642 retval = mem_ap_write_atomic_u32(swjdp, NVIC_AIRCR,
643 AIRCR_VECTKEY | AIRCR_VECTRESET);
644 if (retval != ERROR_OK)
645 return retval;
646 target->state = TARGET_RESET;
647
648 /* registers are now invalid */
649 register_cache_invalidate(cortex_m3->armv7m.arm.core_cache);
650
651 while (timeout < 100) {
652 retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &dcb_dhcsr);
653 if (retval == ERROR_OK) {
654 retval = mem_ap_read_atomic_u32(swjdp, NVIC_DFSR,
655 &cortex_m3->nvic_dfsr);
656 if (retval != ERROR_OK)
657 return retval;
658 if ((dcb_dhcsr & S_HALT)
659 && (cortex_m3->nvic_dfsr & DFSR_VCATCH)) {
660 LOG_DEBUG("system reset-halted, DHCSR 0x%08x, "
661 "DFSR 0x%08x",
662 (unsigned) dcb_dhcsr,
663 (unsigned) cortex_m3->nvic_dfsr);
664 cortex_m3_poll(target);
665 /* FIXME restore user's vector catch config */
666 return ERROR_OK;
667 } else
668 LOG_DEBUG("waiting for system reset-halt, "
669 "DHCSR 0x%08x, %d ms",
670 (unsigned) dcb_dhcsr, timeout);
671 }
672 timeout++;
673 alive_sleep(1);
674 }
675
676 return ERROR_OK;
677 }
678
679 void cortex_m3_enable_breakpoints(struct target *target)
680 {
681 struct breakpoint *breakpoint = target->breakpoints;
682
683 /* set any pending breakpoints */
684 while (breakpoint) {
685 if (!breakpoint->set)
686 cortex_m3_set_breakpoint(target, breakpoint);
687 breakpoint = breakpoint->next;
688 }
689 }
690
691 static int cortex_m3_resume(struct target *target, int current,
692 uint32_t address, int handle_breakpoints, int debug_execution)
693 {
694 struct armv7m_common *armv7m = target_to_armv7m(target);
695 struct breakpoint *breakpoint = NULL;
696 uint32_t resume_pc;
697 struct reg *r;
698
699 if (target->state != TARGET_HALTED) {
700 LOG_WARNING("target not halted");
701 return ERROR_TARGET_NOT_HALTED;
702 }
703
704 if (!debug_execution) {
705 target_free_all_working_areas(target);
706 cortex_m3_enable_breakpoints(target);
707 cortex_m3_enable_watchpoints(target);
708 }
709
710 if (debug_execution) {
711 r = armv7m->arm.core_cache->reg_list + ARMV7M_PRIMASK;
712
713 /* Disable interrupts */
714 /* We disable interrupts in the PRIMASK register instead of
715 * masking with C_MASKINTS. This is probably the same issue
716 * as Cortex-M3 Erratum 377493 (fixed in r1p0): C_MASKINTS
717 * in parallel with disabled interrupts can cause local faults
718 * to not be taken.
719 *
720 * REVISIT this clearly breaks non-debug execution, since the
721 * PRIMASK register state isn't saved/restored... workaround
722 * by never resuming app code after debug execution.
723 */
724 buf_set_u32(r->value, 0, 1, 1);
725 r->dirty = true;
726 r->valid = true;
727
728 /* Make sure we are in Thumb mode */
729 r = armv7m->arm.core_cache->reg_list + ARMV7M_xPSR;
730 buf_set_u32(r->value, 24, 1, 1);
731 r->dirty = true;
732 r->valid = true;
733 }
734
735 /* current = 1: continue on current pc, otherwise continue at <address> */
736 r = armv7m->arm.pc;
737 if (!current) {
738 buf_set_u32(r->value, 0, 32, address);
739 r->dirty = true;
740 r->valid = true;
741 }
742
743 /* if we halted last time due to a bkpt instruction
744 * then we have to manually step over it, otherwise
745 * the core will break again */
746
747 if (!breakpoint_find(target, buf_get_u32(r->value, 0, 32))
748 && !debug_execution)
749 armv7m_maybe_skip_bkpt_inst(target, NULL);
750
751 resume_pc = buf_get_u32(r->value, 0, 32);
752
753 armv7m_restore_context(target);
754
755 /* the front-end may request us not to handle breakpoints */
756 if (handle_breakpoints) {
757 /* Single step past breakpoint at current address */
758 breakpoint = breakpoint_find(target, resume_pc);
759 if (breakpoint) {
760 LOG_DEBUG("unset breakpoint at 0x%8.8" PRIx32 " (ID: %d)",
761 breakpoint->address,
762 breakpoint->unique_id);
763 cortex_m3_unset_breakpoint(target, breakpoint);
764 cortex_m3_single_step_core(target);
765 cortex_m3_set_breakpoint(target, breakpoint);
766 }
767 }
768
769 /* Restart core */
770 cortex_m3_write_debug_halt_mask(target, 0, C_HALT);
771
772 target->debug_reason = DBG_REASON_NOTHALTED;
773
774 /* registers are now invalid */
775 register_cache_invalidate(armv7m->arm.core_cache);
776
777 if (!debug_execution) {
778 target->state = TARGET_RUNNING;
779 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
780 LOG_DEBUG("target resumed at 0x%" PRIx32 "", resume_pc);
781 } else {
782 target->state = TARGET_DEBUG_RUNNING;
783 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
784 LOG_DEBUG("target debug resumed at 0x%" PRIx32 "", resume_pc);
785 }
786
787 return ERROR_OK;
788 }
789
790 /* int irqstepcount = 0; */
791 static int cortex_m3_step(struct target *target, int current,
792 uint32_t address, int handle_breakpoints)
793 {
794 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
795 struct armv7m_common *armv7m = &cortex_m3->armv7m;
796 struct adiv5_dap *swjdp = armv7m->arm.dap;
797 struct breakpoint *breakpoint = NULL;
798 struct reg *pc = armv7m->arm.pc;
799 bool bkpt_inst_found = false;
800 int retval;
801 bool isr_timed_out = false;
802
803 if (target->state != TARGET_HALTED) {
804 LOG_WARNING("target not halted");
805 return ERROR_TARGET_NOT_HALTED;
806 }
807
808 /* current = 1: continue on current pc, otherwise continue at <address> */
809 if (!current)
810 buf_set_u32(pc->value, 0, 32, address);
811
812 uint32_t pc_value = buf_get_u32(pc->value, 0, 32);
813
814 /* the front-end may request us not to handle breakpoints */
815 if (handle_breakpoints) {
816 breakpoint = breakpoint_find(target, pc_value);
817 if (breakpoint)
818 cortex_m3_unset_breakpoint(target, breakpoint);
819 }
820
821 armv7m_maybe_skip_bkpt_inst(target, &bkpt_inst_found);
822
823 target->debug_reason = DBG_REASON_SINGLESTEP;
824
825 armv7m_restore_context(target);
826
827 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
828
829 /* if no bkpt instruction is found at pc then we can perform
830 * a normal step, otherwise we have to manually step over the bkpt
831 * instruction - as such simulate a step */
832 if (bkpt_inst_found == false) {
833 /* Automatic ISR masking mode off: Just step over the next instruction */
834 if ((cortex_m3->isrmasking_mode != CORTEX_M3_ISRMASK_AUTO))
835 cortex_m3_write_debug_halt_mask(target, C_STEP, C_HALT);
836 else {
 837 			/* Process interrupts during stepping in a way that they don't interfere
 838 			 * with debugging.
839 *
840 * Principle:
841 *
 842 			 * Set a temporary breakpoint at the current pc and let the core run
843 * with interrupts enabled. Pending interrupts get served and we run
844 * into the breakpoint again afterwards. Then we step over the next
845 * instruction with interrupts disabled.
846 *
847 * If the pending interrupts don't complete within time, we leave the
848 * core running. This may happen if the interrupts trigger faster
849 * than the core can process them or the handler doesn't return.
850 *
851 * If no more breakpoints are available we simply do a step with
852 * interrupts enabled.
853 *
854 */
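/* In short: plant a temporary breakpoint at the current pc, restart the core
 * with interrupts enabled, wait (up to 500 ms below) until it halts on that
 * breakpoint, remove it, then single-step the original instruction with
 * C_MASKINTS set.
 */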
855
856 /* 2012-09-29 ph
857 *
 858 			 * If a breakpoint is already set on the lower halfword, then a breakpoint on
 859 			 * the upper halfword will not break again when the core is restarted. So we
860 * just step over the instruction with interrupts disabled.
861 *
 862 			 * The documentation has no information about this; it was found by observation
 863 			 * on STM32F1 and STM32F2. Proper explanation welcome. STM32F0 doesn't seem to
864 * suffer from this problem.
865 *
866 * To add some confusion: pc_value has bit 0 always set, while the breakpoint
867 * address has it always cleared. The former is done to indicate thumb mode
868 * to gdb.
869 *
870 */
871 if ((pc_value & 0x02) && breakpoint_find(target, pc_value & ~0x03)) {
872 LOG_DEBUG("Stepping over next instruction with interrupts disabled");
873 cortex_m3_write_debug_halt_mask(target, C_HALT | C_MASKINTS, 0);
874 cortex_m3_write_debug_halt_mask(target, C_STEP, C_HALT);
875 /* Re-enable interrupts */
876 cortex_m3_write_debug_halt_mask(target, C_HALT, C_MASKINTS);
 877 			} else {
 878 				/* Set a temporary breakpoint */
881 if (breakpoint)
882 retval = cortex_m3_set_breakpoint(target, breakpoint);
883 else
884 retval = breakpoint_add(target, pc_value, 2, BKPT_TYPE_BY_ADDR(pc_value));
885 bool tmp_bp_set = (retval == ERROR_OK);
886
887 /* No more breakpoints left, just do a step */
888 if (!tmp_bp_set)
889 cortex_m3_write_debug_halt_mask(target, C_STEP, C_HALT);
890 else {
891 /* Start the core */
892 LOG_DEBUG("Starting core to serve pending interrupts");
893 int64_t t_start = timeval_ms();
894 cortex_m3_write_debug_halt_mask(target, 0, C_HALT | C_STEP);
895
896 /* Wait for pending handlers to complete or timeout */
897 do {
898 retval = mem_ap_read_atomic_u32(swjdp,
899 DCB_DHCSR,
900 &cortex_m3->dcb_dhcsr);
901 if (retval != ERROR_OK) {
902 target->state = TARGET_UNKNOWN;
903 return retval;
904 }
905 isr_timed_out = ((timeval_ms() - t_start) > 500);
906 } while (!((cortex_m3->dcb_dhcsr & S_HALT) || isr_timed_out));
907
908 /* only remove breakpoint if we created it */
909 if (breakpoint)
910 cortex_m3_unset_breakpoint(target, breakpoint);
911 else {
912 /* Remove the temporary breakpoint */
913 breakpoint_remove(target, pc_value);
914 }
915
916 if (isr_timed_out) {
917 LOG_DEBUG("Interrupt handlers didn't complete within time, "
918 "leaving target running");
919 } else {
920 /* Step over next instruction with interrupts disabled */
921 cortex_m3_write_debug_halt_mask(target,
922 C_HALT | C_MASKINTS,
923 0);
924 cortex_m3_write_debug_halt_mask(target, C_STEP, C_HALT);
925 /* Re-enable interrupts */
926 cortex_m3_write_debug_halt_mask(target, C_HALT, C_MASKINTS);
927 }
928 }
929 }
930 }
931 }
932
933 retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &cortex_m3->dcb_dhcsr);
934 if (retval != ERROR_OK)
935 return retval;
936
937 /* registers are now invalid */
938 register_cache_invalidate(armv7m->arm.core_cache);
939
940 if (breakpoint)
941 cortex_m3_set_breakpoint(target, breakpoint);
942
943 if (isr_timed_out) {
944 /* Leave the core running. The user has to stop execution manually. */
945 target->debug_reason = DBG_REASON_NOTHALTED;
946 target->state = TARGET_RUNNING;
947 return ERROR_OK;
948 }
949
950 LOG_DEBUG("target stepped dcb_dhcsr = 0x%" PRIx32
951 " nvic_icsr = 0x%" PRIx32,
952 cortex_m3->dcb_dhcsr, cortex_m3->nvic_icsr);
953
954 retval = cortex_m3_debug_entry(target);
955 if (retval != ERROR_OK)
956 return retval;
957 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
958
959 LOG_DEBUG("target stepped dcb_dhcsr = 0x%" PRIx32
960 " nvic_icsr = 0x%" PRIx32,
961 cortex_m3->dcb_dhcsr, cortex_m3->nvic_icsr);
962
963 return ERROR_OK;
964 }
965
966 static int cortex_m3_assert_reset(struct target *target)
967 {
968 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
969 struct adiv5_dap *swjdp = cortex_m3->armv7m.arm.dap;
970 enum cortex_m3_soft_reset_config reset_config = cortex_m3->soft_reset_config;
971
972 LOG_DEBUG("target->state: %s",
973 target_state_name(target));
974
975 enum reset_types jtag_reset_config = jtag_get_reset_config();
976
977 if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT)) {
978 /* allow scripts to override the reset event */
979
980 target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
981 register_cache_invalidate(cortex_m3->armv7m.arm.core_cache);
982 target->state = TARGET_RESET;
983
984 return ERROR_OK;
985 }
986
987 /* some cores support connecting while srst is asserted
 988 	 * use that mode if it has been configured */
989
990 bool srst_asserted = false;
991
992 if ((jtag_reset_config & RESET_HAS_SRST) &&
993 (jtag_reset_config & RESET_SRST_NO_GATING)) {
994 adapter_assert_reset();
995 srst_asserted = true;
996 }
997
998 /* Enable debug requests */
999 int retval;
1000 retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &cortex_m3->dcb_dhcsr);
1001 if (retval != ERROR_OK)
1002 return retval;
1003 if (!(cortex_m3->dcb_dhcsr & C_DEBUGEN)) {
1004 retval = mem_ap_write_u32(swjdp, DCB_DHCSR, DBGKEY | C_DEBUGEN);
1005 if (retval != ERROR_OK)
1006 return retval;
1007 }
1008
1009 /* If the processor is sleeping in a WFI or WFE instruction, the
1010 * C_HALT bit must be asserted to regain control */
1011 if (cortex_m3->dcb_dhcsr & S_SLEEP) {
1012 retval = mem_ap_write_u32(swjdp, DCB_DHCSR, DBGKEY | C_HALT | C_DEBUGEN);
1013 if (retval != ERROR_OK)
1014 return retval;
1015 }
1016
1017 retval = mem_ap_write_u32(swjdp, DCB_DCRDR, 0);
1018 if (retval != ERROR_OK)
1019 return retval;
1020
1021 if (!target->reset_halt) {
1022 /* Set/Clear C_MASKINTS in a separate operation */
1023 if (cortex_m3->dcb_dhcsr & C_MASKINTS) {
1024 retval = mem_ap_write_atomic_u32(swjdp, DCB_DHCSR,
1025 DBGKEY | C_DEBUGEN | C_HALT);
1026 if (retval != ERROR_OK)
1027 return retval;
1028 }
1029
1030 /* clear any debug flags before resuming */
1031 cortex_m3_clear_halt(target);
1032
1033 /* clear C_HALT in dhcsr reg */
1034 cortex_m3_write_debug_halt_mask(target, 0, C_HALT);
1035 } else {
1036 /* Halt in debug on reset; endreset_event() restores DEMCR.
1037 *
1038 * REVISIT catching BUSERR presumably helps to defend against
1039 * bad vector table entries. Should this include MMERR or
1040 * other flags too?
1041 */
1042 retval = mem_ap_write_atomic_u32(swjdp, DCB_DEMCR,
1043 TRCENA | VC_HARDERR | VC_BUSERR | VC_CORERESET);
1044 if (retval != ERROR_OK)
1045 return retval;
1046 }
1047
1048 if (jtag_reset_config & RESET_HAS_SRST) {
1049 /* default to asserting srst */
1050 if (!srst_asserted)
1051 adapter_assert_reset();
1052 } else {
1053 /* Use a standard Cortex-M3 software reset mechanism.
 1054 		 * We default to using VECTRESET as it is supported on all current cores.
1055 * This has the disadvantage of not resetting the peripherals, so a
1056 * reset-init event handler is needed to perform any peripheral resets.
1057 */
1058 retval = mem_ap_write_atomic_u32(swjdp, NVIC_AIRCR,
1059 AIRCR_VECTKEY | ((reset_config == CORTEX_M3_RESET_SYSRESETREQ)
1060 ? AIRCR_SYSRESETREQ : AIRCR_VECTRESET));
1061 if (retval != ERROR_OK)
1062 return retval;
1063
1064 LOG_DEBUG("Using Cortex-M %s", (reset_config == CORTEX_M3_RESET_SYSRESETREQ)
1065 ? "SYSRESETREQ" : "VECTRESET");
1066
1067 if (reset_config == CORTEX_M3_RESET_VECTRESET) {
1068 LOG_WARNING("Only resetting the Cortex-M core, use a reset-init event "
1069 "handler to reset any peripherals or configure hardware srst support.");
1070 }
1071
1072 {
1073 /* I do not know why this is necessary, but it
1074 * fixes strange effects (step/resume cause NMI
1075 * after reset) on LM3S6918 -- Michael Schwingen
1076 */
1077 uint32_t tmp;
1078 retval = mem_ap_read_atomic_u32(swjdp, NVIC_AIRCR, &tmp);
1079 if (retval != ERROR_OK)
1080 return retval;
1081 }
1082 }
1083
1084 target->state = TARGET_RESET;
1085 jtag_add_sleep(50000);
1086
1087 register_cache_invalidate(cortex_m3->armv7m.arm.core_cache);
1088
1089 if (target->reset_halt) {
1090 retval = target_halt(target);
1091 if (retval != ERROR_OK)
1092 return retval;
1093 }
1094
1095 return ERROR_OK;
1096 }
1097
1098 static int cortex_m3_deassert_reset(struct target *target)
1099 {
1100 LOG_DEBUG("target->state: %s",
1101 target_state_name(target));
1102
1103 /* deassert reset lines */
1104 adapter_deassert_reset();
1105
1106 return ERROR_OK;
1107 }
1108
1109 int cortex_m3_set_breakpoint(struct target *target, struct breakpoint *breakpoint)
1110 {
1111 int retval;
1112 int fp_num = 0;
1113 uint32_t hilo;
1114 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
1115 struct cortex_m3_fp_comparator *comparator_list = cortex_m3->fp_comparator_list;
1116
1117 if (breakpoint->set) {
1118 LOG_WARNING("breakpoint (BPID: %d) already set", breakpoint->unique_id);
1119 return ERROR_OK;
1120 }
1121
1122 if (cortex_m3->auto_bp_type)
1123 breakpoint->type = BKPT_TYPE_BY_ADDR(breakpoint->address);
1124
1125 if (breakpoint->type == BKPT_HARD) {
 1126 		while ((fp_num < cortex_m3->fp_num_code) && comparator_list[fp_num].used)
1127 fp_num++;
1128 if (fp_num >= cortex_m3->fp_num_code) {
1129 LOG_ERROR("Can not find free FPB Comparator!");
1130 return ERROR_FAIL;
1131 }
1132 breakpoint->set = fp_num + 1;
1133 hilo = (breakpoint->address & 0x2) ? FPCR_REPLACE_BKPT_HIGH : FPCR_REPLACE_BKPT_LOW;
1134 comparator_list[fp_num].used = 1;
1135 comparator_list[fp_num].fpcr_value = (breakpoint->address & 0x1FFFFFFC) | hilo | 1;
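/* FPBv1 comparator encoding: COMP holds address bits [28:2], REPLACE selects
 * the halfword and bit 0 enables the comparator.  E.g. a hard breakpoint at
 * 0x1002 yields 0x1000 | FPCR_REPLACE_BKPT_HIGH | 1. */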
1136 target_write_u32(target, comparator_list[fp_num].fpcr_address,
1137 comparator_list[fp_num].fpcr_value);
1138 LOG_DEBUG("fpc_num %i fpcr_value 0x%" PRIx32 "",
1139 fp_num,
1140 comparator_list[fp_num].fpcr_value);
1141 if (!cortex_m3->fpb_enabled) {
1142 LOG_DEBUG("FPB wasn't enabled, do it now");
1143 target_write_u32(target, FP_CTRL, 3);
1144 }
1145 } else if (breakpoint->type == BKPT_SOFT) {
1146 uint8_t code[4];
1147
1148 /* NOTE: on ARMv6-M and ARMv7-M, BKPT(0xab) is used for
1149 * semihosting; don't use that. Otherwise the BKPT
1150 * parameter is arbitrary.
1151 */
1152 buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1153 retval = target_read_memory(target,
1154 breakpoint->address & 0xFFFFFFFE,
1155 breakpoint->length, 1,
1156 breakpoint->orig_instr);
1157 if (retval != ERROR_OK)
1158 return retval;
1159 retval = target_write_memory(target,
1160 breakpoint->address & 0xFFFFFFFE,
1161 breakpoint->length, 1,
1162 code);
1163 if (retval != ERROR_OK)
1164 return retval;
1165 breakpoint->set = true;
1166 }
1167
1168 LOG_DEBUG("BPID: %d, Type: %d, Address: 0x%08" PRIx32 " Length: %d (set=%d)",
1169 breakpoint->unique_id,
1170 (int)(breakpoint->type),
1171 breakpoint->address,
1172 breakpoint->length,
1173 breakpoint->set);
1174
1175 return ERROR_OK;
1176 }
1177
1178 int cortex_m3_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
1179 {
1180 int retval;
1181 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
1182 struct cortex_m3_fp_comparator *comparator_list = cortex_m3->fp_comparator_list;
1183
1184 if (!breakpoint->set) {
1185 LOG_WARNING("breakpoint not set");
1186 return ERROR_OK;
1187 }
1188
1189 LOG_DEBUG("BPID: %d, Type: %d, Address: 0x%08" PRIx32 " Length: %d (set=%d)",
1190 breakpoint->unique_id,
1191 (int)(breakpoint->type),
1192 breakpoint->address,
1193 breakpoint->length,
1194 breakpoint->set);
1195
1196 if (breakpoint->type == BKPT_HARD) {
1197 int fp_num = breakpoint->set - 1;
1198 if ((fp_num < 0) || (fp_num >= cortex_m3->fp_num_code)) {
1199 LOG_DEBUG("Invalid FP Comparator number in breakpoint");
1200 return ERROR_OK;
1201 }
1202 comparator_list[fp_num].used = 0;
1203 comparator_list[fp_num].fpcr_value = 0;
1204 target_write_u32(target, comparator_list[fp_num].fpcr_address,
1205 comparator_list[fp_num].fpcr_value);
1206 } else {
1207 /* restore original instruction (kept in target endianness) */
1208 if (breakpoint->length == 4) {
1209 retval = target_write_memory(target, breakpoint->address & 0xFFFFFFFE, 4, 1,
1210 breakpoint->orig_instr);
1211 if (retval != ERROR_OK)
1212 return retval;
1213 } else {
1214 retval = target_write_memory(target, breakpoint->address & 0xFFFFFFFE, 2, 1,
1215 breakpoint->orig_instr);
1216 if (retval != ERROR_OK)
1217 return retval;
1218 }
1219 }
1220 breakpoint->set = false;
1221
1222 return ERROR_OK;
1223 }
1224
1225 int cortex_m3_add_breakpoint(struct target *target, struct breakpoint *breakpoint)
1226 {
1227 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
1228
1229 if (cortex_m3->auto_bp_type) {
1230 breakpoint->type = BKPT_TYPE_BY_ADDR(breakpoint->address);
1231 #ifdef ARMV7_GDB_HACKS
1232 if (breakpoint->length != 2) {
1233 /* XXX Hack: Replace all breakpoints with length != 2 with
1234 * a hardware breakpoint. */
1235 breakpoint->type = BKPT_HARD;
1236 breakpoint->length = 2;
1237 }
1238 #endif
1239 }
1240
1241 if (breakpoint->type != BKPT_TYPE_BY_ADDR(breakpoint->address)) {
1242 if (breakpoint->type == BKPT_HARD) {
1243 LOG_INFO("flash patch comparator requested outside code memory region");
1244 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1245 }
1246
1247 if (breakpoint->type == BKPT_SOFT) {
1248 LOG_INFO("soft breakpoint requested in code (flash) memory region");
1249 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1250 }
1251 }
1252
1253 if ((breakpoint->type == BKPT_HARD) && (cortex_m3->fp_code_available < 1)) {
1254 LOG_INFO("no flash patch comparator unit available for hardware breakpoint");
1255 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1256 }
1257
1258 if ((breakpoint->length != 2)) {
1259 LOG_INFO("only breakpoints of two bytes length supported");
1260 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1261 }
1262
1263 if (breakpoint->type == BKPT_HARD)
1264 cortex_m3->fp_code_available--;
1265
1266 return cortex_m3_set_breakpoint(target, breakpoint);
1267 }
1268
1269 int cortex_m3_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1270 {
1271 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
1272
 1273 	/* REVISIT why check? FPB can be updated with core running ... */
1274 if (target->state != TARGET_HALTED) {
1275 LOG_WARNING("target not halted");
1276 return ERROR_TARGET_NOT_HALTED;
1277 }
1278
1279 if (cortex_m3->auto_bp_type)
1280 breakpoint->type = BKPT_TYPE_BY_ADDR(breakpoint->address);
1281
1282 if (breakpoint->set)
1283 cortex_m3_unset_breakpoint(target, breakpoint);
1284
1285 if (breakpoint->type == BKPT_HARD)
1286 cortex_m3->fp_code_available++;
1287
1288 return ERROR_OK;
1289 }
1290
1291 int cortex_m3_set_watchpoint(struct target *target, struct watchpoint *watchpoint)
1292 {
1293 int dwt_num = 0;
1294 uint32_t mask, temp;
1295 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
1296
1297 /* watchpoint params were validated earlier */
1298 mask = 0;
1299 temp = watchpoint->length;
1300 while (temp) {
1301 temp >>= 1;
1302 mask++;
1303 }
1304 mask--;
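/* mask now holds log2(length); e.g. a 4-byte watchpoint gives mask = 2,
 * i.e. the DWT comparator ignores the two low-order address bits. */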
1305
1306 /* REVISIT Don't fully trust these "not used" records ... users
1307 * may set up breakpoints by hand, e.g. dual-address data value
1308 * watchpoint using comparator #1; comparator #0 matching cycle
1309 * count; send data trace info through ITM and TPIU; etc
1310 */
1311 struct cortex_m3_dwt_comparator *comparator;
1312
1313 for (comparator = cortex_m3->dwt_comparator_list;
 1314 		dwt_num < cortex_m3->dwt_num_comp && comparator->used;
1315 comparator++, dwt_num++)
1316 continue;
1317 if (dwt_num >= cortex_m3->dwt_num_comp) {
1318 LOG_ERROR("Can not find free DWT Comparator");
1319 return ERROR_FAIL;
1320 }
1321 comparator->used = 1;
1322 watchpoint->set = dwt_num + 1;
1323
1324 comparator->comp = watchpoint->address;
1325 target_write_u32(target, comparator->dwt_comparator_address + 0,
1326 comparator->comp);
1327
1328 comparator->mask = mask;
1329 target_write_u32(target, comparator->dwt_comparator_address + 4,
1330 comparator->mask);
1331
1332 switch (watchpoint->rw) {
1333 case WPT_READ:
1334 comparator->function = 5;
1335 break;
1336 case WPT_WRITE:
1337 comparator->function = 6;
1338 break;
1339 case WPT_ACCESS:
1340 comparator->function = 7;
1341 break;
1342 }
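/* 5, 6 and 7 are the ARMv7-M DWT FUNCTION values for a data-address
 * watchpoint matching on read, write, or either kind of access. */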
1343 target_write_u32(target, comparator->dwt_comparator_address + 8,
1344 comparator->function);
1345
1346 LOG_DEBUG("Watchpoint (ID %d) DWT%d 0x%08x 0x%x 0x%05x",
1347 watchpoint->unique_id, dwt_num,
1348 (unsigned) comparator->comp,
1349 (unsigned) comparator->mask,
1350 (unsigned) comparator->function);
1351 return ERROR_OK;
1352 }
1353
1354 int cortex_m3_unset_watchpoint(struct target *target, struct watchpoint *watchpoint)
1355 {
1356 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
1357 struct cortex_m3_dwt_comparator *comparator;
1358 int dwt_num;
1359
1360 if (!watchpoint->set) {
1361 LOG_WARNING("watchpoint (wpid: %d) not set",
1362 watchpoint->unique_id);
1363 return ERROR_OK;
1364 }
1365
1366 dwt_num = watchpoint->set - 1;
1367
1368 LOG_DEBUG("Watchpoint (ID %d) DWT%d address: 0x%08x clear",
1369 watchpoint->unique_id, dwt_num,
1370 (unsigned) watchpoint->address);
1371
1372 if ((dwt_num < 0) || (dwt_num >= cortex_m3->dwt_num_comp)) {
1373 LOG_DEBUG("Invalid DWT Comparator number in watchpoint");
1374 return ERROR_OK;
1375 }
1376
1377 comparator = cortex_m3->dwt_comparator_list + dwt_num;
1378 comparator->used = 0;
1379 comparator->function = 0;
1380 target_write_u32(target, comparator->dwt_comparator_address + 8,
1381 comparator->function);
1382
1383 watchpoint->set = false;
1384
1385 return ERROR_OK;
1386 }
1387
1388 int cortex_m3_add_watchpoint(struct target *target, struct watchpoint *watchpoint)
1389 {
1390 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
1391
1392 if (cortex_m3->dwt_comp_available < 1) {
1393 LOG_DEBUG("no comparators?");
1394 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1395 }
1396
1397 /* hardware doesn't support data value masking */
1398 if (watchpoint->mask != ~(uint32_t)0) {
1399 LOG_DEBUG("watchpoint value masks not supported");
1400 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1401 }
1402
1403 /* hardware allows address masks of up to 32K */
1404 unsigned mask;
1405
1406 for (mask = 0; mask < 16; mask++) {
1407 if ((1u << mask) == watchpoint->length)
1408 break;
1409 }
1410 if (mask == 16) {
1411 LOG_DEBUG("unsupported watchpoint length");
1412 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1413 }
1414 if (watchpoint->address & ((1 << mask) - 1)) {
1415 LOG_DEBUG("watchpoint address is unaligned");
1416 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1417 }
1418
1419 /* Caller doesn't seem to be able to describe watching for data
1420 * values of zero; that flags "no value".
1421 *
1422 * REVISIT This DWT may well be able to watch for specific data
1423 * values. Requires comparator #1 to set DATAVMATCH and match
1424 * the data, and another comparator (DATAVADDR0) matching addr.
1425 */
1426 if (watchpoint->value) {
1427 LOG_DEBUG("data value watchpoint not YET supported");
1428 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1429 }
1430
1431 cortex_m3->dwt_comp_available--;
1432 LOG_DEBUG("dwt_comp_available: %d", cortex_m3->dwt_comp_available);
1433
1434 return ERROR_OK;
1435 }
1436
1437 int cortex_m3_remove_watchpoint(struct target *target, struct watchpoint *watchpoint)
1438 {
1439 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
1440
1441 /* REVISIT why check? DWT can be updated with core running ... */
1442 if (target->state != TARGET_HALTED) {
1443 LOG_WARNING("target not halted");
1444 return ERROR_TARGET_NOT_HALTED;
1445 }
1446
1447 if (watchpoint->set)
1448 cortex_m3_unset_watchpoint(target, watchpoint);
1449
1450 cortex_m3->dwt_comp_available++;
1451 LOG_DEBUG("dwt_comp_available: %d", cortex_m3->dwt_comp_available);
1452
1453 return ERROR_OK;
1454 }
1455
1456 void cortex_m3_enable_watchpoints(struct target *target)
1457 {
1458 struct watchpoint *watchpoint = target->watchpoints;
1459
1460 /* set any pending watchpoints */
1461 while (watchpoint) {
1462 if (!watchpoint->set)
1463 cortex_m3_set_watchpoint(target, watchpoint);
1464 watchpoint = watchpoint->next;
1465 }
1466 }
1467
1468 static int cortex_m3_load_core_reg_u32(struct target *target,
1469 uint32_t num, uint32_t *value)
1470 {
1471 int retval;
1472 struct armv7m_common *armv7m = target_to_armv7m(target);
1473 struct adiv5_dap *swjdp = armv7m->arm.dap;
1474
1475 /* NOTE: we "know" here that the register identifiers used
1476 * in the v7m header match the Cortex-M3 Debug Core Register
1477 * Selector values for R0..R15, xPSR, MSP, and PSP.
1478 */
1479 switch (num) {
1480 case 0 ... 18:
1481 /* read a normal core register */
1482 retval = cortexm3_dap_read_coreregister_u32(swjdp, value, num);
1483
1484 if (retval != ERROR_OK) {
1485 LOG_ERROR("JTAG failure %i", retval);
1486 return ERROR_JTAG_DEVICE_ERROR;
1487 }
1488 LOG_DEBUG("load from core reg %i value 0x%" PRIx32 "", (int)num, *value);
1489 break;
1490
1491 case ARMV7M_PRIMASK:
1492 case ARMV7M_BASEPRI:
1493 case ARMV7M_FAULTMASK:
1494 case ARMV7M_CONTROL:
1495 /* Cortex-M3 packages these four registers as bitfields
1496 * in one Debug Core register. So say r0 and r2 docs;
1497 * it was removed from r1 docs, but still works.
1498 */
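/* Register 20 packs them one per byte: PRIMASK in byte 0, BASEPRI in
 * byte 1, FAULTMASK in byte 2 and CONTROL in byte 3, which is how the
 * bitfields are unpacked below. */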
1499 cortexm3_dap_read_coreregister_u32(swjdp, value, 20);
1500
1501 switch (num) {
1502 case ARMV7M_PRIMASK:
1503 *value = buf_get_u32((uint8_t *)value, 0, 1);
1504 break;
1505
1506 case ARMV7M_BASEPRI:
1507 *value = buf_get_u32((uint8_t *)value, 8, 8);
1508 break;
1509
1510 case ARMV7M_FAULTMASK:
1511 *value = buf_get_u32((uint8_t *)value, 16, 1);
1512 break;
1513
1514 case ARMV7M_CONTROL:
1515 *value = buf_get_u32((uint8_t *)value, 24, 2);
1516 break;
1517 }
1518
1519 LOG_DEBUG("load from special reg %i value 0x%" PRIx32 "", (int)num, *value);
1520 break;
1521
1522 default:
1523 return ERROR_COMMAND_SYNTAX_ERROR;
1524 }
1525
1526 return ERROR_OK;
1527 }
1528
1529 static int cortex_m3_store_core_reg_u32(struct target *target,
1530 uint32_t num, uint32_t value)
1531 {
1532 int retval;
1533 uint32_t reg;
1534 struct armv7m_common *armv7m = target_to_armv7m(target);
1535 struct adiv5_dap *swjdp = armv7m->arm.dap;
1536
1537 #ifdef ARMV7_GDB_HACKS
1538 /* If the LR register is being modified, make sure it will put us
1539 * in "thumb" mode, or an INVSTATE exception will occur. This is a
1540 * hack to deal with the fact that gdb will sometimes "forge"
1541 * return addresses, and doesn't set the LSB correctly (i.e., when
1542 * printing expressions containing function calls, it sets LR = 0.)
1543 * Valid exception return codes have bit 0 set too.
1544 */
1545 if (num == ARMV7M_R14)
1546 value |= 0x01;
1547 #endif
1548
1549 /* NOTE: we "know" here that the register identifiers used
1550 * in the v7m header match the Cortex-M3 Debug Core Register
1551 * Selector values for R0..R15, xPSR, MSP, and PSP.
1552 */
1553 switch (num) {
1554 case 0 ... 18:
1555 retval = cortexm3_dap_write_coreregister_u32(swjdp, value, num);
1556 if (retval != ERROR_OK) {
1557 struct reg *r;
1558
1559 LOG_ERROR("JTAG failure");
1560 r = armv7m->arm.core_cache->reg_list + num;
1561 r->dirty = r->valid;
1562 return ERROR_JTAG_DEVICE_ERROR;
1563 }
1564 LOG_DEBUG("write core reg %i value 0x%" PRIx32 "", (int)num, value);
1565 break;
1566
1567 case ARMV7M_PRIMASK:
1568 case ARMV7M_BASEPRI:
1569 case ARMV7M_FAULTMASK:
1570 case ARMV7M_CONTROL:
1571 /* Cortex-M3 packages these four registers as bitfields
1572 * in one Debug Core register. So say r0 and r2 docs;
1573 * it was removed from r1 docs, but still works.
1574 */
1575 cortexm3_dap_read_coreregister_u32(swjdp, &reg, 20);
1576
1577 switch (num) {
1578 case ARMV7M_PRIMASK:
1579 buf_set_u32((uint8_t *)&reg, 0, 1, value);
1580 break;
1581
1582 case ARMV7M_BASEPRI:
1583 buf_set_u32((uint8_t *)&reg, 8, 8, value);
1584 break;
1585
1586 case ARMV7M_FAULTMASK:
1587 buf_set_u32((uint8_t *)&reg, 16, 1, value);
1588 break;
1589
1590 case ARMV7M_CONTROL:
1591 buf_set_u32((uint8_t *)&reg, 24, 2, value);
1592 break;
1593 }
1594
1595 cortexm3_dap_write_coreregister_u32(swjdp, reg, 20);
1596
1597 LOG_DEBUG("write special reg %i value 0x%" PRIx32 " ", (int)num, value);
1598 break;
1599
1600 default:
1601 return ERROR_COMMAND_SYNTAX_ERROR;
1602 }
1603
1604 return ERROR_OK;
1605 }
1606
1607 static int cortex_m3_read_memory(struct target *target, uint32_t address,
1608 uint32_t size, uint32_t count, uint8_t *buffer)
1609 {
1610 struct armv7m_common *armv7m = target_to_armv7m(target);
1611 struct adiv5_dap *swjdp = armv7m->arm.dap;
1612 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1613
1614 if (armv7m->arm.is_armv6m) {
1615 /* armv6m does not handle unaligned memory access */
1616 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1617 return ERROR_TARGET_UNALIGNED_ACCESS;
1618 }
1619
1620 /* cortex_m3 handles unaligned memory access */
1621 if (count && buffer) {
1622 switch (size) {
1623 case 4:
1624 retval = mem_ap_read_buf_u32(swjdp, buffer, 4 * count, address, true);
1625 break;
1626 case 2:
1627 retval = mem_ap_read_buf_u16(swjdp, buffer, 2 * count, address);
1628 break;
1629 case 1:
1630 retval = mem_ap_read_buf_u8(swjdp, buffer, count, address);
1631 break;
1632 }
1633 }
1634
1635 return retval;
1636 }
1637
1638 static int cortex_m3_write_memory(struct target *target, uint32_t address,
1639 uint32_t size, uint32_t count, const uint8_t *buffer)
1640 {
1641 struct armv7m_common *armv7m = target_to_armv7m(target);
1642 struct adiv5_dap *swjdp = armv7m->arm.dap;
1643 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1644
1645 if (armv7m->arm.is_armv6m) {
1646 /* armv6m does not handle unaligned memory access */
1647 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1648 return ERROR_TARGET_UNALIGNED_ACCESS;
1649 }
1650
1651 if (count && buffer) {
1652 switch (size) {
1653 case 4:
1654 retval = mem_ap_write_buf_u32(swjdp, buffer, 4 * count, address, true);
1655 break;
1656 case 2:
1657 retval = mem_ap_write_buf_u16(swjdp, buffer, 2 * count, address);
1658 break;
1659 case 1:
1660 retval = mem_ap_write_buf_u8(swjdp, buffer, count, address);
1661 break;
1662 }
1663 }
1664
1665 return retval;
1666 }
1667
1668 static int cortex_m3_init_target(struct command_context *cmd_ctx,
1669 struct target *target)
1670 {
1671 armv7m_build_reg_cache(target);
1672 return ERROR_OK;
1673 }
1674
1675 /* REVISIT cache valid/dirty bits are unmaintained. We could set "valid"
1676 * on r/w if the core is not running, and clear on resume or reset ... or
1677 * at least, in a post_restore_context() method.
1678 */
1679
1680 struct dwt_reg_state {
1681 struct target *target;
1682 uint32_t addr;
1683 uint32_t value; /* scratch/cache */
1684 };
1685
1686 static int cortex_m3_dwt_get_reg(struct reg *reg)
1687 {
1688 struct dwt_reg_state *state = reg->arch_info;
1689
1690 return target_read_u32(state->target, state->addr, &state->value);
1691 }
1692
1693 static int cortex_m3_dwt_set_reg(struct reg *reg, uint8_t *buf)
1694 {
1695 struct dwt_reg_state *state = reg->arch_info;
1696
1697 return target_write_u32(state->target, state->addr,
1698 buf_get_u32(buf, 0, reg->size));
1699 }
1700
1701 struct dwt_reg {
1702 uint32_t addr;
1703 char *name;
1704 unsigned size;
1705 };
1706
1707 static struct dwt_reg dwt_base_regs[] = {
1708 { DWT_CTRL, "dwt_ctrl", 32, },
1709 /* NOTE that Erratum 532314 (fixed r2p0) affects CYCCNT: it wrongly
1710 * increments while the core is asleep.
1711 */
1712 { DWT_CYCCNT, "dwt_cyccnt", 32, },
1713 /* plus some 8 bit counters, useful for profiling with TPIU */
1714 };
1715
1716 static struct dwt_reg dwt_comp[] = {
1717 #define DWT_COMPARATOR(i) \
1718 { DWT_COMP0 + 0x10 * (i), "dwt_" #i "_comp", 32, }, \
1719 { DWT_MASK0 + 0x10 * (i), "dwt_" #i "_mask", 4, }, \
1720 { DWT_FUNCTION0 + 0x10 * (i), "dwt_" #i "_function", 32, }
1721 DWT_COMPARATOR(0),
1722 DWT_COMPARATOR(1),
1723 DWT_COMPARATOR(2),
1724 DWT_COMPARATOR(3),
1725 #undef DWT_COMPARATOR
1726 };
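/* Each DWT comparator occupies a 0x10-byte stride: COMP at +0x0, MASK at
 * +0x4 and FUNCTION at +0x8, hence the DWT_COMP0 + 0x10 * (i) addressing
 * in the macro above. */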
1727
1728 static const struct reg_arch_type dwt_reg_type = {
1729 .get = cortex_m3_dwt_get_reg,
1730 .set = cortex_m3_dwt_set_reg,
1731 };
1732
1733 static void cortex_m3_dwt_addreg(struct target *t, struct reg *r, struct dwt_reg *d)
1734 {
1735 struct dwt_reg_state *state;
1736
1737 state = calloc(1, sizeof *state);
1738 if (!state)
1739 return;
1740 state->addr = d->addr;
1741 state->target = t;
1742
1743 r->name = d->name;
1744 r->size = d->size;
1745 r->value = &state->value;
1746 r->arch_info = state;
1747 r->type = &dwt_reg_type;
1748 }
1749
1750 void cortex_m3_dwt_setup(struct cortex_m3_common *cm3, struct target *target)
1751 {
1752 uint32_t dwtcr;
1753 struct reg_cache *cache;
1754 struct cortex_m3_dwt_comparator *comparator;
1755 int reg, i;
1756
1757 target_read_u32(target, DWT_CTRL, &dwtcr);
1758 if (!dwtcr) {
1759 LOG_DEBUG("no DWT");
1760 return;
1761 }
1762
1763 cm3->dwt_num_comp = (dwtcr >> 28) & 0xF;
1764 cm3->dwt_comp_available = cm3->dwt_num_comp;
1765 cm3->dwt_comparator_list = calloc(cm3->dwt_num_comp,
1766 sizeof(struct cortex_m3_dwt_comparator));
1767 if (!cm3->dwt_comparator_list) {
1768 fail0:
1769 cm3->dwt_num_comp = 0;
1770 LOG_ERROR("out of mem");
1771 return;
1772 }
1773
1774 cache = calloc(1, sizeof *cache);
1775 if (!cache) {
1776 fail1:
1777 free(cm3->dwt_comparator_list);
1778 goto fail0;
1779 }
1780 cache->name = "cortex-m3 dwt registers";
1781 cache->num_regs = 2 + cm3->dwt_num_comp * 3;
1782 cache->reg_list = calloc(cache->num_regs, sizeof *cache->reg_list);
1783 if (!cache->reg_list) {
1784 free(cache);
1785 goto fail1;
1786 }
1787
1788 for (reg = 0; reg < 2; reg++)
1789 cortex_m3_dwt_addreg(target, cache->reg_list + reg,
1790 dwt_base_regs + reg);
1791
1792 comparator = cm3->dwt_comparator_list;
1793 for (i = 0; i < cm3->dwt_num_comp; i++, comparator++) {
1794 int j;
1795
1796 comparator->dwt_comparator_address = DWT_COMP0 + 0x10 * i;
1797 for (j = 0; j < 3; j++, reg++)
1798 cortex_m3_dwt_addreg(target, cache->reg_list + reg,
1799 dwt_comp + 3 * i + j);
1800
1801 /* make sure we clear any watchpoints enabled on the target */
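/* offset 0x8 within the comparator stride is DWT_FUNCTIONn; writing 0 disables it */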
1802 target_write_u32(target, comparator->dwt_comparator_address + 8, 0);
1803 }
1804
1805 *register_get_last_cache_p(&target->reg_cache) = cache;
1806 cm3->dwt_cache = cache;
1807
1808 LOG_DEBUG("DWT dwtcr 0x%" PRIx32 ", comp %d, watch%s",
1809 dwtcr, cm3->dwt_num_comp,
1810 (dwtcr & (0xf << 24)) ? " only" : "/trigger");
1811
1812 /* REVISIT: if num_comp > 1, check whether comparator #1 can
1813 * implement single-address data value watchpoints ... so we
1814 * won't need to check it later, when asked to set one up.
1815 */
1816 }
1817
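/* Media and FP Feature Registers; the reset values below identify the
 * single-precision FPv4-SP extension fitted to the Cortex-M4F. */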
1818 #define MVFR0 0xe000ef40
1819 #define MVFR1 0xe000ef44
1820
1821 #define MVFR0_DEFAULT_M4 0x10110021
1822 #define MVFR1_DEFAULT_M4 0x11000011
1823
1824 int cortex_m3_examine(struct target *target)
1825 {
1826 int retval;
1827 uint32_t cpuid, fpcr, mvfr0, mvfr1;
1828 int i;
1829 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
1830 struct adiv5_dap *swjdp = cortex_m3->armv7m.arm.dap;
1831 struct armv7m_common *armv7m = target_to_armv7m(target);
1832
1833 /* stlink shares the examine handler but does not support
1834 * all its calls */
1835 if (!armv7m->stlink) {
1836 retval = ahbap_debugport_init(swjdp);
1837 if (retval != ERROR_OK)
1838 return retval;
1839 }
1840
1841 if (!target_was_examined(target)) {
1842 target_set_examined(target);
1843
1844 /* Read from Device Identification Registers */
1845 retval = target_read_u32(target, CPUID, &cpuid);
1846 if (retval != ERROR_OK)
1847 return retval;
1848
1849 /* Get CPU Type */
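/* CPUID[7:4] is the low digit of PARTNO: 0, 1, 3 or 4 for Cortex-M0/M1/M3/M4 */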
1850 i = (cpuid >> 4) & 0xf;
1851
1852 LOG_DEBUG("Cortex-M%d r%" PRId8 "p%" PRId8 " processor detected",
1853 i, (uint8_t)((cpuid >> 20) & 0xf), (uint8_t)((cpuid >> 0) & 0xf));
1854 LOG_DEBUG("cpuid: 0x%8.8" PRIx32 "", cpuid);
1855
1856 /* test for floating point feature on cortex-m4 */
1857 if (i == 4) {
1858 target_read_u32(target, MVFR0, &mvfr0);
1859 target_read_u32(target, MVFR1, &mvfr1);
1860
1861 if ((mvfr0 == MVFR0_DEFAULT_M4) && (mvfr1 == MVFR1_DEFAULT_M4)) {
1862 LOG_DEBUG("Cortex-M%d floating point feature FPv4_SP found", i);
1863 armv7m->fp_feature = FPv4_SP;
1864 }
1865 } else if (i == 0) {
1866 /* Cortex-M0 implements ARMv6-M, which (among other differences) does not support unaligned memory access */
1867 armv7m->arm.is_armv6m = true;
1868 }
1869
1870 if (i == 4 || i == 3) {
1871 /* Cortex-M3/M4 has a 4096-byte TAR auto-increment range */
1872 armv7m->dap.tar_autoincr_block = (1 << 12);
1873 }
1874
1875 /* NOTE: FPB and DWT are both optional. */
1876
1877 /* Setup FPB */
1878 target_read_u32(target, FP_CTRL, &fpcr);
1879 cortex_m3->auto_bp_type = 1;
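/* FP_CTRL: NUM_CODE is split across bits [14:12] and [7:4]; NUM_LIT sits in bits [11:8] */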
1880 cortex_m3->fp_num_code = ((fpcr >> 8) & 0x70) | ((fpcr >> 4) & 0xF); /* bits [14:12] and [7:4] */
1884 cortex_m3->fp_num_lit = (fpcr >> 8) & 0xF;
1885 cortex_m3->fp_code_available = cortex_m3->fp_num_code;
1886 cortex_m3->fp_comparator_list = calloc(
1887 cortex_m3->fp_num_code + cortex_m3->fp_num_lit,
1888 sizeof(struct cortex_m3_fp_comparator));
1889 cortex_m3->fpb_enabled = fpcr & 1;
1890 for (i = 0; i < cortex_m3->fp_num_code + cortex_m3->fp_num_lit; i++) {
1891 cortex_m3->fp_comparator_list[i].type =
1892 (i < cortex_m3->fp_num_code) ? FPCR_CODE : FPCR_LITERAL;
1893 cortex_m3->fp_comparator_list[i].fpcr_address = FP_COMP0 + 4 * i;
1894
1895 /* make sure we clear any breakpoints enabled on the target */
1896 target_write_u32(target, cortex_m3->fp_comparator_list[i].fpcr_address, 0);
1897 }
1898 LOG_DEBUG("FPB fpcr 0x%" PRIx32 ", numcode %i, numlit %i",
1899 fpcr,
1900 cortex_m3->fp_num_code,
1901 cortex_m3->fp_num_lit);
1902
1903 /* Setup DWT */
1904 cortex_m3_dwt_setup(cortex_m3, target);
1905
1906 /* These hardware breakpoints only work for code in flash! */
1907 LOG_INFO("%s: hardware has %d breakpoints, %d watchpoints",
1908 target_name(target),
1909 cortex_m3->fp_num_code,
1910 cortex_m3->dwt_num_comp);
1911 }
1912
1913 return ERROR_OK;
1914 }
1915
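/* The emulated DCC channel lives in the low halfword of DCB_DCRDR:
 * the low byte is a handshake/ctrl byte (bit 0 set means the target has
 * posted a byte) and the high byte carries the data itself.
 */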
1916 static int cortex_m3_dcc_read(struct adiv5_dap *swjdp, uint8_t *value, uint8_t *ctrl)
1917 {
1918 uint16_t dcrdr;
1919 int retval;
1920
1921 mem_ap_read_buf_u16(swjdp, (uint8_t *)&dcrdr, 1, DCB_DCRDR);
1922 *ctrl = (uint8_t)dcrdr;
1923 *value = (uint8_t)(dcrdr >> 8);
1924
1925 LOG_DEBUG("data 0x%x ctrl 0x%x", *value, *ctrl);
1926
1927 /* write the ack back to the software dcc register
1928 * to signify we have read the data */
1929 if (dcrdr & (1 << 0)) {
1930 dcrdr = 0;
1931 retval = mem_ap_write_buf_u16(swjdp, (uint8_t *)&dcrdr, 1, DCB_DCRDR);
1932 if (retval != ERROR_OK)
1933 return retval;
1934 }
1935
1936 return ERROR_OK;
1937 }
1938
1939 static int cortex_m3_target_request_data(struct target *target,
1940 uint32_t size, uint8_t *buffer)
1941 {
1942 struct armv7m_common *armv7m = target_to_armv7m(target);
1943 struct adiv5_dap *swjdp = armv7m->arm.dap;
1944 uint8_t data;
1945 uint8_t ctrl;
1946 uint32_t i;
1947
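/* 'size' counts 32-bit words; each DCC read below delivers one byte */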
1948 for (i = 0; i < (size * 4); i++) {
1949 cortex_m3_dcc_read(swjdp, &data, &ctrl);
1950 buffer[i] = data;
1951 }
1952
1953 return ERROR_OK;
1954 }
1955
1956 static int cortex_m3_handle_target_request(void *priv)
1957 {
1958 struct target *target = priv;
1959 if (!target_was_examined(target))
1960 return ERROR_OK;
1961 struct armv7m_common *armv7m = target_to_armv7m(target);
1962 struct adiv5_dap *swjdp = armv7m->arm.dap;
1963
1964 if (!target->dbg_msg_enabled)
1965 return ERROR_OK;
1966
1967 if (target->state == TARGET_RUNNING) {
1968 uint8_t data;
1969 uint8_t ctrl;
1970
1971 cortex_m3_dcc_read(swjdp, &data, &ctrl);
1972
1973 /* check if we have data */
1974 if (ctrl & (1 << 0)) {
1975 uint32_t request;
1976
1977 /* we assume target is quick enough */
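/* assemble the 32-bit request word from four DCC bytes, LSB first */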
1978 request = data;
1979 cortex_m3_dcc_read(swjdp, &data, &ctrl);
1980 request |= (data << 8);
1981 cortex_m3_dcc_read(swjdp, &data, &ctrl);
1982 request |= (data << 16);
1983 cortex_m3_dcc_read(swjdp, &data, &ctrl);
1984 request |= (data << 24);
1985 target_request(target, request);
1986 }
1987 }
1988
1989 return ERROR_OK;
1990 }
1991
1992 static int cortex_m3_init_arch_info(struct target *target,
1993 struct cortex_m3_common *cortex_m3, struct jtag_tap *tap)
1994 {
1995 int retval;
1996 struct armv7m_common *armv7m = &cortex_m3->armv7m;
1997
1998 armv7m_init_arch_info(target, armv7m);
1999
2000 /* prepare JTAG information for the new target */
2001 cortex_m3->jtag_info.tap = tap;
2002 cortex_m3->jtag_info.scann_size = 4;
2003
2004 /* the default reset mode is to use SRST if fitted;
2005 * if not, fall back to CORTEX_M3_RESET_VECTRESET */
2006 cortex_m3->soft_reset_config = CORTEX_M3_RESET_VECTRESET;
2007
2008 armv7m->arm.dap = &armv7m->dap;
2009
2010 /* Leave (only) generic DAP stuff for debugport_init(); */
2011 armv7m->dap.jtag_info = &cortex_m3->jtag_info;
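/* number of extra TCK (idle) cycles clocked on each memory access so the AHB-AP transaction can complete */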
2012 armv7m->dap.memaccess_tck = 8;
2013
2014 /* The Cortex-M3/M4 has a 4096-byte TAR auto-increment range,
2015 * but default to a safe 1024 bytes so a Cortex-M0 also works;
2016 * cortex_m3_examine() raises this again if an M3/M4 is detected */
2017 armv7m->dap.tar_autoincr_block = (1 << 10);
2018
2019 /* register arch-specific functions */
2020 armv7m->examine_debug_reason = cortex_m3_examine_debug_reason;
2021
2022 armv7m->post_debug_entry = NULL;
2023
2024 armv7m->pre_restore_context = NULL;
2025
2026 armv7m->load_core_reg_u32 = cortex_m3_load_core_reg_u32;
2027 armv7m->store_core_reg_u32 = cortex_m3_store_core_reg_u32;
2028
2029 target_register_timer_callback(cortex_m3_handle_target_request, 1, 1, target);
2030
2031 retval = arm_jtag_setup_connection(&cortex_m3->jtag_info);
2032 if (retval != ERROR_OK)
2033 return retval;
2034
2035 return ERROR_OK;
2036 }
2037
2038 static int cortex_m3_target_create(struct target *target, Jim_Interp *interp)
2039 {
2040 struct cortex_m3_common *cortex_m3 = calloc(1, sizeof(struct cortex_m3_common));
2041
2042 cortex_m3->common_magic = CORTEX_M3_COMMON_MAGIC;
2043 cortex_m3_init_arch_info(target, cortex_m3, target->tap);
2044
2045 return ERROR_OK;
2046 }
2047
2048 /*--------------------------------------------------------------------------*/
2049
2050 static int cortex_m3_verify_pointer(struct command_context *cmd_ctx,
2051 struct cortex_m3_common *cm3)
2052 {
2053 if (cm3->common_magic != CORTEX_M3_COMMON_MAGIC) {
2054 command_print(cmd_ctx, "target is not a Cortex-M");
2055 return ERROR_TARGET_INVALID;
2056 }
2057 return ERROR_OK;
2058 }
2059
2060 /*
2061 * Only stuff below this line should need to verify that its target
2062 * is a Cortex-M3. Everything else should have indirected through the
2063 * cortexm3_target structure, which is only used with CM3 targets.
2064 */
2065
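/* vector catch names and their DEMCR VC_* bits, used by the vector_catch command below */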
2066 static const struct {
2067 char name[10];
2068 unsigned mask;
2069 } vec_ids[] = {
2070 { "hard_err", VC_HARDERR, },
2071 { "int_err", VC_INTERR, },
2072 { "bus_err", VC_BUSERR, },
2073 { "state_err", VC_STATERR, },
2074 { "chk_err", VC_CHKERR, },
2075 { "nocp_err", VC_NOCPERR, },
2076 { "mm_err", VC_MMERR, },
2077 { "reset", VC_CORERESET, },
2078 };
2079
2080 COMMAND_HANDLER(handle_cortex_m3_vector_catch_command)
2081 {
2082 struct target *target = get_current_target(CMD_CTX);
2083 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
2084 struct armv7m_common *armv7m = &cortex_m3->armv7m;
2085 struct adiv5_dap *swjdp = armv7m->arm.dap;
2086 uint32_t demcr = 0;
2087 int retval;
2088
2089 retval = cortex_m3_verify_pointer(CMD_CTX, cortex_m3);
2090 if (retval != ERROR_OK)
2091 return retval;
2092
2093 retval = mem_ap_read_atomic_u32(swjdp, DCB_DEMCR, &demcr);
2094 if (retval != ERROR_OK)
2095 return retval;
2096
2097 if (CMD_ARGC > 0) {
2098 unsigned catch = 0;
2099
2100 if (CMD_ARGC == 1) {
2101 if (strcmp(CMD_ARGV[0], "all") == 0) {
2102 catch = VC_HARDERR | VC_INTERR | VC_BUSERR
2103 | VC_STATERR | VC_CHKERR | VC_NOCPERR
2104 | VC_MMERR | VC_CORERESET;
2105 goto write;
2106 } else if (strcmp(CMD_ARGV[0], "none") == 0)
2107 goto write;
2108 }
2109 while (CMD_ARGC-- > 0) {
2110 unsigned i;
2111 for (i = 0; i < ARRAY_SIZE(vec_ids); i++) {
2112 if (strcmp(CMD_ARGV[CMD_ARGC], vec_ids[i].name) != 0)
2113 continue;
2114 catch |= vec_ids[i].mask;
2115 break;
2116 }
2117 if (i == ARRAY_SIZE(vec_ids)) {
2118 LOG_ERROR("No CM3 vector '%s'", CMD_ARGV[CMD_ARGC]);
2119 return ERROR_COMMAND_SYNTAX_ERROR;
2120 }
2121 }
2122 write:
2123 /* For now, armv7m->demcr only stores vector catch flags. */
2124 armv7m->demcr = catch;
2125
2126 demcr &= ~0xffff;
2127 demcr |= catch;
2128
2129 /* write the new value, then read DEMCR back so the report below shows what the hardware actually accepted */
2130 retval = mem_ap_write_u32(swjdp, DCB_DEMCR, demcr);
2131 if (retval != ERROR_OK)
2132 return retval;
2133 retval = mem_ap_read_atomic_u32(swjdp, DCB_DEMCR, &demcr);
2134 if (retval != ERROR_OK)
2135 return retval;
2136
2137 /* FIXME be sure to clear DEMCR on clean server shutdown.
2138 * Otherwise the vector catch hardware could fire when there's
2139 * no debugger hooked up, causing much confusion...
2140 */
2141 }
2142
2143 for (unsigned i = 0; i < ARRAY_SIZE(vec_ids); i++) {
2144 command_print(CMD_CTX, "%9s: %s", vec_ids[i].name,
2145 (demcr & vec_ids[i].mask) ? "catch" : "ignore");
2146 }
2147
2148 return ERROR_OK;
2149 }
2150
2151 COMMAND_HANDLER(handle_cortex_m3_mask_interrupts_command)
2152 {
2153 struct target *target = get_current_target(CMD_CTX);
2154 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
2155 int retval;
2156
2157 static const Jim_Nvp nvp_maskisr_modes[] = {
2158 { .name = "auto", .value = CORTEX_M3_ISRMASK_AUTO },
2159 { .name = "off", .value = CORTEX_M3_ISRMASK_OFF },
2160 { .name = "on", .value = CORTEX_M3_ISRMASK_ON },
2161 { .name = NULL, .value = -1 },
2162 };
2163 const Jim_Nvp *n;
2164
2166 retval = cortex_m3_verify_pointer(CMD_CTX, cortex_m3);
2167 if (retval != ERROR_OK)
2168 return retval;
2169
2170 if (target->state != TARGET_HALTED) {
2171 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
2172 return ERROR_OK;
2173 }
2174
2175 if (CMD_ARGC > 0) {
2176 n = Jim_Nvp_name2value_simple(nvp_maskisr_modes, CMD_ARGV[0]);
2177 if (n->name == NULL)
2178 return ERROR_COMMAND_SYNTAX_ERROR;
2179 cortex_m3->isrmasking_mode = n->value;
2180
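/* DHCSR.C_MASKINTS masks PendSV, SysTick and external interrupts while the core is halted or stepping */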
2182 if (cortex_m3->isrmasking_mode == CORTEX_M3_ISRMASK_ON)
2183 cortex_m3_write_debug_halt_mask(target, C_HALT | C_MASKINTS, 0);
2184 else
2185 cortex_m3_write_debug_halt_mask(target, C_HALT, C_MASKINTS);
2186 }
2187
2188 n = Jim_Nvp_value2name_simple(nvp_maskisr_modes, cortex_m3->isrmasking_mode);
2189 command_print(CMD_CTX, "cortex_m3 interrupt mask %s", n->name);
2190
2191 return ERROR_OK;
2192 }
2193
2194 COMMAND_HANDLER(handle_cortex_m3_reset_config_command)
2195 {
2196 struct target *target = get_current_target(CMD_CTX);
2197 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
2198 int retval;
2199 char *reset_config;
2200
2201 retval = cortex_m3_verify_pointer(CMD_CTX, cortex_m3);
2202 if (retval != ERROR_OK)
2203 return retval;
2204
2205 if (CMD_ARGC > 0) {
2206 if (strcmp(*CMD_ARGV, "sysresetreq") == 0)
2207 cortex_m3->soft_reset_config = CORTEX_M3_RESET_SYSRESETREQ;
2208 else if (strcmp(*CMD_ARGV, "vectreset") == 0)
2209 cortex_m3->soft_reset_config = CORTEX_M3_RESET_VECTRESET;
2210 }
2211
2212 switch (cortex_m3->soft_reset_config) {
2213 case CORTEX_M3_RESET_SYSRESETREQ:
2214 reset_config = "sysresetreq";
2215 break;
2216
2217 case CORTEX_M3_RESET_VECTRESET:
2218 reset_config = "vectreset";
2219 break;
2220
2221 default:
2222 reset_config = "unknown";
2223 break;
2224 }
2225
2226 command_print(CMD_CTX, "cortex_m3 reset_config %s", reset_config);
2227
2228 return ERROR_OK;
2229 }
2230
2231 static const struct command_registration cortex_m3_exec_command_handlers[] = {
2232 {
2233 .name = "maskisr",
2234 .handler = handle_cortex_m3_mask_interrupts_command,
2235 .mode = COMMAND_EXEC,
2236 .help = "mask cortex_m3 interrupts",
2237 .usage = "['auto'|'on'|'off']",
2238 },
2239 {
2240 .name = "vector_catch",
2241 .handler = handle_cortex_m3_vector_catch_command,
2242 .mode = COMMAND_EXEC,
2243 .help = "configure hardware vectors to trigger debug entry",
2244 .usage = "['all'|'none'|('bus_err'|'chk_err'|...)*]",
2245 },
2246 {
2247 .name = "reset_config",
2248 .handler = handle_cortex_m3_reset_config_command,
2249 .mode = COMMAND_ANY,
2250 .help = "configure software reset handling",
2251 .usage = "['srst'|'sysresetreq'|'vectreset']",
2252 },
2253 COMMAND_REGISTRATION_DONE
2254 };
2255 static const struct command_registration cortex_m3_command_handlers[] = {
2256 {
2257 .chain = armv7m_command_handlers,
2258 },
2259 {
2260 .name = "cortex_m",
2261 .mode = COMMAND_EXEC,
2262 .help = "Cortex-M command group",
2263 .usage = "",
2264 .chain = cortex_m3_exec_command_handlers,
2265 },
2266 COMMAND_REGISTRATION_DONE
2267 };
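/* Example usage from the OpenOCD command prompt (names as registered above):
 *   cortex_m maskisr on
 *   cortex_m vector_catch hard_err reset
 *   cortex_m reset_config sysresetreq
 */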
2268
2269 struct target_type cortexm3_target = {
2270 .name = "cortex_m",
2271 .deprecated_name = "cortex_m3",
2272
2273 .poll = cortex_m3_poll,
2274 .arch_state = armv7m_arch_state,
2275
2276 .target_request_data = cortex_m3_target_request_data,
2277
2278 .halt = cortex_m3_halt,
2279 .resume = cortex_m3_resume,
2280 .step = cortex_m3_step,
2281
2282 .assert_reset = cortex_m3_assert_reset,
2283 .deassert_reset = cortex_m3_deassert_reset,
2284 .soft_reset_halt = cortex_m3_soft_reset_halt,
2285
2286 .get_gdb_reg_list = armv7m_get_gdb_reg_list,
2287
2288 .read_memory = cortex_m3_read_memory,
2289 .write_memory = cortex_m3_write_memory,
2290 .checksum_memory = armv7m_checksum_memory,
2291 .blank_check_memory = armv7m_blank_check_memory,
2292
2293 .run_algorithm = armv7m_run_algorithm,
2294 .start_algorithm = armv7m_start_algorithm,
2295 .wait_algorithm = armv7m_wait_algorithm,
2296
2297 .add_breakpoint = cortex_m3_add_breakpoint,
2298 .remove_breakpoint = cortex_m3_remove_breakpoint,
2299 .add_watchpoint = cortex_m3_add_watchpoint,
2300 .remove_watchpoint = cortex_m3_remove_watchpoint,
2301
2302 .commands = cortex_m3_command_handlers,
2303 .target_create = cortex_m3_target_create,
2304 .init_target = cortex_m3_init_target,
2305 .examine = cortex_m3_examine,
2306 };
