armv7m: use generic arm::core_mode
openocd.git: src/target/cortex_m.c
1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2006 by Magnus Lundin *
6 * lundin@mlu.mine.nu *
7 * *
8 * Copyright (C) 2008 by Spencer Oliver *
9 * spen@spen-soft.co.uk *
10 * *
11 * This program is free software; you can redistribute it and/or modify *
12 * it under the terms of the GNU General Public License as published by *
13 * the Free Software Foundation; either version 2 of the License, or *
14 * (at your option) any later version. *
15 * *
16 * This program is distributed in the hope that it will be useful, *
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
19 * GNU General Public License for more details. *
20 * *
21 * You should have received a copy of the GNU General Public License *
22 * along with this program; if not, write to the *
23 * Free Software Foundation, Inc., *
24 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
25 * *
26 * *
27 * Cortex-M3(tm) TRM, ARM DDI 0337E (r1p1) and 0337G (r2p0) *
28 * *
29 ***************************************************************************/
30 #ifdef HAVE_CONFIG_H
31 #include "config.h"
32 #endif
33
34 #include "jtag/interface.h"
35 #include "breakpoints.h"
36 #include "cortex_m.h"
37 #include "target_request.h"
38 #include "target_type.h"
39 #include "arm_disassembler.h"
40 #include "register.h"
41 #include "arm_opcodes.h"
42 #include "arm_semihosting.h"
43 #include <helper/time_support.h>
44
45 /* NOTE: most of this should work fine for the Cortex-M1 and
46 * Cortex-M0 cores too, although they're ARMv6-M not ARMv7-M.
47 * Some differences: M0/M1 doesn't have FBP remapping or the
48 * DWT tracing/profiling support. (So the cycle counter will
49 * not be usable; the other stuff isn't currently used here.)
50 *
51 * Although there are some workarounds for errata seen only in r0p0
52 * silicon, such old parts are hard to find and thus not much tested
53 * any longer.
54 */
55
56 /**
57 * Returns the type of breakpoint required by the address location
58 */
59 #define BKPT_TYPE_BY_ADDR(addr) ((addr) < 0x20000000 ? BKPT_HARD : BKPT_SOFT)
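/* Addresses below 0x20000000 are in the Cortex-M code region, where the FPB
 * unit can place hardware breakpoints; SRAM and external memory above that
 * boundary get software BKPT instructions instead.
 */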
60
61
62 /* forward declarations */
63 static int cortex_m3_store_core_reg_u32(struct target *target,
64 enum armv7m_regtype type, uint32_t num, uint32_t value);
65
66 static int cortexm3_dap_read_coreregister_u32(struct adiv5_dap *swjdp,
67 uint32_t *value, int regnum)
68 {
69 int retval;
70 uint32_t dcrdr;
71
72 /* because the DCB_DCRDR is used for the emulated dcc channel
73 * we have to save/restore the DCB_DCRDR when used */
74
75 retval = mem_ap_read_u32(swjdp, DCB_DCRDR, &dcrdr);
76 if (retval != ERROR_OK)
77 return retval;
78
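/* The two accesses below go through the MEM-AP banked data registers:
 * TAR is set to the 16-byte aligned base address and BD0..BD3 select the
 * word at offset (addr & 0xC), so the DCRSR write and the DCRDR read can
 * be queued and flushed in a single dap_run() transaction.
 */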
79 /* mem_ap_write_u32(swjdp, DCB_DCRSR, regnum); */
80 retval = dap_setup_accessport(swjdp, CSW_32BIT | CSW_ADDRINC_OFF, DCB_DCRSR & 0xFFFFFFF0);
81 if (retval != ERROR_OK)
82 return retval;
83 retval = dap_queue_ap_write(swjdp, AP_REG_BD0 | (DCB_DCRSR & 0xC), regnum);
84 if (retval != ERROR_OK)
85 return retval;
86
87 /* mem_ap_read_u32(swjdp, DCB_DCRDR, value); */
88 retval = dap_setup_accessport(swjdp, CSW_32BIT | CSW_ADDRINC_OFF, DCB_DCRDR & 0xFFFFFFF0);
89 if (retval != ERROR_OK)
90 return retval;
91 retval = dap_queue_ap_read(swjdp, AP_REG_BD0 | (DCB_DCRDR & 0xC), value);
92 if (retval != ERROR_OK)
93 return retval;
94
95 retval = dap_run(swjdp);
96 if (retval != ERROR_OK)
97 return retval;
98
99 /* restore DCB_DCRDR - this needs to be in a separate
100 * transaction otherwise the emulated DCC channel breaks */
101 if (retval == ERROR_OK)
102 retval = mem_ap_write_atomic_u32(swjdp, DCB_DCRDR, dcrdr);
103
104 return retval;
105 }
106
107 static int cortexm3_dap_write_coreregister_u32(struct adiv5_dap *swjdp,
108 uint32_t value, int regnum)
109 {
110 int retval;
111 uint32_t dcrdr;
112
113 /* because the DCB_DCRDR is used for the emulated dcc channel
114 * we have to save/restore the DCB_DCRDR when used */
115
116 retval = mem_ap_read_u32(swjdp, DCB_DCRDR, &dcrdr);
117 if (retval != ERROR_OK)
118 return retval;
119
120 /* mem_ap_write_u32(swjdp, DCB_DCRDR, core_regs[i]); */
121 retval = dap_setup_accessport(swjdp, CSW_32BIT | CSW_ADDRINC_OFF, DCB_DCRDR & 0xFFFFFFF0);
122 if (retval != ERROR_OK)
123 return retval;
124 retval = dap_queue_ap_write(swjdp, AP_REG_BD0 | (DCB_DCRDR & 0xC), value);
125 if (retval != ERROR_OK)
126 return retval;
127
128 /* mem_ap_write_u32(swjdp, DCB_DCRSR, i | DCRSR_WnR); */
129 retval = dap_setup_accessport(swjdp, CSW_32BIT | CSW_ADDRINC_OFF, DCB_DCRSR & 0xFFFFFFF0);
130 if (retval != ERROR_OK)
131 return retval;
132 retval = dap_queue_ap_write(swjdp, AP_REG_BD0 | (DCB_DCRSR & 0xC), regnum | DCRSR_WnR);
133 if (retval != ERROR_OK)
134 return retval;
135
136 retval = dap_run(swjdp);
137 if (retval != ERROR_OK)
138 return retval;
139
140 /* restore DCB_DCRDR - this needs to be in a separate
141 * transaction otherwise the emulated DCC channel breaks */
142 if (retval == ERROR_OK)
143 retval = mem_ap_write_atomic_u32(swjdp, DCB_DCRDR, dcrdr);
144
145 return retval;
146 }
147
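/* Update the cached DHCSR value and write it back together with the DBGKEY.
 * Typical calls in this file: (C_HALT, 0) to request a halt,
 * (0, C_HALT) to resume, and (C_STEP, C_HALT) to single-step.
 */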
148 static int cortex_m3_write_debug_halt_mask(struct target *target,
149 uint32_t mask_on, uint32_t mask_off)
150 {
151 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
152 struct adiv5_dap *swjdp = cortex_m3->armv7m.arm.dap;
153
154 /* mask off status bits */
155 cortex_m3->dcb_dhcsr &= ~((0xFFFF << 16) | mask_off);
156 /* create new register mask */
157 cortex_m3->dcb_dhcsr |= DBGKEY | C_DEBUGEN | mask_on;
158
159 return mem_ap_write_atomic_u32(swjdp, DCB_DHCSR, cortex_m3->dcb_dhcsr);
160 }
161
162 static int cortex_m3_clear_halt(struct target *target)
163 {
164 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
165 struct adiv5_dap *swjdp = cortex_m3->armv7m.arm.dap;
166 int retval;
167
168 /* clear step if any */
169 cortex_m3_write_debug_halt_mask(target, C_HALT, C_STEP);
170
171 /* Read Debug Fault Status Register */
172 retval = mem_ap_read_atomic_u32(swjdp, NVIC_DFSR, &cortex_m3->nvic_dfsr);
173 if (retval != ERROR_OK)
174 return retval;
175
176 /* Clear Debug Fault Status */
177 retval = mem_ap_write_atomic_u32(swjdp, NVIC_DFSR, cortex_m3->nvic_dfsr);
178 if (retval != ERROR_OK)
179 return retval;
180 LOG_DEBUG(" NVIC_DFSR 0x%" PRIx32 "", cortex_m3->nvic_dfsr);
181
182 return ERROR_OK;
183 }
184
185 static int cortex_m3_single_step_core(struct target *target)
186 {
187 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
188 struct adiv5_dap *swjdp = cortex_m3->armv7m.arm.dap;
189 uint32_t dhcsr_save;
190 int retval;
191
192 /* backup dhcsr reg */
193 dhcsr_save = cortex_m3->dcb_dhcsr;
194
195 /* Mask interrupts before clearing halt, if not done already. This avoids
196 * Erratum 377497 (fixed in r1p0) where setting MASKINTS while clearing
197 * HALT can put the core into an unknown state.
198 */
199 if (!(cortex_m3->dcb_dhcsr & C_MASKINTS)) {
200 retval = mem_ap_write_atomic_u32(swjdp, DCB_DHCSR,
201 DBGKEY | C_MASKINTS | C_HALT | C_DEBUGEN);
202 if (retval != ERROR_OK)
203 return retval;
204 }
205 retval = mem_ap_write_atomic_u32(swjdp, DCB_DHCSR,
206 DBGKEY | C_MASKINTS | C_STEP | C_DEBUGEN);
207 if (retval != ERROR_OK)
208 return retval;
209 LOG_DEBUG(" ");
210
211 /* restore dhcsr reg */
212 cortex_m3->dcb_dhcsr = dhcsr_save;
213 cortex_m3_clear_halt(target);
214
215 return ERROR_OK;
216 }
217
218 static int cortex_m3_endreset_event(struct target *target)
219 {
220 int i;
221 int retval;
222 uint32_t dcb_demcr;
223 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
224 struct armv7m_common *armv7m = &cortex_m3->armv7m;
225 struct adiv5_dap *swjdp = cortex_m3->armv7m.arm.dap;
226 struct cortex_m3_fp_comparator *fp_list = cortex_m3->fp_comparator_list;
227 struct cortex_m3_dwt_comparator *dwt_list = cortex_m3->dwt_comparator_list;
228
229 /* REVISIT The four debug monitor bits are currently ignored... */
230 retval = mem_ap_read_atomic_u32(swjdp, DCB_DEMCR, &dcb_demcr);
231 if (retval != ERROR_OK)
232 return retval;
233 LOG_DEBUG("DCB_DEMCR = 0x%8.8" PRIx32 "", dcb_demcr);
234
235 /* this register is used for emulated dcc channel */
236 retval = mem_ap_write_u32(swjdp, DCB_DCRDR, 0);
237 if (retval != ERROR_OK)
238 return retval;
239
240 /* Enable debug requests */
241 retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &cortex_m3->dcb_dhcsr);
242 if (retval != ERROR_OK)
243 return retval;
244 if (!(cortex_m3->dcb_dhcsr & C_DEBUGEN)) {
245 retval = mem_ap_write_u32(swjdp, DCB_DHCSR, DBGKEY | C_DEBUGEN);
246 if (retval != ERROR_OK)
247 return retval;
248 }
249
250 /* clear any interrupt masking */
251 cortex_m3_write_debug_halt_mask(target, 0, C_MASKINTS);
252
253 /* Enable features controlled by ITM and DWT blocks, and catch only
254 * the vectors we were told to pay attention to.
255 *
256 * Target firmware is responsible for all fault handling policy
257 * choices *EXCEPT* explicitly scripted overrides like "vector_catch"
258 * or manual updates to the NVIC SHCSR and CCR registers.
259 */
260 retval = mem_ap_write_u32(swjdp, DCB_DEMCR, TRCENA | armv7m->demcr);
261 if (retval != ERROR_OK)
262 return retval;
263
264 /* Paranoia: evidently some (early?) chips don't preserve all the
265 * debug state (including FPB, DWT, etc) across reset...
266 */
267
268 /* Enable FPB */
269 retval = target_write_u32(target, FP_CTRL, 3);
270 if (retval != ERROR_OK)
271 return retval;
272
273 cortex_m3->fpb_enabled = 1;
274
275 /* Restore FPB registers */
276 for (i = 0; i < cortex_m3->fp_num_code + cortex_m3->fp_num_lit; i++) {
277 retval = target_write_u32(target, fp_list[i].fpcr_address, fp_list[i].fpcr_value);
278 if (retval != ERROR_OK)
279 return retval;
280 }
281
282 /* Restore DWT registers */
283 for (i = 0; i < cortex_m3->dwt_num_comp; i++) {
284 retval = target_write_u32(target, dwt_list[i].dwt_comparator_address + 0,
285 dwt_list[i].comp);
286 if (retval != ERROR_OK)
287 return retval;
288 retval = target_write_u32(target, dwt_list[i].dwt_comparator_address + 4,
289 dwt_list[i].mask);
290 if (retval != ERROR_OK)
291 return retval;
292 retval = target_write_u32(target, dwt_list[i].dwt_comparator_address + 8,
293 dwt_list[i].function);
294 if (retval != ERROR_OK)
295 return retval;
296 }
297 retval = dap_run(swjdp);
298 if (retval != ERROR_OK)
299 return retval;
300
301 register_cache_invalidate(cortex_m3->armv7m.core_cache);
302
303 /* make sure we have latest dhcsr flags */
304 retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &cortex_m3->dcb_dhcsr);
305
306 return retval;
307 }
308
309 static int cortex_m3_examine_debug_reason(struct target *target)
310 {
311 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
312
313 /* THIS IS NOT GOOD, TODO - better logic for detection of debug state reason
314 * only check the debug reason if we don't know it already */
315
316 if ((target->debug_reason != DBG_REASON_DBGRQ)
317 && (target->debug_reason != DBG_REASON_SINGLESTEP)) {
318 if (cortex_m3->nvic_dfsr & DFSR_BKPT) {
319 target->debug_reason = DBG_REASON_BREAKPOINT;
320 if (cortex_m3->nvic_dfsr & DFSR_DWTTRAP)
321 target->debug_reason = DBG_REASON_WPTANDBKPT;
322 } else if (cortex_m3->nvic_dfsr & DFSR_DWTTRAP)
323 target->debug_reason = DBG_REASON_WATCHPOINT;
324 else if (cortex_m3->nvic_dfsr & DFSR_VCATCH)
325 target->debug_reason = DBG_REASON_BREAKPOINT;
326 else /* EXTERNAL, HALTED */
327 target->debug_reason = DBG_REASON_UNDEFINED;
328 }
329
330 return ERROR_OK;
331 }
332
333 static int cortex_m3_examine_exception_reason(struct target *target)
334 {
335 uint32_t shcsr = 0, except_sr = 0, cfsr = -1, except_ar = -1;
336 struct armv7m_common *armv7m = target_to_armv7m(target);
337 struct adiv5_dap *swjdp = armv7m->arm.dap;
338 int retval;
339
340 retval = mem_ap_read_u32(swjdp, NVIC_SHCSR, &shcsr);
341 if (retval != ERROR_OK)
342 return retval;
343 switch (armv7m->exception_number) {
344 case 2: /* NMI */
345 break;
346 case 3: /* Hard Fault */
347 retval = mem_ap_read_atomic_u32(swjdp, NVIC_HFSR, &except_sr);
348 if (retval != ERROR_OK)
349 return retval;
350 if (except_sr & 0x40000000) {
351 retval = mem_ap_read_u32(swjdp, NVIC_CFSR, &cfsr);
352 if (retval != ERROR_OK)
353 return retval;
354 }
355 break;
356 case 4: /* Memory Management */
357 retval = mem_ap_read_u32(swjdp, NVIC_CFSR, &except_sr);
358 if (retval != ERROR_OK)
359 return retval;
360 retval = mem_ap_read_u32(swjdp, NVIC_MMFAR, &except_ar);
361 if (retval != ERROR_OK)
362 return retval;
363 break;
364 case 5: /* Bus Fault */
365 retval = mem_ap_read_u32(swjdp, NVIC_CFSR, &except_sr);
366 if (retval != ERROR_OK)
367 return retval;
368 retval = mem_ap_read_u32(swjdp, NVIC_BFAR, &except_ar);
369 if (retval != ERROR_OK)
370 return retval;
371 break;
372 case 6: /* Usage Fault */
373 retval = mem_ap_read_u32(swjdp, NVIC_CFSR, &except_sr);
374 if (retval != ERROR_OK)
375 return retval;
376 break;
377 case 11: /* SVCall */
378 break;
379 case 12: /* Debug Monitor */
380 retval = mem_ap_read_u32(swjdp, NVIC_DFSR, &except_sr);
381 if (retval != ERROR_OK)
382 return retval;
383 break;
384 case 14: /* PendSV */
385 break;
386 case 15: /* SysTick */
387 break;
388 default:
389 except_sr = 0;
390 break;
391 }
392 retval = dap_run(swjdp);
393 if (retval == ERROR_OK)
394 LOG_DEBUG("%s SHCSR 0x%" PRIx32 ", SR 0x%" PRIx32
395 ", CFSR 0x%" PRIx32 ", AR 0x%" PRIx32,
396 armv7m_exception_string(armv7m->exception_number),
397 shcsr, except_sr, cfsr, except_ar);
398 return retval;
399 }
400
401 static int cortex_m3_debug_entry(struct target *target)
402 {
403 int i;
404 uint32_t xPSR;
405 int retval;
406 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
407 struct armv7m_common *armv7m = &cortex_m3->armv7m;
408 struct arm *arm = &armv7m->arm;
409 struct adiv5_dap *swjdp = armv7m->arm.dap;
410 struct reg *r;
411
412 LOG_DEBUG(" ");
413
414 cortex_m3_clear_halt(target);
415 retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &cortex_m3->dcb_dhcsr);
416 if (retval != ERROR_OK)
417 return retval;
418
419 retval = armv7m->examine_debug_reason(target);
420 if (retval != ERROR_OK)
421 return retval;
422
423 /* Examine target state and mode
424 * First load registers accessible through the core debug port */
425 int num_regs = armv7m->core_cache->num_regs;
426
427 for (i = 0; i < num_regs; i++) {
428 if (!armv7m->core_cache->reg_list[i].valid)
429 armv7m->read_core_reg(target, i);
430 }
431
432 r = armv7m->core_cache->reg_list + ARMV7M_xPSR;
433 xPSR = buf_get_u32(r->value, 0, 32);
434
435 #ifdef ARMV7_GDB_HACKS
436 /* FIXME this breaks on scan chains with more than one Cortex-M3.
437 * Instead, each CM3 should have its own dummy value...
438 */
439 /* copy real xpsr reg for gdb, setting thumb bit */
440 buf_set_u32(armv7m_gdb_dummy_cpsr_value, 0, 32, xPSR);
441 buf_set_u32(armv7m_gdb_dummy_cpsr_value, 5, 1, 1);
442 armv7m_gdb_dummy_cpsr_reg.valid = r->valid;
443 armv7m_gdb_dummy_cpsr_reg.dirty = r->dirty;
444 #endif
445
446 /* For IT instructions xPSR must be reloaded on resume and cleared on debug exec */
447 if (xPSR & 0xf00) {
448 r->dirty = r->valid;
449 cortex_m3_store_core_reg_u32(target, ARMV7M_REGISTER_CORE_GP, 16, xPSR & ~0xff);
450 }
451
452 /* Are we in an exception handler? */
453 if (xPSR & 0x1FF) {
454 armv7m->exception_number = (xPSR & 0x1FF);
455
456 arm->core_mode = ARM_MODE_HANDLER;
457 arm->map = armv7m_msp_reg_map;
458 } else {
459 unsigned control = buf_get_u32(arm->core_cache
460 ->reg_list[ARMV7M_CONTROL].value, 0, 2);
461
462 /* is this thread privileged? */
463 arm->core_mode = control & 1
464 ? ARM_MODE_USER_THREAD
465 : ARM_MODE_THREAD;
466
467 /* which stack is it using? */
468 if (control & 2)
469 arm->map = armv7m_psp_reg_map;
470 else
471 arm->map = armv7m_msp_reg_map;
472
473 armv7m->exception_number = 0;
474 }
475
476 if (armv7m->exception_number)
477 cortex_m3_examine_exception_reason(target);
478
479 LOG_DEBUG("entered debug state in core mode: %s at PC 0x%" PRIx32 ", target->state: %s",
480 arm_mode_name(arm->core_mode),
481 *(uint32_t *)(arm->pc->value),
482 target_state_name(target));
483
484 if (armv7m->post_debug_entry) {
485 retval = armv7m->post_debug_entry(target);
486 if (retval != ERROR_OK)
487 return retval;
488 }
489
490 return ERROR_OK;
491 }
492
493 static int cortex_m3_poll(struct target *target)
494 {
495 int detected_failure = ERROR_OK;
496 int retval = ERROR_OK;
497 enum target_state prev_target_state = target->state;
498 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
499 struct adiv5_dap *swjdp = cortex_m3->armv7m.arm.dap;
500
501 /* Read from Debug Halting Control and Status Register */
502 retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &cortex_m3->dcb_dhcsr);
503 if (retval != ERROR_OK) {
504 target->state = TARGET_UNKNOWN;
505 return retval;
506 }
507
508 /* Recover from lockup. See ARMv7-M architecture spec,
509 * section B1.5.15 "Unrecoverable exception cases".
510 */
511 if (cortex_m3->dcb_dhcsr & S_LOCKUP) {
512 LOG_ERROR("%s -- clearing lockup after double fault",
513 target_name(target));
514 cortex_m3_write_debug_halt_mask(target, C_HALT, 0);
515 target->debug_reason = DBG_REASON_DBGRQ;
516
517 /* We still have to execute the rest of this function (the "finally"
518 * equivalent), but the failure is still reported when we return.
519 */
520 detected_failure = ERROR_FAIL;
521
522 /* refresh status bits */
523 retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &cortex_m3->dcb_dhcsr);
524 if (retval != ERROR_OK)
525 return retval;
526 }
527
528 if (cortex_m3->dcb_dhcsr & S_RESET_ST) {
529 /* check if still in reset */
530 retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &cortex_m3->dcb_dhcsr);
531 if (retval != ERROR_OK)
532 return retval;
533
534 if (cortex_m3->dcb_dhcsr & S_RESET_ST) {
535 target->state = TARGET_RESET;
536 return ERROR_OK;
537 }
538 }
539
540 if (target->state == TARGET_RESET) {
541 /* Cannot switch context while running so endreset is
542 * called with target->state == TARGET_RESET
543 */
544 LOG_DEBUG("Exit from reset with dcb_dhcsr 0x%" PRIx32,
545 cortex_m3->dcb_dhcsr);
546 cortex_m3_endreset_event(target);
547 target->state = TARGET_RUNNING;
548 prev_target_state = TARGET_RUNNING;
549 }
550
551 if (cortex_m3->dcb_dhcsr & S_HALT) {
552 target->state = TARGET_HALTED;
553
554 if ((prev_target_state == TARGET_RUNNING) || (prev_target_state == TARGET_RESET)) {
555 retval = cortex_m3_debug_entry(target);
556 if (retval != ERROR_OK)
557 return retval;
558
559 if (arm_semihosting(target, &retval) != 0)
560 return retval;
561
562 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
563 }
564 if (prev_target_state == TARGET_DEBUG_RUNNING) {
565 LOG_DEBUG(" ");
566 retval = cortex_m3_debug_entry(target);
567 if (retval != ERROR_OK)
568 return retval;
569
570 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
571 }
572 }
573
574 /* REVISIT when S_SLEEP is set, it's in a Sleep or DeepSleep state.
575 * How best to model low power modes?
576 */
577
578 if (target->state == TARGET_UNKNOWN) {
579 /* check if processor is retiring instructions */
580 if (cortex_m3->dcb_dhcsr & S_RETIRE_ST) {
581 target->state = TARGET_RUNNING;
582 retval = ERROR_OK;
583 }
584 }
585
586 /* Did we detect a failure condition that we cleared? */
587 if (detected_failure != ERROR_OK)
588 retval = detected_failure;
589 return retval;
590 }
591
592 static int cortex_m3_halt(struct target *target)
593 {
594 LOG_DEBUG("target->state: %s",
595 target_state_name(target));
596
597 if (target->state == TARGET_HALTED) {
598 LOG_DEBUG("target was already halted");
599 return ERROR_OK;
600 }
601
602 if (target->state == TARGET_UNKNOWN)
603 LOG_WARNING("target was in unknown state when halt was requested");
604
605 if (target->state == TARGET_RESET) {
606 if ((jtag_get_reset_config() & RESET_SRST_PULLS_TRST) && jtag_get_srst()) {
607 LOG_ERROR("can't request a halt while in reset if nSRST pulls nTRST");
608 return ERROR_TARGET_FAILURE;
609 } else {
610 /* we came here in a reset_halt or reset_init sequence
611 * debug entry was already prepared in cortex_m3_assert_reset()
612 */
613 target->debug_reason = DBG_REASON_DBGRQ;
614
615 return ERROR_OK;
616 }
617 }
618
619 /* Write to Debug Halting Control and Status Register */
620 cortex_m3_write_debug_halt_mask(target, C_HALT, 0);
621
622 target->debug_reason = DBG_REASON_DBGRQ;
623
624 return ERROR_OK;
625 }
626
627 static int cortex_m3_soft_reset_halt(struct target *target)
628 {
629 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
630 struct adiv5_dap *swjdp = cortex_m3->armv7m.arm.dap;
631 uint32_t dcb_dhcsr = 0;
632 int retval, timeout = 0;
633
634 /* Enter debug state on reset; restore DEMCR in endreset_event() */
635 retval = mem_ap_write_u32(swjdp, DCB_DEMCR,
636 TRCENA | VC_HARDERR | VC_BUSERR | VC_CORERESET);
637 if (retval != ERROR_OK)
638 return retval;
639
640 /* Request a core-only reset */
641 retval = mem_ap_write_atomic_u32(swjdp, NVIC_AIRCR,
642 AIRCR_VECTKEY | AIRCR_VECTRESET);
643 if (retval != ERROR_OK)
644 return retval;
645 target->state = TARGET_RESET;
646
647 /* registers are now invalid */
648 register_cache_invalidate(cortex_m3->armv7m.core_cache);
649
650 while (timeout < 100) {
651 retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &dcb_dhcsr);
652 if (retval == ERROR_OK) {
653 retval = mem_ap_read_atomic_u32(swjdp, NVIC_DFSR,
654 &cortex_m3->nvic_dfsr);
655 if (retval != ERROR_OK)
656 return retval;
657 if ((dcb_dhcsr & S_HALT)
658 && (cortex_m3->nvic_dfsr & DFSR_VCATCH)) {
659 LOG_DEBUG("system reset-halted, DHCSR 0x%08x, "
660 "DFSR 0x%08x",
661 (unsigned) dcb_dhcsr,
662 (unsigned) cortex_m3->nvic_dfsr);
663 cortex_m3_poll(target);
664 /* FIXME restore user's vector catch config */
665 return ERROR_OK;
666 } else
667 LOG_DEBUG("waiting for system reset-halt, "
668 "DHCSR 0x%08x, %d ms",
669 (unsigned) dcb_dhcsr, timeout);
670 }
671 timeout++;
672 alive_sleep(1);
673 }
674
675 return ERROR_OK;
676 }
677
678 void cortex_m3_enable_breakpoints(struct target *target)
679 {
680 struct breakpoint *breakpoint = target->breakpoints;
681
682 /* set any pending breakpoints */
683 while (breakpoint) {
684 if (!breakpoint->set)
685 cortex_m3_set_breakpoint(target, breakpoint);
686 breakpoint = breakpoint->next;
687 }
688 }
689
690 static int cortex_m3_resume(struct target *target, int current,
691 uint32_t address, int handle_breakpoints, int debug_execution)
692 {
693 struct armv7m_common *armv7m = target_to_armv7m(target);
694 struct breakpoint *breakpoint = NULL;
695 uint32_t resume_pc;
696 struct reg *r;
697
698 if (target->state != TARGET_HALTED) {
699 LOG_WARNING("target not halted");
700 return ERROR_TARGET_NOT_HALTED;
701 }
702
703 if (!debug_execution) {
704 target_free_all_working_areas(target);
705 cortex_m3_enable_breakpoints(target);
706 cortex_m3_enable_watchpoints(target);
707 }
708
709 if (debug_execution) {
710 r = armv7m->core_cache->reg_list + ARMV7M_PRIMASK;
711
712 /* Disable interrupts */
713 /* We disable interrupts in the PRIMASK register instead of
714 * masking with C_MASKINTS. This is probably the same issue
715 * as Cortex-M3 Erratum 377493 (fixed in r1p0): C_MASKINTS
716 * in parallel with disabled interrupts can cause local faults
717 * to not be taken.
718 *
719 * REVISIT this clearly breaks non-debug execution, since the
720 * PRIMASK register state isn't saved/restored... workaround
721 * by never resuming app code after debug execution.
722 */
723 buf_set_u32(r->value, 0, 1, 1);
724 r->dirty = true;
725 r->valid = true;
726
727 /* Make sure we are in Thumb mode */
728 r = armv7m->core_cache->reg_list + ARMV7M_xPSR;
729 buf_set_u32(r->value, 24, 1, 1);
730 r->dirty = true;
731 r->valid = true;
732 }
733
734 /* current = 1: continue on current pc, otherwise continue at <address> */
735 r = armv7m->arm.pc;
736 if (!current) {
737 buf_set_u32(r->value, 0, 32, address);
738 r->dirty = true;
739 r->valid = true;
740 }
741
742 /* if we halted last time due to a bkpt instruction
743 * then we have to manually step over it, otherwise
744 * the core will break again */
745
746 if (!breakpoint_find(target, buf_get_u32(r->value, 0, 32))
747 && !debug_execution)
748 armv7m_maybe_skip_bkpt_inst(target, NULL);
749
750 resume_pc = buf_get_u32(r->value, 0, 32);
751
752 armv7m_restore_context(target);
753
754 /* the front-end may request us not to handle breakpoints */
755 if (handle_breakpoints) {
756 /* Single step past breakpoint at current address */
757 breakpoint = breakpoint_find(target, resume_pc);
758 if (breakpoint) {
759 LOG_DEBUG("unset breakpoint at 0x%8.8" PRIx32 " (ID: %d)",
760 breakpoint->address,
761 breakpoint->unique_id);
762 cortex_m3_unset_breakpoint(target, breakpoint);
763 cortex_m3_single_step_core(target);
764 cortex_m3_set_breakpoint(target, breakpoint);
765 }
766 }
767
768 /* Restart core */
769 cortex_m3_write_debug_halt_mask(target, 0, C_HALT);
770
771 target->debug_reason = DBG_REASON_NOTHALTED;
772
773 /* registers are now invalid */
774 register_cache_invalidate(armv7m->core_cache);
775
776 if (!debug_execution) {
777 target->state = TARGET_RUNNING;
778 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
779 LOG_DEBUG("target resumed at 0x%" PRIx32 "", resume_pc);
780 } else {
781 target->state = TARGET_DEBUG_RUNNING;
782 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
783 LOG_DEBUG("target debug resumed at 0x%" PRIx32 "", resume_pc);
784 }
785
786 return ERROR_OK;
787 }
788
789 /* int irqstepcount = 0; */
790 static int cortex_m3_step(struct target *target, int current,
791 uint32_t address, int handle_breakpoints)
792 {
793 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
794 struct armv7m_common *armv7m = &cortex_m3->armv7m;
795 struct adiv5_dap *swjdp = armv7m->arm.dap;
796 struct breakpoint *breakpoint = NULL;
797 struct reg *pc = armv7m->arm.pc;
798 bool bkpt_inst_found = false;
799 int retval;
800 bool isr_timed_out = false;
801
802 if (target->state != TARGET_HALTED) {
803 LOG_WARNING("target not halted");
804 return ERROR_TARGET_NOT_HALTED;
805 }
806
807 /* current = 1: continue on current pc, otherwise continue at <address> */
808 if (!current)
809 buf_set_u32(pc->value, 0, 32, address);
810
811 uint32_t pc_value = buf_get_u32(pc->value, 0, 32);
812
813 /* the front-end may request us not to handle breakpoints */
814 if (handle_breakpoints) {
815 breakpoint = breakpoint_find(target, pc_value);
816 if (breakpoint)
817 cortex_m3_unset_breakpoint(target, breakpoint);
818 }
819
820 armv7m_maybe_skip_bkpt_inst(target, &bkpt_inst_found);
821
822 target->debug_reason = DBG_REASON_SINGLESTEP;
823
824 armv7m_restore_context(target);
825
826 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
827
828 /* if no bkpt instruction is found at pc then we can perform
829 * a normal step, otherwise we have to manually step over the bkpt
830 * instruction - as such simulate a step */
831 if (bkpt_inst_found == false) {
832 /* Automatic ISR masking mode off: Just step over the next instruction */
833 if ((cortex_m3->isrmasking_mode != CORTEX_M3_ISRMASK_AUTO))
834 cortex_m3_write_debug_halt_mask(target, C_STEP, C_HALT);
835 else {
836 /* Process interrupts during stepping in a way that they don't interfere
837 * with debugging.
838 *
839 * Principle:
840 *
841 * Set a temporary break point at the current pc and let the core run
842 * with interrupts enabled. Pending interrupts get served and we run
843 * into the breakpoint again afterwards. Then we step over the next
844 * instruction with interrupts disabled.
845 *
846 * If the pending interrupts don't complete within time, we leave the
847 * core running. This may happen if the interrupts trigger faster
848 * than the core can process them or the handler doesn't return.
849 *
850 * If no more breakpoints are available we simply do a step with
851 * interrupts enabled.
852 *
853 */
854
855 /* 2012-09-29 ph
856 *
857 * If a break point is already set on the lower half word then a break point on
858 * the upper half word will not break again when the core is restarted. So we
859 * just step over the instruction with interrupts disabled.
860 *
861 * The documentation has no information about this, it was found by observation
862 * on STM32F1 and STM32F2. Proper explanation welcome. STM32F0 doesn't seem to
863 * suffer from this problem.
864 *
865 * To add some confusion: pc_value has bit 0 always set, while the breakpoint
866 * address has it always cleared. The former is done to indicate thumb mode
867 * to gdb.
868 *
869 */
870 if ((pc_value & 0x02) && breakpoint_find(target, pc_value & ~0x03)) {
871 LOG_DEBUG("Stepping over next instruction with interrupts disabled");
872 cortex_m3_write_debug_halt_mask(target, C_HALT | C_MASKINTS, 0);
873 cortex_m3_write_debug_halt_mask(target, C_STEP, C_HALT);
874 /* Re-enable interrupts */
875 cortex_m3_write_debug_halt_mask(target, C_HALT, C_MASKINTS);
876 }
877 else {
878
879 /* Set a temporary break point */
880 if (breakpoint)
881 retval = cortex_m3_set_breakpoint(target, breakpoint);
882 else
883 retval = breakpoint_add(target, pc_value, 2, BKPT_TYPE_BY_ADDR(pc_value));
884 bool tmp_bp_set = (retval == ERROR_OK);
885
886 /* No more breakpoints left, just do a step */
887 if (!tmp_bp_set)
888 cortex_m3_write_debug_halt_mask(target, C_STEP, C_HALT);
889 else {
890 /* Start the core */
891 LOG_DEBUG("Starting core to serve pending interrupts");
892 int64_t t_start = timeval_ms();
893 cortex_m3_write_debug_halt_mask(target, 0, C_HALT | C_STEP);
894
895 /* Wait for pending handlers to complete or timeout */
896 do {
897 retval = mem_ap_read_atomic_u32(swjdp,
898 DCB_DHCSR,
899 &cortex_m3->dcb_dhcsr);
900 if (retval != ERROR_OK) {
901 target->state = TARGET_UNKNOWN;
902 return retval;
903 }
904 isr_timed_out = ((timeval_ms() - t_start) > 500);
905 } while (!((cortex_m3->dcb_dhcsr & S_HALT) || isr_timed_out));
906
907 /* only remove breakpoint if we created it */
908 if (breakpoint)
909 cortex_m3_unset_breakpoint(target, breakpoint);
910 else {
911 /* Remove the temporary breakpoint */
912 breakpoint_remove(target, pc_value);
913 }
914
915 if (isr_timed_out) {
916 LOG_DEBUG("Interrupt handlers didn't complete within time, "
917 "leaving target running");
918 } else {
919 /* Step over next instruction with interrupts disabled */
920 cortex_m3_write_debug_halt_mask(target,
921 C_HALT | C_MASKINTS,
922 0);
923 cortex_m3_write_debug_halt_mask(target, C_STEP, C_HALT);
924 /* Re-enable interrupts */
925 cortex_m3_write_debug_halt_mask(target, C_HALT, C_MASKINTS);
926 }
927 }
928 }
929 }
930 }
931
932 retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &cortex_m3->dcb_dhcsr);
933 if (retval != ERROR_OK)
934 return retval;
935
936 /* registers are now invalid */
937 register_cache_invalidate(cortex_m3->armv7m.core_cache);
938
939 if (breakpoint)
940 cortex_m3_set_breakpoint(target, breakpoint);
941
942 if (isr_timed_out) {
943 /* Leave the core running. The user has to stop execution manually. */
944 target->debug_reason = DBG_REASON_NOTHALTED;
945 target->state = TARGET_RUNNING;
946 return ERROR_OK;
947 }
948
949 LOG_DEBUG("target stepped dcb_dhcsr = 0x%" PRIx32
950 " nvic_icsr = 0x%" PRIx32,
951 cortex_m3->dcb_dhcsr, cortex_m3->nvic_icsr);
952
953 retval = cortex_m3_debug_entry(target);
954 if (retval != ERROR_OK)
955 return retval;
956 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
957
958 LOG_DEBUG("target stepped dcb_dhcsr = 0x%" PRIx32
959 " nvic_icsr = 0x%" PRIx32,
960 cortex_m3->dcb_dhcsr, cortex_m3->nvic_icsr);
961
962 return ERROR_OK;
963 }
964
965 static int cortex_m3_assert_reset(struct target *target)
966 {
967 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
968 struct adiv5_dap *swjdp = cortex_m3->armv7m.arm.dap;
969 enum cortex_m3_soft_reset_config reset_config = cortex_m3->soft_reset_config;
970
971 LOG_DEBUG("target->state: %s",
972 target_state_name(target));
973
974 enum reset_types jtag_reset_config = jtag_get_reset_config();
975
976 if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT)) {
977 /* allow scripts to override the reset event */
978
979 target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
980 register_cache_invalidate(cortex_m3->armv7m.core_cache);
981 target->state = TARGET_RESET;
982
983 return ERROR_OK;
984 }
985
986 /* some cores support connecting while srst is asserted
987 * use that mode if it has been configured */
988
989 bool srst_asserted = false;
990
991 if (jtag_reset_config & RESET_SRST_NO_GATING) {
992 adapter_assert_reset();
993 srst_asserted = true;
994 }
995
996 /* Enable debug requests */
997 int retval;
998 retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &cortex_m3->dcb_dhcsr);
999 if (retval != ERROR_OK)
1000 return retval;
1001 if (!(cortex_m3->dcb_dhcsr & C_DEBUGEN)) {
1002 retval = mem_ap_write_u32(swjdp, DCB_DHCSR, DBGKEY | C_DEBUGEN);
1003 if (retval != ERROR_OK)
1004 return retval;
1005 }
1006
1007 /* If the processor is sleeping in a WFI or WFE instruction, the
1008 * C_HALT bit must be asserted to regain control */
1009 if (cortex_m3->dcb_dhcsr & S_SLEEP) {
1010 retval = mem_ap_write_u32(swjdp, DCB_DHCSR, DBGKEY | C_HALT | C_DEBUGEN);
1011 if (retval != ERROR_OK)
1012 return retval;
1013 }
1014
1015 retval = mem_ap_write_u32(swjdp, DCB_DCRDR, 0);
1016 if (retval != ERROR_OK)
1017 return retval;
1018
1019 if (!target->reset_halt) {
1020 /* Set/Clear C_MASKINTS in a separate operation */
1021 if (cortex_m3->dcb_dhcsr & C_MASKINTS) {
1022 retval = mem_ap_write_atomic_u32(swjdp, DCB_DHCSR,
1023 DBGKEY | C_DEBUGEN | C_HALT);
1024 if (retval != ERROR_OK)
1025 return retval;
1026 }
1027
1028 /* clear any debug flags before resuming */
1029 cortex_m3_clear_halt(target);
1030
1031 /* clear C_HALT in dhcsr reg */
1032 cortex_m3_write_debug_halt_mask(target, 0, C_HALT);
1033 } else {
1034 /* Halt in debug on reset; endreset_event() restores DEMCR.
1035 *
1036 * REVISIT catching BUSERR presumably helps to defend against
1037 * bad vector table entries. Should this include MMERR or
1038 * other flags too?
1039 */
1040 retval = mem_ap_write_atomic_u32(swjdp, DCB_DEMCR,
1041 TRCENA | VC_HARDERR | VC_BUSERR | VC_CORERESET);
1042 if (retval != ERROR_OK)
1043 return retval;
1044 }
1045
1046 if (jtag_reset_config & RESET_HAS_SRST) {
1047 /* default to asserting srst */
1048 if (!srst_asserted)
1049 adapter_assert_reset();
1050 } else {
1051 /* Use a standard Cortex-M3 software reset mechanism.
1052 * We default to using VECTRESET as it is supported on all current cores.
1053 * This has the disadvantage of not resetting the peripherals, so a
1054 * reset-init event handler is needed to perform any peripheral resets.
1055 */
1056 retval = mem_ap_write_atomic_u32(swjdp, NVIC_AIRCR,
1057 AIRCR_VECTKEY | ((reset_config == CORTEX_M3_RESET_SYSRESETREQ)
1058 ? AIRCR_SYSRESETREQ : AIRCR_VECTRESET));
1059 if (retval != ERROR_OK)
1060 return retval;
1061
1062 LOG_DEBUG("Using Cortex-M3 %s", (reset_config == CORTEX_M3_RESET_SYSRESETREQ)
1063 ? "SYSRESETREQ" : "VECTRESET");
1064
1065 if (reset_config == CORTEX_M3_RESET_VECTRESET) {
1066 LOG_WARNING("Only resetting the Cortex-M3 core, use a reset-init event "
1067 "handler to reset any peripherals or configure hardware srst support.");
1068 }
1069
1070 {
1071 /* I do not know why this is necessary, but it
1072 * fixes strange effects (step/resume cause NMI
1073 * after reset) on LM3S6918 -- Michael Schwingen
1074 */
1075 uint32_t tmp;
1076 retval = mem_ap_read_atomic_u32(swjdp, NVIC_AIRCR, &tmp);
1077 if (retval != ERROR_OK)
1078 return retval;
1079 }
1080 }
1081
1082 target->state = TARGET_RESET;
1083 jtag_add_sleep(50000);
1084
1085 register_cache_invalidate(cortex_m3->armv7m.core_cache);
1086
1087 if (target->reset_halt) {
1088 retval = target_halt(target);
1089 if (retval != ERROR_OK)
1090 return retval;
1091 }
1092
1093 return ERROR_OK;
1094 }
1095
1096 static int cortex_m3_deassert_reset(struct target *target)
1097 {
1098 LOG_DEBUG("target->state: %s",
1099 target_state_name(target));
1100
1101 /* deassert reset lines */
1102 adapter_deassert_reset();
1103
1104 return ERROR_OK;
1105 }
1106
1107 int cortex_m3_set_breakpoint(struct target *target, struct breakpoint *breakpoint)
1108 {
1109 int retval;
1110 int fp_num = 0;
1111 uint32_t hilo;
1112 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
1113 struct cortex_m3_fp_comparator *comparator_list = cortex_m3->fp_comparator_list;
1114
1115 if (breakpoint->set) {
1116 LOG_WARNING("breakpoint (BPID: %d) already set", breakpoint->unique_id);
1117 return ERROR_OK;
1118 }
1119
1120 if (cortex_m3->auto_bp_type)
1121 breakpoint->type = BKPT_TYPE_BY_ADDR(breakpoint->address);
1122
1123 if (breakpoint->type == BKPT_HARD) {
1124 while ((fp_num < cortex_m3->fp_num_code) && comparator_list[fp_num].used)
1125 fp_num++;
1126 if (fp_num >= cortex_m3->fp_num_code) {
1127 LOG_ERROR("Can not find free FPB Comparator!");
1128 return ERROR_FAIL;
1129 }
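/* Store the comparator number biased by one so that a value of zero
 * still means "not set"; cortex_m3_unset_breakpoint() undoes the bias.
 */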
1130 breakpoint->set = fp_num + 1;
1131 hilo = (breakpoint->address & 0x2) ? FPCR_REPLACE_BKPT_HIGH : FPCR_REPLACE_BKPT_LOW;
1132 comparator_list[fp_num].used = 1;
1133 comparator_list[fp_num].fpcr_value = (breakpoint->address & 0x1FFFFFFC) | hilo | 1;
1134 target_write_u32(target, comparator_list[fp_num].fpcr_address,
1135 comparator_list[fp_num].fpcr_value);
1136 LOG_DEBUG("fpc_num %i fpcr_value 0x%" PRIx32 "",
1137 fp_num,
1138 comparator_list[fp_num].fpcr_value);
1139 if (!cortex_m3->fpb_enabled) {
1140 LOG_DEBUG("FPB wasn't enabled, do it now");
1141 target_write_u32(target, FP_CTRL, 3);
1142 }
1143 } else if (breakpoint->type == BKPT_SOFT) {
1144 uint8_t code[4];
1145
1146 /* NOTE: on ARMv6-M and ARMv7-M, BKPT(0xab) is used for
1147 * semihosting; don't use that. Otherwise the BKPT
1148 * parameter is arbitrary.
1149 */
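/* The Thumb BKPT #imm8 encoding is 0xBE00 | imm8, so this writes a
 * BKPT #0x11 opcode; the immediate is arbitrary apart from avoiding
 * the semihosting value 0xAB noted above.
 */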
1150 buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1151 retval = target_read_memory(target,
1152 breakpoint->address & 0xFFFFFFFE,
1153 breakpoint->length, 1,
1154 breakpoint->orig_instr);
1155 if (retval != ERROR_OK)
1156 return retval;
1157 retval = target_write_memory(target,
1158 breakpoint->address & 0xFFFFFFFE,
1159 breakpoint->length, 1,
1160 code);
1161 if (retval != ERROR_OK)
1162 return retval;
1163 breakpoint->set = true;
1164 }
1165
1166 LOG_DEBUG("BPID: %d, Type: %d, Address: 0x%08" PRIx32 " Length: %d (set=%d)",
1167 breakpoint->unique_id,
1168 (int)(breakpoint->type),
1169 breakpoint->address,
1170 breakpoint->length,
1171 breakpoint->set);
1172
1173 return ERROR_OK;
1174 }
1175
1176 int cortex_m3_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
1177 {
1178 int retval;
1179 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
1180 struct cortex_m3_fp_comparator *comparator_list = cortex_m3->fp_comparator_list;
1181
1182 if (!breakpoint->set) {
1183 LOG_WARNING("breakpoint not set");
1184 return ERROR_OK;
1185 }
1186
1187 LOG_DEBUG("BPID: %d, Type: %d, Address: 0x%08" PRIx32 " Length: %d (set=%d)",
1188 breakpoint->unique_id,
1189 (int)(breakpoint->type),
1190 breakpoint->address,
1191 breakpoint->length,
1192 breakpoint->set);
1193
1194 if (breakpoint->type == BKPT_HARD) {
1195 int fp_num = breakpoint->set - 1;
1196 if ((fp_num < 0) || (fp_num >= cortex_m3->fp_num_code)) {
1197 LOG_DEBUG("Invalid FP Comparator number in breakpoint");
1198 return ERROR_OK;
1199 }
1200 comparator_list[fp_num].used = 0;
1201 comparator_list[fp_num].fpcr_value = 0;
1202 target_write_u32(target, comparator_list[fp_num].fpcr_address,
1203 comparator_list[fp_num].fpcr_value);
1204 } else {
1205 /* restore original instruction (kept in target endianness) */
1206 if (breakpoint->length == 4) {
1207 retval = target_write_memory(target, breakpoint->address & 0xFFFFFFFE, 4, 1,
1208 breakpoint->orig_instr);
1209 if (retval != ERROR_OK)
1210 return retval;
1211 } else {
1212 retval = target_write_memory(target, breakpoint->address & 0xFFFFFFFE, 2, 1,
1213 breakpoint->orig_instr);
1214 if (retval != ERROR_OK)
1215 return retval;
1216 }
1217 }
1218 breakpoint->set = false;
1219
1220 return ERROR_OK;
1221 }
1222
1223 int cortex_m3_add_breakpoint(struct target *target, struct breakpoint *breakpoint)
1224 {
1225 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
1226
1227 if (cortex_m3->auto_bp_type) {
1228 breakpoint->type = BKPT_TYPE_BY_ADDR(breakpoint->address);
1229 #ifdef ARMV7_GDB_HACKS
1230 if (breakpoint->length != 2) {
1231 /* XXX Hack: Replace all breakpoints with length != 2 with
1232 * a hardware breakpoint. */
1233 breakpoint->type = BKPT_HARD;
1234 breakpoint->length = 2;
1235 }
1236 #endif
1237 }
1238
1239 if (breakpoint->type != BKPT_TYPE_BY_ADDR(breakpoint->address)) {
1240 if (breakpoint->type == BKPT_HARD) {
1241 LOG_INFO("flash patch comparator requested outside code memory region");
1242 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1243 }
1244
1245 if (breakpoint->type == BKPT_SOFT) {
1246 LOG_INFO("soft breakpoint requested in code (flash) memory region");
1247 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1248 }
1249 }
1250
1251 if ((breakpoint->type == BKPT_HARD) && (cortex_m3->fp_code_available < 1)) {
1252 LOG_INFO("no flash patch comparator unit available for hardware breakpoint");
1253 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1254 }
1255
1256 if ((breakpoint->length != 2)) {
1257 LOG_INFO("only breakpoints of two bytes length supported");
1258 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1259 }
1260
1261 if (breakpoint->type == BKPT_HARD)
1262 cortex_m3->fp_code_available--;
1263
1264 return cortex_m3_set_breakpoint(target, breakpoint);
1265 }
1266
1267 int cortex_m3_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1268 {
1269 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
1270
1271 /* REVISIT why check? FPB can be updated with core running ... */
1272 if (target->state != TARGET_HALTED) {
1273 LOG_WARNING("target not halted");
1274 return ERROR_TARGET_NOT_HALTED;
1275 }
1276
1277 if (cortex_m3->auto_bp_type)
1278 breakpoint->type = BKPT_TYPE_BY_ADDR(breakpoint->address);
1279
1280 if (breakpoint->set)
1281 cortex_m3_unset_breakpoint(target, breakpoint);
1282
1283 if (breakpoint->type == BKPT_HARD)
1284 cortex_m3->fp_code_available++;
1285
1286 return ERROR_OK;
1287 }
1288
1289 int cortex_m3_set_watchpoint(struct target *target, struct watchpoint *watchpoint)
1290 {
1291 int dwt_num = 0;
1292 uint32_t mask, temp;
1293 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
1294
1295 /* watchpoint params were validated earlier */
1296 mask = 0;
1297 temp = watchpoint->length;
1298 while (temp) {
1299 temp >>= 1;
1300 mask++;
1301 }
1302 mask--;
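/* For the power-of-two lengths accepted by cortex_m3_add_watchpoint(),
 * mask now equals log2(length), i.e. the number of low address bits the
 * DWT comparator ignores (e.g. length 4 -> mask 2).
 */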
1303
1304 /* REVISIT Don't fully trust these "not used" records ... users
1305 * may set up breakpoints by hand, e.g. dual-address data value
1306 * watchpoint using comparator #1; comparator #0 matching cycle
1307 * count; send data trace info through ITM and TPIU; etc
1308 */
1309 struct cortex_m3_dwt_comparator *comparator;
1310
1311 for (comparator = cortex_m3->dwt_comparator_list;
1312 (dwt_num < cortex_m3->dwt_num_comp) && comparator->used;
1313 comparator++, dwt_num++)
1314 continue;
1315 if (dwt_num >= cortex_m3->dwt_num_comp) {
1316 LOG_ERROR("Can not find free DWT Comparator");
1317 return ERROR_FAIL;
1318 }
1319 comparator->used = 1;
1320 watchpoint->set = dwt_num + 1;
1321
1322 comparator->comp = watchpoint->address;
1323 target_write_u32(target, comparator->dwt_comparator_address + 0,
1324 comparator->comp);
1325
1326 comparator->mask = mask;
1327 target_write_u32(target, comparator->dwt_comparator_address + 4,
1328 comparator->mask);
1329
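/* DWT FUNCTION values 5, 6 and 7 request a watchpoint debug event on a
 * data read, a data write, or either kind of access, respectively.
 */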
1330 switch (watchpoint->rw) {
1331 case WPT_READ:
1332 comparator->function = 5;
1333 break;
1334 case WPT_WRITE:
1335 comparator->function = 6;
1336 break;
1337 case WPT_ACCESS:
1338 comparator->function = 7;
1339 break;
1340 }
1341 target_write_u32(target, comparator->dwt_comparator_address + 8,
1342 comparator->function);
1343
1344 LOG_DEBUG("Watchpoint (ID %d) DWT%d 0x%08x 0x%x 0x%05x",
1345 watchpoint->unique_id, dwt_num,
1346 (unsigned) comparator->comp,
1347 (unsigned) comparator->mask,
1348 (unsigned) comparator->function);
1349 return ERROR_OK;
1350 }
1351
1352 int cortex_m3_unset_watchpoint(struct target *target, struct watchpoint *watchpoint)
1353 {
1354 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
1355 struct cortex_m3_dwt_comparator *comparator;
1356 int dwt_num;
1357
1358 if (!watchpoint->set) {
1359 LOG_WARNING("watchpoint (wpid: %d) not set",
1360 watchpoint->unique_id);
1361 return ERROR_OK;
1362 }
1363
1364 dwt_num = watchpoint->set - 1;
1365
1366 LOG_DEBUG("Watchpoint (ID %d) DWT%d address: 0x%08x clear",
1367 watchpoint->unique_id, dwt_num,
1368 (unsigned) watchpoint->address);
1369
1370 if ((dwt_num < 0) || (dwt_num >= cortex_m3->dwt_num_comp)) {
1371 LOG_DEBUG("Invalid DWT Comparator number in watchpoint");
1372 return ERROR_OK;
1373 }
1374
1375 comparator = cortex_m3->dwt_comparator_list + dwt_num;
1376 comparator->used = 0;
1377 comparator->function = 0;
1378 target_write_u32(target, comparator->dwt_comparator_address + 8,
1379 comparator->function);
1380
1381 watchpoint->set = false;
1382
1383 return ERROR_OK;
1384 }
1385
1386 int cortex_m3_add_watchpoint(struct target *target, struct watchpoint *watchpoint)
1387 {
1388 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
1389
1390 if (cortex_m3->dwt_comp_available < 1) {
1391 LOG_DEBUG("no comparators?");
1392 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1393 }
1394
1395 /* hardware doesn't support data value masking */
1396 if (watchpoint->mask != ~(uint32_t)0) {
1397 LOG_DEBUG("watchpoint value masks not supported");
1398 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1399 }
1400
1401 /* hardware allows address masks of up to 32K */
1402 unsigned mask;
1403
1404 for (mask = 0; mask < 16; mask++) {
1405 if ((1u << mask) == watchpoint->length)
1406 break;
1407 }
1408 if (mask == 16) {
1409 LOG_DEBUG("unsupported watchpoint length");
1410 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1411 }
1412 if (watchpoint->address & ((1 << mask) - 1)) {
1413 LOG_DEBUG("watchpoint address is unaligned");
1414 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1415 }
1416
1417 /* Caller doesn't seem to be able to describe watching for data
1418 * values of zero; that flags "no value".
1419 *
1420 * REVISIT This DWT may well be able to watch for specific data
1421 * values. Requires comparator #1 to set DATAVMATCH and match
1422 * the data, and another comparator (DATAVADDR0) matching addr.
1423 */
1424 if (watchpoint->value) {
1425 LOG_DEBUG("data value watchpoint not YET supported");
1426 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1427 }
1428
1429 cortex_m3->dwt_comp_available--;
1430 LOG_DEBUG("dwt_comp_available: %d", cortex_m3->dwt_comp_available);
1431
1432 return ERROR_OK;
1433 }
1434
1435 int cortex_m3_remove_watchpoint(struct target *target, struct watchpoint *watchpoint)
1436 {
1437 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
1438
1439 /* REVISIT why check? DWT can be updated with core running ... */
1440 if (target->state != TARGET_HALTED) {
1441 LOG_WARNING("target not halted");
1442 return ERROR_TARGET_NOT_HALTED;
1443 }
1444
1445 if (watchpoint->set)
1446 cortex_m3_unset_watchpoint(target, watchpoint);
1447
1448 cortex_m3->dwt_comp_available++;
1449 LOG_DEBUG("dwt_comp_available: %d", cortex_m3->dwt_comp_available);
1450
1451 return ERROR_OK;
1452 }
1453
1454 void cortex_m3_enable_watchpoints(struct target *target)
1455 {
1456 struct watchpoint *watchpoint = target->watchpoints;
1457
1458 /* set any pending watchpoints */
1459 while (watchpoint) {
1460 if (!watchpoint->set)
1461 cortex_m3_set_watchpoint(target, watchpoint);
1462 watchpoint = watchpoint->next;
1463 }
1464 }
1465
1466 static int cortex_m3_load_core_reg_u32(struct target *target,
1467 enum armv7m_regtype type, uint32_t num, uint32_t *value)
1468 {
1469 int retval;
1470 struct armv7m_common *armv7m = target_to_armv7m(target);
1471 struct adiv5_dap *swjdp = armv7m->arm.dap;
1472
1473 /* NOTE: we "know" here that the register identifiers used
1474 * in the v7m header match the Cortex-M3 Debug Core Register
1475 * Selector values for R0..R15, xPSR, MSP, and PSP.
1476 */
1477 switch (num) {
1478 case 0 ... 18:
1479 /* read a normal core register */
1480 retval = cortexm3_dap_read_coreregister_u32(swjdp, value, num);
1481
1482 if (retval != ERROR_OK) {
1483 LOG_ERROR("JTAG failure %i", retval);
1484 return ERROR_JTAG_DEVICE_ERROR;
1485 }
1486 LOG_DEBUG("load from core reg %i value 0x%" PRIx32 "", (int)num, *value);
1487 break;
1488
1489 case ARMV7M_PRIMASK:
1490 case ARMV7M_BASEPRI:
1491 case ARMV7M_FAULTMASK:
1492 case ARMV7M_CONTROL:
1493 /* Cortex-M3 packages these four registers as bitfields
1494 * in one Debug Core register. So say r0 and r2 docs;
1495 * it was removed from r1 docs, but still works.
1496 */
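/* Selector 20 (0x14) reads that packed word: PRIMASK in bit 0, BASEPRI in
 * bits [15:8], FAULTMASK in bit 16 and CONTROL in bits [25:24], matching
 * the buf_get_u32() extractions below.
 */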
1497 cortexm3_dap_read_coreregister_u32(swjdp, value, 20);
1498
1499 switch (num) {
1500 case ARMV7M_PRIMASK:
1501 *value = buf_get_u32((uint8_t *)value, 0, 1);
1502 break;
1503
1504 case ARMV7M_BASEPRI:
1505 *value = buf_get_u32((uint8_t *)value, 8, 8);
1506 break;
1507
1508 case ARMV7M_FAULTMASK:
1509 *value = buf_get_u32((uint8_t *)value, 16, 1);
1510 break;
1511
1512 case ARMV7M_CONTROL:
1513 *value = buf_get_u32((uint8_t *)value, 24, 2);
1514 break;
1515 }
1516
1517 LOG_DEBUG("load from special reg %i value 0x%" PRIx32 "", (int)num, *value);
1518 break;
1519
1520 default:
1521 return ERROR_COMMAND_SYNTAX_ERROR;
1522 }
1523
1524 return ERROR_OK;
1525 }
1526
1527 static int cortex_m3_store_core_reg_u32(struct target *target,
1528 enum armv7m_regtype type, uint32_t num, uint32_t value)
1529 {
1530 int retval;
1531 uint32_t reg;
1532 struct armv7m_common *armv7m = target_to_armv7m(target);
1533 struct adiv5_dap *swjdp = armv7m->arm.dap;
1534
1535 #ifdef ARMV7_GDB_HACKS
1536 /* If the LR register is being modified, make sure it will put us
1537 * in "thumb" mode, or an INVSTATE exception will occur. This is a
1538 * hack to deal with the fact that gdb will sometimes "forge"
1539 * return addresses, and doesn't set the LSB correctly (i.e., when
1540 * printing expressions containing function calls, it sets LR = 0.)
1541 * Valid exception return codes have bit 0 set too.
1542 */
1543 if (num == ARMV7M_R14)
1544 value |= 0x01;
1545 #endif
1546
1547 /* NOTE: we "know" here that the register identifiers used
1548 * in the v7m header match the Cortex-M3 Debug Core Register
1549 * Selector values for R0..R15, xPSR, MSP, and PSP.
1550 */
1551 switch (num) {
1552 case 0 ... 18:
1553 retval = cortexm3_dap_write_coreregister_u32(swjdp, value, num);
1554 if (retval != ERROR_OK) {
1555 struct reg *r;
1556
1557 LOG_ERROR("JTAG failure");
1558 r = armv7m->core_cache->reg_list + num;
1559 r->dirty = r->valid;
1560 return ERROR_JTAG_DEVICE_ERROR;
1561 }
1562 LOG_DEBUG("write core reg %i value 0x%" PRIx32 "", (int)num, value);
1563 break;
1564
1565 case ARMV7M_PRIMASK:
1566 case ARMV7M_BASEPRI:
1567 case ARMV7M_FAULTMASK:
1568 case ARMV7M_CONTROL:
1569 /* Cortex-M3 packages these four registers as bitfields
1570 * in one Debug Core register. So say r0 and r2 docs;
1571 * it was removed from r1 docs, but still works.
1572 */
1573 cortexm3_dap_read_coreregister_u32(swjdp, &reg, 20);
1574
1575 switch (num) {
1576 case ARMV7M_PRIMASK:
1577 buf_set_u32((uint8_t *)&reg, 0, 1, value);
1578 break;
1579
1580 case ARMV7M_BASEPRI:
1581 buf_set_u32((uint8_t *)&reg, 8, 8, value);
1582 break;
1583
1584 case ARMV7M_FAULTMASK:
1585 buf_set_u32((uint8_t *)&reg, 16, 1, value);
1586 break;
1587
1588 case ARMV7M_CONTROL:
1589 buf_set_u32((uint8_t *)&reg, 24, 2, value);
1590 break;
1591 }
1592
1593 cortexm3_dap_write_coreregister_u32(swjdp, reg, 20);
1594
1595 LOG_DEBUG("write special reg %i value 0x%" PRIx32 " ", (int)num, value);
1596 break;
1597
1598 default:
1599 return ERROR_COMMAND_SYNTAX_ERROR;
1600 }
1601
1602 return ERROR_OK;
1603 }
1604
1605 static int cortex_m3_read_memory(struct target *target, uint32_t address,
1606 uint32_t size, uint32_t count, uint8_t *buffer)
1607 {
1608 struct armv7m_common *armv7m = target_to_armv7m(target);
1609 struct adiv5_dap *swjdp = armv7m->arm.dap;
1610 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1611
1612 if (armv7m->arm.is_armv6m) {
1613 /* armv6m does not handle unaligned memory access */
1614 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1615 return ERROR_TARGET_UNALIGNED_ACCESS;
1616 }
1617
1618 /* cortex_m3 handles unaligned memory access */
1619 if (count && buffer) {
1620 switch (size) {
1621 case 4:
1622 retval = mem_ap_read_buf_u32(swjdp, buffer, 4 * count, address);
1623 break;
1624 case 2:
1625 retval = mem_ap_read_buf_u16(swjdp, buffer, 2 * count, address);
1626 break;
1627 case 1:
1628 retval = mem_ap_read_buf_u8(swjdp, buffer, count, address);
1629 break;
1630 }
1631 }
1632
1633 return retval;
1634 }
1635
1636 static int cortex_m3_write_memory(struct target *target, uint32_t address,
1637 uint32_t size, uint32_t count, const uint8_t *buffer)
1638 {
1639 struct armv7m_common *armv7m = target_to_armv7m(target);
1640 struct adiv5_dap *swjdp = armv7m->arm.dap;
1641 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1642
1643 if (armv7m->arm.is_armv6m) {
1644 /* armv6m does not handle unaligned memory access */
1645 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1646 return ERROR_TARGET_UNALIGNED_ACCESS;
1647 }
1648
1649 if (count && buffer) {
1650 switch (size) {
1651 case 4:
1652 retval = mem_ap_write_buf_u32(swjdp, buffer, 4 * count, address);
1653 break;
1654 case 2:
1655 retval = mem_ap_write_buf_u16(swjdp, buffer, 2 * count, address);
1656 break;
1657 case 1:
1658 retval = mem_ap_write_buf_u8(swjdp, buffer, count, address);
1659 break;
1660 }
1661 }
1662
1663 return retval;
1664 }
1665
1666 static int cortex_m3_bulk_write_memory(struct target *target, uint32_t address,
1667 uint32_t count, const uint8_t *buffer)
1668 {
1669 return cortex_m3_write_memory(target, address, 4, count, buffer);
1670 }
1671
1672 static int cortex_m3_init_target(struct command_context *cmd_ctx,
1673 struct target *target)
1674 {
1675 armv7m_build_reg_cache(target);
1676 return ERROR_OK;
1677 }
1678
1679 /* REVISIT cache valid/dirty bits are unmaintained. We could set "valid"
1680 * on r/w if the core is not running, and clear on resume or reset ... or
1681 * at least, in a post_restore_context() method.
1682 */
1683
1684 struct dwt_reg_state {
1685 struct target *target;
1686 uint32_t addr;
1687 uint32_t value; /* scratch/cache */
1688 };
1689
1690 static int cortex_m3_dwt_get_reg(struct reg *reg)
1691 {
1692 struct dwt_reg_state *state = reg->arch_info;
1693
1694 return target_read_u32(state->target, state->addr, &state->value);
1695 }
1696
1697 static int cortex_m3_dwt_set_reg(struct reg *reg, uint8_t *buf)
1698 {
1699 struct dwt_reg_state *state = reg->arch_info;
1700
1701 return target_write_u32(state->target, state->addr,
1702 buf_get_u32(buf, 0, reg->size));
1703 }
1704
1705 struct dwt_reg {
1706 uint32_t addr;
1707 char *name;
1708 unsigned size;
1709 };
1710
1711 static struct dwt_reg dwt_base_regs[] = {
1712 { DWT_CTRL, "dwt_ctrl", 32, },
1713 /* NOTE that Erratum 532314 (fixed r2p0) affects CYCCNT: it wrongly
1714 * increments while the core is asleep.
1715 */
1716 { DWT_CYCCNT, "dwt_cyccnt", 32, },
1717 /* plus some 8 bit counters, useful for profiling with TPIU */
1718 };
1719
1720 static struct dwt_reg dwt_comp[] = {
1721 #define DWT_COMPARATOR(i) \
1722 { DWT_COMP0 + 0x10 * (i), "dwt_" #i "_comp", 32, }, \
1723 { DWT_MASK0 + 0x10 * (i), "dwt_" #i "_mask", 4, }, \
1724 { DWT_FUNCTION0 + 0x10 * (i), "dwt_" #i "_function", 32, }
1725 DWT_COMPARATOR(0),
1726 DWT_COMPARATOR(1),
1727 DWT_COMPARATOR(2),
1728 DWT_COMPARATOR(3),
1729 #undef DWT_COMPARATOR
1730 };
1731
1732 static const struct reg_arch_type dwt_reg_type = {
1733 .get = cortex_m3_dwt_get_reg,
1734 .set = cortex_m3_dwt_set_reg,
1735 };
1736
1737 static void cortex_m3_dwt_addreg(struct target *t, struct reg *r, struct dwt_reg *d)
1738 {
1739 struct dwt_reg_state *state;
1740
1741 state = calloc(1, sizeof *state);
1742 if (!state)
1743 return;
1744 state->addr = d->addr;
1745 state->target = t;
1746
1747 r->name = d->name;
1748 r->size = d->size;
1749 r->value = &state->value;
1750 r->arch_info = state;
1751 r->type = &dwt_reg_type;
1752 }
1753
1754 void cortex_m3_dwt_setup(struct cortex_m3_common *cm3, struct target *target)
1755 {
1756 uint32_t dwtcr;
1757 struct reg_cache *cache;
1758 struct cortex_m3_dwt_comparator *comparator;
1759 int reg, i;
1760
1761 target_read_u32(target, DWT_CTRL, &dwtcr);
1762 if (!dwtcr) {
1763 LOG_DEBUG("no DWT");
1764 return;
1765 }
1766
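/* DWT_CTRL[31:28] is NUMCOMP, the number of implemented comparators
 * (typically four on a Cortex-M3).
 */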
1767 cm3->dwt_num_comp = (dwtcr >> 28) & 0xF;
1768 cm3->dwt_comp_available = cm3->dwt_num_comp;
1769 cm3->dwt_comparator_list = calloc(cm3->dwt_num_comp,
1770 sizeof(struct cortex_m3_dwt_comparator));
1771 if (!cm3->dwt_comparator_list) {
1772 fail0:
1773 cm3->dwt_num_comp = 0;
1774 LOG_ERROR("out of mem");
1775 return;
1776 }
1777
1778 cache = calloc(1, sizeof *cache);
1779 if (!cache) {
1780 fail1:
1781 free(cm3->dwt_comparator_list);
1782 goto fail0;
1783 }
1784 cache->name = "cortex-m3 dwt registers";
1785 cache->num_regs = 2 + cm3->dwt_num_comp * 3;
1786 cache->reg_list = calloc(cache->num_regs, sizeof *cache->reg_list);
1787 if (!cache->reg_list) {
1788 free(cache);
1789 goto fail1;
1790 }
1791
1792 for (reg = 0; reg < 2; reg++)
1793 cortex_m3_dwt_addreg(target, cache->reg_list + reg,
1794 dwt_base_regs + reg);
1795
1796 comparator = cm3->dwt_comparator_list;
1797 for (i = 0; i < cm3->dwt_num_comp; i++, comparator++) {
1798 int j;
1799
1800 comparator->dwt_comparator_address = DWT_COMP0 + 0x10 * i;
1801 for (j = 0; j < 3; j++, reg++)
1802 cortex_m3_dwt_addreg(target, cache->reg_list + reg,
1803 dwt_comp + 3 * i + j);
1804 }
1805
1806 *register_get_last_cache_p(&target->reg_cache) = cache;
1807 cm3->dwt_cache = cache;
1808
1809 LOG_DEBUG("DWT dwtcr 0x%" PRIx32 ", comp %d, watch%s",
1810 dwtcr, cm3->dwt_num_comp,
1811 (dwtcr & (0xf << 24)) ? " only" : "/trigger");
1812
1813 /* REVISIT: if num_comp > 1, check whether comparator #1 can
1814 * implement single-address data value watchpoints ... so we
1815 * won't need to check it later, when asked to set one up.
1816 */
1817 }
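/* Editor's note: a worked example of the cache built above.  A Cortex-M3/M4
 * DWT typically reports NUMCOMP = 4, so the cache holds 2 + 4 * 3 = 14
 * registers:
 *
 *   dwt_ctrl, dwt_cyccnt,
 *   dwt_0_comp, dwt_0_mask, dwt_0_function, ... dwt_3_function
 *
 * all of them visible to the generic 'reg' command once the cache is linked
 * into target->reg_cache.
 */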
1818
1819 #define MVFR0 0xe000ef40
1820 #define MVFR1 0xe000ef44
1821
1822 #define MVFR0_DEFAULT_M4 0x10110021
1823 #define MVFR1_DEFAULT_M4 0x11000011
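/* Editor's note: MVFR0/MVFR1 are the read-only, memory-mapped floating-point
 * feature registers; the *_DEFAULT_M4 values above are what this driver
 * expects to read from an FPv4-SP single-precision FPU on a Cortex-M4F.
 * cortex_m3_examine() below compares against them to decide whether to set
 * armv7m->fp_feature = FPv4_SP.
 */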
1824
1825 int cortex_m3_examine(struct target *target)
1826 {
1827 int retval;
1828 uint32_t cpuid, fpcr, mvfr0, mvfr1;
1829 int i;
1830 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
1831 struct adiv5_dap *swjdp = cortex_m3->armv7m.arm.dap;
1832 struct armv7m_common *armv7m = target_to_armv7m(target);
1833
1834 /* stlink shares the examine handler but does not support
1835 * all its calls */
1836 if (!armv7m->stlink) {
1837 retval = ahbap_debugport_init(swjdp);
1838 if (retval != ERROR_OK)
1839 return retval;
1840 }
1841
1842 if (!target_was_examined(target)) {
1843 target_set_examined(target);
1844
1845 /* Read from Device Identification Registers */
1846 retval = target_read_u32(target, CPUID, &cpuid);
1847 if (retval != ERROR_OK)
1848 return retval;
1849
1850 /* Get CPU Type */
1851 i = (cpuid >> 4) & 0xf;
1852
1853 LOG_DEBUG("Cortex-M%d r%" PRId8 "p%" PRId8 " processor detected",
1854 i, (uint8_t)((cpuid >> 20) & 0xf), (uint8_t)((cpuid >> 0) & 0xf));
1855 LOG_DEBUG("cpuid: 0x%8.8" PRIx32 "", cpuid);
1856
1857 /* test for floating point feature on cortex-m4 */
1858 if (i == 4) {
1859 target_read_u32(target, MVFR0, &mvfr0);
1860 target_read_u32(target, MVFR1, &mvfr1);
1861
1862 if ((mvfr0 == MVFR0_DEFAULT_M4) && (mvfr1 == MVFR1_DEFAULT_M4)) {
1863 LOG_DEBUG("Cortex-M%d floating point feature FPv4_SP found", i);
1864 armv7m->fp_feature = FPv4_SP;
1865 }
1866 } else if (i == 0) {
1867 /* Cortex-M0 does not support unaligned memory access */
1868 armv7m->arm.is_armv6m = true;
1869 }
1870
1871 if (i == 4 || i == 3) {
1872 /* Cortex-M3/M4 has a 4096-byte TAR autoincrement range */
1873 armv7m->dap.tar_autoincr_block = (1 << 12);
1874 }
1875
1876 /* NOTE: FPB and DWT are both optional. */
1877
1878 /* Setup FPB */
1879 target_read_u32(target, FP_CTRL, &fpcr);
1880 cortex_m3->auto_bp_type = 1;
1881 cortex_m3->fp_num_code = ((fpcr >> 8) & 0x70) | ((fpcr >> 4) & 0xF);
1882 /* NUM_CODE is split across FP_CTRL
1883 * bits [14:12] and [7:4]
1884 */
1885 cortex_m3->fp_num_lit = (fpcr >> 8) & 0xF;
1886 cortex_m3->fp_code_available = cortex_m3->fp_num_code;
1887 cortex_m3->fp_comparator_list = calloc(
1888 cortex_m3->fp_num_code + cortex_m3->fp_num_lit,
1889 sizeof(struct cortex_m3_fp_comparator));
1890 cortex_m3->fpb_enabled = fpcr & 1;
1891 for (i = 0; i < cortex_m3->fp_num_code + cortex_m3->fp_num_lit; i++) {
1892 cortex_m3->fp_comparator_list[i].type =
1893 (i < cortex_m3->fp_num_code) ? FPCR_CODE : FPCR_LITERAL;
1894 cortex_m3->fp_comparator_list[i].fpcr_address = FP_COMP0 + 4 * i;
1895 }
1896 LOG_DEBUG("FPB fpcr 0x%" PRIx32 ", numcode %i, numlit %i",
1897 fpcr,
1898 cortex_m3->fp_num_code,
1899 cortex_m3->fp_num_lit);
1900
1901 /* Setup DWT */
1902 cortex_m3_dwt_setup(cortex_m3, target);
1903
1904 /* These hardware breakpoints only work for code in flash! */
1905 LOG_INFO("%s: hardware has %d breakpoints, %d watchpoints",
1906 target_name(target),
1907 cortex_m3->fp_num_code,
1908 cortex_m3->dwt_num_comp);
1909 }
1910
1911 return ERROR_OK;
1912 }
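/* Editor's note on the CPUID decode above: for the parts this driver knows
 * about, the PARTNO field (CPUID[15:4]) is 0xC20/0xC23/0xC24 for
 * Cortex-M0/M3/M4, so (cpuid >> 4) & 0xf yields the "M number" directly,
 * while CPUID[23:20] and CPUID[3:0] give the rNpN revision that gets logged.
 */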
1913
1914 static int cortex_m3_dcc_read(struct adiv5_dap *swjdp, uint8_t *value, uint8_t *ctrl)
1915 {
1916 uint16_t dcrdr;
1917 int retval;
1918
1919 retval = mem_ap_read_buf_u16(swjdp, (uint8_t *)&dcrdr, 1, DCB_DCRDR);
if (retval != ERROR_OK)
return retval;
1920 *ctrl = (uint8_t)dcrdr;
1921 *value = (uint8_t)(dcrdr >> 8);
1922
1923 LOG_DEBUG("data 0x%x ctrl 0x%x", *value, *ctrl);
1924
1925 /* write the ack back to the software dcc register
1926 * to signify that we have read the data */
1927 if (dcrdr & (1 << 0)) {
1928 dcrdr = 0;
1929 retval = mem_ap_write_buf_u16(swjdp, (uint8_t *)&dcrdr, 1, DCB_DCRDR);
1930 if (retval != ERROR_OK)
1931 return retval;
1932 }
1933
1934 return ERROR_OK;
1935 }
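/* Editor's note: the software DCC convention used above packs a handshake
 * byte and a data byte into the low half of DCB_DCRDR: bit 0 of the low byte
 * ("ctrl") means the target has posted a byte, and the next byte carries the
 * payload.  Clearing DCRDR acknowledges the byte so the target-side firmware
 * can post the next one; that firmware must follow the same convention.
 */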
1936
1937 static int cortex_m3_target_request_data(struct target *target,
1938 uint32_t size, uint8_t *buffer)
1939 {
1940 struct armv7m_common *armv7m = target_to_armv7m(target);
1941 struct adiv5_dap *swjdp = armv7m->arm.dap;
1942 uint8_t data;
1943 uint8_t ctrl;
1944 uint32_t i;
1945
1946 for (i = 0; i < (size * 4); i++) {
1947 cortex_m3_dcc_read(swjdp, &data, &ctrl);
1948 buffer[i] = data;
1949 }
1950
1951 return ERROR_OK;
1952 }
1953
1954 static int cortex_m3_handle_target_request(void *priv)
1955 {
1956 struct target *target = priv;
1957 if (!target_was_examined(target))
1958 return ERROR_OK;
1959 struct armv7m_common *armv7m = target_to_armv7m(target);
1960 struct adiv5_dap *swjdp = armv7m->arm.dap;
1961
1962 if (!target->dbg_msg_enabled)
1963 return ERROR_OK;
1964
1965 if (target->state == TARGET_RUNNING) {
1966 uint8_t data;
1967 uint8_t ctrl;
1968
1969 cortex_m3_dcc_read(swjdp, &data, &ctrl);
1970
1971 /* check if we have data */
1972 if (ctrl & (1 << 0)) {
1973 uint32_t request;
1974
1975 /* we assume target is quick enough */
1976 request = data;
1977 cortex_m3_dcc_read(swjdp, &data, &ctrl);
1978 request |= (data << 8);
1979 cortex_m3_dcc_read(swjdp, &data, &ctrl);
1980 request |= (data << 16);
1981 cortex_m3_dcc_read(swjdp, &data, &ctrl);
1982 request |= (data << 24);
1983 target_request(target, request);
1984 }
1985 }
1986
1987 return ERROR_OK;
1988 }
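/* Editor's note: the four reads above drain one 32-bit request word from the
 * DCC channel, least significant byte first, before handing it to
 * target_request() for dispatch (debug messages and the like).  The "target
 * is quick enough" assumption means no timeout is applied between bytes.
 */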
1989
1990 static int cortex_m3_init_arch_info(struct target *target,
1991 struct cortex_m3_common *cortex_m3, struct jtag_tap *tap)
1992 {
1993 int retval;
1994 struct armv7m_common *armv7m = &cortex_m3->armv7m;
1995
1996 armv7m_init_arch_info(target, armv7m);
1997
1998 /* prepare JTAG information for the new target */
1999 cortex_m3->jtag_info.tap = tap;
2000 cortex_m3->jtag_info.scann_size = 4;
2001
2002 /* default reset mode is to use srst if fitted;
2003 * if not, fall back to CORTEX_M3_RESET_VECTRESET */
2004 cortex_m3->soft_reset_config = CORTEX_M3_RESET_VECTRESET;
2005
2006 armv7m->arm.dap = &armv7m->dap;
2007
2008 /* Leave (only) generic DAP stuff for debugport_init(); */
2009 armv7m->dap.jtag_info = &cortex_m3->jtag_info;
2010 armv7m->dap.memaccess_tck = 8;
2011
2012 /* Cortex-M3/M4 has a 4096-byte autoincrement range,
2013 * but default to a safe 1024 bytes so Cortex-M0 also works;
2014 * cortex_m3_examine() raises it again when an M3/M4 is detected */
2015 armv7m->dap.tar_autoincr_block = (1 << 10);
2016
2017 /* register arch-specific functions */
2018 armv7m->examine_debug_reason = cortex_m3_examine_debug_reason;
2019
2020 armv7m->post_debug_entry = NULL;
2021
2022 armv7m->pre_restore_context = NULL;
2023
2024 armv7m->load_core_reg_u32 = cortex_m3_load_core_reg_u32;
2025 armv7m->store_core_reg_u32 = cortex_m3_store_core_reg_u32;
2026
2027 target_register_timer_callback(cortex_m3_handle_target_request, 1, 1, target);
2028
2029 retval = arm_jtag_setup_connection(&cortex_m3->jtag_info);
2030 if (retval != ERROR_OK)
2031 return retval;
2032
2033 return ERROR_OK;
2034 }
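/* Editor's note: besides wiring up the register access callbacks,
 * cortex_m3_init_arch_info() registers cortex_m3_handle_target_request() as a
 * periodic timer callback (the arguments request roughly a 1 ms period),
 * which is what keeps the DCC channel polled while the target is running.
 */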
2035
2036 static int cortex_m3_target_create(struct target *target, Jim_Interp *interp)
2037 {
2038 struct cortex_m3_common *cortex_m3 = calloc(1, sizeof(struct cortex_m3_common));
if (cortex_m3 == NULL) {
LOG_ERROR("unable to allocate cortex_m3 target state");
return ERROR_FAIL;
}
2039
2040 cortex_m3->common_magic = CORTEX_M3_COMMON_MAGIC;
2041 cortex_m3_init_arch_info(target, cortex_m3, target->tap);
2042
2043 return ERROR_OK;
2044 }
2045
2046 /*--------------------------------------------------------------------------*/
2047
2048 static int cortex_m3_verify_pointer(struct command_context *cmd_ctx,
2049 struct cortex_m3_common *cm3)
2050 {
2051 if (cm3->common_magic != CORTEX_M3_COMMON_MAGIC) {
2052 command_print(cmd_ctx, "target is not a Cortex-M3");
2053 return ERROR_TARGET_INVALID;
2054 }
2055 return ERROR_OK;
2056 }
2057
2058 /*
2059 * Only stuff below this line should need to verify that its target
2060 * is a Cortex-M3. Everything else should have indirected through the
2061 * cortexm3_target structure, which is only used with CM3 targets.
2062 */
2063
2064 static const struct {
2065 char name[10];
2066 unsigned mask;
2067 } vec_ids[] = {
2068 { "hard_err", VC_HARDERR, },
2069 { "int_err", VC_INTERR, },
2070 { "bus_err", VC_BUSERR, },
2071 { "state_err", VC_STATERR, },
2072 { "chk_err", VC_CHKERR, },
2073 { "nocp_err", VC_NOCPERR, },
2074 { "mm_err", VC_MMERR, },
2075 { "reset", VC_CORERESET, },
2076 };
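/* Editor's note: the names above are the tokens accepted by the vector_catch
 * command; each maps to one DEMCR VC_* enable bit.  Illustrative usage (not
 * from the original source):
 *
 *   cortex_m3 vector_catch hard_err reset
 *   cortex_m3 vector_catch all
 *   cortex_m3 vector_catch none
 */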
2077
2078 COMMAND_HANDLER(handle_cortex_m3_vector_catch_command)
2079 {
2080 struct target *target = get_current_target(CMD_CTX);
2081 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
2082 struct armv7m_common *armv7m = &cortex_m3->armv7m;
2083 struct adiv5_dap *swjdp = armv7m->arm.dap;
2084 uint32_t demcr = 0;
2085 int retval;
2086
2087 retval = cortex_m3_verify_pointer(CMD_CTX, cortex_m3);
2088 if (retval != ERROR_OK)
2089 return retval;
2090
2091 retval = mem_ap_read_atomic_u32(swjdp, DCB_DEMCR, &demcr);
2092 if (retval != ERROR_OK)
2093 return retval;
2094
2095 if (CMD_ARGC > 0) {
2096 unsigned catch = 0;
2097
2098 if (CMD_ARGC == 1) {
2099 if (strcmp(CMD_ARGV[0], "all") == 0) {
2100 catch = VC_HARDERR | VC_INTERR | VC_BUSERR
2101 | VC_STATERR | VC_CHKERR | VC_NOCPERR
2102 | VC_MMERR | VC_CORERESET;
2103 goto write;
2104 } else if (strcmp(CMD_ARGV[0], "none") == 0)
2105 goto write;
2106 }
2107 while (CMD_ARGC-- > 0) {
2108 unsigned i;
2109 for (i = 0; i < ARRAY_SIZE(vec_ids); i++) {
2110 if (strcmp(CMD_ARGV[CMD_ARGC], vec_ids[i].name) != 0)
2111 continue;
2112 catch |= vec_ids[i].mask;
2113 break;
2114 }
2115 if (i == ARRAY_SIZE(vec_ids)) {
2116 LOG_ERROR("No CM3 vector '%s'", CMD_ARGV[CMD_ARGC]);
2117 return ERROR_COMMAND_SYNTAX_ERROR;
2118 }
2119 }
2120 write:
2121 /* For now, armv7m->demcr only stores vector catch flags. */
2122 armv7m->demcr = catch;
2123
2124 demcr &= ~0xffff;
2125 demcr |= catch;
2126
2127 /* write, then read back so the status report below reflects what actually stuck */
2128 retval = mem_ap_write_u32(swjdp, DCB_DEMCR, demcr);
2129 if (retval != ERROR_OK)
2130 return retval;
2131 retval = mem_ap_read_atomic_u32(swjdp, DCB_DEMCR, &demcr);
2132 if (retval != ERROR_OK)
2133 return retval;
2134
2135 /* FIXME be sure to clear DEMCR on clean server shutdown.
2136 * Otherwise the vector catch hardware could fire when there's
2137 * no debugger hooked up, causing much confusion...
2138 */
2139 }
2140
2141 for (unsigned i = 0; i < ARRAY_SIZE(vec_ids); i++) {
2142 command_print(CMD_CTX, "%9s: %s", vec_ids[i].name,
2143 (demcr & vec_ids[i].mask) ? "catch" : "ignore");
2144 }
2145
2146 return ERROR_OK;
2147 }
2148
2149 COMMAND_HANDLER(handle_cortex_m3_mask_interrupts_command)
2150 {
2151 struct target *target = get_current_target(CMD_CTX);
2152 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
2153 int retval;
2154
2155 static const Jim_Nvp nvp_maskisr_modes[] = {
2156 { .name = "auto", .value = CORTEX_M3_ISRMASK_AUTO },
2157 { .name = "off", .value = CORTEX_M3_ISRMASK_OFF },
2158 { .name = "on", .value = CORTEX_M3_ISRMASK_ON },
2159 { .name = NULL, .value = -1 },
2160 };
2161 const Jim_Nvp *n;
2162
2163
2164 retval = cortex_m3_verify_pointer(CMD_CTX, cortex_m3);
2165 if (retval != ERROR_OK)
2166 return retval;
2167
2168 if (target->state != TARGET_HALTED) {
2169 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
2170 return ERROR_OK;
2171 }
2172
2173 if (CMD_ARGC > 0) {
2174 n = Jim_Nvp_name2value_simple(nvp_maskisr_modes, CMD_ARGV[0]);
2175 if (n->name == NULL)
2176 return ERROR_COMMAND_SYNTAX_ERROR;
2177 cortex_m3->isrmasking_mode = n->value;
2178
2179
2180 if (cortex_m3->isrmasking_mode == CORTEX_M3_ISRMASK_ON)
2181 cortex_m3_write_debug_halt_mask(target, C_HALT | C_MASKINTS, 0);
2182 else
2183 cortex_m3_write_debug_halt_mask(target, C_HALT, C_MASKINTS);
2184 }
2185
2186 n = Jim_Nvp_value2name_simple(nvp_maskisr_modes, cortex_m3->isrmasking_mode);
2187 command_print(CMD_CTX, "cortex_m3 interrupt mask %s", n->name);
2188
2189 return ERROR_OK;
2190 }
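/* Editor's note, illustrative usage of the command above:
 *
 *   cortex_m3 maskisr on     ;# keep interrupts masked while halted/stepping
 *   cortex_m3 maskisr auto   ;# let the step/resume code decide when to mask
 *   cortex_m3 maskisr off
 *
 * "on" sets C_MASKINTS together with C_HALT via
 * cortex_m3_write_debug_halt_mask(); "auto" defers the masking decision to
 * the stepping logic elsewhere in this file.
 */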
2191
2192 COMMAND_HANDLER(handle_cortex_m3_reset_config_command)
2193 {
2194 struct target *target = get_current_target(CMD_CTX);
2195 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
2196 int retval;
2197 char *reset_config;
2198
2199 retval = cortex_m3_verify_pointer(CMD_CTX, cortex_m3);
2200 if (retval != ERROR_OK)
2201 return retval;
2202
2203 if (CMD_ARGC > 0) {
2204 if (strcmp(*CMD_ARGV, "sysresetreq") == 0)
2205 cortex_m3->soft_reset_config = CORTEX_M3_RESET_SYSRESETREQ;
2206 else if (strcmp(*CMD_ARGV, "vectreset") == 0)
2207 cortex_m3->soft_reset_config = CORTEX_M3_RESET_VECTRESET;
2208 }
2209
2210 switch (cortex_m3->soft_reset_config) {
2211 case CORTEX_M3_RESET_SYSRESETREQ:
2212 reset_config = "sysresetreq";
2213 break;
2214
2215 case CORTEX_M3_RESET_VECTRESET:
2216 reset_config = "vectreset";
2217 break;
2218
2219 default:
2220 reset_config = "unknown";
2221 break;
2222 }
2223
2224 command_print(CMD_CTX, "cortex_m3 reset_config %s", reset_config);
2225
2226 return ERROR_OK;
2227 }
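/* Editor's note, illustrative usage of the command above:
 *
 *   cortex_m3 reset_config sysresetreq   ;# system-wide reset via AIRCR
 *   cortex_m3 reset_config vectreset     ;# core-only reset, the default
 *
 * Note that 'srst' appears in the command's usage string below but is not
 * parsed by this handler.
 */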
2228
2229 static const struct command_registration cortex_m3_exec_command_handlers[] = {
2230 {
2231 .name = "maskisr",
2232 .handler = handle_cortex_m3_mask_interrupts_command,
2233 .mode = COMMAND_EXEC,
2234 .help = "mask cortex_m3 interrupts",
2235 .usage = "['auto'|'on'|'off']",
2236 },
2237 {
2238 .name = "vector_catch",
2239 .handler = handle_cortex_m3_vector_catch_command,
2240 .mode = COMMAND_EXEC,
2241 .help = "configure hardware vectors to trigger debug entry",
2242 .usage = "['all'|'none'|('bus_err'|'chk_err'|...)*]",
2243 },
2244 {
2245 .name = "reset_config",
2246 .handler = handle_cortex_m3_reset_config_command,
2247 .mode = COMMAND_ANY,
2248 .help = "configure software reset handling",
2249 .usage = "['srst'|'sysresetreq'|'vectreset']",
2250 },
2251 COMMAND_REGISTRATION_DONE
2252 };
2253 static const struct command_registration cortex_m3_command_handlers[] = {
2254 {
2255 .chain = armv7m_command_handlers,
2256 },
2257 {
2258 .name = "cortex_m3",
2259 .mode = COMMAND_EXEC,
2260 .help = "Cortex-M3 command group",
2261 .usage = "",
2262 .chain = cortex_m3_exec_command_handlers,
2263 },
2264 COMMAND_REGISTRATION_DONE
2265 };
2266
2267 struct target_type cortexm3_target = {
2268 .name = "cortex_m3",
2269
2270 .poll = cortex_m3_poll,
2271 .arch_state = armv7m_arch_state,
2272
2273 .target_request_data = cortex_m3_target_request_data,
2274
2275 .halt = cortex_m3_halt,
2276 .resume = cortex_m3_resume,
2277 .step = cortex_m3_step,
2278
2279 .assert_reset = cortex_m3_assert_reset,
2280 .deassert_reset = cortex_m3_deassert_reset,
2281 .soft_reset_halt = cortex_m3_soft_reset_halt,
2282
2283 .get_gdb_reg_list = armv7m_get_gdb_reg_list,
2284
2285 .read_memory = cortex_m3_read_memory,
2286 .write_memory = cortex_m3_write_memory,
2287 .bulk_write_memory = cortex_m3_bulk_write_memory,
2288 .checksum_memory = armv7m_checksum_memory,
2289 .blank_check_memory = armv7m_blank_check_memory,
2290
2291 .run_algorithm = armv7m_run_algorithm,
2292 .start_algorithm = armv7m_start_algorithm,
2293 .wait_algorithm = armv7m_wait_algorithm,
2294
2295 .add_breakpoint = cortex_m3_add_breakpoint,
2296 .remove_breakpoint = cortex_m3_remove_breakpoint,
2297 .add_watchpoint = cortex_m3_add_watchpoint,
2298 .remove_watchpoint = cortex_m3_remove_watchpoint,
2299
2300 .commands = cortex_m3_command_handlers,
2301 .target_create = cortex_m3_target_create,
2302 .init_target = cortex_m3_init_target,
2303 .examine = cortex_m3_examine,
2304 };
