src/target/cortex_m.c (openocd.git, commit c436bad482a592431fa6b8c1d30b6125b2bbd62c)
1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2006 by Magnus Lundin *
6 * lundin@mlu.mine.nu *
7 * *
8 * Copyright (C) 2008 by Spencer Oliver *
9 * spen@spen-soft.co.uk *
10 * *
11 * This program is free software; you can redistribute it and/or modify *
12 * it under the terms of the GNU General Public License as published by *
13 * the Free Software Foundation; either version 2 of the License, or *
14 * (at your option) any later version. *
15 * *
16 * This program is distributed in the hope that it will be useful, *
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
19 * GNU General Public License for more details. *
20 * *
21 * You should have received a copy of the GNU General Public License *
22 * along with this program; if not, write to the *
23 * Free Software Foundation, Inc., *
24 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
25 * *
26 * *
27 * Cortex-M3(tm) TRM, ARM DDI 0337E (r1p1) and 0337G (r2p0) *
28 * *
29 ***************************************************************************/
30 #ifdef HAVE_CONFIG_H
31 #include "config.h"
32 #endif
33
34 #include "jtag/interface.h"
35 #include "breakpoints.h"
36 #include "cortex_m.h"
37 #include "target_request.h"
38 #include "target_type.h"
39 #include "arm_disassembler.h"
40 #include "register.h"
41 #include "arm_opcodes.h"
42 #include "arm_semihosting.h"
43 #include <helper/time_support.h>
44
45 /* NOTE: most of this should work fine for the Cortex-M1 and
46 * Cortex-M0 cores too, although they're ARMv6-M not ARMv7-M.
47  * Some differences: M0/M1 don't have FPB remapping or the
48 * DWT tracing/profiling support. (So the cycle counter will
49 * not be usable; the other stuff isn't currently used here.)
50 *
51 * Although there are some workarounds for errata seen only in r0p0
52 * silicon, such old parts are hard to find and thus not much tested
53 * any longer.
54 */
55
56 /**
57  * Returns the type of breakpoint required by the address location
58 */
59 #define BKPT_TYPE_BY_ADDR(addr) ((addr) < 0x20000000 ? BKPT_HARD : BKPT_SOFT)
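/* Note on the boundary used above: 0x20000000 is the start of the ARMv7-M
 * SRAM region.  Addresses below it fall in the code region (typically flash),
 * where only FPB hardware comparators can break reliably; at or above it the
 * instruction can simply be patched with a software BKPT instead.
 */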
60
61
62 /* forward declarations */
63 static int cortex_m3_store_core_reg_u32(struct target *target,
64 uint32_t num, uint32_t value);
65
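/* Core register access goes through the Debug Core Register interface:
 * the register selector is written to DCB_DCRSR (with DCRSR_WnR set for a
 * write) and the data is transferred through DCB_DCRDR.  The two helpers
 * below queue both halves of that handshake as banked-data (BD) accesses
 * within a single DAP transaction.
 */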
66 static int cortexm3_dap_read_coreregister_u32(struct adiv5_dap *swjdp,
67 uint32_t *value, int regnum)
68 {
69 int retval;
70 uint32_t dcrdr;
71
72 /* because the DCB_DCRDR is used for the emulated dcc channel
73 * we have to save/restore the DCB_DCRDR when used */
74
75 retval = mem_ap_read_u32(swjdp, DCB_DCRDR, &dcrdr);
76 if (retval != ERROR_OK)
77 return retval;
78
79 /* mem_ap_write_u32(swjdp, DCB_DCRSR, regnum); */
80 retval = dap_setup_accessport(swjdp, CSW_32BIT | CSW_ADDRINC_OFF, DCB_DCRSR & 0xFFFFFFF0);
81 if (retval != ERROR_OK)
82 return retval;
83 retval = dap_queue_ap_write(swjdp, AP_REG_BD0 | (DCB_DCRSR & 0xC), regnum);
84 if (retval != ERROR_OK)
85 return retval;
86
87 /* mem_ap_read_u32(swjdp, DCB_DCRDR, value); */
88 retval = dap_setup_accessport(swjdp, CSW_32BIT | CSW_ADDRINC_OFF, DCB_DCRDR & 0xFFFFFFF0);
89 if (retval != ERROR_OK)
90 return retval;
91 retval = dap_queue_ap_read(swjdp, AP_REG_BD0 | (DCB_DCRDR & 0xC), value);
92 if (retval != ERROR_OK)
93 return retval;
94
95 retval = dap_run(swjdp);
96 if (retval != ERROR_OK)
97 return retval;
98
99 	/* restore DCB_DCRDR - this needs to be in a separate
100 * transaction otherwise the emulated DCC channel breaks */
101 if (retval == ERROR_OK)
102 retval = mem_ap_write_atomic_u32(swjdp, DCB_DCRDR, dcrdr);
103
104 return retval;
105 }
106
107 static int cortexm3_dap_write_coreregister_u32(struct adiv5_dap *swjdp,
108 uint32_t value, int regnum)
109 {
110 int retval;
111 uint32_t dcrdr;
112
113 /* because the DCB_DCRDR is used for the emulated dcc channel
114 * we have to save/restore the DCB_DCRDR when used */
115
116 retval = mem_ap_read_u32(swjdp, DCB_DCRDR, &dcrdr);
117 if (retval != ERROR_OK)
118 return retval;
119
120 /* mem_ap_write_u32(swjdp, DCB_DCRDR, core_regs[i]); */
121 retval = dap_setup_accessport(swjdp, CSW_32BIT | CSW_ADDRINC_OFF, DCB_DCRDR & 0xFFFFFFF0);
122 if (retval != ERROR_OK)
123 return retval;
124 retval = dap_queue_ap_write(swjdp, AP_REG_BD0 | (DCB_DCRDR & 0xC), value);
125 if (retval != ERROR_OK)
126 return retval;
127
128 /* mem_ap_write_u32(swjdp, DCB_DCRSR, i | DCRSR_WnR); */
129 retval = dap_setup_accessport(swjdp, CSW_32BIT | CSW_ADDRINC_OFF, DCB_DCRSR & 0xFFFFFFF0);
130 if (retval != ERROR_OK)
131 return retval;
132 retval = dap_queue_ap_write(swjdp, AP_REG_BD0 | (DCB_DCRSR & 0xC), regnum | DCRSR_WnR);
133 if (retval != ERROR_OK)
134 return retval;
135
136 retval = dap_run(swjdp);
137 if (retval != ERROR_OK)
138 return retval;
139
140 	/* restore DCB_DCRDR - this needs to be in a separate
141 * transaction otherwise the emulated DCC channel breaks */
142 if (retval == ERROR_OK)
143 retval = mem_ap_write_atomic_u32(swjdp, DCB_DCRDR, dcrdr);
144
145 return retval;
146 }
147
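/* DCB_DHCSR ignores writes unless DBGKEY (0xA05F) is present in bits
 * [31:16], and those same bits read back as status flags.  The helper
 * below therefore drops the stale status half of the cached value before
 * ORing in the key and the requested C_* control bits.
 */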
148 static int cortex_m3_write_debug_halt_mask(struct target *target,
149 uint32_t mask_on, uint32_t mask_off)
150 {
151 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
152 struct adiv5_dap *swjdp = cortex_m3->armv7m.arm.dap;
153
154 /* mask off status bits */
155 cortex_m3->dcb_dhcsr &= ~((0xFFFF << 16) | mask_off);
156 /* create new register mask */
157 cortex_m3->dcb_dhcsr |= DBGKEY | C_DEBUGEN | mask_on;
158
159 return mem_ap_write_atomic_u32(swjdp, DCB_DHCSR, cortex_m3->dcb_dhcsr);
160 }
161
162 static int cortex_m3_clear_halt(struct target *target)
163 {
164 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
165 struct adiv5_dap *swjdp = cortex_m3->armv7m.arm.dap;
166 int retval;
167
168 /* clear step if any */
169 cortex_m3_write_debug_halt_mask(target, C_HALT, C_STEP);
170
171 /* Read Debug Fault Status Register */
172 retval = mem_ap_read_atomic_u32(swjdp, NVIC_DFSR, &cortex_m3->nvic_dfsr);
173 if (retval != ERROR_OK)
174 return retval;
175
176 /* Clear Debug Fault Status */
177 retval = mem_ap_write_atomic_u32(swjdp, NVIC_DFSR, cortex_m3->nvic_dfsr);
178 if (retval != ERROR_OK)
179 return retval;
180 LOG_DEBUG(" NVIC_DFSR 0x%" PRIx32 "", cortex_m3->nvic_dfsr);
181
182 return ERROR_OK;
183 }
184
185 static int cortex_m3_single_step_core(struct target *target)
186 {
187 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
188 struct adiv5_dap *swjdp = cortex_m3->armv7m.arm.dap;
189 uint32_t dhcsr_save;
190 int retval;
191
192 /* backup dhcsr reg */
193 dhcsr_save = cortex_m3->dcb_dhcsr;
194
195 	/* Mask interrupts before clearing halt, if not done already. This avoids
196 * Erratum 377497 (fixed in r1p0) where setting MASKINTS while clearing
197 * HALT can put the core into an unknown state.
198 */
199 if (!(cortex_m3->dcb_dhcsr & C_MASKINTS)) {
200 retval = mem_ap_write_atomic_u32(swjdp, DCB_DHCSR,
201 DBGKEY | C_MASKINTS | C_HALT | C_DEBUGEN);
202 if (retval != ERROR_OK)
203 return retval;
204 }
205 retval = mem_ap_write_atomic_u32(swjdp, DCB_DHCSR,
206 DBGKEY | C_MASKINTS | C_STEP | C_DEBUGEN);
207 if (retval != ERROR_OK)
208 return retval;
209 LOG_DEBUG(" ");
210
211 /* restore dhcsr reg */
212 cortex_m3->dcb_dhcsr = dhcsr_save;
213 cortex_m3_clear_halt(target);
214
215 return ERROR_OK;
216 }
217
218 static int cortex_m3_endreset_event(struct target *target)
219 {
220 int i;
221 int retval;
222 uint32_t dcb_demcr;
223 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
224 struct armv7m_common *armv7m = &cortex_m3->armv7m;
225 struct adiv5_dap *swjdp = cortex_m3->armv7m.arm.dap;
226 struct cortex_m3_fp_comparator *fp_list = cortex_m3->fp_comparator_list;
227 struct cortex_m3_dwt_comparator *dwt_list = cortex_m3->dwt_comparator_list;
228
229 /* REVISIT The four debug monitor bits are currently ignored... */
230 retval = mem_ap_read_atomic_u32(swjdp, DCB_DEMCR, &dcb_demcr);
231 if (retval != ERROR_OK)
232 return retval;
233 LOG_DEBUG("DCB_DEMCR = 0x%8.8" PRIx32 "", dcb_demcr);
234
235 /* this register is used for emulated dcc channel */
236 retval = mem_ap_write_u32(swjdp, DCB_DCRDR, 0);
237 if (retval != ERROR_OK)
238 return retval;
239
240 /* Enable debug requests */
241 retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &cortex_m3->dcb_dhcsr);
242 if (retval != ERROR_OK)
243 return retval;
244 if (!(cortex_m3->dcb_dhcsr & C_DEBUGEN)) {
245 retval = mem_ap_write_u32(swjdp, DCB_DHCSR, DBGKEY | C_DEBUGEN);
246 if (retval != ERROR_OK)
247 return retval;
248 }
249
250 /* clear any interrupt masking */
251 cortex_m3_write_debug_halt_mask(target, 0, C_MASKINTS);
252
253 /* Enable features controlled by ITM and DWT blocks, and catch only
254 * the vectors we were told to pay attention to.
255 *
256 * Target firmware is responsible for all fault handling policy
257 * choices *EXCEPT* explicitly scripted overrides like "vector_catch"
258 * or manual updates to the NVIC SHCSR and CCR registers.
259 */
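	/* TRCENA gates all DWT and ITM register accesses, so it is forced on
	 * here in addition to whatever vector catch bits are cached in
	 * armv7m->demcr.
	 */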
260 retval = mem_ap_write_u32(swjdp, DCB_DEMCR, TRCENA | armv7m->demcr);
261 if (retval != ERROR_OK)
262 return retval;
263
264 /* Paranoia: evidently some (early?) chips don't preserve all the
265 	 * debug state (including FPB, DWT, etc) across reset...
266 */
267
268 /* Enable FPB */
269 retval = target_write_u32(target, FP_CTRL, 3);
270 if (retval != ERROR_OK)
271 return retval;
272
273 cortex_m3->fpb_enabled = 1;
274
275 /* Restore FPB registers */
276 for (i = 0; i < cortex_m3->fp_num_code + cortex_m3->fp_num_lit; i++) {
277 retval = target_write_u32(target, fp_list[i].fpcr_address, fp_list[i].fpcr_value);
278 if (retval != ERROR_OK)
279 return retval;
280 }
281
282 /* Restore DWT registers */
283 for (i = 0; i < cortex_m3->dwt_num_comp; i++) {
284 retval = target_write_u32(target, dwt_list[i].dwt_comparator_address + 0,
285 dwt_list[i].comp);
286 if (retval != ERROR_OK)
287 return retval;
288 retval = target_write_u32(target, dwt_list[i].dwt_comparator_address + 4,
289 dwt_list[i].mask);
290 if (retval != ERROR_OK)
291 return retval;
292 retval = target_write_u32(target, dwt_list[i].dwt_comparator_address + 8,
293 dwt_list[i].function);
294 if (retval != ERROR_OK)
295 return retval;
296 }
297 retval = dap_run(swjdp);
298 if (retval != ERROR_OK)
299 return retval;
300
301 register_cache_invalidate(armv7m->arm.core_cache);
302
303 /* make sure we have latest dhcsr flags */
304 retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &cortex_m3->dcb_dhcsr);
305
306 return retval;
307 }
308
309 static int cortex_m3_examine_debug_reason(struct target *target)
310 {
311 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
312
313 /* THIS IS NOT GOOD, TODO - better logic for detection of debug state reason
314 * only check the debug reason if we don't know it already */
315
316 if ((target->debug_reason != DBG_REASON_DBGRQ)
317 && (target->debug_reason != DBG_REASON_SINGLESTEP)) {
318 if (cortex_m3->nvic_dfsr & DFSR_BKPT) {
319 target->debug_reason = DBG_REASON_BREAKPOINT;
320 if (cortex_m3->nvic_dfsr & DFSR_DWTTRAP)
321 target->debug_reason = DBG_REASON_WPTANDBKPT;
322 } else if (cortex_m3->nvic_dfsr & DFSR_DWTTRAP)
323 target->debug_reason = DBG_REASON_WATCHPOINT;
324 else if (cortex_m3->nvic_dfsr & DFSR_VCATCH)
325 target->debug_reason = DBG_REASON_BREAKPOINT;
326 else /* EXTERNAL, HALTED */
327 target->debug_reason = DBG_REASON_UNDEFINED;
328 }
329
330 return ERROR_OK;
331 }
332
333 static int cortex_m3_examine_exception_reason(struct target *target)
334 {
335 uint32_t shcsr = 0, except_sr = 0, cfsr = -1, except_ar = -1;
336 struct armv7m_common *armv7m = target_to_armv7m(target);
337 struct adiv5_dap *swjdp = armv7m->arm.dap;
338 int retval;
339
340 retval = mem_ap_read_u32(swjdp, NVIC_SHCSR, &shcsr);
341 if (retval != ERROR_OK)
342 return retval;
343 switch (armv7m->exception_number) {
344 case 2: /* NMI */
345 break;
346 case 3: /* Hard Fault */
347 retval = mem_ap_read_atomic_u32(swjdp, NVIC_HFSR, &except_sr);
348 if (retval != ERROR_OK)
349 return retval;
350 if (except_sr & 0x40000000) {
351 retval = mem_ap_read_u32(swjdp, NVIC_CFSR, &cfsr);
352 if (retval != ERROR_OK)
353 return retval;
354 }
355 break;
356 case 4: /* Memory Management */
357 retval = mem_ap_read_u32(swjdp, NVIC_CFSR, &except_sr);
358 if (retval != ERROR_OK)
359 return retval;
360 retval = mem_ap_read_u32(swjdp, NVIC_MMFAR, &except_ar);
361 if (retval != ERROR_OK)
362 return retval;
363 break;
364 case 5: /* Bus Fault */
365 retval = mem_ap_read_u32(swjdp, NVIC_CFSR, &except_sr);
366 if (retval != ERROR_OK)
367 return retval;
368 retval = mem_ap_read_u32(swjdp, NVIC_BFAR, &except_ar);
369 if (retval != ERROR_OK)
370 return retval;
371 break;
372 case 6: /* Usage Fault */
373 retval = mem_ap_read_u32(swjdp, NVIC_CFSR, &except_sr);
374 if (retval != ERROR_OK)
375 return retval;
376 break;
377 case 11: /* SVCall */
378 break;
379 case 12: /* Debug Monitor */
380 retval = mem_ap_read_u32(swjdp, NVIC_DFSR, &except_sr);
381 if (retval != ERROR_OK)
382 return retval;
383 break;
384 case 14: /* PendSV */
385 break;
386 case 15: /* SysTick */
387 break;
388 default:
389 except_sr = 0;
390 break;
391 }
392 retval = dap_run(swjdp);
393 if (retval == ERROR_OK)
394 LOG_DEBUG("%s SHCSR 0x%" PRIx32 ", SR 0x%" PRIx32
395 ", CFSR 0x%" PRIx32 ", AR 0x%" PRIx32,
396 armv7m_exception_string(armv7m->exception_number),
397 shcsr, except_sr, cfsr, except_ar);
398 return retval;
399 }
400
401 static int cortex_m3_debug_entry(struct target *target)
402 {
403 int i;
404 uint32_t xPSR;
405 int retval;
406 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
407 struct armv7m_common *armv7m = &cortex_m3->armv7m;
408 struct arm *arm = &armv7m->arm;
409 struct adiv5_dap *swjdp = armv7m->arm.dap;
410 struct reg *r;
411
412 LOG_DEBUG(" ");
413
414 cortex_m3_clear_halt(target);
415 retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &cortex_m3->dcb_dhcsr);
416 if (retval != ERROR_OK)
417 return retval;
418
419 retval = armv7m->examine_debug_reason(target);
420 if (retval != ERROR_OK)
421 return retval;
422
423 /* Examine target state and mode
424 	 * First load registers accessible through the core debug port */
425 int num_regs = arm->core_cache->num_regs;
426
427 for (i = 0; i < num_regs; i++) {
428 r = &armv7m->arm.core_cache->reg_list[i];
429 if (!r->valid)
430 arm->read_core_reg(target, r, i, ARM_MODE_ANY);
431 }
432
433 r = arm->core_cache->reg_list + ARMV7M_xPSR;
434 xPSR = buf_get_u32(r->value, 0, 32);
435
436 #ifdef ARMV7_GDB_HACKS
437 /* FIXME this breaks on scan chains with more than one Cortex-M3.
438 * Instead, each CM3 should have its own dummy value...
439 */
440 /* copy real xpsr reg for gdb, setting thumb bit */
441 buf_set_u32(armv7m_gdb_dummy_cpsr_value, 0, 32, xPSR);
442 buf_set_u32(armv7m_gdb_dummy_cpsr_value, 5, 1, 1);
443 armv7m_gdb_dummy_cpsr_reg.valid = r->valid;
444 armv7m_gdb_dummy_cpsr_reg.dirty = r->dirty;
445 #endif
446
447 	/* For IT instructions xPSR must be reloaded on resume and cleared on debug exec */
448 if (xPSR & 0xf00) {
449 r->dirty = r->valid;
450 cortex_m3_store_core_reg_u32(target, 16, xPSR & ~0xff);
451 }
452
453 /* Are we in an exception handler */
454 if (xPSR & 0x1FF) {
455 armv7m->exception_number = (xPSR & 0x1FF);
456
457 arm->core_mode = ARM_MODE_HANDLER;
458 arm->map = armv7m_msp_reg_map;
459 } else {
460 unsigned control = buf_get_u32(arm->core_cache
461 ->reg_list[ARMV7M_CONTROL].value, 0, 2);
462
463 /* is this thread privileged? */
464 arm->core_mode = control & 1
465 ? ARM_MODE_USER_THREAD
466 : ARM_MODE_THREAD;
467
468 /* which stack is it using? */
469 if (control & 2)
470 arm->map = armv7m_psp_reg_map;
471 else
472 arm->map = armv7m_msp_reg_map;
473
474 armv7m->exception_number = 0;
475 }
476
477 if (armv7m->exception_number)
478 cortex_m3_examine_exception_reason(target);
479
480 LOG_DEBUG("entered debug state in core mode: %s at PC 0x%" PRIx32 ", target->state: %s",
481 arm_mode_name(arm->core_mode),
482 *(uint32_t *)(arm->pc->value),
483 target_state_name(target));
484
485 if (armv7m->post_debug_entry) {
486 retval = armv7m->post_debug_entry(target);
487 if (retval != ERROR_OK)
488 return retval;
489 }
490
491 return ERROR_OK;
492 }
493
494 static int cortex_m3_poll(struct target *target)
495 {
496 int detected_failure = ERROR_OK;
497 int retval = ERROR_OK;
498 enum target_state prev_target_state = target->state;
499 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
500 struct adiv5_dap *swjdp = cortex_m3->armv7m.arm.dap;
501
502 /* Read from Debug Halting Control and Status Register */
503 retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &cortex_m3->dcb_dhcsr);
504 if (retval != ERROR_OK) {
505 target->state = TARGET_UNKNOWN;
506 return retval;
507 }
508
509 /* Recover from lockup. See ARMv7-M architecture spec,
510 * section B1.5.15 "Unrecoverable exception cases".
511 */
512 if (cortex_m3->dcb_dhcsr & S_LOCKUP) {
513 LOG_ERROR("%s -- clearing lockup after double fault",
514 target_name(target));
515 cortex_m3_write_debug_halt_mask(target, C_HALT, 0);
516 target->debug_reason = DBG_REASON_DBGRQ;
517
518 		/* We have to execute the rest of this function (the "finally"
519 		 * equivalent), but still report this failure at the end.
520 */
521 detected_failure = ERROR_FAIL;
522
523 /* refresh status bits */
524 retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &cortex_m3->dcb_dhcsr);
525 if (retval != ERROR_OK)
526 return retval;
527 }
528
529 if (cortex_m3->dcb_dhcsr & S_RESET_ST) {
530 /* check if still in reset */
531 retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &cortex_m3->dcb_dhcsr);
532 if (retval != ERROR_OK)
533 return retval;
534
535 if (cortex_m3->dcb_dhcsr & S_RESET_ST) {
536 target->state = TARGET_RESET;
537 return ERROR_OK;
538 }
539 }
540
541 if (target->state == TARGET_RESET) {
542 /* Cannot switch context while running so endreset is
543 * called with target->state == TARGET_RESET
544 */
545 LOG_DEBUG("Exit from reset with dcb_dhcsr 0x%" PRIx32,
546 cortex_m3->dcb_dhcsr);
547 cortex_m3_endreset_event(target);
548 target->state = TARGET_RUNNING;
549 prev_target_state = TARGET_RUNNING;
550 }
551
552 if (cortex_m3->dcb_dhcsr & S_HALT) {
553 target->state = TARGET_HALTED;
554
555 if ((prev_target_state == TARGET_RUNNING) || (prev_target_state == TARGET_RESET)) {
556 retval = cortex_m3_debug_entry(target);
557 if (retval != ERROR_OK)
558 return retval;
559
560 if (arm_semihosting(target, &retval) != 0)
561 return retval;
562
563 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
564 }
565 if (prev_target_state == TARGET_DEBUG_RUNNING) {
566 LOG_DEBUG(" ");
567 retval = cortex_m3_debug_entry(target);
568 if (retval != ERROR_OK)
569 return retval;
570
571 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
572 }
573 }
574
575 /* REVISIT when S_SLEEP is set, it's in a Sleep or DeepSleep state.
576 * How best to model low power modes?
577 */
578
579 if (target->state == TARGET_UNKNOWN) {
580 /* check if processor is retiring instructions */
581 if (cortex_m3->dcb_dhcsr & S_RETIRE_ST) {
582 target->state = TARGET_RUNNING;
583 retval = ERROR_OK;
584 }
585 }
586
587 /* Did we detect a failure condition that we cleared? */
588 if (detected_failure != ERROR_OK)
589 retval = detected_failure;
590 return retval;
591 }
592
593 static int cortex_m3_halt(struct target *target)
594 {
595 LOG_DEBUG("target->state: %s",
596 target_state_name(target));
597
598 if (target->state == TARGET_HALTED) {
599 LOG_DEBUG("target was already halted");
600 return ERROR_OK;
601 }
602
603 if (target->state == TARGET_UNKNOWN)
604 LOG_WARNING("target was in unknown state when halt was requested");
605
606 if (target->state == TARGET_RESET) {
607 if ((jtag_get_reset_config() & RESET_SRST_PULLS_TRST) && jtag_get_srst()) {
608 LOG_ERROR("can't request a halt while in reset if nSRST pulls nTRST");
609 return ERROR_TARGET_FAILURE;
610 } else {
611 /* we came here in a reset_halt or reset_init sequence
612 * debug entry was already prepared in cortex_m3_assert_reset()
613 */
614 target->debug_reason = DBG_REASON_DBGRQ;
615
616 return ERROR_OK;
617 }
618 }
619
620 /* Write to Debug Halting Control and Status Register */
621 cortex_m3_write_debug_halt_mask(target, C_HALT, 0);
622
623 target->debug_reason = DBG_REASON_DBGRQ;
624
625 return ERROR_OK;
626 }
627
628 static int cortex_m3_soft_reset_halt(struct target *target)
629 {
630 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
631 struct adiv5_dap *swjdp = cortex_m3->armv7m.arm.dap;
632 uint32_t dcb_dhcsr = 0;
633 int retval, timeout = 0;
634
635 /* Enter debug state on reset; restore DEMCR in endreset_event() */
636 retval = mem_ap_write_u32(swjdp, DCB_DEMCR,
637 TRCENA | VC_HARDERR | VC_BUSERR | VC_CORERESET);
638 if (retval != ERROR_OK)
639 return retval;
640
641 /* Request a core-only reset */
642 retval = mem_ap_write_atomic_u32(swjdp, NVIC_AIRCR,
643 AIRCR_VECTKEY | AIRCR_VECTRESET);
644 if (retval != ERROR_OK)
645 return retval;
646 target->state = TARGET_RESET;
647
648 /* registers are now invalid */
649 register_cache_invalidate(cortex_m3->armv7m.arm.core_cache);
650
651 while (timeout < 100) {
652 retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &dcb_dhcsr);
653 if (retval == ERROR_OK) {
654 retval = mem_ap_read_atomic_u32(swjdp, NVIC_DFSR,
655 &cortex_m3->nvic_dfsr);
656 if (retval != ERROR_OK)
657 return retval;
658 if ((dcb_dhcsr & S_HALT)
659 && (cortex_m3->nvic_dfsr & DFSR_VCATCH)) {
660 LOG_DEBUG("system reset-halted, DHCSR 0x%08x, "
661 "DFSR 0x%08x",
662 (unsigned) dcb_dhcsr,
663 (unsigned) cortex_m3->nvic_dfsr);
664 cortex_m3_poll(target);
665 /* FIXME restore user's vector catch config */
666 return ERROR_OK;
667 } else
668 LOG_DEBUG("waiting for system reset-halt, "
669 "DHCSR 0x%08x, %d ms",
670 (unsigned) dcb_dhcsr, timeout);
671 }
672 timeout++;
673 alive_sleep(1);
674 }
675
676 return ERROR_OK;
677 }
678
679 void cortex_m3_enable_breakpoints(struct target *target)
680 {
681 struct breakpoint *breakpoint = target->breakpoints;
682
683 /* set any pending breakpoints */
684 while (breakpoint) {
685 if (!breakpoint->set)
686 cortex_m3_set_breakpoint(target, breakpoint);
687 breakpoint = breakpoint->next;
688 }
689 }
690
691 static int cortex_m3_resume(struct target *target, int current,
692 uint32_t address, int handle_breakpoints, int debug_execution)
693 {
694 struct armv7m_common *armv7m = target_to_armv7m(target);
695 struct breakpoint *breakpoint = NULL;
696 uint32_t resume_pc;
697 struct reg *r;
698
699 if (target->state != TARGET_HALTED) {
700 LOG_WARNING("target not halted");
701 return ERROR_TARGET_NOT_HALTED;
702 }
703
704 if (!debug_execution) {
705 target_free_all_working_areas(target);
706 cortex_m3_enable_breakpoints(target);
707 cortex_m3_enable_watchpoints(target);
708 }
709
710 if (debug_execution) {
711 r = armv7m->arm.core_cache->reg_list + ARMV7M_PRIMASK;
712
713 /* Disable interrupts */
714 /* We disable interrupts in the PRIMASK register instead of
715 * masking with C_MASKINTS. This is probably the same issue
716 * as Cortex-M3 Erratum 377493 (fixed in r1p0): C_MASKINTS
717 * in parallel with disabled interrupts can cause local faults
718 * to not be taken.
719 *
720 * REVISIT this clearly breaks non-debug execution, since the
721 * PRIMASK register state isn't saved/restored... workaround
722 * by never resuming app code after debug execution.
723 */
724 buf_set_u32(r->value, 0, 1, 1);
725 r->dirty = true;
726 r->valid = true;
727
728 /* Make sure we are in Thumb mode */
729 r = armv7m->arm.core_cache->reg_list + ARMV7M_xPSR;
730 buf_set_u32(r->value, 24, 1, 1);
731 r->dirty = true;
732 r->valid = true;
733 }
734
735 /* current = 1: continue on current pc, otherwise continue at <address> */
736 r = armv7m->arm.pc;
737 if (!current) {
738 buf_set_u32(r->value, 0, 32, address);
739 r->dirty = true;
740 r->valid = true;
741 }
742
743 /* if we halted last time due to a bkpt instruction
744 * then we have to manually step over it, otherwise
745 * the core will break again */
746
747 if (!breakpoint_find(target, buf_get_u32(r->value, 0, 32))
748 && !debug_execution)
749 armv7m_maybe_skip_bkpt_inst(target, NULL);
750
751 resume_pc = buf_get_u32(r->value, 0, 32);
752
753 armv7m_restore_context(target);
754
755 /* the front-end may request us not to handle breakpoints */
756 if (handle_breakpoints) {
757 /* Single step past breakpoint at current address */
758 breakpoint = breakpoint_find(target, resume_pc);
759 if (breakpoint) {
760 LOG_DEBUG("unset breakpoint at 0x%8.8" PRIx32 " (ID: %d)",
761 breakpoint->address,
762 breakpoint->unique_id);
763 cortex_m3_unset_breakpoint(target, breakpoint);
764 cortex_m3_single_step_core(target);
765 cortex_m3_set_breakpoint(target, breakpoint);
766 }
767 }
768
769 /* Restart core */
770 cortex_m3_write_debug_halt_mask(target, 0, C_HALT);
771
772 target->debug_reason = DBG_REASON_NOTHALTED;
773
774 /* registers are now invalid */
775 register_cache_invalidate(armv7m->arm.core_cache);
776
777 if (!debug_execution) {
778 target->state = TARGET_RUNNING;
779 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
780 LOG_DEBUG("target resumed at 0x%" PRIx32 "", resume_pc);
781 } else {
782 target->state = TARGET_DEBUG_RUNNING;
783 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
784 LOG_DEBUG("target debug resumed at 0x%" PRIx32 "", resume_pc);
785 }
786
787 return ERROR_OK;
788 }
789
790 /* int irqstepcount = 0; */
791 static int cortex_m3_step(struct target *target, int current,
792 uint32_t address, int handle_breakpoints)
793 {
794 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
795 struct armv7m_common *armv7m = &cortex_m3->armv7m;
796 struct adiv5_dap *swjdp = armv7m->arm.dap;
797 struct breakpoint *breakpoint = NULL;
798 struct reg *pc = armv7m->arm.pc;
799 bool bkpt_inst_found = false;
800 int retval;
801 bool isr_timed_out = false;
802
803 if (target->state != TARGET_HALTED) {
804 LOG_WARNING("target not halted");
805 return ERROR_TARGET_NOT_HALTED;
806 }
807
808 /* current = 1: continue on current pc, otherwise continue at <address> */
809 if (!current)
810 buf_set_u32(pc->value, 0, 32, address);
811
812 uint32_t pc_value = buf_get_u32(pc->value, 0, 32);
813
814 /* the front-end may request us not to handle breakpoints */
815 if (handle_breakpoints) {
816 breakpoint = breakpoint_find(target, pc_value);
817 if (breakpoint)
818 cortex_m3_unset_breakpoint(target, breakpoint);
819 }
820
821 armv7m_maybe_skip_bkpt_inst(target, &bkpt_inst_found);
822
823 target->debug_reason = DBG_REASON_SINGLESTEP;
824
825 armv7m_restore_context(target);
826
827 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
828
829 /* if no bkpt instruction is found at pc then we can perform
830 * a normal step, otherwise we have to manually step over the bkpt
831 * instruction - as such simulate a step */
832 if (bkpt_inst_found == false) {
833 /* Automatic ISR masking mode off: Just step over the next instruction */
834 if ((cortex_m3->isrmasking_mode != CORTEX_M3_ISRMASK_AUTO))
835 cortex_m3_write_debug_halt_mask(target, C_STEP, C_HALT);
836 else {
837 			/* Process interrupts during stepping in a way that they don't
838 			 * interfere with debugging.
839 *
840 * Principle:
841 *
842 * Set a temporary break point at the current pc and let the core run
843 * with interrupts enabled. Pending interrupts get served and we run
844 * into the breakpoint again afterwards. Then we step over the next
845 * instruction with interrupts disabled.
846 *
847 			 * If the pending interrupts don't complete in time, we leave the
848 * core running. This may happen if the interrupts trigger faster
849 * than the core can process them or the handler doesn't return.
850 *
851 * If no more breakpoints are available we simply do a step with
852 * interrupts enabled.
853 *
854 */
855
856 /* 2012-09-29 ph
857 *
858 * If a break point is already set on the lower half word then a break point on
859 * the upper half word will not break again when the core is restarted. So we
860 * just step over the instruction with interrupts disabled.
861 *
862 * The documentation has no information about this, it was found by observation
863 			 * on STM32F1 and STM32F2. Proper explanation welcome. STM32F0 doesn't seem to
864 * suffer from this problem.
865 *
866 * To add some confusion: pc_value has bit 0 always set, while the breakpoint
867 * address has it always cleared. The former is done to indicate thumb mode
868 * to gdb.
869 *
870 */
871 if ((pc_value & 0x02) && breakpoint_find(target, pc_value & ~0x03)) {
872 LOG_DEBUG("Stepping over next instruction with interrupts disabled");
873 cortex_m3_write_debug_halt_mask(target, C_HALT | C_MASKINTS, 0);
874 cortex_m3_write_debug_halt_mask(target, C_STEP, C_HALT);
875 /* Re-enable interrupts */
876 cortex_m3_write_debug_halt_mask(target, C_HALT, C_MASKINTS);
877 }
878 else {
879
880 /* Set a temporary break point */
881 if (breakpoint)
882 retval = cortex_m3_set_breakpoint(target, breakpoint);
883 else
884 retval = breakpoint_add(target, pc_value, 2, BKPT_TYPE_BY_ADDR(pc_value));
885 bool tmp_bp_set = (retval == ERROR_OK);
886
887 /* No more breakpoints left, just do a step */
888 if (!tmp_bp_set)
889 cortex_m3_write_debug_halt_mask(target, C_STEP, C_HALT);
890 else {
891 /* Start the core */
892 LOG_DEBUG("Starting core to serve pending interrupts");
893 int64_t t_start = timeval_ms();
894 cortex_m3_write_debug_halt_mask(target, 0, C_HALT | C_STEP);
895
896 /* Wait for pending handlers to complete or timeout */
897 do {
898 retval = mem_ap_read_atomic_u32(swjdp,
899 DCB_DHCSR,
900 &cortex_m3->dcb_dhcsr);
901 if (retval != ERROR_OK) {
902 target->state = TARGET_UNKNOWN;
903 return retval;
904 }
905 isr_timed_out = ((timeval_ms() - t_start) > 500);
906 } while (!((cortex_m3->dcb_dhcsr & S_HALT) || isr_timed_out));
907
908 /* only remove breakpoint if we created it */
909 if (breakpoint)
910 cortex_m3_unset_breakpoint(target, breakpoint);
911 else {
912 /* Remove the temporary breakpoint */
913 breakpoint_remove(target, pc_value);
914 }
915
916 if (isr_timed_out) {
917 LOG_DEBUG("Interrupt handlers didn't complete within time, "
918 "leaving target running");
919 } else {
920 /* Step over next instruction with interrupts disabled */
921 cortex_m3_write_debug_halt_mask(target,
922 C_HALT | C_MASKINTS,
923 0);
924 cortex_m3_write_debug_halt_mask(target, C_STEP, C_HALT);
925 /* Re-enable interrupts */
926 cortex_m3_write_debug_halt_mask(target, C_HALT, C_MASKINTS);
927 }
928 }
929 }
930 }
931 }
932
933 retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &cortex_m3->dcb_dhcsr);
934 if (retval != ERROR_OK)
935 return retval;
936
937 /* registers are now invalid */
938 register_cache_invalidate(armv7m->arm.core_cache);
939
940 if (breakpoint)
941 cortex_m3_set_breakpoint(target, breakpoint);
942
943 if (isr_timed_out) {
944 /* Leave the core running. The user has to stop execution manually. */
945 target->debug_reason = DBG_REASON_NOTHALTED;
946 target->state = TARGET_RUNNING;
947 return ERROR_OK;
948 }
949
950 LOG_DEBUG("target stepped dcb_dhcsr = 0x%" PRIx32
951 " nvic_icsr = 0x%" PRIx32,
952 cortex_m3->dcb_dhcsr, cortex_m3->nvic_icsr);
953
954 retval = cortex_m3_debug_entry(target);
955 if (retval != ERROR_OK)
956 return retval;
957 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
958
959 LOG_DEBUG("target stepped dcb_dhcsr = 0x%" PRIx32
960 " nvic_icsr = 0x%" PRIx32,
961 cortex_m3->dcb_dhcsr, cortex_m3->nvic_icsr);
962
963 return ERROR_OK;
964 }
965
966 static int cortex_m3_assert_reset(struct target *target)
967 {
968 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
969 struct adiv5_dap *swjdp = cortex_m3->armv7m.arm.dap;
970 enum cortex_m3_soft_reset_config reset_config = cortex_m3->soft_reset_config;
971
972 LOG_DEBUG("target->state: %s",
973 target_state_name(target));
974
975 enum reset_types jtag_reset_config = jtag_get_reset_config();
976
977 if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT)) {
978 /* allow scripts to override the reset event */
979
980 target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
981 register_cache_invalidate(cortex_m3->armv7m.arm.core_cache);
982 target->state = TARGET_RESET;
983
984 return ERROR_OK;
985 }
986
987 /* some cores support connecting while srst is asserted
988 	 * use that mode if it has been configured */
989
990 bool srst_asserted = false;
991
992 if (jtag_reset_config & RESET_SRST_NO_GATING) {
993 adapter_assert_reset();
994 srst_asserted = true;
995 }
996
997 /* Enable debug requests */
998 int retval;
999 retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &cortex_m3->dcb_dhcsr);
1000 if (retval != ERROR_OK)
1001 return retval;
1002 if (!(cortex_m3->dcb_dhcsr & C_DEBUGEN)) {
1003 retval = mem_ap_write_u32(swjdp, DCB_DHCSR, DBGKEY | C_DEBUGEN);
1004 if (retval != ERROR_OK)
1005 return retval;
1006 }
1007
1008 /* If the processor is sleeping in a WFI or WFE instruction, the
1009 * C_HALT bit must be asserted to regain control */
1010 if (cortex_m3->dcb_dhcsr & S_SLEEP) {
1011 retval = mem_ap_write_u32(swjdp, DCB_DHCSR, DBGKEY | C_HALT | C_DEBUGEN);
1012 if (retval != ERROR_OK)
1013 return retval;
1014 }
1015
1016 retval = mem_ap_write_u32(swjdp, DCB_DCRDR, 0);
1017 if (retval != ERROR_OK)
1018 return retval;
1019
1020 if (!target->reset_halt) {
1021 /* Set/Clear C_MASKINTS in a separate operation */
1022 if (cortex_m3->dcb_dhcsr & C_MASKINTS) {
1023 retval = mem_ap_write_atomic_u32(swjdp, DCB_DHCSR,
1024 DBGKEY | C_DEBUGEN | C_HALT);
1025 if (retval != ERROR_OK)
1026 return retval;
1027 }
1028
1029 /* clear any debug flags before resuming */
1030 cortex_m3_clear_halt(target);
1031
1032 /* clear C_HALT in dhcsr reg */
1033 cortex_m3_write_debug_halt_mask(target, 0, C_HALT);
1034 } else {
1035 /* Halt in debug on reset; endreset_event() restores DEMCR.
1036 *
1037 * REVISIT catching BUSERR presumably helps to defend against
1038 * bad vector table entries. Should this include MMERR or
1039 * other flags too?
1040 */
1041 retval = mem_ap_write_atomic_u32(swjdp, DCB_DEMCR,
1042 TRCENA | VC_HARDERR | VC_BUSERR | VC_CORERESET);
1043 if (retval != ERROR_OK)
1044 return retval;
1045 }
1046
1047 if (jtag_reset_config & RESET_HAS_SRST) {
1048 /* default to asserting srst */
1049 if (!srst_asserted)
1050 adapter_assert_reset();
1051 } else {
1052 /* Use a standard Cortex-M3 software reset mechanism.
1053 		 * We default to using VECTRESET as it is supported on all current cores.
1054 * This has the disadvantage of not resetting the peripherals, so a
1055 * reset-init event handler is needed to perform any peripheral resets.
1056 */
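		/* Like DHCSR, the AIRCR register is key-protected: the write below
		 * takes effect only because AIRCR_VECTKEY (0x05FA) is placed in
		 * bits [31:16] of the same access.
		 */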
1057 retval = mem_ap_write_atomic_u32(swjdp, NVIC_AIRCR,
1058 AIRCR_VECTKEY | ((reset_config == CORTEX_M3_RESET_SYSRESETREQ)
1059 ? AIRCR_SYSRESETREQ : AIRCR_VECTRESET));
1060 if (retval != ERROR_OK)
1061 return retval;
1062
1063 LOG_DEBUG("Using Cortex-M3 %s", (reset_config == CORTEX_M3_RESET_SYSRESETREQ)
1064 ? "SYSRESETREQ" : "VECTRESET");
1065
1066 if (reset_config == CORTEX_M3_RESET_VECTRESET) {
1067 LOG_WARNING("Only resetting the Cortex-M3 core, use a reset-init event "
1068 "handler to reset any peripherals or configure hardware srst support.");
1069 }
1070
1071 {
1072 /* I do not know why this is necessary, but it
1073 * fixes strange effects (step/resume cause NMI
1074 * after reset) on LM3S6918 -- Michael Schwingen
1075 */
1076 uint32_t tmp;
1077 retval = mem_ap_read_atomic_u32(swjdp, NVIC_AIRCR, &tmp);
1078 if (retval != ERROR_OK)
1079 return retval;
1080 }
1081 }
1082
1083 target->state = TARGET_RESET;
1084 jtag_add_sleep(50000);
1085
1086 register_cache_invalidate(cortex_m3->armv7m.arm.core_cache);
1087
1088 if (target->reset_halt) {
1089 retval = target_halt(target);
1090 if (retval != ERROR_OK)
1091 return retval;
1092 }
1093
1094 return ERROR_OK;
1095 }
1096
1097 static int cortex_m3_deassert_reset(struct target *target)
1098 {
1099 LOG_DEBUG("target->state: %s",
1100 target_state_name(target));
1101
1102 /* deassert reset lines */
1103 adapter_deassert_reset();
1104
1105 return ERROR_OK;
1106 }
1107
1108 int cortex_m3_set_breakpoint(struct target *target, struct breakpoint *breakpoint)
1109 {
1110 int retval;
1111 int fp_num = 0;
1112 uint32_t hilo;
1113 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
1114 struct cortex_m3_fp_comparator *comparator_list = cortex_m3->fp_comparator_list;
1115
1116 if (breakpoint->set) {
1117 LOG_WARNING("breakpoint (BPID: %d) already set", breakpoint->unique_id);
1118 return ERROR_OK;
1119 }
1120
1121 if (cortex_m3->auto_bp_type)
1122 breakpoint->type = BKPT_TYPE_BY_ADDR(breakpoint->address);
1123
1124 if (breakpoint->type == BKPT_HARD) {
1125 while (comparator_list[fp_num].used && (fp_num < cortex_m3->fp_num_code))
1126 fp_num++;
1127 if (fp_num >= cortex_m3->fp_num_code) {
1128 LOG_ERROR("Can not find free FPB Comparator!");
1129 return ERROR_FAIL;
1130 }
1131 breakpoint->set = fp_num + 1;
1132 hilo = (breakpoint->address & 0x2) ? FPCR_REPLACE_BKPT_HIGH : FPCR_REPLACE_BKPT_LOW;
1133 comparator_list[fp_num].used = 1;
1134 comparator_list[fp_num].fpcr_value = (breakpoint->address & 0x1FFFFFFC) | hilo | 1;
1135 target_write_u32(target, comparator_list[fp_num].fpcr_address,
1136 comparator_list[fp_num].fpcr_value);
1137 LOG_DEBUG("fpc_num %i fpcr_value 0x%" PRIx32 "",
1138 fp_num,
1139 comparator_list[fp_num].fpcr_value);
1140 if (!cortex_m3->fpb_enabled) {
1141 LOG_DEBUG("FPB wasn't enabled, do it now");
1142 target_write_u32(target, FP_CTRL, 3);
1143 }
1144 } else if (breakpoint->type == BKPT_SOFT) {
1145 uint8_t code[4];
1146
1147 /* NOTE: on ARMv6-M and ARMv7-M, BKPT(0xab) is used for
1148 * semihosting; don't use that. Otherwise the BKPT
1149 * parameter is arbitrary.
1150 */
1151 buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1152 retval = target_read_memory(target,
1153 breakpoint->address & 0xFFFFFFFE,
1154 breakpoint->length, 1,
1155 breakpoint->orig_instr);
1156 if (retval != ERROR_OK)
1157 return retval;
1158 retval = target_write_memory(target,
1159 breakpoint->address & 0xFFFFFFFE,
1160 breakpoint->length, 1,
1161 code);
1162 if (retval != ERROR_OK)
1163 return retval;
1164 breakpoint->set = true;
1165 }
1166
1167 LOG_DEBUG("BPID: %d, Type: %d, Address: 0x%08" PRIx32 " Length: %d (set=%d)",
1168 breakpoint->unique_id,
1169 (int)(breakpoint->type),
1170 breakpoint->address,
1171 breakpoint->length,
1172 breakpoint->set);
1173
1174 return ERROR_OK;
1175 }
1176
1177 int cortex_m3_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
1178 {
1179 int retval;
1180 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
1181 struct cortex_m3_fp_comparator *comparator_list = cortex_m3->fp_comparator_list;
1182
1183 if (!breakpoint->set) {
1184 LOG_WARNING("breakpoint not set");
1185 return ERROR_OK;
1186 }
1187
1188 LOG_DEBUG("BPID: %d, Type: %d, Address: 0x%08" PRIx32 " Length: %d (set=%d)",
1189 breakpoint->unique_id,
1190 (int)(breakpoint->type),
1191 breakpoint->address,
1192 breakpoint->length,
1193 breakpoint->set);
1194
1195 if (breakpoint->type == BKPT_HARD) {
1196 int fp_num = breakpoint->set - 1;
1197 if ((fp_num < 0) || (fp_num >= cortex_m3->fp_num_code)) {
1198 LOG_DEBUG("Invalid FP Comparator number in breakpoint");
1199 return ERROR_OK;
1200 }
1201 comparator_list[fp_num].used = 0;
1202 comparator_list[fp_num].fpcr_value = 0;
1203 target_write_u32(target, comparator_list[fp_num].fpcr_address,
1204 comparator_list[fp_num].fpcr_value);
1205 } else {
1206 /* restore original instruction (kept in target endianness) */
1207 if (breakpoint->length == 4) {
1208 retval = target_write_memory(target, breakpoint->address & 0xFFFFFFFE, 4, 1,
1209 breakpoint->orig_instr);
1210 if (retval != ERROR_OK)
1211 return retval;
1212 } else {
1213 retval = target_write_memory(target, breakpoint->address & 0xFFFFFFFE, 2, 1,
1214 breakpoint->orig_instr);
1215 if (retval != ERROR_OK)
1216 return retval;
1217 }
1218 }
1219 breakpoint->set = false;
1220
1221 return ERROR_OK;
1222 }
1223
1224 int cortex_m3_add_breakpoint(struct target *target, struct breakpoint *breakpoint)
1225 {
1226 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
1227
1228 if (cortex_m3->auto_bp_type) {
1229 breakpoint->type = BKPT_TYPE_BY_ADDR(breakpoint->address);
1230 #ifdef ARMV7_GDB_HACKS
1231 if (breakpoint->length != 2) {
1232 /* XXX Hack: Replace all breakpoints with length != 2 with
1233 * a hardware breakpoint. */
1234 breakpoint->type = BKPT_HARD;
1235 breakpoint->length = 2;
1236 }
1237 #endif
1238 }
1239
1240 if (breakpoint->type != BKPT_TYPE_BY_ADDR(breakpoint->address)) {
1241 if (breakpoint->type == BKPT_HARD) {
1242 LOG_INFO("flash patch comparator requested outside code memory region");
1243 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1244 }
1245
1246 if (breakpoint->type == BKPT_SOFT) {
1247 LOG_INFO("soft breakpoint requested in code (flash) memory region");
1248 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1249 }
1250 }
1251
1252 if ((breakpoint->type == BKPT_HARD) && (cortex_m3->fp_code_available < 1)) {
1253 LOG_INFO("no flash patch comparator unit available for hardware breakpoint");
1254 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1255 }
1256
1257 if ((breakpoint->length != 2)) {
1258 LOG_INFO("only breakpoints of two bytes length supported");
1259 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1260 }
1261
1262 if (breakpoint->type == BKPT_HARD)
1263 cortex_m3->fp_code_available--;
1264
1265 return cortex_m3_set_breakpoint(target, breakpoint);
1266 }
1267
1268 int cortex_m3_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1269 {
1270 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
1271
1272 	/* REVISIT why check? FPB can be updated with core running ... */
1273 if (target->state != TARGET_HALTED) {
1274 LOG_WARNING("target not halted");
1275 return ERROR_TARGET_NOT_HALTED;
1276 }
1277
1278 if (cortex_m3->auto_bp_type)
1279 breakpoint->type = BKPT_TYPE_BY_ADDR(breakpoint->address);
1280
1281 if (breakpoint->set)
1282 cortex_m3_unset_breakpoint(target, breakpoint);
1283
1284 if (breakpoint->type == BKPT_HARD)
1285 cortex_m3->fp_code_available++;
1286
1287 return ERROR_OK;
1288 }
1289
1290 int cortex_m3_set_watchpoint(struct target *target, struct watchpoint *watchpoint)
1291 {
1292 int dwt_num = 0;
1293 uint32_t mask, temp;
1294 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
1295
1296 /* watchpoint params were validated earlier */
1297 mask = 0;
1298 temp = watchpoint->length;
1299 while (temp) {
1300 temp >>= 1;
1301 mask++;
1302 }
1303 mask--;
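	/* mask now holds log2(length): the DWT MASK register tells the
	 * comparator to ignore that many low-order address bits, so e.g. a
	 * 4-byte watchpoint yields mask = 2 and matches the whole aligned word.
	 */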
1304
1305 /* REVISIT Don't fully trust these "not used" records ... users
1306 * may set up breakpoints by hand, e.g. dual-address data value
1307 * watchpoint using comparator #1; comparator #0 matching cycle
1308 * count; send data trace info through ITM and TPIU; etc
1309 */
1310 struct cortex_m3_dwt_comparator *comparator;
1311
1312 for (comparator = cortex_m3->dwt_comparator_list;
1313 comparator->used && dwt_num < cortex_m3->dwt_num_comp;
1314 comparator++, dwt_num++)
1315 continue;
1316 if (dwt_num >= cortex_m3->dwt_num_comp) {
1317 LOG_ERROR("Can not find free DWT Comparator");
1318 return ERROR_FAIL;
1319 }
1320 comparator->used = 1;
1321 watchpoint->set = dwt_num + 1;
1322
1323 comparator->comp = watchpoint->address;
1324 target_write_u32(target, comparator->dwt_comparator_address + 0,
1325 comparator->comp);
1326
1327 comparator->mask = mask;
1328 target_write_u32(target, comparator->dwt_comparator_address + 4,
1329 comparator->mask);
1330
1331 switch (watchpoint->rw) {
1332 case WPT_READ:
1333 comparator->function = 5;
1334 break;
1335 case WPT_WRITE:
1336 comparator->function = 6;
1337 break;
1338 case WPT_ACCESS:
1339 comparator->function = 7;
1340 break;
1341 }
1342 target_write_u32(target, comparator->dwt_comparator_address + 8,
1343 comparator->function);
1344
1345 LOG_DEBUG("Watchpoint (ID %d) DWT%d 0x%08x 0x%x 0x%05x",
1346 watchpoint->unique_id, dwt_num,
1347 (unsigned) comparator->comp,
1348 (unsigned) comparator->mask,
1349 (unsigned) comparator->function);
1350 return ERROR_OK;
1351 }
1352
1353 int cortex_m3_unset_watchpoint(struct target *target, struct watchpoint *watchpoint)
1354 {
1355 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
1356 struct cortex_m3_dwt_comparator *comparator;
1357 int dwt_num;
1358
1359 if (!watchpoint->set) {
1360 LOG_WARNING("watchpoint (wpid: %d) not set",
1361 watchpoint->unique_id);
1362 return ERROR_OK;
1363 }
1364
1365 dwt_num = watchpoint->set - 1;
1366
1367 LOG_DEBUG("Watchpoint (ID %d) DWT%d address: 0x%08x clear",
1368 watchpoint->unique_id, dwt_num,
1369 (unsigned) watchpoint->address);
1370
1371 if ((dwt_num < 0) || (dwt_num >= cortex_m3->dwt_num_comp)) {
1372 LOG_DEBUG("Invalid DWT Comparator number in watchpoint");
1373 return ERROR_OK;
1374 }
1375
1376 comparator = cortex_m3->dwt_comparator_list + dwt_num;
1377 comparator->used = 0;
1378 comparator->function = 0;
1379 target_write_u32(target, comparator->dwt_comparator_address + 8,
1380 comparator->function);
1381
1382 watchpoint->set = false;
1383
1384 return ERROR_OK;
1385 }
1386
1387 int cortex_m3_add_watchpoint(struct target *target, struct watchpoint *watchpoint)
1388 {
1389 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
1390
1391 if (cortex_m3->dwt_comp_available < 1) {
1392 LOG_DEBUG("no comparators?");
1393 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1394 }
1395
1396 /* hardware doesn't support data value masking */
1397 if (watchpoint->mask != ~(uint32_t)0) {
1398 LOG_DEBUG("watchpoint value masks not supported");
1399 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1400 }
1401
1402 /* hardware allows address masks of up to 32K */
1403 unsigned mask;
1404
1405 for (mask = 0; mask < 16; mask++) {
1406 if ((1u << mask) == watchpoint->length)
1407 break;
1408 }
1409 if (mask == 16) {
1410 LOG_DEBUG("unsupported watchpoint length");
1411 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1412 }
1413 if (watchpoint->address & ((1 << mask) - 1)) {
1414 LOG_DEBUG("watchpoint address is unaligned");
1415 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1416 }
1417
1418 /* Caller doesn't seem to be able to describe watching for data
1419 * values of zero; that flags "no value".
1420 *
1421 * REVISIT This DWT may well be able to watch for specific data
1422 * values. Requires comparator #1 to set DATAVMATCH and match
1423 * the data, and another comparator (DATAVADDR0) matching addr.
1424 */
1425 if (watchpoint->value) {
1426 LOG_DEBUG("data value watchpoint not YET supported");
1427 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1428 }
1429
1430 cortex_m3->dwt_comp_available--;
1431 LOG_DEBUG("dwt_comp_available: %d", cortex_m3->dwt_comp_available);
1432
1433 return ERROR_OK;
1434 }
1435
1436 int cortex_m3_remove_watchpoint(struct target *target, struct watchpoint *watchpoint)
1437 {
1438 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
1439
1440 /* REVISIT why check? DWT can be updated with core running ... */
1441 if (target->state != TARGET_HALTED) {
1442 LOG_WARNING("target not halted");
1443 return ERROR_TARGET_NOT_HALTED;
1444 }
1445
1446 if (watchpoint->set)
1447 cortex_m3_unset_watchpoint(target, watchpoint);
1448
1449 cortex_m3->dwt_comp_available++;
1450 LOG_DEBUG("dwt_comp_available: %d", cortex_m3->dwt_comp_available);
1451
1452 return ERROR_OK;
1453 }
1454
1455 void cortex_m3_enable_watchpoints(struct target *target)
1456 {
1457 struct watchpoint *watchpoint = target->watchpoints;
1458
1459 /* set any pending watchpoints */
1460 while (watchpoint) {
1461 if (!watchpoint->set)
1462 cortex_m3_set_watchpoint(target, watchpoint);
1463 watchpoint = watchpoint->next;
1464 }
1465 }
1466
1467 static int cortex_m3_load_core_reg_u32(struct target *target,
1468 uint32_t num, uint32_t *value)
1469 {
1470 int retval;
1471 struct armv7m_common *armv7m = target_to_armv7m(target);
1472 struct adiv5_dap *swjdp = armv7m->arm.dap;
1473
1474 /* NOTE: we "know" here that the register identifiers used
1475 * in the v7m header match the Cortex-M3 Debug Core Register
1476 * Selector values for R0..R15, xPSR, MSP, and PSP.
1477 */
1478 switch (num) {
1479 case 0 ... 18:
1480 /* read a normal core register */
1481 retval = cortexm3_dap_read_coreregister_u32(swjdp, value, num);
1482
1483 if (retval != ERROR_OK) {
1484 LOG_ERROR("JTAG failure %i", retval);
1485 return ERROR_JTAG_DEVICE_ERROR;
1486 }
1487 LOG_DEBUG("load from core reg %i value 0x%" PRIx32 "", (int)num, *value);
1488 break;
1489
1490 case ARMV7M_PRIMASK:
1491 case ARMV7M_BASEPRI:
1492 case ARMV7M_FAULTMASK:
1493 case ARMV7M_CONTROL:
1494 /* Cortex-M3 packages these four registers as bitfields
1495 * in one Debug Core register. So say r0 and r2 docs;
1496 * it was removed from r1 docs, but still works.
1497 */
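			/* Layout of that packed word (DCRSR selector 20): PRIMASK in
			 * bit 0, BASEPRI in bits [15:8], FAULTMASK in bit 16 and
			 * CONTROL in bits [25:24], matching the buf_get_u32() offsets
			 * used below.
			 */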
1498 cortexm3_dap_read_coreregister_u32(swjdp, value, 20);
1499
1500 switch (num) {
1501 case ARMV7M_PRIMASK:
1502 *value = buf_get_u32((uint8_t *)value, 0, 1);
1503 break;
1504
1505 case ARMV7M_BASEPRI:
1506 *value = buf_get_u32((uint8_t *)value, 8, 8);
1507 break;
1508
1509 case ARMV7M_FAULTMASK:
1510 *value = buf_get_u32((uint8_t *)value, 16, 1);
1511 break;
1512
1513 case ARMV7M_CONTROL:
1514 *value = buf_get_u32((uint8_t *)value, 24, 2);
1515 break;
1516 }
1517
1518 LOG_DEBUG("load from special reg %i value 0x%" PRIx32 "", (int)num, *value);
1519 break;
1520
1521 default:
1522 return ERROR_COMMAND_SYNTAX_ERROR;
1523 }
1524
1525 return ERROR_OK;
1526 }
1527
1528 static int cortex_m3_store_core_reg_u32(struct target *target,
1529 uint32_t num, uint32_t value)
1530 {
1531 int retval;
1532 uint32_t reg;
1533 struct armv7m_common *armv7m = target_to_armv7m(target);
1534 struct adiv5_dap *swjdp = armv7m->arm.dap;
1535
1536 #ifdef ARMV7_GDB_HACKS
1537 /* If the LR register is being modified, make sure it will put us
1538 * in "thumb" mode, or an INVSTATE exception will occur. This is a
1539 * hack to deal with the fact that gdb will sometimes "forge"
1540 * return addresses, and doesn't set the LSB correctly (i.e., when
1541 * printing expressions containing function calls, it sets LR = 0.)
1542 * Valid exception return codes have bit 0 set too.
1543 */
1544 if (num == ARMV7M_R14)
1545 value |= 0x01;
1546 #endif
1547
1548 /* NOTE: we "know" here that the register identifiers used
1549 * in the v7m header match the Cortex-M3 Debug Core Register
1550 * Selector values for R0..R15, xPSR, MSP, and PSP.
1551 */
1552 switch (num) {
1553 case 0 ... 18:
1554 retval = cortexm3_dap_write_coreregister_u32(swjdp, value, num);
1555 if (retval != ERROR_OK) {
1556 struct reg *r;
1557
1558 LOG_ERROR("JTAG failure");
1559 r = armv7m->arm.core_cache->reg_list + num;
1560 r->dirty = r->valid;
1561 return ERROR_JTAG_DEVICE_ERROR;
1562 }
1563 LOG_DEBUG("write core reg %i value 0x%" PRIx32 "", (int)num, value);
1564 break;
1565
1566 case ARMV7M_PRIMASK:
1567 case ARMV7M_BASEPRI:
1568 case ARMV7M_FAULTMASK:
1569 case ARMV7M_CONTROL:
1570 /* Cortex-M3 packages these four registers as bitfields
1571 * in one Debug Core register. So say r0 and r2 docs;
1572 * it was removed from r1 docs, but still works.
1573 */
1574 cortexm3_dap_read_coreregister_u32(swjdp, &reg, 20);
1575
1576 switch (num) {
1577 case ARMV7M_PRIMASK:
1578 buf_set_u32((uint8_t *)&reg, 0, 1, value);
1579 break;
1580
1581 case ARMV7M_BASEPRI:
1582 buf_set_u32((uint8_t *)&reg, 8, 8, value);
1583 break;
1584
1585 case ARMV7M_FAULTMASK:
1586 buf_set_u32((uint8_t *)&reg, 16, 1, value);
1587 break;
1588
1589 case ARMV7M_CONTROL:
1590 buf_set_u32((uint8_t *)&reg, 24, 2, value);
1591 break;
1592 }
1593
1594 cortexm3_dap_write_coreregister_u32(swjdp, reg, 20);
1595
1596 LOG_DEBUG("write special reg %i value 0x%" PRIx32 " ", (int)num, value);
1597 break;
1598
1599 default:
1600 return ERROR_COMMAND_SYNTAX_ERROR;
1601 }
1602
1603 return ERROR_OK;
1604 }
1605
1606 static int cortex_m3_read_memory(struct target *target, uint32_t address,
1607 uint32_t size, uint32_t count, uint8_t *buffer)
1608 {
1609 struct armv7m_common *armv7m = target_to_armv7m(target);
1610 struct adiv5_dap *swjdp = armv7m->arm.dap;
1611 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1612
1613 if (armv7m->arm.is_armv6m) {
1614 /* armv6m does not handle unaligned memory access */
1615 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1616 return ERROR_TARGET_UNALIGNED_ACCESS;
1617 }
1618
1619 /* cortex_m3 handles unaligned memory access */
1620 if (count && buffer) {
1621 switch (size) {
1622 case 4:
1623 retval = mem_ap_read_buf_u32(swjdp, buffer, 4 * count, address, true);
1624 break;
1625 case 2:
1626 retval = mem_ap_read_buf_u16(swjdp, buffer, 2 * count, address);
1627 break;
1628 case 1:
1629 retval = mem_ap_read_buf_u8(swjdp, buffer, count, address);
1630 break;
1631 }
1632 }
1633
1634 return retval;
1635 }
1636
1637 static int cortex_m3_write_memory(struct target *target, uint32_t address,
1638 uint32_t size, uint32_t count, const uint8_t *buffer)
1639 {
1640 struct armv7m_common *armv7m = target_to_armv7m(target);
1641 struct adiv5_dap *swjdp = armv7m->arm.dap;
1642 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1643
1644 if (armv7m->arm.is_armv6m) {
1645 /* armv6m does not handle unaligned memory access */
1646 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1647 return ERROR_TARGET_UNALIGNED_ACCESS;
1648 }
1649
1650 if (count && buffer) {
1651 switch (size) {
1652 case 4:
1653 retval = mem_ap_write_buf_u32(swjdp, buffer, 4 * count, address, true);
1654 break;
1655 case 2:
1656 retval = mem_ap_write_buf_u16(swjdp, buffer, 2 * count, address);
1657 break;
1658 case 1:
1659 retval = mem_ap_write_buf_u8(swjdp, buffer, count, address);
1660 break;
1661 }
1662 }
1663
1664 return retval;
1665 }
1666
1667 static int cortex_m3_init_target(struct command_context *cmd_ctx,
1668 struct target *target)
1669 {
1670 armv7m_build_reg_cache(target);
1671 return ERROR_OK;
1672 }
1673
1674 /* REVISIT cache valid/dirty bits are unmaintained. We could set "valid"
1675 * on r/w if the core is not running, and clear on resume or reset ... or
1676 * at least, in a post_restore_context() method.
1677 */
1678
1679 struct dwt_reg_state {
1680 struct target *target;
1681 uint32_t addr;
1682 uint32_t value; /* scratch/cache */
1683 };
1684
1685 static int cortex_m3_dwt_get_reg(struct reg *reg)
1686 {
1687 struct dwt_reg_state *state = reg->arch_info;
1688
1689 return target_read_u32(state->target, state->addr, &state->value);
1690 }
1691
1692 static int cortex_m3_dwt_set_reg(struct reg *reg, uint8_t *buf)
1693 {
1694 struct dwt_reg_state *state = reg->arch_info;
1695
1696 return target_write_u32(state->target, state->addr,
1697 buf_get_u32(buf, 0, reg->size));
1698 }
1699
1700 struct dwt_reg {
1701 uint32_t addr;
1702 char *name;
1703 unsigned size;
1704 };
1705
1706 static struct dwt_reg dwt_base_regs[] = {
1707 { DWT_CTRL, "dwt_ctrl", 32, },
1708 /* NOTE that Erratum 532314 (fixed r2p0) affects CYCCNT: it wrongly
1709 * increments while the core is asleep.
1710 */
1711 { DWT_CYCCNT, "dwt_cyccnt", 32, },
1712 /* plus some 8 bit counters, useful for profiling with TPIU */
1713 };
1714
1715 static struct dwt_reg dwt_comp[] = {
1716 #define DWT_COMPARATOR(i) \
1717 { DWT_COMP0 + 0x10 * (i), "dwt_" #i "_comp", 32, }, \
1718 { DWT_MASK0 + 0x10 * (i), "dwt_" #i "_mask", 4, }, \
1719 { DWT_FUNCTION0 + 0x10 * (i), "dwt_" #i "_function", 32, }
1720 DWT_COMPARATOR(0),
1721 DWT_COMPARATOR(1),
1722 DWT_COMPARATOR(2),
1723 DWT_COMPARATOR(3),
1724 #undef DWT_COMPARATOR
1725 };
1726
1727 static const struct reg_arch_type dwt_reg_type = {
1728 .get = cortex_m3_dwt_get_reg,
1729 .set = cortex_m3_dwt_set_reg,
1730 };
1731
1732 static void cortex_m3_dwt_addreg(struct target *t, struct reg *r, struct dwt_reg *d)
1733 {
1734 struct dwt_reg_state *state;
1735
1736 state = calloc(1, sizeof *state);
1737 if (!state)
1738 return;
1739 state->addr = d->addr;
1740 state->target = t;
1741
1742 r->name = d->name;
1743 r->size = d->size;
1744 r->value = &state->value;
1745 r->arch_info = state;
1746 r->type = &dwt_reg_type;
1747 }
1748
1749 void cortex_m3_dwt_setup(struct cortex_m3_common *cm3, struct target *target)
1750 {
1751 uint32_t dwtcr;
1752 struct reg_cache *cache;
1753 struct cortex_m3_dwt_comparator *comparator;
1754 int reg, i;
1755
1756 target_read_u32(target, DWT_CTRL, &dwtcr);
1757 if (!dwtcr) {
1758 LOG_DEBUG("no DWT");
1759 return;
1760 }
1761
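	/* DWT_CTRL[31:28] (NUMCOMP) reports how many comparators this DWT
	 * implements; the comparator list and register cache below are sized
	 * from it.
	 */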
1762 cm3->dwt_num_comp = (dwtcr >> 28) & 0xF;
1763 cm3->dwt_comp_available = cm3->dwt_num_comp;
1764 cm3->dwt_comparator_list = calloc(cm3->dwt_num_comp,
1765 sizeof(struct cortex_m3_dwt_comparator));
1766 if (!cm3->dwt_comparator_list) {
1767 fail0:
1768 cm3->dwt_num_comp = 0;
1769 LOG_ERROR("out of mem");
1770 return;
1771 }
1772
1773 cache = calloc(1, sizeof *cache);
1774 if (!cache) {
1775 fail1:
1776 free(cm3->dwt_comparator_list);
1777 goto fail0;
1778 }
1779 cache->name = "cortex-m3 dwt registers";
1780 cache->num_regs = 2 + cm3->dwt_num_comp * 3;
1781 cache->reg_list = calloc(cache->num_regs, sizeof *cache->reg_list);
1782 if (!cache->reg_list) {
1783 free(cache);
1784 goto fail1;
1785 }
1786
1787 for (reg = 0; reg < 2; reg++)
1788 cortex_m3_dwt_addreg(target, cache->reg_list + reg,
1789 dwt_base_regs + reg);
1790
1791 comparator = cm3->dwt_comparator_list;
1792 for (i = 0; i < cm3->dwt_num_comp; i++, comparator++) {
1793 int j;
1794
1795 comparator->dwt_comparator_address = DWT_COMP0 + 0x10 * i;
1796 for (j = 0; j < 3; j++, reg++)
1797 cortex_m3_dwt_addreg(target, cache->reg_list + reg,
1798 dwt_comp + 3 * i + j);
1799 }
1800
1801 *register_get_last_cache_p(&target->reg_cache) = cache;
1802 cm3->dwt_cache = cache;
1803
1804 LOG_DEBUG("DWT dwtcr 0x%" PRIx32 ", comp %d, watch%s",
1805 dwtcr, cm3->dwt_num_comp,
1806 (dwtcr & (0xf << 24)) ? " only" : "/trigger");
1807
1808 	/* REVISIT: if num_comp > 1, check whether comparator #1 can
1809 	 * implement single-address data value watchpoints ... so we won't
1810 	 * need to check it later, when asked to set one up.  (A possible
1811 	 * probe is sketched just after this function.) */
1812 }
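/* Illustrative sketch only (not called above): one way to resolve the
 * REVISIT in cortex_m3_dwt_setup() is to probe whether a comparator accepts
 * the DATAVMATCH function bit.  DWT_FUNCTION_DATAVMATCH is defined here just
 * for the sketch, and the probe assumes unsupported function bits read back
 * as zero; both points should be checked against the ARMv7-M ARM.
 */
#define DWT_FUNCTION_DATAVMATCH		(1 << 8)

static int cortex_m3_dwt_probe_datavmatch(struct target *target,
	int comparator, int *supported)
{
	uint32_t function_addr = DWT_FUNCTION0 + 0x10 * comparator;
	uint32_t function;
	int retval;

	/* try to set DATAVMATCH with the comparator otherwise disabled */
	retval = target_write_u32(target, function_addr, DWT_FUNCTION_DATAVMATCH);
	if (retval != ERROR_OK)
		return retval;

	retval = target_read_u32(target, function_addr, &function);
	if (retval != ERROR_OK)
		return retval;

	*supported = (function & DWT_FUNCTION_DATAVMATCH) != 0;

	/* leave the comparator disabled */
	return target_write_u32(target, function_addr, 0);
}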
1813
1814 #define MVFR0 0xe000ef40
1815 #define MVFR1 0xe000ef44
1816
1817 #define MVFR0_DEFAULT_M4 0x10110021
1818 #define MVFR1_DEFAULT_M4 0x11000011
1819
1820 int cortex_m3_examine(struct target *target)
1821 {
1822 int retval;
1823 uint32_t cpuid, fpcr, mvfr0, mvfr1;
1824 int i;
1825 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
1826 struct adiv5_dap *swjdp = cortex_m3->armv7m.arm.dap;
1827 struct armv7m_common *armv7m = target_to_armv7m(target);
1828
1829 /* stlink shares the examine handler but does not support
1830 * all its calls */
1831 if (!armv7m->stlink) {
1832 retval = ahbap_debugport_init(swjdp);
1833 if (retval != ERROR_OK)
1834 return retval;
1835 }
1836
1837 if (!target_was_examined(target)) {
1838 target_set_examined(target);
1839
1840 /* Read from Device Identification Registers */
1841 retval = target_read_u32(target, CPUID, &cpuid);
1842 if (retval != ERROR_OK)
1843 return retval;
1844
1845 /* Get CPU Type */
1846 i = (cpuid >> 4) & 0xf;
1847
1848 LOG_DEBUG("Cortex-M%d r%" PRId8 "p%" PRId8 " processor detected",
1849 i, (uint8_t)((cpuid >> 20) & 0xf), (uint8_t)((cpuid >> 0) & 0xf));
1850 LOG_DEBUG("cpuid: 0x%8.8" PRIx32 "", cpuid);
1851
1852 /* test for floating point feature on cortex-m4 */
1853 if (i == 4) {
1854 target_read_u32(target, MVFR0, &mvfr0);
1855 target_read_u32(target, MVFR1, &mvfr1);
1856
1857 if ((mvfr0 == MVFR0_DEFAULT_M4) && (mvfr1 == MVFR1_DEFAULT_M4)) {
1858 LOG_DEBUG("Cortex-M%d floating point feature FPv4_SP found", i);
1859 armv7m->fp_feature = FPv4_SP;
1860 }
1861 } else if (i == 0) {
1862 /* Cortex-M0 does not support unaligned memory access */
1863 armv7m->arm.is_armv6m = true;
1864 }
1865
1866 if (i == 4 || i == 3) {
1867 /* Cortex-M3/M4 has 4096 bytes autoincrement range */
1868 armv7m->dap.tar_autoincr_block = (1 << 12);
1869 }
1870
1871 /* NOTE: FPB and DWT are both optional. */
1872
1873 /* Setup FPB */
1874 target_read_u32(target, FP_CTRL, &fpcr);
1875 cortex_m3->auto_bp_type = 1;
1876 		cortex_m3->fp_num_code = ((fpcr >> 8) & 0x70) | ((fpcr >> 4) & 0xF);
1877 		/* the FP_CTRL NUM_CODE field is split across
1878 		 * bits [14:12] and [7:4]
1879 		 */
1880 cortex_m3->fp_num_lit = (fpcr >> 8) & 0xF;
1881 cortex_m3->fp_code_available = cortex_m3->fp_num_code;
1882 cortex_m3->fp_comparator_list = calloc(
1883 cortex_m3->fp_num_code + cortex_m3->fp_num_lit,
1884 sizeof(struct cortex_m3_fp_comparator));
1885 cortex_m3->fpb_enabled = fpcr & 1;
1886 for (i = 0; i < cortex_m3->fp_num_code + cortex_m3->fp_num_lit; i++) {
1887 cortex_m3->fp_comparator_list[i].type =
1888 (i < cortex_m3->fp_num_code) ? FPCR_CODE : FPCR_LITERAL;
1889 cortex_m3->fp_comparator_list[i].fpcr_address = FP_COMP0 + 4 * i;
1890 }
1891 LOG_DEBUG("FPB fpcr 0x%" PRIx32 ", numcode %i, numlit %i",
1892 fpcr,
1893 cortex_m3->fp_num_code,
1894 cortex_m3->fp_num_lit);
1895
1896 /* Setup DWT */
1897 cortex_m3_dwt_setup(cortex_m3, target);
1898
1899 /* These hardware breakpoints only work for code in flash! */
1900 LOG_INFO("%s: hardware has %d breakpoints, %d watchpoints",
1901 target_name(target),
1902 cortex_m3->fp_num_code,
1903 cortex_m3->dwt_num_comp);
1904 }
1905
1906 return ERROR_OK;
1907 }
1908
1909 static int cortex_m3_dcc_read(struct adiv5_dap *swjdp, uint8_t *value, uint8_t *ctrl)
1910 {
1911 uint16_t dcrdr;
1912 int retval;
1913
1914 mem_ap_read_buf_u16(swjdp, (uint8_t *)&dcrdr, 1, DCB_DCRDR);
1915 *ctrl = (uint8_t)dcrdr;
1916 *value = (uint8_t)(dcrdr >> 8);
1917
1918 LOG_DEBUG("data 0x%x ctrl 0x%x", *value, *ctrl);
1919
1920 	/* write the ack back to the software dcc register to signify that we
1921 	 * have read the data (the matching target-side writer is sketched below) */
1922 if (dcrdr & (1 << 0)) {
1923 dcrdr = 0;
1924 retval = mem_ap_write_buf_u16(swjdp, (uint8_t *)&dcrdr, 1, DCB_DCRDR);
1925 if (retval != ERROR_OK)
1926 return retval;
1927 }
1928
1929 return ERROR_OK;
1930 }
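/* For reference, a minimal sketch (not part of OpenOCD itself) of the
 * target-firmware side of this handshake, in the spirit of contrib/libdcc:
 * the data byte goes in DCRDR bits [15:8] and bit 0 acts as the "byte
 * pending" flag that the reader above clears.  The raw DCRDR address,
 * macro names and helper name are assumptions made for the sketch.
 */
#define EXAMPLE_DCRDR		(*(volatile uint16_t *)0xE000EDF8)
#define EXAMPLE_DCRDR_BUSY	(1 << 0)

static void example_target_dcc_write_u32(uint32_t data)
{
	int i;

	for (i = 0; i < 4; i++) {
		/* wait until the debugger has consumed the previous byte */
		while (EXAMPLE_DCRDR & EXAMPLE_DCRDR_BUSY)
			;
		/* low-order byte first, matching the assembly order in
		 * cortex_m3_handle_target_request() */
		EXAMPLE_DCRDR = (uint16_t)(((data & 0xff) << 8) | EXAMPLE_DCRDR_BUSY);
		data >>= 8;
	}
}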
1931
1932 static int cortex_m3_target_request_data(struct target *target,
1933 uint32_t size, uint8_t *buffer)
1934 {
1935 struct armv7m_common *armv7m = target_to_armv7m(target);
1936 struct adiv5_dap *swjdp = armv7m->arm.dap;
1937 uint8_t data;
1938 uint8_t ctrl;
1939 uint32_t i;
1940
1941 for (i = 0; i < (size * 4); i++) {
1942 cortex_m3_dcc_read(swjdp, &data, &ctrl);
1943 buffer[i] = data;
1944 }
1945
1946 return ERROR_OK;
1947 }
1948
1949 static int cortex_m3_handle_target_request(void *priv)
1950 {
1951 struct target *target = priv;
1952 if (!target_was_examined(target))
1953 return ERROR_OK;
1954 struct armv7m_common *armv7m = target_to_armv7m(target);
1955 struct adiv5_dap *swjdp = armv7m->arm.dap;
1956
1957 if (!target->dbg_msg_enabled)
1958 return ERROR_OK;
1959
1960 if (target->state == TARGET_RUNNING) {
1961 uint8_t data;
1962 uint8_t ctrl;
1963
1964 cortex_m3_dcc_read(swjdp, &data, &ctrl);
1965
1966 /* check if we have data */
1967 if (ctrl & (1 << 0)) {
1968 uint32_t request;
1969
1970 /* we assume target is quick enough */
1971 request = data;
1972 cortex_m3_dcc_read(swjdp, &data, &ctrl);
1973 request |= (data << 8);
1974 cortex_m3_dcc_read(swjdp, &data, &ctrl);
1975 request |= (data << 16);
1976 cortex_m3_dcc_read(swjdp, &data, &ctrl);
1977 request |= (data << 24);
1978 target_request(target, request);
1979 }
1980 }
1981
1982 return ERROR_OK;
1983 }
1984
1985 static int cortex_m3_init_arch_info(struct target *target,
1986 struct cortex_m3_common *cortex_m3, struct jtag_tap *tap)
1987 {
1988 int retval;
1989 struct armv7m_common *armv7m = &cortex_m3->armv7m;
1990
1991 armv7m_init_arch_info(target, armv7m);
1992
1993 /* prepare JTAG information for the new target */
1994 cortex_m3->jtag_info.tap = tap;
1995 cortex_m3->jtag_info.scann_size = 4;
1996
1997 	/* the default reset mode is to use SRST if fitted;
1998 	 * if not, fall back to CORTEX_M3_RESET_VECTRESET */
1999 cortex_m3->soft_reset_config = CORTEX_M3_RESET_VECTRESET;
2000
2001 armv7m->arm.dap = &armv7m->dap;
2002
2003 /* Leave (only) generic DAP stuff for debugport_init(); */
2004 armv7m->dap.jtag_info = &cortex_m3->jtag_info;
2005 armv7m->dap.memaccess_tck = 8;
2006
2007 /* Cortex-M3/M4 has 4096 bytes autoincrement range
2008 * but set a safe default to 1024 to support Cortex-M0
2009 * this will be changed in cortex_m3_examine if a M3/M4 is detected */
2010 armv7m->dap.tar_autoincr_block = (1 << 10);
2011
2012 /* register arch-specific functions */
2013 armv7m->examine_debug_reason = cortex_m3_examine_debug_reason;
2014
2015 armv7m->post_debug_entry = NULL;
2016
2017 armv7m->pre_restore_context = NULL;
2018
2019 armv7m->load_core_reg_u32 = cortex_m3_load_core_reg_u32;
2020 armv7m->store_core_reg_u32 = cortex_m3_store_core_reg_u32;
2021
2022 target_register_timer_callback(cortex_m3_handle_target_request, 1, 1, target);
2023
2024 retval = arm_jtag_setup_connection(&cortex_m3->jtag_info);
2025 if (retval != ERROR_OK)
2026 return retval;
2027
2028 return ERROR_OK;
2029 }
2030
2031 static int cortex_m3_target_create(struct target *target, Jim_Interp *interp)
2032 {
2033 struct cortex_m3_common *cortex_m3 = calloc(1, sizeof(struct cortex_m3_common));
2034
2035 cortex_m3->common_magic = CORTEX_M3_COMMON_MAGIC;
2036 cortex_m3_init_arch_info(target, cortex_m3, target->tap);
2037
2038 return ERROR_OK;
2039 }
2040
2041 /*--------------------------------------------------------------------------*/
2042
2043 static int cortex_m3_verify_pointer(struct command_context *cmd_ctx,
2044 struct cortex_m3_common *cm3)
2045 {
2046 if (cm3->common_magic != CORTEX_M3_COMMON_MAGIC) {
2047 command_print(cmd_ctx, "target is not a Cortex-M3");
2048 return ERROR_TARGET_INVALID;
2049 }
2050 return ERROR_OK;
2051 }
2052
2053 /*
2054 * Only stuff below this line should need to verify that its target
2055 * is a Cortex-M3. Everything else should have indirected through the
2056 * cortexm3_target structure, which is only used with CM3 targets.
2057 */
2058
2059 static const struct {
2060 char name[10];
2061 unsigned mask;
2062 } vec_ids[] = {
2063 { "hard_err", VC_HARDERR, },
2064 { "int_err", VC_INTERR, },
2065 { "bus_err", VC_BUSERR, },
2066 { "state_err", VC_STATERR, },
2067 { "chk_err", VC_CHKERR, },
2068 { "nocp_err", VC_NOCPERR, },
2069 { "mm_err", VC_MMERR, },
2070 { "reset", VC_CORERESET, },
2071 };
2072
2073 COMMAND_HANDLER(handle_cortex_m3_vector_catch_command)
2074 {
2075 struct target *target = get_current_target(CMD_CTX);
2076 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
2077 struct armv7m_common *armv7m = &cortex_m3->armv7m;
2078 struct adiv5_dap *swjdp = armv7m->arm.dap;
2079 uint32_t demcr = 0;
2080 int retval;
2081
2082 retval = cortex_m3_verify_pointer(CMD_CTX, cortex_m3);
2083 if (retval != ERROR_OK)
2084 return retval;
2085
2086 retval = mem_ap_read_atomic_u32(swjdp, DCB_DEMCR, &demcr);
2087 if (retval != ERROR_OK)
2088 return retval;
2089
2090 if (CMD_ARGC > 0) {
2091 unsigned catch = 0;
2092
2093 if (CMD_ARGC == 1) {
2094 if (strcmp(CMD_ARGV[0], "all") == 0) {
2095 catch = VC_HARDERR | VC_INTERR | VC_BUSERR
2096 | VC_STATERR | VC_CHKERR | VC_NOCPERR
2097 | VC_MMERR | VC_CORERESET;
2098 goto write;
2099 } else if (strcmp(CMD_ARGV[0], "none") == 0)
2100 goto write;
2101 }
2102 while (CMD_ARGC-- > 0) {
2103 unsigned i;
2104 for (i = 0; i < ARRAY_SIZE(vec_ids); i++) {
2105 if (strcmp(CMD_ARGV[CMD_ARGC], vec_ids[i].name) != 0)
2106 continue;
2107 catch |= vec_ids[i].mask;
2108 break;
2109 }
2110 if (i == ARRAY_SIZE(vec_ids)) {
2111 LOG_ERROR("No CM3 vector '%s'", CMD_ARGV[CMD_ARGC]);
2112 return ERROR_COMMAND_SYNTAX_ERROR;
2113 }
2114 }
2115 write:
2116 /* For now, armv7m->demcr only stores vector catch flags. */
2117 armv7m->demcr = catch;
2118
2119 demcr &= ~0xffff;
2120 demcr |= catch;
2121
2122 /* write, but don't assume it stuck (why not??) */
2123 retval = mem_ap_write_u32(swjdp, DCB_DEMCR, demcr);
2124 if (retval != ERROR_OK)
2125 return retval;
2126 retval = mem_ap_read_atomic_u32(swjdp, DCB_DEMCR, &demcr);
2127 if (retval != ERROR_OK)
2128 return retval;
2129
2130 		/* FIXME be sure to clear DEMCR on clean server shutdown.
2131 		 * Otherwise the vector catch hardware could fire when there's
2132 		 * no debugger hooked up, causing much confusion... (a cleanup
2133 		 * sketch follows this handler) */
2134 }
2135
2136 for (unsigned i = 0; i < ARRAY_SIZE(vec_ids); i++) {
2137 command_print(CMD_CTX, "%9s: %s", vec_ids[i].name,
2138 (demcr & vec_ids[i].mask) ? "catch" : "ignore");
2139 }
2140
2141 return ERROR_OK;
2142 }
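/* Illustrative sketch for the FIXME above (not hooked into any shutdown
 * path here): clear the vector catch enables again before the server
 * exits, so the hardware cannot halt the core while no debugger is
 * attached.  Only constructs already used in this handler are assumed.
 */
static int cortex_m3_clear_vector_catch(struct target *target)
{
	struct armv7m_common *armv7m = target_to_armv7m(target);
	struct adiv5_dap *swjdp = armv7m->arm.dap;
	uint32_t demcr;
	int retval;

	retval = mem_ap_read_atomic_u32(swjdp, DCB_DEMCR, &demcr);
	if (retval != ERROR_OK)
		return retval;

	/* drop only the vector catch enables; leave TRCENA etc. untouched */
	demcr &= ~(VC_HARDERR | VC_INTERR | VC_BUSERR | VC_STATERR
		| VC_CHKERR | VC_NOCPERR | VC_MMERR | VC_CORERESET);
	armv7m->demcr = 0;

	return mem_ap_write_u32(swjdp, DCB_DEMCR, demcr);
}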
2143
2144 COMMAND_HANDLER(handle_cortex_m3_mask_interrupts_command)
2145 {
2146 struct target *target = get_current_target(CMD_CTX);
2147 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
2148 int retval;
2149
2150 static const Jim_Nvp nvp_maskisr_modes[] = {
2151 { .name = "auto", .value = CORTEX_M3_ISRMASK_AUTO },
2152 { .name = "off", .value = CORTEX_M3_ISRMASK_OFF },
2153 { .name = "on", .value = CORTEX_M3_ISRMASK_ON },
2154 { .name = NULL, .value = -1 },
2155 };
2156 const Jim_Nvp *n;
2157
2158
2159 retval = cortex_m3_verify_pointer(CMD_CTX, cortex_m3);
2160 if (retval != ERROR_OK)
2161 return retval;
2162
2163 if (target->state != TARGET_HALTED) {
2164 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
2165 return ERROR_OK;
2166 }
2167
2168 if (CMD_ARGC > 0) {
2169 n = Jim_Nvp_name2value_simple(nvp_maskisr_modes, CMD_ARGV[0]);
2170 if (n->name == NULL)
2171 return ERROR_COMMAND_SYNTAX_ERROR;
2172 cortex_m3->isrmasking_mode = n->value;
2173
2174
2175 if (cortex_m3->isrmasking_mode == CORTEX_M3_ISRMASK_ON)
2176 cortex_m3_write_debug_halt_mask(target, C_HALT | C_MASKINTS, 0);
2177 else
2178 cortex_m3_write_debug_halt_mask(target, C_HALT, C_MASKINTS);
2179 }
2180
2181 n = Jim_Nvp_value2name_simple(nvp_maskisr_modes, cortex_m3->isrmasking_mode);
2182 command_print(CMD_CTX, "cortex_m3 interrupt mask %s", n->name);
2183
2184 return ERROR_OK;
2185 }
2186
2187 COMMAND_HANDLER(handle_cortex_m3_reset_config_command)
2188 {
2189 struct target *target = get_current_target(CMD_CTX);
2190 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
2191 int retval;
2192 char *reset_config;
2193
2194 retval = cortex_m3_verify_pointer(CMD_CTX, cortex_m3);
2195 if (retval != ERROR_OK)
2196 return retval;
2197
2198 if (CMD_ARGC > 0) {
2199 if (strcmp(*CMD_ARGV, "sysresetreq") == 0)
2200 cortex_m3->soft_reset_config = CORTEX_M3_RESET_SYSRESETREQ;
2201 else if (strcmp(*CMD_ARGV, "vectreset") == 0)
2202 cortex_m3->soft_reset_config = CORTEX_M3_RESET_VECTRESET;
2203 }
2204
2205 switch (cortex_m3->soft_reset_config) {
2206 case CORTEX_M3_RESET_SYSRESETREQ:
2207 reset_config = "sysresetreq";
2208 break;
2209
2210 case CORTEX_M3_RESET_VECTRESET:
2211 reset_config = "vectreset";
2212 break;
2213
2214 default:
2215 reset_config = "unknown";
2216 break;
2217 }
2218
2219 command_print(CMD_CTX, "cortex_m3 reset_config %s", reset_config);
2220
2221 return ERROR_OK;
2222 }
2223
2224 static const struct command_registration cortex_m3_exec_command_handlers[] = {
2225 {
2226 .name = "maskisr",
2227 .handler = handle_cortex_m3_mask_interrupts_command,
2228 .mode = COMMAND_EXEC,
2229 .help = "mask cortex_m3 interrupts",
2230 .usage = "['auto'|'on'|'off']",
2231 },
2232 {
2233 .name = "vector_catch",
2234 .handler = handle_cortex_m3_vector_catch_command,
2235 .mode = COMMAND_EXEC,
2236 .help = "configure hardware vectors to trigger debug entry",
2237 .usage = "['all'|'none'|('bus_err'|'chk_err'|...)*]",
2238 },
2239 {
2240 .name = "reset_config",
2241 .handler = handle_cortex_m3_reset_config_command,
2242 .mode = COMMAND_ANY,
2243 .help = "configure software reset handling",
2244 .usage = "['srst'|'sysresetreq'|'vectreset']",
2245 },
2246 COMMAND_REGISTRATION_DONE
2247 };
2248 static const struct command_registration cortex_m3_command_handlers[] = {
2249 {
2250 .chain = armv7m_command_handlers,
2251 },
2252 {
2253 .name = "cortex_m3",
2254 .mode = COMMAND_EXEC,
2255 .help = "Cortex-M3 command group",
2256 .usage = "",
2257 .chain = cortex_m3_exec_command_handlers,
2258 },
2259 COMMAND_REGISTRATION_DONE
2260 };
2261
2262 struct target_type cortexm3_target = {
2263 .name = "cortex_m3",
2264
2265 .poll = cortex_m3_poll,
2266 .arch_state = armv7m_arch_state,
2267
2268 .target_request_data = cortex_m3_target_request_data,
2269
2270 .halt = cortex_m3_halt,
2271 .resume = cortex_m3_resume,
2272 .step = cortex_m3_step,
2273
2274 .assert_reset = cortex_m3_assert_reset,
2275 .deassert_reset = cortex_m3_deassert_reset,
2276 .soft_reset_halt = cortex_m3_soft_reset_halt,
2277
2278 .get_gdb_reg_list = armv7m_get_gdb_reg_list,
2279
2280 .read_memory = cortex_m3_read_memory,
2281 .write_memory = cortex_m3_write_memory,
2282 .checksum_memory = armv7m_checksum_memory,
2283 .blank_check_memory = armv7m_blank_check_memory,
2284
2285 .run_algorithm = armv7m_run_algorithm,
2286 .start_algorithm = armv7m_start_algorithm,
2287 .wait_algorithm = armv7m_wait_algorithm,
2288
2289 .add_breakpoint = cortex_m3_add_breakpoint,
2290 .remove_breakpoint = cortex_m3_remove_breakpoint,
2291 .add_watchpoint = cortex_m3_add_watchpoint,
2292 .remove_watchpoint = cortex_m3_remove_watchpoint,
2293
2294 .commands = cortex_m3_command_handlers,
2295 .target_create = cortex_m3_target_create,
2296 .init_target = cortex_m3_init_target,
2297 .examine = cortex_m3_examine,
2298 };
