cortex_m: Call mem_ap_read/write directly
[openocd.git] / src / target / cortex_m.c
1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2006 by Magnus Lundin *
6 * lundin@mlu.mine.nu *
7 * *
8 * Copyright (C) 2008 by Spencer Oliver *
9 * spen@spen-soft.co.uk *
10 * *
11 * This program is free software; you can redistribute it and/or modify *
12 * it under the terms of the GNU General Public License as published by *
13 * the Free Software Foundation; either version 2 of the License, or *
14 * (at your option) any later version. *
15 * *
16 * This program is distributed in the hope that it will be useful, *
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
19 * GNU General Public License for more details. *
20 * *
21 * You should have received a copy of the GNU General Public License *
22 * along with this program; if not, write to the *
23 * Free Software Foundation, Inc., *
24 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. *
25 * *
26 * *
27 * Cortex-M3(tm) TRM, ARM DDI 0337E (r1p1) and 0337G (r2p0) *
28 * *
29 ***************************************************************************/
30 #ifdef HAVE_CONFIG_H
31 #include "config.h"
32 #endif
33
34 #include "jtag/interface.h"
35 #include "breakpoints.h"
36 #include "cortex_m.h"
37 #include "target_request.h"
38 #include "target_type.h"
39 #include "arm_disassembler.h"
40 #include "register.h"
41 #include "arm_opcodes.h"
42 #include "arm_semihosting.h"
43 #include <helper/time_support.h>
44
45 /* NOTE: most of this should work fine for the Cortex-M1 and
46 * Cortex-M0 cores too, although they're ARMv6-M not ARMv7-M.
47 * Some differences: M0/M1 doesn't have FPB remapping or the
48 * DWT tracing/profiling support. (So the cycle counter will
49 * not be usable; the other stuff isn't currently used here.)
50 *
51 * Although there are some workarounds for errata seen only in r0p0
52 * silicon, such old parts are hard to find and thus not much tested
53 * any longer.
54 */
55
56 /**
57 * Returns the type of breakpoint required by the address: hardware (FPB) for the code region below 0x20000000, software (BKPT) otherwise
58 */
59 #define BKPT_TYPE_BY_ADDR(addr) ((addr) < 0x20000000 ? BKPT_HARD : BKPT_SOFT)
60
61
62 /* forward declarations */
63 static int cortex_m3_store_core_reg_u32(struct target *target,
64 uint32_t num, uint32_t value);
65
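/* Read a core register through the DCB: queue a banked-AP write of the
 * register number to DCB_DCRSR, queue a read of DCB_DCRDR for the result,
 * then flush both with dap_run().  DCB_DCRDR is saved first and restored
 * afterwards because it doubles as the emulated DCC channel.
 */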
66 static int cortexm3_dap_read_coreregister_u32(struct adiv5_dap *swjdp,
67 uint32_t *value, int regnum)
68 {
69 int retval;
70 uint32_t dcrdr;
71
72 /* because the DCB_DCRDR is used for the emulated dcc channel
73 * we have to save/restore the DCB_DCRDR when used */
74
75 retval = mem_ap_read_u32(swjdp, DCB_DCRDR, &dcrdr);
76 if (retval != ERROR_OK)
77 return retval;
78
79 /* mem_ap_write_u32(swjdp, DCB_DCRSR, regnum); */
80 retval = dap_setup_accessport(swjdp, CSW_32BIT | CSW_ADDRINC_OFF, DCB_DCRSR & 0xFFFFFFF0);
81 if (retval != ERROR_OK)
82 return retval;
83 retval = dap_queue_ap_write(swjdp, AP_REG_BD0 | (DCB_DCRSR & 0xC), regnum);
84 if (retval != ERROR_OK)
85 return retval;
86
87 /* mem_ap_read_u32(swjdp, DCB_DCRDR, value); */
88 retval = dap_setup_accessport(swjdp, CSW_32BIT | CSW_ADDRINC_OFF, DCB_DCRDR & 0xFFFFFFF0);
89 if (retval != ERROR_OK)
90 return retval;
91 retval = dap_queue_ap_read(swjdp, AP_REG_BD0 | (DCB_DCRDR & 0xC), value);
92 if (retval != ERROR_OK)
93 return retval;
94
95 retval = dap_run(swjdp);
96 if (retval != ERROR_OK)
97 return retval;
98
99 /* restore DCB_DCRDR - this needs to be in a separate
100 * transaction otherwise the emulated DCC channel breaks */
101 if (retval == ERROR_OK)
102 retval = mem_ap_write_atomic_u32(swjdp, DCB_DCRDR, dcrdr);
103
104 return retval;
105 }
106
107 static int cortexm3_dap_write_coreregister_u32(struct adiv5_dap *swjdp,
108 uint32_t value, int regnum)
109 {
110 int retval;
111 uint32_t dcrdr;
112
113 /* because the DCB_DCRDR is used for the emulated dcc channel
114 * we have to save/restore the DCB_DCRDR when used */
115
116 retval = mem_ap_read_u32(swjdp, DCB_DCRDR, &dcrdr);
117 if (retval != ERROR_OK)
118 return retval;
119
120 /* mem_ap_write_u32(swjdp, DCB_DCRDR, core_regs[i]); */
121 retval = dap_setup_accessport(swjdp, CSW_32BIT | CSW_ADDRINC_OFF, DCB_DCRDR & 0xFFFFFFF0);
122 if (retval != ERROR_OK)
123 return retval;
124 retval = dap_queue_ap_write(swjdp, AP_REG_BD0 | (DCB_DCRDR & 0xC), value);
125 if (retval != ERROR_OK)
126 return retval;
127
128 /* mem_ap_write_u32(swjdp, DCB_DCRSR, i | DCRSR_WnR); */
129 retval = dap_setup_accessport(swjdp, CSW_32BIT | CSW_ADDRINC_OFF, DCB_DCRSR & 0xFFFFFFF0);
130 if (retval != ERROR_OK)
131 return retval;
132 retval = dap_queue_ap_write(swjdp, AP_REG_BD0 | (DCB_DCRSR & 0xC), regnum | DCRSR_WnR);
133 if (retval != ERROR_OK)
134 return retval;
135
136 retval = dap_run(swjdp);
137 if (retval != ERROR_OK)
138 return retval;
139
140 /* restore DCB_DCRDR - this needs to be in a separate
141 * transaction otherwise the emulated DCC channel breaks */
142 if (retval == ERROR_OK)
143 retval = mem_ap_write_atomic_u32(swjdp, DCB_DCRDR, dcrdr);
144
145 return retval;
146 }
147
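/* Update the cached DHCSR value and write it back.  Writes to DCB_DHCSR are
 * only accepted with the DBGKEY value (0xA05F) in bits [31:16], so the key is
 * OR'ed in on every write; mask_on/mask_off typically toggle C_HALT, C_STEP
 * and C_MASKINTS.
 */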
148 static int cortex_m3_write_debug_halt_mask(struct target *target,
149 uint32_t mask_on, uint32_t mask_off)
150 {
151 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
152 struct adiv5_dap *swjdp = cortex_m3->armv7m.arm.dap;
153
154 /* mask off status bits */
155 cortex_m3->dcb_dhcsr &= ~((0xFFFF << 16) | mask_off);
156 /* create new register mask */
157 cortex_m3->dcb_dhcsr |= DBGKEY | C_DEBUGEN | mask_on;
158
159 return mem_ap_write_atomic_u32(swjdp, DCB_DHCSR, cortex_m3->dcb_dhcsr);
160 }
161
162 static int cortex_m3_clear_halt(struct target *target)
163 {
164 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
165 struct adiv5_dap *swjdp = cortex_m3->armv7m.arm.dap;
166 int retval;
167
168 /* clear step if any */
169 cortex_m3_write_debug_halt_mask(target, C_HALT, C_STEP);
170
171 /* Read Debug Fault Status Register */
172 retval = mem_ap_read_atomic_u32(swjdp, NVIC_DFSR, &cortex_m3->nvic_dfsr);
173 if (retval != ERROR_OK)
174 return retval;
175
176 /* Clear Debug Fault Status */
177 retval = mem_ap_write_atomic_u32(swjdp, NVIC_DFSR, cortex_m3->nvic_dfsr);
178 if (retval != ERROR_OK)
179 return retval;
180 LOG_DEBUG(" NVIC_DFSR 0x%" PRIx32 "", cortex_m3->nvic_dfsr);
181
182 return ERROR_OK;
183 }
184
185 static int cortex_m3_single_step_core(struct target *target)
186 {
187 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
188 struct adiv5_dap *swjdp = cortex_m3->armv7m.arm.dap;
189 uint32_t dhcsr_save;
190 int retval;
191
192 /* backup dhcsr reg */
193 dhcsr_save = cortex_m3->dcb_dhcsr;
194
195 /* Mask interrupts before clearing halt, if not done already. This avoids
196 * Erratum 377497 (fixed in r1p0) where setting MASKINTS while clearing
197 * HALT can put the core into an unknown state.
198 */
199 if (!(cortex_m3->dcb_dhcsr & C_MASKINTS)) {
200 retval = mem_ap_write_atomic_u32(swjdp, DCB_DHCSR,
201 DBGKEY | C_MASKINTS | C_HALT | C_DEBUGEN);
202 if (retval != ERROR_OK)
203 return retval;
204 }
205 retval = mem_ap_write_atomic_u32(swjdp, DCB_DHCSR,
206 DBGKEY | C_MASKINTS | C_STEP | C_DEBUGEN);
207 if (retval != ERROR_OK)
208 return retval;
209 LOG_DEBUG(" ");
210
211 /* restore dhcsr reg */
212 cortex_m3->dcb_dhcsr = dhcsr_save;
213 cortex_m3_clear_halt(target);
214
215 return ERROR_OK;
216 }
217
218 static int cortex_m3_endreset_event(struct target *target)
219 {
220 int i;
221 int retval;
222 uint32_t dcb_demcr;
223 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
224 struct armv7m_common *armv7m = &cortex_m3->armv7m;
225 struct adiv5_dap *swjdp = cortex_m3->armv7m.arm.dap;
226 struct cortex_m3_fp_comparator *fp_list = cortex_m3->fp_comparator_list;
227 struct cortex_m3_dwt_comparator *dwt_list = cortex_m3->dwt_comparator_list;
228
229 /* REVISIT The four debug monitor bits are currently ignored... */
230 retval = mem_ap_read_atomic_u32(swjdp, DCB_DEMCR, &dcb_demcr);
231 if (retval != ERROR_OK)
232 return retval;
233 LOG_DEBUG("DCB_DEMCR = 0x%8.8" PRIx32 "", dcb_demcr);
234
235 /* this register is used for emulated dcc channel */
236 retval = mem_ap_write_u32(swjdp, DCB_DCRDR, 0);
237 if (retval != ERROR_OK)
238 return retval;
239
240 /* Enable debug requests */
241 retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &cortex_m3->dcb_dhcsr);
242 if (retval != ERROR_OK)
243 return retval;
244 if (!(cortex_m3->dcb_dhcsr & C_DEBUGEN)) {
245 retval = mem_ap_write_u32(swjdp, DCB_DHCSR, DBGKEY | C_DEBUGEN);
246 if (retval != ERROR_OK)
247 return retval;
248 }
249
250 /* clear any interrupt masking */
251 cortex_m3_write_debug_halt_mask(target, 0, C_MASKINTS);
252
253 /* Enable features controlled by ITM and DWT blocks, and catch only
254 * the vectors we were told to pay attention to.
255 *
256 * Target firmware is responsible for all fault handling policy
257 * choices *EXCEPT* explicitly scripted overrides like "vector_catch"
258 * or manual updates to the NVIC SHCSR and CCR registers.
259 */
260 retval = mem_ap_write_u32(swjdp, DCB_DEMCR, TRCENA | armv7m->demcr);
261 if (retval != ERROR_OK)
262 return retval;
263
264 /* Paranoia: evidently some (early?) chips don't preserve all the
265 * debug state (including FBP, DWT, etc) across reset...
266 */
267
268 /* Enable FPB */
269 retval = target_write_u32(target, FP_CTRL, 3);
270 if (retval != ERROR_OK)
271 return retval;
272
273 cortex_m3->fpb_enabled = 1;
274
275 /* Restore FPB registers */
276 for (i = 0; i < cortex_m3->fp_num_code + cortex_m3->fp_num_lit; i++) {
277 retval = target_write_u32(target, fp_list[i].fpcr_address, fp_list[i].fpcr_value);
278 if (retval != ERROR_OK)
279 return retval;
280 }
281
282 /* Restore DWT registers */
283 for (i = 0; i < cortex_m3->dwt_num_comp; i++) {
284 retval = target_write_u32(target, dwt_list[i].dwt_comparator_address + 0,
285 dwt_list[i].comp);
286 if (retval != ERROR_OK)
287 return retval;
288 retval = target_write_u32(target, dwt_list[i].dwt_comparator_address + 4,
289 dwt_list[i].mask);
290 if (retval != ERROR_OK)
291 return retval;
292 retval = target_write_u32(target, dwt_list[i].dwt_comparator_address + 8,
293 dwt_list[i].function);
294 if (retval != ERROR_OK)
295 return retval;
296 }
297 retval = dap_run(swjdp);
298 if (retval != ERROR_OK)
299 return retval;
300
301 register_cache_invalidate(armv7m->arm.core_cache);
302
303 /* make sure we have latest dhcsr flags */
304 retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &cortex_m3->dcb_dhcsr);
305
306 return retval;
307 }
308
309 static int cortex_m3_examine_debug_reason(struct target *target)
310 {
311 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
312
313 /* THIS IS NOT GOOD, TODO - better logic for detection of debug state reason
314 * only check the debug reason if we don't know it already */
315
316 if ((target->debug_reason != DBG_REASON_DBGRQ)
317 && (target->debug_reason != DBG_REASON_SINGLESTEP)) {
318 if (cortex_m3->nvic_dfsr & DFSR_BKPT) {
319 target->debug_reason = DBG_REASON_BREAKPOINT;
320 if (cortex_m3->nvic_dfsr & DFSR_DWTTRAP)
321 target->debug_reason = DBG_REASON_WPTANDBKPT;
322 } else if (cortex_m3->nvic_dfsr & DFSR_DWTTRAP)
323 target->debug_reason = DBG_REASON_WATCHPOINT;
324 else if (cortex_m3->nvic_dfsr & DFSR_VCATCH)
325 target->debug_reason = DBG_REASON_BREAKPOINT;
326 else /* EXTERNAL, HALTED */
327 target->debug_reason = DBG_REASON_UNDEFINED;
328 }
329
330 return ERROR_OK;
331 }
332
333 static int cortex_m3_examine_exception_reason(struct target *target)
334 {
335 uint32_t shcsr = 0, except_sr = 0, cfsr = -1, except_ar = -1;
336 struct armv7m_common *armv7m = target_to_armv7m(target);
337 struct adiv5_dap *swjdp = armv7m->arm.dap;
338 int retval;
339
340 retval = mem_ap_read_u32(swjdp, NVIC_SHCSR, &shcsr);
341 if (retval != ERROR_OK)
342 return retval;
343 switch (armv7m->exception_number) {
344 case 2: /* NMI */
345 break;
346 case 3: /* Hard Fault */
347 retval = mem_ap_read_atomic_u32(swjdp, NVIC_HFSR, &except_sr);
348 if (retval != ERROR_OK)
349 return retval;
350 if (except_sr & 0x40000000) {
351 retval = mem_ap_read_u32(swjdp, NVIC_CFSR, &cfsr);
352 if (retval != ERROR_OK)
353 return retval;
354 }
355 break;
356 case 4: /* Memory Management */
357 retval = mem_ap_read_u32(swjdp, NVIC_CFSR, &except_sr);
358 if (retval != ERROR_OK)
359 return retval;
360 retval = mem_ap_read_u32(swjdp, NVIC_MMFAR, &except_ar);
361 if (retval != ERROR_OK)
362 return retval;
363 break;
364 case 5: /* Bus Fault */
365 retval = mem_ap_read_u32(swjdp, NVIC_CFSR, &except_sr);
366 if (retval != ERROR_OK)
367 return retval;
368 retval = mem_ap_read_u32(swjdp, NVIC_BFAR, &except_ar);
369 if (retval != ERROR_OK)
370 return retval;
371 break;
372 case 6: /* Usage Fault */
373 retval = mem_ap_read_u32(swjdp, NVIC_CFSR, &except_sr);
374 if (retval != ERROR_OK)
375 return retval;
376 break;
377 case 11: /* SVCall */
378 break;
379 case 12: /* Debug Monitor */
380 retval = mem_ap_read_u32(swjdp, NVIC_DFSR, &except_sr);
381 if (retval != ERROR_OK)
382 return retval;
383 break;
384 case 14: /* PendSV */
385 break;
386 case 15: /* SysTick */
387 break;
388 default:
389 except_sr = 0;
390 break;
391 }
392 retval = dap_run(swjdp);
393 if (retval == ERROR_OK)
394 LOG_DEBUG("%s SHCSR 0x%" PRIx32 ", SR 0x%" PRIx32
395 ", CFSR 0x%" PRIx32 ", AR 0x%" PRIx32,
396 armv7m_exception_string(armv7m->exception_number),
397 shcsr, except_sr, cfsr, except_ar);
398 return retval;
399 }
400
401 static int cortex_m3_debug_entry(struct target *target)
402 {
403 int i;
404 uint32_t xPSR;
405 int retval;
406 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
407 struct armv7m_common *armv7m = &cortex_m3->armv7m;
408 struct arm *arm = &armv7m->arm;
409 struct adiv5_dap *swjdp = armv7m->arm.dap;
410 struct reg *r;
411
412 LOG_DEBUG(" ");
413
414 cortex_m3_clear_halt(target);
415 retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &cortex_m3->dcb_dhcsr);
416 if (retval != ERROR_OK)
417 return retval;
418
419 retval = armv7m->examine_debug_reason(target);
420 if (retval != ERROR_OK)
421 return retval;
422
423 /* Examine target state and mode
424 * First load the registers accessible through the core debug port */
425 int num_regs = arm->core_cache->num_regs;
426
427 for (i = 0; i < num_regs; i++) {
428 r = &armv7m->arm.core_cache->reg_list[i];
429 if (!r->valid)
430 arm->read_core_reg(target, r, i, ARM_MODE_ANY);
431 }
432
433 r = arm->cpsr;
434 xPSR = buf_get_u32(r->value, 0, 32);
435
436 /* For IT instructions xPSR must be reloaded on resume and cleared for debug execution */
437 if (xPSR & 0xf00) {
438 r->dirty = r->valid;
439 cortex_m3_store_core_reg_u32(target, 16, xPSR & ~0xff);
440 }
441
442 /* Are we in an exception handler */
443 if (xPSR & 0x1FF) {
444 armv7m->exception_number = (xPSR & 0x1FF);
445
446 arm->core_mode = ARM_MODE_HANDLER;
447 arm->map = armv7m_msp_reg_map;
448 } else {
449 unsigned control = buf_get_u32(arm->core_cache
450 ->reg_list[ARMV7M_CONTROL].value, 0, 2);
451
452 /* is this thread privileged? */
453 arm->core_mode = control & 1
454 ? ARM_MODE_USER_THREAD
455 : ARM_MODE_THREAD;
456
457 /* which stack is it using? */
458 if (control & 2)
459 arm->map = armv7m_psp_reg_map;
460 else
461 arm->map = armv7m_msp_reg_map;
462
463 armv7m->exception_number = 0;
464 }
465
466 if (armv7m->exception_number)
467 cortex_m3_examine_exception_reason(target);
468
469 LOG_DEBUG("entered debug state in core mode: %s at PC 0x%" PRIx32 ", target->state: %s",
470 arm_mode_name(arm->core_mode),
471 *(uint32_t *)(arm->pc->value),
472 target_state_name(target));
473
474 if (armv7m->post_debug_entry) {
475 retval = armv7m->post_debug_entry(target);
476 if (retval != ERROR_OK)
477 return retval;
478 }
479
480 return ERROR_OK;
481 }
482
483 static int cortex_m3_poll(struct target *target)
484 {
485 int detected_failure = ERROR_OK;
486 int retval = ERROR_OK;
487 enum target_state prev_target_state = target->state;
488 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
489 struct adiv5_dap *swjdp = cortex_m3->armv7m.arm.dap;
490
491 /* Read from Debug Halting Control and Status Register */
492 retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &cortex_m3->dcb_dhcsr);
493 if (retval != ERROR_OK) {
494 target->state = TARGET_UNKNOWN;
495 return retval;
496 }
497
498 /* Recover from lockup. See ARMv7-M architecture spec,
499 * section B1.5.15 "Unrecoverable exception cases".
500 */
501 if (cortex_m3->dcb_dhcsr & S_LOCKUP) {
502 LOG_ERROR("%s -- clearing lockup after double fault",
503 target_name(target));
504 cortex_m3_write_debug_halt_mask(target, C_HALT, 0);
505 target->debug_reason = DBG_REASON_DBGRQ;
506
507 /* We have to execute the rest (the "finally" equivalent), but
508 * still throw this exception again afterwards.
509 */
510 detected_failure = ERROR_FAIL;
511
512 /* refresh status bits */
513 retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &cortex_m3->dcb_dhcsr);
514 if (retval != ERROR_OK)
515 return retval;
516 }
517
518 if (cortex_m3->dcb_dhcsr & S_RESET_ST) {
519 /* check if still in reset */
520 retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &cortex_m3->dcb_dhcsr);
521 if (retval != ERROR_OK)
522 return retval;
523
524 if (cortex_m3->dcb_dhcsr & S_RESET_ST) {
525 target->state = TARGET_RESET;
526 return ERROR_OK;
527 }
528 }
529
530 if (target->state == TARGET_RESET) {
531 /* Cannot switch context while running so endreset is
532 * called with target->state == TARGET_RESET
533 */
534 LOG_DEBUG("Exit from reset with dcb_dhcsr 0x%" PRIx32,
535 cortex_m3->dcb_dhcsr);
536 cortex_m3_endreset_event(target);
537 target->state = TARGET_RUNNING;
538 prev_target_state = TARGET_RUNNING;
539 }
540
541 if (cortex_m3->dcb_dhcsr & S_HALT) {
542 target->state = TARGET_HALTED;
543
544 if ((prev_target_state == TARGET_RUNNING) || (prev_target_state == TARGET_RESET)) {
545 retval = cortex_m3_debug_entry(target);
546 if (retval != ERROR_OK)
547 return retval;
548
549 if (arm_semihosting(target, &retval) != 0)
550 return retval;
551
552 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
553 }
554 if (prev_target_state == TARGET_DEBUG_RUNNING) {
555 LOG_DEBUG(" ");
556 retval = cortex_m3_debug_entry(target);
557 if (retval != ERROR_OK)
558 return retval;
559
560 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
561 }
562 }
563
564 /* REVISIT when S_SLEEP is set, it's in a Sleep or DeepSleep state.
565 * How best to model low power modes?
566 */
567
568 if (target->state == TARGET_UNKNOWN) {
569 /* check if processor is retiring instructions */
570 if (cortex_m3->dcb_dhcsr & S_RETIRE_ST) {
571 target->state = TARGET_RUNNING;
572 retval = ERROR_OK;
573 }
574 }
575
576 /* Did we detect a failure condition that we cleared? */
577 if (detected_failure != ERROR_OK)
578 retval = detected_failure;
579 return retval;
580 }
581
582 static int cortex_m3_halt(struct target *target)
583 {
584 LOG_DEBUG("target->state: %s",
585 target_state_name(target));
586
587 if (target->state == TARGET_HALTED) {
588 LOG_DEBUG("target was already halted");
589 return ERROR_OK;
590 }
591
592 if (target->state == TARGET_UNKNOWN)
593 LOG_WARNING("target was in unknown state when halt was requested");
594
595 if (target->state == TARGET_RESET) {
596 if ((jtag_get_reset_config() & RESET_SRST_PULLS_TRST) && jtag_get_srst()) {
597 LOG_ERROR("can't request a halt while in reset if nSRST pulls nTRST");
598 return ERROR_TARGET_FAILURE;
599 } else {
600 /* we came here in a reset_halt or reset_init sequence
601 * debug entry was already prepared in cortex_m3_assert_reset()
602 */
603 target->debug_reason = DBG_REASON_DBGRQ;
604
605 return ERROR_OK;
606 }
607 }
608
609 /* Write to Debug Halting Control and Status Register */
610 cortex_m3_write_debug_halt_mask(target, C_HALT, 0);
611
612 target->debug_reason = DBG_REASON_DBGRQ;
613
614 return ERROR_OK;
615 }
616
617 static int cortex_m3_soft_reset_halt(struct target *target)
618 {
619 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
620 struct adiv5_dap *swjdp = cortex_m3->armv7m.arm.dap;
621 uint32_t dcb_dhcsr = 0;
622 int retval, timeout = 0;
623
624 /* soft_reset_halt is deprecated on cortex_m as the same functionality
625 * can be obtained by using 'reset halt' and 'cortex_m reset_config vectreset'.
626 * As this reset only uses VC_CORERESET, it only ever resets the cortex_m
627 * core, not the peripherals */
628 LOG_WARNING("soft_reset_halt is deprecated, please use 'reset halt' instead.");
629
630 /* Enter debug state on reset; restore DEMCR in endreset_event() */
631 retval = mem_ap_write_u32(swjdp, DCB_DEMCR,
632 TRCENA | VC_HARDERR | VC_BUSERR | VC_CORERESET);
633 if (retval != ERROR_OK)
634 return retval;
635
636 /* Request a core-only reset */
637 retval = mem_ap_write_atomic_u32(swjdp, NVIC_AIRCR,
638 AIRCR_VECTKEY | AIRCR_VECTRESET);
639 if (retval != ERROR_OK)
640 return retval;
641 target->state = TARGET_RESET;
642
643 /* registers are now invalid */
644 register_cache_invalidate(cortex_m3->armv7m.arm.core_cache);
645
646 while (timeout < 100) {
647 retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &dcb_dhcsr);
648 if (retval == ERROR_OK) {
649 retval = mem_ap_read_atomic_u32(swjdp, NVIC_DFSR,
650 &cortex_m3->nvic_dfsr);
651 if (retval != ERROR_OK)
652 return retval;
653 if ((dcb_dhcsr & S_HALT)
654 && (cortex_m3->nvic_dfsr & DFSR_VCATCH)) {
655 LOG_DEBUG("system reset-halted, DHCSR 0x%08x, "
656 "DFSR 0x%08x",
657 (unsigned) dcb_dhcsr,
658 (unsigned) cortex_m3->nvic_dfsr);
659 cortex_m3_poll(target);
660 /* FIXME restore user's vector catch config */
661 return ERROR_OK;
662 } else
663 LOG_DEBUG("waiting for system reset-halt, "
664 "DHCSR 0x%08x, %d ms",
665 (unsigned) dcb_dhcsr, timeout);
666 }
667 timeout++;
668 alive_sleep(1);
669 }
670
671 return ERROR_OK;
672 }
673
674 void cortex_m3_enable_breakpoints(struct target *target)
675 {
676 struct breakpoint *breakpoint = target->breakpoints;
677
678 /* set any pending breakpoints */
679 while (breakpoint) {
680 if (!breakpoint->set)
681 cortex_m3_set_breakpoint(target, breakpoint);
682 breakpoint = breakpoint->next;
683 }
684 }
685
686 static int cortex_m3_resume(struct target *target, int current,
687 uint32_t address, int handle_breakpoints, int debug_execution)
688 {
689 struct armv7m_common *armv7m = target_to_armv7m(target);
690 struct breakpoint *breakpoint = NULL;
691 uint32_t resume_pc;
692 struct reg *r;
693
694 if (target->state != TARGET_HALTED) {
695 LOG_WARNING("target not halted");
696 return ERROR_TARGET_NOT_HALTED;
697 }
698
699 if (!debug_execution) {
700 target_free_all_working_areas(target);
701 cortex_m3_enable_breakpoints(target);
702 cortex_m3_enable_watchpoints(target);
703 }
704
705 if (debug_execution) {
706 r = armv7m->arm.core_cache->reg_list + ARMV7M_PRIMASK;
707
708 /* Disable interrupts */
709 /* We disable interrupts in the PRIMASK register instead of
710 * masking with C_MASKINTS. This is probably the same issue
711 * as Cortex-M3 Erratum 377493 (fixed in r1p0): C_MASKINTS
712 * in parallel with disabled interrupts can cause local faults
713 * to not be taken.
714 *
715 * REVISIT this clearly breaks non-debug execution, since the
716 * PRIMASK register state isn't saved/restored... workaround
717 * by never resuming app code after debug execution.
718 */
719 buf_set_u32(r->value, 0, 1, 1);
720 r->dirty = true;
721 r->valid = true;
722
723 /* Make sure we are in Thumb mode */
724 r = armv7m->arm.cpsr;
725 buf_set_u32(r->value, 24, 1, 1);
726 r->dirty = true;
727 r->valid = true;
728 }
729
730 /* current = 1: continue on current pc, otherwise continue at <address> */
731 r = armv7m->arm.pc;
732 if (!current) {
733 buf_set_u32(r->value, 0, 32, address);
734 r->dirty = true;
735 r->valid = true;
736 }
737
738 /* if we halted last time due to a bkpt instruction
739 * then we have to manually step over it, otherwise
740 * the core will break again */
741
742 if (!breakpoint_find(target, buf_get_u32(r->value, 0, 32))
743 && !debug_execution)
744 armv7m_maybe_skip_bkpt_inst(target, NULL);
745
746 resume_pc = buf_get_u32(r->value, 0, 32);
747
748 armv7m_restore_context(target);
749
750 /* the front-end may request us not to handle breakpoints */
751 if (handle_breakpoints) {
752 /* Single step past breakpoint at current address */
753 breakpoint = breakpoint_find(target, resume_pc);
754 if (breakpoint) {
755 LOG_DEBUG("unset breakpoint at 0x%8.8" PRIx32 " (ID: %d)",
756 breakpoint->address,
757 breakpoint->unique_id);
758 cortex_m3_unset_breakpoint(target, breakpoint);
759 cortex_m3_single_step_core(target);
760 cortex_m3_set_breakpoint(target, breakpoint);
761 }
762 }
763
764 /* Restart core */
765 cortex_m3_write_debug_halt_mask(target, 0, C_HALT);
766
767 target->debug_reason = DBG_REASON_NOTHALTED;
768
769 /* registers are now invalid */
770 register_cache_invalidate(armv7m->arm.core_cache);
771
772 if (!debug_execution) {
773 target->state = TARGET_RUNNING;
774 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
775 LOG_DEBUG("target resumed at 0x%" PRIx32 "", resume_pc);
776 } else {
777 target->state = TARGET_DEBUG_RUNNING;
778 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
779 LOG_DEBUG("target debug resumed at 0x%" PRIx32 "", resume_pc);
780 }
781
782 return ERROR_OK;
783 }
784
785 /* int irqstepcount = 0; */
786 static int cortex_m3_step(struct target *target, int current,
787 uint32_t address, int handle_breakpoints)
788 {
789 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
790 struct armv7m_common *armv7m = &cortex_m3->armv7m;
791 struct adiv5_dap *swjdp = armv7m->arm.dap;
792 struct breakpoint *breakpoint = NULL;
793 struct reg *pc = armv7m->arm.pc;
794 bool bkpt_inst_found = false;
795 int retval;
796 bool isr_timed_out = false;
797
798 if (target->state != TARGET_HALTED) {
799 LOG_WARNING("target not halted");
800 return ERROR_TARGET_NOT_HALTED;
801 }
802
803 /* current = 1: continue on current pc, otherwise continue at <address> */
804 if (!current)
805 buf_set_u32(pc->value, 0, 32, address);
806
807 uint32_t pc_value = buf_get_u32(pc->value, 0, 32);
808
809 /* the front-end may request us not to handle breakpoints */
810 if (handle_breakpoints) {
811 breakpoint = breakpoint_find(target, pc_value);
812 if (breakpoint)
813 cortex_m3_unset_breakpoint(target, breakpoint);
814 }
815
816 armv7m_maybe_skip_bkpt_inst(target, &bkpt_inst_found);
817
818 target->debug_reason = DBG_REASON_SINGLESTEP;
819
820 armv7m_restore_context(target);
821
822 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
823
824 /* if no bkpt instruction is found at pc then we can perform
825 * a normal step, otherwise we have to manually step over the bkpt
826 * instruction - as such simulate a step */
827 if (bkpt_inst_found == false) {
828 /* Automatic ISR masking mode off: Just step over the next instruction */
829 if ((cortex_m3->isrmasking_mode != CORTEX_M3_ISRMASK_AUTO))
830 cortex_m3_write_debug_halt_mask(target, C_STEP, C_HALT);
831 else {
832 /* Process interrupts during stepping in a way that they don't interfere
833 * with debugging.
834 *
835 * Principle:
836 *
837 * Set a temporary break point at the current pc and let the core run
838 * with interrupts enabled. Pending interrupts get served and we run
839 * into the breakpoint again afterwards. Then we step over the next
840 * instruction with interrupts disabled.
841 *
842 * If the pending interrupts don't complete within time, we leave the
843 * core running. This may happen if the interrupts trigger faster
844 * than the core can process them or the handler doesn't return.
845 *
846 * If no more breakpoints are available we simply do a step with
847 * interrupts enabled.
848 *
849 */
850
851 /* 2012-09-29 ph
852 *
853 * If a breakpoint is already set on the lower halfword then a breakpoint on
854 * the upper halfword will not break again when the core is restarted. So we
855 * just step over the instruction with interrupts disabled.
856 *
857 * The documentation has no information about this, it was found by observation
858 * on STM32F1 and STM32F2. Proper explanation welcome. STM32F0 doesn't seem to
859 * suffer from this problem.
860 *
861 * To add some confusion: pc_value has bit 0 always set, while the breakpoint
862 * address has it always cleared. The former is done to indicate thumb mode
863 * to gdb.
864 *
865 */
866 if ((pc_value & 0x02) && breakpoint_find(target, pc_value & ~0x03)) {
867 LOG_DEBUG("Stepping over next instruction with interrupts disabled");
868 cortex_m3_write_debug_halt_mask(target, C_HALT | C_MASKINTS, 0);
869 cortex_m3_write_debug_halt_mask(target, C_STEP, C_HALT);
870 /* Re-enable interrupts */
871 cortex_m3_write_debug_halt_mask(target, C_HALT, C_MASKINTS);
872 }
873 else {
874
875 /* Set a temporary break point */
876 if (breakpoint)
877 retval = cortex_m3_set_breakpoint(target, breakpoint);
878 else
879 retval = breakpoint_add(target, pc_value, 2, BKPT_TYPE_BY_ADDR(pc_value));
880 bool tmp_bp_set = (retval == ERROR_OK);
881
882 /* No more breakpoints left, just do a step */
883 if (!tmp_bp_set)
884 cortex_m3_write_debug_halt_mask(target, C_STEP, C_HALT);
885 else {
886 /* Start the core */
887 LOG_DEBUG("Starting core to serve pending interrupts");
888 int64_t t_start = timeval_ms();
889 cortex_m3_write_debug_halt_mask(target, 0, C_HALT | C_STEP);
890
891 /* Wait for pending handlers to complete or timeout */
892 do {
893 retval = mem_ap_read_atomic_u32(swjdp,
894 DCB_DHCSR,
895 &cortex_m3->dcb_dhcsr);
896 if (retval != ERROR_OK) {
897 target->state = TARGET_UNKNOWN;
898 return retval;
899 }
900 isr_timed_out = ((timeval_ms() - t_start) > 500);
901 } while (!((cortex_m3->dcb_dhcsr & S_HALT) || isr_timed_out));
902
903 /* only remove breakpoint if we created it */
904 if (breakpoint)
905 cortex_m3_unset_breakpoint(target, breakpoint);
906 else {
907 /* Remove the temporary breakpoint */
908 breakpoint_remove(target, pc_value);
909 }
910
911 if (isr_timed_out) {
912 LOG_DEBUG("Interrupt handlers didn't complete within time, "
913 "leaving target running");
914 } else {
915 /* Step over next instruction with interrupts disabled */
916 cortex_m3_write_debug_halt_mask(target,
917 C_HALT | C_MASKINTS,
918 0);
919 cortex_m3_write_debug_halt_mask(target, C_STEP, C_HALT);
920 /* Re-enable interrupts */
921 cortex_m3_write_debug_halt_mask(target, C_HALT, C_MASKINTS);
922 }
923 }
924 }
925 }
926 }
927
928 retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &cortex_m3->dcb_dhcsr);
929 if (retval != ERROR_OK)
930 return retval;
931
932 /* registers are now invalid */
933 register_cache_invalidate(armv7m->arm.core_cache);
934
935 if (breakpoint)
936 cortex_m3_set_breakpoint(target, breakpoint);
937
938 if (isr_timed_out) {
939 /* Leave the core running. The user has to stop execution manually. */
940 target->debug_reason = DBG_REASON_NOTHALTED;
941 target->state = TARGET_RUNNING;
942 return ERROR_OK;
943 }
944
945 LOG_DEBUG("target stepped dcb_dhcsr = 0x%" PRIx32
946 " nvic_icsr = 0x%" PRIx32,
947 cortex_m3->dcb_dhcsr, cortex_m3->nvic_icsr);
948
949 retval = cortex_m3_debug_entry(target);
950 if (retval != ERROR_OK)
951 return retval;
952 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
953
954 LOG_DEBUG("target stepped dcb_dhcsr = 0x%" PRIx32
955 " nvic_icsr = 0x%" PRIx32,
956 cortex_m3->dcb_dhcsr, cortex_m3->nvic_icsr);
957
958 return ERROR_OK;
959 }
960
961 static int cortex_m3_assert_reset(struct target *target)
962 {
963 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
964 struct adiv5_dap *swjdp = cortex_m3->armv7m.arm.dap;
965 enum cortex_m3_soft_reset_config reset_config = cortex_m3->soft_reset_config;
966
967 LOG_DEBUG("target->state: %s",
968 target_state_name(target));
969
970 enum reset_types jtag_reset_config = jtag_get_reset_config();
971
972 if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT)) {
973 /* allow scripts to override the reset event */
974
975 target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
976 register_cache_invalidate(cortex_m3->armv7m.arm.core_cache);
977 target->state = TARGET_RESET;
978
979 return ERROR_OK;
980 }
981
982 /* some cores support connecting while srst is asserted
983 * use that mode if it has been configured */
984
985 bool srst_asserted = false;
986
987 if ((jtag_reset_config & RESET_HAS_SRST) &&
988 (jtag_reset_config & RESET_SRST_NO_GATING)) {
989 adapter_assert_reset();
990 srst_asserted = true;
991 }
992
993 /* Enable debug requests */
994 int retval;
995 retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &cortex_m3->dcb_dhcsr);
996 if (retval != ERROR_OK)
997 return retval;
998 if (!(cortex_m3->dcb_dhcsr & C_DEBUGEN)) {
999 retval = mem_ap_write_u32(swjdp, DCB_DHCSR, DBGKEY | C_DEBUGEN);
1000 if (retval != ERROR_OK)
1001 return retval;
1002 }
1003
1004 /* If the processor is sleeping in a WFI or WFE instruction, the
1005 * C_HALT bit must be asserted to regain control */
1006 if (cortex_m3->dcb_dhcsr & S_SLEEP) {
1007 retval = mem_ap_write_u32(swjdp, DCB_DHCSR, DBGKEY | C_HALT | C_DEBUGEN);
1008 if (retval != ERROR_OK)
1009 return retval;
1010 }
1011
1012 retval = mem_ap_write_u32(swjdp, DCB_DCRDR, 0);
1013 if (retval != ERROR_OK)
1014 return retval;
1015
1016 if (!target->reset_halt) {
1017 /* Set/Clear C_MASKINTS in a separate operation */
1018 if (cortex_m3->dcb_dhcsr & C_MASKINTS) {
1019 retval = mem_ap_write_atomic_u32(swjdp, DCB_DHCSR,
1020 DBGKEY | C_DEBUGEN | C_HALT);
1021 if (retval != ERROR_OK)
1022 return retval;
1023 }
1024
1025 /* clear any debug flags before resuming */
1026 cortex_m3_clear_halt(target);
1027
1028 /* clear C_HALT in dhcsr reg */
1029 cortex_m3_write_debug_halt_mask(target, 0, C_HALT);
1030 } else {
1031 /* Halt in debug on reset; endreset_event() restores DEMCR.
1032 *
1033 * REVISIT catching BUSERR presumably helps to defend against
1034 * bad vector table entries. Should this include MMERR or
1035 * other flags too?
1036 */
1037 retval = mem_ap_write_atomic_u32(swjdp, DCB_DEMCR,
1038 TRCENA | VC_HARDERR | VC_BUSERR | VC_CORERESET);
1039 if (retval != ERROR_OK)
1040 return retval;
1041 }
1042
1043 if (jtag_reset_config & RESET_HAS_SRST) {
1044 /* default to asserting srst */
1045 if (!srst_asserted)
1046 adapter_assert_reset();
1047 } else {
1048 /* Use a standard Cortex-M3 software reset mechanism.
1049 * We default to using VECTRESET as it is supported on all current cores.
1050 * This has the disadvantage of not resetting the peripherals, so a
1051 * reset-init event handler is needed to perform any peripheral resets.
1052 */
1053 retval = mem_ap_write_atomic_u32(swjdp, NVIC_AIRCR,
1054 AIRCR_VECTKEY | ((reset_config == CORTEX_M3_RESET_SYSRESETREQ)
1055 ? AIRCR_SYSRESETREQ : AIRCR_VECTRESET));
1056 if (retval != ERROR_OK)
1057 return retval;
1058
1059 LOG_DEBUG("Using Cortex-M %s", (reset_config == CORTEX_M3_RESET_SYSRESETREQ)
1060 ? "SYSRESETREQ" : "VECTRESET");
1061
1062 if (reset_config == CORTEX_M3_RESET_VECTRESET) {
1063 LOG_WARNING("Only resetting the Cortex-M core, use a reset-init event "
1064 "handler to reset any peripherals or configure hardware srst support.");
1065 }
1066
1067 {
1068 /* I do not know why this is necessary, but it
1069 * fixes strange effects (step/resume cause NMI
1070 * after reset) on LM3S6918 -- Michael Schwingen
1071 */
1072 uint32_t tmp;
1073 retval = mem_ap_read_atomic_u32(swjdp, NVIC_AIRCR, &tmp);
1074 if (retval != ERROR_OK)
1075 return retval;
1076 }
1077 }
1078
1079 target->state = TARGET_RESET;
1080 jtag_add_sleep(50000);
1081
1082 register_cache_invalidate(cortex_m3->armv7m.arm.core_cache);
1083
1084 if (target->reset_halt) {
1085 retval = target_halt(target);
1086 if (retval != ERROR_OK)
1087 return retval;
1088 }
1089
1090 return ERROR_OK;
1091 }
1092
1093 static int cortex_m3_deassert_reset(struct target *target)
1094 {
1095 LOG_DEBUG("target->state: %s",
1096 target_state_name(target));
1097
1098 /* deassert reset lines */
1099 adapter_deassert_reset();
1100
1101 return ERROR_OK;
1102 }
1103
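/* Hardware breakpoints claim a free FPB comparator: the FPCR value combines
 * the comparator address, a halfword-select (REPLACE) field and the enable
 * bit.  Software breakpoints patch a BKPT instruction over the original
 * opcode, which is saved so it can be restored on removal.
 */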
1104 int cortex_m3_set_breakpoint(struct target *target, struct breakpoint *breakpoint)
1105 {
1106 int retval;
1107 int fp_num = 0;
1108 uint32_t hilo;
1109 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
1110 struct cortex_m3_fp_comparator *comparator_list = cortex_m3->fp_comparator_list;
1111
1112 if (breakpoint->set) {
1113 LOG_WARNING("breakpoint (BPID: %d) already set", breakpoint->unique_id);
1114 return ERROR_OK;
1115 }
1116
1117 if (cortex_m3->auto_bp_type)
1118 breakpoint->type = BKPT_TYPE_BY_ADDR(breakpoint->address);
1119
1120 if (breakpoint->type == BKPT_HARD) {
1121 while (comparator_list[fp_num].used && (fp_num < cortex_m3->fp_num_code))
1122 fp_num++;
1123 if (fp_num >= cortex_m3->fp_num_code) {
1124 LOG_ERROR("Can not find free FPB Comparator!");
1125 return ERROR_FAIL;
1126 }
1127 breakpoint->set = fp_num + 1;
1128 hilo = (breakpoint->address & 0x2) ? FPCR_REPLACE_BKPT_HIGH : FPCR_REPLACE_BKPT_LOW;
1129 comparator_list[fp_num].used = 1;
1130 comparator_list[fp_num].fpcr_value = (breakpoint->address & 0x1FFFFFFC) | hilo | 1;
1131 target_write_u32(target, comparator_list[fp_num].fpcr_address,
1132 comparator_list[fp_num].fpcr_value);
1133 LOG_DEBUG("fpc_num %i fpcr_value 0x%" PRIx32 "",
1134 fp_num,
1135 comparator_list[fp_num].fpcr_value);
1136 if (!cortex_m3->fpb_enabled) {
1137 LOG_DEBUG("FPB wasn't enabled, do it now");
1138 target_write_u32(target, FP_CTRL, 3);
1139 }
1140 } else if (breakpoint->type == BKPT_SOFT) {
1141 uint8_t code[4];
1142
1143 /* NOTE: on ARMv6-M and ARMv7-M, BKPT(0xab) is used for
1144 * semihosting; don't use that. Otherwise the BKPT
1145 * parameter is arbitrary.
1146 */
1147 buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1148 retval = target_read_memory(target,
1149 breakpoint->address & 0xFFFFFFFE,
1150 breakpoint->length, 1,
1151 breakpoint->orig_instr);
1152 if (retval != ERROR_OK)
1153 return retval;
1154 retval = target_write_memory(target,
1155 breakpoint->address & 0xFFFFFFFE,
1156 breakpoint->length, 1,
1157 code);
1158 if (retval != ERROR_OK)
1159 return retval;
1160 breakpoint->set = true;
1161 }
1162
1163 LOG_DEBUG("BPID: %d, Type: %d, Address: 0x%08" PRIx32 " Length: %d (set=%d)",
1164 breakpoint->unique_id,
1165 (int)(breakpoint->type),
1166 breakpoint->address,
1167 breakpoint->length,
1168 breakpoint->set);
1169
1170 return ERROR_OK;
1171 }
1172
1173 int cortex_m3_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
1174 {
1175 int retval;
1176 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
1177 struct cortex_m3_fp_comparator *comparator_list = cortex_m3->fp_comparator_list;
1178
1179 if (!breakpoint->set) {
1180 LOG_WARNING("breakpoint not set");
1181 return ERROR_OK;
1182 }
1183
1184 LOG_DEBUG("BPID: %d, Type: %d, Address: 0x%08" PRIx32 " Length: %d (set=%d)",
1185 breakpoint->unique_id,
1186 (int)(breakpoint->type),
1187 breakpoint->address,
1188 breakpoint->length,
1189 breakpoint->set);
1190
1191 if (breakpoint->type == BKPT_HARD) {
1192 int fp_num = breakpoint->set - 1;
1193 if ((fp_num < 0) || (fp_num >= cortex_m3->fp_num_code)) {
1194 LOG_DEBUG("Invalid FP Comparator number in breakpoint");
1195 return ERROR_OK;
1196 }
1197 comparator_list[fp_num].used = 0;
1198 comparator_list[fp_num].fpcr_value = 0;
1199 target_write_u32(target, comparator_list[fp_num].fpcr_address,
1200 comparator_list[fp_num].fpcr_value);
1201 } else {
1202 /* restore original instruction (kept in target endianness) */
1203 if (breakpoint->length == 4) {
1204 retval = target_write_memory(target, breakpoint->address & 0xFFFFFFFE, 4, 1,
1205 breakpoint->orig_instr);
1206 if (retval != ERROR_OK)
1207 return retval;
1208 } else {
1209 retval = target_write_memory(target, breakpoint->address & 0xFFFFFFFE, 2, 1,
1210 breakpoint->orig_instr);
1211 if (retval != ERROR_OK)
1212 return retval;
1213 }
1214 }
1215 breakpoint->set = false;
1216
1217 return ERROR_OK;
1218 }
1219
1220 int cortex_m3_add_breakpoint(struct target *target, struct breakpoint *breakpoint)
1221 {
1222 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
1223
1224 if (cortex_m3->auto_bp_type)
1225 breakpoint->type = BKPT_TYPE_BY_ADDR(breakpoint->address);
1226
1227 if (breakpoint->type != BKPT_TYPE_BY_ADDR(breakpoint->address)) {
1228 if (breakpoint->type == BKPT_HARD) {
1229 LOG_INFO("flash patch comparator requested outside code memory region");
1230 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1231 }
1232
1233 if (breakpoint->type == BKPT_SOFT) {
1234 LOG_INFO("soft breakpoint requested in code (flash) memory region");
1235 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1236 }
1237 }
1238
1239 if ((breakpoint->type == BKPT_HARD) && (cortex_m3->fp_code_available < 1)) {
1240 LOG_INFO("no flash patch comparator unit available for hardware breakpoint");
1241 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1242 }
1243
1244 if ((breakpoint->length != 2)) {
1245 LOG_INFO("only breakpoints of two bytes length supported");
1246 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1247 }
1248
1249 if (breakpoint->type == BKPT_HARD)
1250 cortex_m3->fp_code_available--;
1251
1252 return cortex_m3_set_breakpoint(target, breakpoint);
1253 }
1254
1255 int cortex_m3_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1256 {
1257 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
1258
1259 /* REVISIT why check? FPB can be updated with core running ... */
1260 if (target->state != TARGET_HALTED) {
1261 LOG_WARNING("target not halted");
1262 return ERROR_TARGET_NOT_HALTED;
1263 }
1264
1265 if (cortex_m3->auto_bp_type)
1266 breakpoint->type = BKPT_TYPE_BY_ADDR(breakpoint->address);
1267
1268 if (breakpoint->set)
1269 cortex_m3_unset_breakpoint(target, breakpoint);
1270
1271 if (breakpoint->type == BKPT_HARD)
1272 cortex_m3->fp_code_available++;
1273
1274 return ERROR_OK;
1275 }
1276
1277 int cortex_m3_set_watchpoint(struct target *target, struct watchpoint *watchpoint)
1278 {
1279 int dwt_num = 0;
1280 uint32_t mask, temp;
1281 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
1282
1283 /* watchpoint params were validated earlier */
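/* The DWT MASK register holds log2(length): the comparator ignores that
 * many low-order address bits when matching. */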
1284 mask = 0;
1285 temp = watchpoint->length;
1286 while (temp) {
1287 temp >>= 1;
1288 mask++;
1289 }
1290 mask--;
1291
1292 /* REVISIT Don't fully trust these "not used" records ... users
1293 * may set up breakpoints by hand, e.g. dual-address data value
1294 * watchpoint using comparator #1; comparator #0 matching cycle
1295 * count; send data trace info through ITM and TPIU; etc
1296 */
1297 struct cortex_m3_dwt_comparator *comparator;
1298
1299 for (comparator = cortex_m3->dwt_comparator_list;
1300 comparator->used && dwt_num < cortex_m3->dwt_num_comp;
1301 comparator++, dwt_num++)
1302 continue;
1303 if (dwt_num >= cortex_m3->dwt_num_comp) {
1304 LOG_ERROR("Can not find free DWT Comparator");
1305 return ERROR_FAIL;
1306 }
1307 comparator->used = 1;
1308 watchpoint->set = dwt_num + 1;
1309
1310 comparator->comp = watchpoint->address;
1311 target_write_u32(target, comparator->dwt_comparator_address + 0,
1312 comparator->comp);
1313
1314 comparator->mask = mask;
1315 target_write_u32(target, comparator->dwt_comparator_address + 4,
1316 comparator->mask);
1317
1318 switch (watchpoint->rw) {
1319 case WPT_READ:
1320 comparator->function = 5;
1321 break;
1322 case WPT_WRITE:
1323 comparator->function = 6;
1324 break;
1325 case WPT_ACCESS:
1326 comparator->function = 7;
1327 break;
1328 }
1329 target_write_u32(target, comparator->dwt_comparator_address + 8,
1330 comparator->function);
1331
1332 LOG_DEBUG("Watchpoint (ID %d) DWT%d 0x%08x 0x%x 0x%05x",
1333 watchpoint->unique_id, dwt_num,
1334 (unsigned) comparator->comp,
1335 (unsigned) comparator->mask,
1336 (unsigned) comparator->function);
1337 return ERROR_OK;
1338 }
1339
1340 int cortex_m3_unset_watchpoint(struct target *target, struct watchpoint *watchpoint)
1341 {
1342 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
1343 struct cortex_m3_dwt_comparator *comparator;
1344 int dwt_num;
1345
1346 if (!watchpoint->set) {
1347 LOG_WARNING("watchpoint (wpid: %d) not set",
1348 watchpoint->unique_id);
1349 return ERROR_OK;
1350 }
1351
1352 dwt_num = watchpoint->set - 1;
1353
1354 LOG_DEBUG("Watchpoint (ID %d) DWT%d address: 0x%08x clear",
1355 watchpoint->unique_id, dwt_num,
1356 (unsigned) watchpoint->address);
1357
1358 if ((dwt_num < 0) || (dwt_num >= cortex_m3->dwt_num_comp)) {
1359 LOG_DEBUG("Invalid DWT Comparator number in watchpoint");
1360 return ERROR_OK;
1361 }
1362
1363 comparator = cortex_m3->dwt_comparator_list + dwt_num;
1364 comparator->used = 0;
1365 comparator->function = 0;
1366 target_write_u32(target, comparator->dwt_comparator_address + 8,
1367 comparator->function);
1368
1369 watchpoint->set = false;
1370
1371 return ERROR_OK;
1372 }
1373
1374 int cortex_m3_add_watchpoint(struct target *target, struct watchpoint *watchpoint)
1375 {
1376 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
1377
1378 if (cortex_m3->dwt_comp_available < 1) {
1379 LOG_DEBUG("no comparators?");
1380 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1381 }
1382
1383 /* hardware doesn't support data value masking */
1384 if (watchpoint->mask != ~(uint32_t)0) {
1385 LOG_DEBUG("watchpoint value masks not supported");
1386 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1387 }
1388
1389 /* hardware allows address masks of up to 32K */
1390 unsigned mask;
1391
1392 for (mask = 0; mask < 16; mask++) {
1393 if ((1u << mask) == watchpoint->length)
1394 break;
1395 }
1396 if (mask == 16) {
1397 LOG_DEBUG("unsupported watchpoint length");
1398 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1399 }
1400 if (watchpoint->address & ((1 << mask) - 1)) {
1401 LOG_DEBUG("watchpoint address is unaligned");
1402 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1403 }
1404
1405 /* Caller doesn't seem to be able to describe watching for data
1406 * values of zero; that flags "no value".
1407 *
1408 * REVISIT This DWT may well be able to watch for specific data
1409 * values. Requires comparator #1 to set DATAVMATCH and match
1410 * the data, and another comparator (DATAVADDR0) matching addr.
1411 */
1412 if (watchpoint->value) {
1413 LOG_DEBUG("data value watchpoint not YET supported");
1414 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1415 }
1416
1417 cortex_m3->dwt_comp_available--;
1418 LOG_DEBUG("dwt_comp_available: %d", cortex_m3->dwt_comp_available);
1419
1420 return ERROR_OK;
1421 }
1422
1423 int cortex_m3_remove_watchpoint(struct target *target, struct watchpoint *watchpoint)
1424 {
1425 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
1426
1427 /* REVISIT why check? DWT can be updated with core running ... */
1428 if (target->state != TARGET_HALTED) {
1429 LOG_WARNING("target not halted");
1430 return ERROR_TARGET_NOT_HALTED;
1431 }
1432
1433 if (watchpoint->set)
1434 cortex_m3_unset_watchpoint(target, watchpoint);
1435
1436 cortex_m3->dwt_comp_available++;
1437 LOG_DEBUG("dwt_comp_available: %d", cortex_m3->dwt_comp_available);
1438
1439 return ERROR_OK;
1440 }
1441
1442 void cortex_m3_enable_watchpoints(struct target *target)
1443 {
1444 struct watchpoint *watchpoint = target->watchpoints;
1445
1446 /* set any pending watchpoints */
1447 while (watchpoint) {
1448 if (!watchpoint->set)
1449 cortex_m3_set_watchpoint(target, watchpoint);
1450 watchpoint = watchpoint->next;
1451 }
1452 }
1453
1454 static int cortex_m3_load_core_reg_u32(struct target *target,
1455 uint32_t num, uint32_t *value)
1456 {
1457 int retval;
1458 struct armv7m_common *armv7m = target_to_armv7m(target);
1459 struct adiv5_dap *swjdp = armv7m->arm.dap;
1460
1461 /* NOTE: we "know" here that the register identifiers used
1462 * in the v7m header match the Cortex-M3 Debug Core Register
1463 * Selector values for R0..R15, xPSR, MSP, and PSP.
1464 */
1465 switch (num) {
1466 case 0 ... 18:
1467 /* read a normal core register */
1468 retval = cortexm3_dap_read_coreregister_u32(swjdp, value, num);
1469
1470 if (retval != ERROR_OK) {
1471 LOG_ERROR("JTAG failure %i", retval);
1472 return ERROR_JTAG_DEVICE_ERROR;
1473 }
1474 LOG_DEBUG("load from core reg %i value 0x%" PRIx32 "", (int)num, *value);
1475 break;
1476
1477 case ARMV7M_PRIMASK:
1478 case ARMV7M_BASEPRI:
1479 case ARMV7M_FAULTMASK:
1480 case ARMV7M_CONTROL:
1481 /* Cortex-M3 packages these four registers as bitfields
1482 * in one Debug Core register. So say r0 and r2 docs;
1483 * it was removed from r1 docs, but still works.
1484 */
1485 cortexm3_dap_read_coreregister_u32(swjdp, value, 20);
1486
1487 switch (num) {
1488 case ARMV7M_PRIMASK:
1489 *value = buf_get_u32((uint8_t *)value, 0, 1);
1490 break;
1491
1492 case ARMV7M_BASEPRI:
1493 *value = buf_get_u32((uint8_t *)value, 8, 8);
1494 break;
1495
1496 case ARMV7M_FAULTMASK:
1497 *value = buf_get_u32((uint8_t *)value, 16, 1);
1498 break;
1499
1500 case ARMV7M_CONTROL:
1501 *value = buf_get_u32((uint8_t *)value, 24, 2);
1502 break;
1503 }
1504
1505 LOG_DEBUG("load from special reg %i value 0x%" PRIx32 "", (int)num, *value);
1506 break;
1507
1508 default:
1509 return ERROR_COMMAND_SYNTAX_ERROR;
1510 }
1511
1512 return ERROR_OK;
1513 }
1514
1515 static int cortex_m3_store_core_reg_u32(struct target *target,
1516 uint32_t num, uint32_t value)
1517 {
1518 int retval;
1519 uint32_t reg;
1520 struct armv7m_common *armv7m = target_to_armv7m(target);
1521 struct adiv5_dap *swjdp = armv7m->arm.dap;
1522
1523 /* NOTE: we "know" here that the register identifiers used
1524 * in the v7m header match the Cortex-M3 Debug Core Register
1525 * Selector values for R0..R15, xPSR, MSP, and PSP.
1526 */
1527 switch (num) {
1528 case 0 ... 18:
1529 retval = cortexm3_dap_write_coreregister_u32(swjdp, value, num);
1530 if (retval != ERROR_OK) {
1531 struct reg *r;
1532
1533 LOG_ERROR("JTAG failure");
1534 r = armv7m->arm.core_cache->reg_list + num;
1535 r->dirty = r->valid;
1536 return ERROR_JTAG_DEVICE_ERROR;
1537 }
1538 LOG_DEBUG("write core reg %i value 0x%" PRIx32 "", (int)num, value);
1539 break;
1540
1541 case ARMV7M_PRIMASK:
1542 case ARMV7M_BASEPRI:
1543 case ARMV7M_FAULTMASK:
1544 case ARMV7M_CONTROL:
1545 /* Cortex-M3 packages these four registers as bitfields
1546 * in one Debug Core register. So say r0 and r2 docs;
1547 * it was removed from r1 docs, but still works.
1548 */
1549 cortexm3_dap_read_coreregister_u32(swjdp, &reg, 20);
1550
1551 switch (num) {
1552 case ARMV7M_PRIMASK:
1553 buf_set_u32((uint8_t *)&reg, 0, 1, value);
1554 break;
1555
1556 case ARMV7M_BASEPRI:
1557 buf_set_u32((uint8_t *)&reg, 8, 8, value);
1558 break;
1559
1560 case ARMV7M_FAULTMASK:
1561 buf_set_u32((uint8_t *)&reg, 16, 1, value);
1562 break;
1563
1564 case ARMV7M_CONTROL:
1565 buf_set_u32((uint8_t *)&reg, 24, 2, value);
1566 break;
1567 }
1568
1569 cortexm3_dap_write_coreregister_u32(swjdp, reg, 20);
1570
1571 LOG_DEBUG("write special reg %i value 0x%" PRIx32 " ", (int)num, value);
1572 break;
1573
1574 default:
1575 return ERROR_COMMAND_SYNTAX_ERROR;
1576 }
1577
1578 return ERROR_OK;
1579 }
1580
1581 static int cortex_m3_read_memory(struct target *target, uint32_t address,
1582 uint32_t size, uint32_t count, uint8_t *buffer)
1583 {
1584 struct armv7m_common *armv7m = target_to_armv7m(target);
1585 struct adiv5_dap *swjdp = armv7m->arm.dap;
1586
1587 if (armv7m->arm.is_armv6m) {
1588 /* armv6m does not handle unaligned memory access */
1589 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1590 return ERROR_TARGET_UNALIGNED_ACCESS;
1591 }
1592
1593 return mem_ap_read(swjdp, buffer, size, count, address, true);
1594 }
1595
1596 static int cortex_m3_write_memory(struct target *target, uint32_t address,
1597 uint32_t size, uint32_t count, const uint8_t *buffer)
1598 {
1599 struct armv7m_common *armv7m = target_to_armv7m(target);
1600 struct adiv5_dap *swjdp = armv7m->arm.dap;
1601
1602 if (armv7m->arm.is_armv6m) {
1603 /* armv6m does not handle unaligned memory access */
1604 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1605 return ERROR_TARGET_UNALIGNED_ACCESS;
1606 }
1607
1608 return mem_ap_write(swjdp, buffer, size, count, address, true);
1609 }
1610
1611 static int cortex_m3_init_target(struct command_context *cmd_ctx,
1612 struct target *target)
1613 {
1614 armv7m_build_reg_cache(target);
1615 return ERROR_OK;
1616 }
1617
1618 /* REVISIT cache valid/dirty bits are unmaintained. We could set "valid"
1619 * on r/w if the core is not running, and clear on resume or reset ... or
1620 * at least, in a post_restore_context() method.
1621 */
1622
1623 struct dwt_reg_state {
1624 struct target *target;
1625 uint32_t addr;
1626 uint32_t value; /* scratch/cache */
1627 };
1628
1629 static int cortex_m3_dwt_get_reg(struct reg *reg)
1630 {
1631 struct dwt_reg_state *state = reg->arch_info;
1632
1633 return target_read_u32(state->target, state->addr, &state->value);
1634 }
1635
1636 static int cortex_m3_dwt_set_reg(struct reg *reg, uint8_t *buf)
1637 {
1638 struct dwt_reg_state *state = reg->arch_info;
1639
1640 return target_write_u32(state->target, state->addr,
1641 buf_get_u32(buf, 0, reg->size));
1642 }
1643
1644 struct dwt_reg {
1645 uint32_t addr;
1646 char *name;
1647 unsigned size;
1648 };
1649
1650 static struct dwt_reg dwt_base_regs[] = {
1651 { DWT_CTRL, "dwt_ctrl", 32, },
1652 /* NOTE that Erratum 532314 (fixed r2p0) affects CYCCNT: it wrongly
1653 * increments while the core is asleep.
1654 */
1655 { DWT_CYCCNT, "dwt_cyccnt", 32, },
1656 /* plus some 8 bit counters, useful for profiling with TPIU */
1657 };
1658
1659 static struct dwt_reg dwt_comp[] = {
1660 #define DWT_COMPARATOR(i) \
1661 { DWT_COMP0 + 0x10 * (i), "dwt_" #i "_comp", 32, }, \
1662 { DWT_MASK0 + 0x10 * (i), "dwt_" #i "_mask", 4, }, \
1663 { DWT_FUNCTION0 + 0x10 * (i), "dwt_" #i "_function", 32, }
1664 DWT_COMPARATOR(0),
1665 DWT_COMPARATOR(1),
1666 DWT_COMPARATOR(2),
1667 DWT_COMPARATOR(3),
1668 #undef DWT_COMPARATOR
1669 };
1670
1671 static const struct reg_arch_type dwt_reg_type = {
1672 .get = cortex_m3_dwt_get_reg,
1673 .set = cortex_m3_dwt_set_reg,
1674 };
1675
1676 static void cortex_m3_dwt_addreg(struct target *t, struct reg *r, struct dwt_reg *d)
1677 {
1678 struct dwt_reg_state *state;
1679
1680 state = calloc(1, sizeof *state);
1681 if (!state)
1682 return;
1683 state->addr = d->addr;
1684 state->target = t;
1685
1686 r->name = d->name;
1687 r->size = d->size;
1688 r->value = &state->value;
1689 r->arch_info = state;
1690 r->type = &dwt_reg_type;
1691 }
1692
1693 void cortex_m3_dwt_setup(struct cortex_m3_common *cm3, struct target *target)
1694 {
1695 uint32_t dwtcr;
1696 struct reg_cache *cache;
1697 struct cortex_m3_dwt_comparator *comparator;
1698 int reg, i;
1699
1700 target_read_u32(target, DWT_CTRL, &dwtcr);
1701 if (!dwtcr) {
1702 LOG_DEBUG("no DWT");
1703 return;
1704 }
1705
1706 cm3->dwt_num_comp = (dwtcr >> 28) & 0xF;
1707 cm3->dwt_comp_available = cm3->dwt_num_comp;
1708 cm3->dwt_comparator_list = calloc(cm3->dwt_num_comp,
1709 sizeof(struct cortex_m3_dwt_comparator));
1710 if (!cm3->dwt_comparator_list) {
1711 fail0:
1712 cm3->dwt_num_comp = 0;
1713 LOG_ERROR("out of mem");
1714 return;
1715 }
1716
1717 cache = calloc(1, sizeof *cache);
1718 if (!cache) {
1719 fail1:
1720 free(cm3->dwt_comparator_list);
1721 goto fail0;
1722 }
1723 cache->name = "cortex-m3 dwt registers";
1724 cache->num_regs = 2 + cm3->dwt_num_comp * 3;
1725 cache->reg_list = calloc(cache->num_regs, sizeof *cache->reg_list);
1726 if (!cache->reg_list) {
1727 free(cache);
1728 goto fail1;
1729 }
1730
1731 for (reg = 0; reg < 2; reg++)
1732 cortex_m3_dwt_addreg(target, cache->reg_list + reg,
1733 dwt_base_regs + reg);
1734
1735 comparator = cm3->dwt_comparator_list;
1736 for (i = 0; i < cm3->dwt_num_comp; i++, comparator++) {
1737 int j;
1738
1739 comparator->dwt_comparator_address = DWT_COMP0 + 0x10 * i;
1740 for (j = 0; j < 3; j++, reg++)
1741 cortex_m3_dwt_addreg(target, cache->reg_list + reg,
1742 dwt_comp + 3 * i + j);
1743
1744 /* make sure we clear any watchpoints enabled on the target */
1745 target_write_u32(target, comparator->dwt_comparator_address + 8, 0);
1746 }
1747
1748 *register_get_last_cache_p(&target->reg_cache) = cache;
1749 cm3->dwt_cache = cache;
1750
1751 LOG_DEBUG("DWT dwtcr 0x%" PRIx32 ", comp %d, watch%s",
1752 dwtcr, cm3->dwt_num_comp,
1753 (dwtcr & (0xf << 24)) ? " only" : "/trigger");
1754
1755 /* REVISIT: if num_comp > 1, check whether comparator #1 can
1756 * implement single-address data value watchpoints ... so we
1757 * won't need to check it later, when asked to set one up.
1758 */
1759 }
1760
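/* Media and VFP Feature Registers: matching the expected Cortex-M4F values
 * below is how the single-precision FPU (FPv4-SP) is detected in examine().
 */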
1761 #define MVFR0 0xe000ef40
1762 #define MVFR1 0xe000ef44
1763
1764 #define MVFR0_DEFAULT_M4 0x10110021
1765 #define MVFR1_DEFAULT_M4 0x11000011
1766
1767 int cortex_m3_examine(struct target *target)
1768 {
1769 int retval;
1770 uint32_t cpuid, fpcr, mvfr0, mvfr1;
1771 int i;
1772 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
1773 struct adiv5_dap *swjdp = cortex_m3->armv7m.arm.dap;
1774 struct armv7m_common *armv7m = target_to_armv7m(target);
1775
1776 /* stlink shares the examine handler but does not support
1777 * all its calls */
1778 if (!armv7m->stlink) {
1779 retval = ahbap_debugport_init(swjdp);
1780 if (retval != ERROR_OK)
1781 return retval;
1782 }
1783
1784 if (!target_was_examined(target)) {
1785 target_set_examined(target);
1786
1787 /* Read from Device Identification Registers */
1788 retval = target_read_u32(target, CPUID, &cpuid);
1789 if (retval != ERROR_OK)
1790 return retval;
1791
1792 /* Get CPU Type */
1793 i = (cpuid >> 4) & 0xf;
1794
1795 LOG_DEBUG("Cortex-M%d r%" PRId8 "p%" PRId8 " processor detected",
1796 i, (uint8_t)((cpuid >> 20) & 0xf), (uint8_t)((cpuid >> 0) & 0xf));
1797 LOG_DEBUG("cpuid: 0x%8.8" PRIx32 "", cpuid);
1798
1799 /* test for floating point feature on cortex-m4 */
1800 if (i == 4) {
1801 target_read_u32(target, MVFR0, &mvfr0);
1802 target_read_u32(target, MVFR1, &mvfr1);
1803
1804 if ((mvfr0 == MVFR0_DEFAULT_M4) && (mvfr1 == MVFR1_DEFAULT_M4)) {
1805 LOG_DEBUG("Cortex-M%d floating point feature FPv4_SP found", i);
1806 armv7m->fp_feature = FPv4_SP;
1807 }
1808 } else if (i == 0) {
1809 			/* Cortex-M0 is an ARMv6-M core (e.g. it does not support unaligned memory access) */
1810 armv7m->arm.is_armv6m = true;
1811 }
1812
1813 if (i == 4 || i == 3) {
1814 			/* Cortex-M3/M4 has a 4096 byte TAR autoincrement range */
1815 armv7m->dap.tar_autoincr_block = (1 << 12);
1816 }
1817
1818 /* NOTE: FPB and DWT are both optional. */
1819
1820 /* Setup FPB */
1821 target_read_u32(target, FP_CTRL, &fpcr);
1822 cortex_m3->auto_bp_type = 1;
1823 		cortex_m3->fp_num_code = ((fpcr >> 8) & 0x70) | ((fpcr >> 4) & 0xF);
1824 		/* NUM_CODE is FP_CTRL bits [14:12] and [7:4] */
1827 cortex_m3->fp_num_lit = (fpcr >> 8) & 0xF;
1828 cortex_m3->fp_code_available = cortex_m3->fp_num_code;
1829 cortex_m3->fp_comparator_list = calloc(
1830 cortex_m3->fp_num_code + cortex_m3->fp_num_lit,
1831 sizeof(struct cortex_m3_fp_comparator));
1832 cortex_m3->fpb_enabled = fpcr & 1;
1833 for (i = 0; i < cortex_m3->fp_num_code + cortex_m3->fp_num_lit; i++) {
1834 cortex_m3->fp_comparator_list[i].type =
1835 (i < cortex_m3->fp_num_code) ? FPCR_CODE : FPCR_LITERAL;
1836 cortex_m3->fp_comparator_list[i].fpcr_address = FP_COMP0 + 4 * i;
1837
1838 /* make sure we clear any breakpoints enabled on the target */
1839 target_write_u32(target, cortex_m3->fp_comparator_list[i].fpcr_address, 0);
1840 }
1841 LOG_DEBUG("FPB fpcr 0x%" PRIx32 ", numcode %i, numlit %i",
1842 fpcr,
1843 cortex_m3->fp_num_code,
1844 cortex_m3->fp_num_lit);
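#if 0
		/* Illustrative sketch only (left disabled): roughly how a code
		 * comparator gets armed later for a flash breakpoint, assuming
		 * the FPBv1 layout: REPLACE in bits [31:30] picks the halfword
		 * that takes the BKPT, COMP holds the word address, bit 0 enables.
		 * The address used here is purely hypothetical.
		 */
		uint32_t bp_addr = 0x08000100;

		target_write_u32(target, cortex_m3->fp_comparator_list[0].fpcr_address,
			((bp_addr & 0x2) ? (2u << 30) : (1u << 30))
			| (bp_addr & 0x1FFFFFFC) | 1);
#endif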
1845
1846 /* Setup DWT */
1847 cortex_m3_dwt_setup(cortex_m3, target);
1848
1849 /* These hardware breakpoints only work for code in flash! */
1850 LOG_INFO("%s: hardware has %d breakpoints, %d watchpoints",
1851 target_name(target),
1852 cortex_m3->fp_num_code,
1853 cortex_m3->dwt_num_comp);
1854 }
1855
1856 return ERROR_OK;
1857 }
1858
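/* Emulated DCC channel: target firmware and the debugger share DCB_DCRDR
 * as a mailbox.  In the halfword read here the low byte is the control
 * byte (bit 0 set means fresh data), the next byte carries the payload;
 * writing the halfword back as zero acknowledges the read.
 */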
1859 static int cortex_m3_dcc_read(struct adiv5_dap *swjdp, uint8_t *value, uint8_t *ctrl)
1860 {
1861 uint16_t dcrdr;
1862 int retval;
1863
1864 	retval = mem_ap_read_buf_u16(swjdp, (uint8_t *)&dcrdr, 2, DCB_DCRDR);
	if (retval != ERROR_OK)
		return retval;
1865 *ctrl = (uint8_t)dcrdr;
1866 *value = (uint8_t)(dcrdr >> 8);
1867
1868 LOG_DEBUG("data 0x%x ctrl 0x%x", *value, *ctrl);
1869
1870 /* write ack back to software dcc register
1871 * signify we have read data */
1872 if (dcrdr & (1 << 0)) {
1873 dcrdr = 0;
1874 retval = mem_ap_write_buf_u16(swjdp, (uint8_t *)&dcrdr, 2, DCB_DCRDR);
1875 if (retval != ERROR_OK)
1876 return retval;
1877 }
1878
1879 return ERROR_OK;
1880 }
1881
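/* Drain 'size' 32-bit words of debug message payload from the DCC
 * channel, one byte per read, into the caller's buffer.
 */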
1882 static int cortex_m3_target_request_data(struct target *target,
1883 uint32_t size, uint8_t *buffer)
1884 {
1885 struct armv7m_common *armv7m = target_to_armv7m(target);
1886 struct adiv5_dap *swjdp = armv7m->arm.dap;
1887 uint8_t data;
1888 uint8_t ctrl;
1889 uint32_t i;
1890
1891 for (i = 0; i < (size * 4); i++) {
1892 cortex_m3_dcc_read(swjdp, &data, &ctrl);
1893 buffer[i] = data;
1894 }
1895
1896 return ERROR_OK;
1897 }
1898
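/* Periodic timer callback (registered with a 1 ms period from
 * cortex_m3_init_arch_info): while the target runs, poll the DCC mailbox
 * and, once the target posts a byte, assemble the 32-bit request and pass
 * it on to target_request().
 */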
1899 static int cortex_m3_handle_target_request(void *priv)
1900 {
1901 struct target *target = priv;
1902 if (!target_was_examined(target))
1903 return ERROR_OK;
1904 struct armv7m_common *armv7m = target_to_armv7m(target);
1905 struct adiv5_dap *swjdp = armv7m->arm.dap;
1906
1907 if (!target->dbg_msg_enabled)
1908 return ERROR_OK;
1909
1910 if (target->state == TARGET_RUNNING) {
1911 uint8_t data;
1912 uint8_t ctrl;
1913
1914 cortex_m3_dcc_read(swjdp, &data, &ctrl);
1915
1916 /* check if we have data */
1917 if (ctrl & (1 << 0)) {
1918 uint32_t request;
1919
1920 /* we assume target is quick enough */
1921 request = data;
1922 cortex_m3_dcc_read(swjdp, &data, &ctrl);
1923 request |= (data << 8);
1924 cortex_m3_dcc_read(swjdp, &data, &ctrl);
1925 request |= (data << 16);
1926 cortex_m3_dcc_read(swjdp, &data, &ctrl);
1927 request |= (data << 24);
1928 target_request(target, request);
1929 }
1930 }
1931
1932 return ERROR_OK;
1933 }
1934
1935 static int cortex_m3_init_arch_info(struct target *target,
1936 struct cortex_m3_common *cortex_m3, struct jtag_tap *tap)
1937 {
1938 int retval;
1939 struct armv7m_common *armv7m = &cortex_m3->armv7m;
1940
1941 armv7m_init_arch_info(target, armv7m);
1942
1943 /* prepare JTAG information for the new target */
1944 cortex_m3->jtag_info.tap = tap;
1945 cortex_m3->jtag_info.scann_size = 4;
1946
1947 	/* default reset mode is to use srst if fitted;
1948 	 * otherwise fall back to CORTEX_M3_RESET_VECTRESET */
1949 cortex_m3->soft_reset_config = CORTEX_M3_RESET_VECTRESET;
1950
1951 armv7m->arm.dap = &armv7m->dap;
1952
1953 /* Leave (only) generic DAP stuff for debugport_init(); */
1954 armv7m->dap.jtag_info = &cortex_m3->jtag_info;
1955 armv7m->dap.memaccess_tck = 8;
1956
1957 	/* Cortex-M3/M4 has a 4096 byte TAR autoincrement range, but default
1958 	 * to a safe 1024 bytes so a Cortex-M0 works too; cortex_m3_examine()
1959 	 * raises the value again if an M3/M4 is detected */
1960 armv7m->dap.tar_autoincr_block = (1 << 10);
1961
1962 /* register arch-specific functions */
1963 armv7m->examine_debug_reason = cortex_m3_examine_debug_reason;
1964
1965 armv7m->post_debug_entry = NULL;
1966
1967 armv7m->pre_restore_context = NULL;
1968
1969 armv7m->load_core_reg_u32 = cortex_m3_load_core_reg_u32;
1970 armv7m->store_core_reg_u32 = cortex_m3_store_core_reg_u32;
1971
1972 target_register_timer_callback(cortex_m3_handle_target_request, 1, 1, target);
1973
1974 retval = arm_jtag_setup_connection(&cortex_m3->jtag_info);
1975 if (retval != ERROR_OK)
1976 return retval;
1977
1978 return ERROR_OK;
1979 }
1980
1981 static int cortex_m3_target_create(struct target *target, Jim_Interp *interp)
1982 {
1983 struct cortex_m3_common *cortex_m3 = calloc(1, sizeof(struct cortex_m3_common));
1984
1985 cortex_m3->common_magic = CORTEX_M3_COMMON_MAGIC;
1986 cortex_m3_init_arch_info(target, cortex_m3, target->tap);
1987
1988 return ERROR_OK;
1989 }
1990
1991 /*--------------------------------------------------------------------------*/
1992
1993 static int cortex_m3_verify_pointer(struct command_context *cmd_ctx,
1994 struct cortex_m3_common *cm3)
1995 {
1996 if (cm3->common_magic != CORTEX_M3_COMMON_MAGIC) {
1997 command_print(cmd_ctx, "target is not a Cortex-M");
1998 return ERROR_TARGET_INVALID;
1999 }
2000 return ERROR_OK;
2001 }
2002
2003 /*
2004 * Only stuff below this line should need to verify that its target
2005 * is a Cortex-M3. Everything else should have indirected through the
2006 * cortexm3_target structure, which is only used with CM3 targets.
2007 */
2008
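/* Map "cortex_m vector_catch" keywords onto the DCB_DEMCR VC_* enable
 * bits (DEMCR[10:4] plus VC_CORERESET in bit 0).  Example:
 *   cortex_m vector_catch hard_err reset
 * halts the core on HardFault entry and right out of a local reset.
 */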
2009 static const struct {
2010 char name[10];
2011 unsigned mask;
2012 } vec_ids[] = {
2013 { "hard_err", VC_HARDERR, },
2014 { "int_err", VC_INTERR, },
2015 { "bus_err", VC_BUSERR, },
2016 { "state_err", VC_STATERR, },
2017 { "chk_err", VC_CHKERR, },
2018 { "nocp_err", VC_NOCPERR, },
2019 { "mm_err", VC_MMERR, },
2020 { "reset", VC_CORERESET, },
2021 };
2022
2023 COMMAND_HANDLER(handle_cortex_m3_vector_catch_command)
2024 {
2025 struct target *target = get_current_target(CMD_CTX);
2026 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
2027 struct armv7m_common *armv7m = &cortex_m3->armv7m;
2028 struct adiv5_dap *swjdp = armv7m->arm.dap;
2029 uint32_t demcr = 0;
2030 int retval;
2031
2032 retval = cortex_m3_verify_pointer(CMD_CTX, cortex_m3);
2033 if (retval != ERROR_OK)
2034 return retval;
2035
2036 retval = mem_ap_read_atomic_u32(swjdp, DCB_DEMCR, &demcr);
2037 if (retval != ERROR_OK)
2038 return retval;
2039
2040 if (CMD_ARGC > 0) {
2041 unsigned catch = 0;
2042
2043 if (CMD_ARGC == 1) {
2044 if (strcmp(CMD_ARGV[0], "all") == 0) {
2045 catch = VC_HARDERR | VC_INTERR | VC_BUSERR
2046 | VC_STATERR | VC_CHKERR | VC_NOCPERR
2047 | VC_MMERR | VC_CORERESET;
2048 goto write;
2049 } else if (strcmp(CMD_ARGV[0], "none") == 0)
2050 goto write;
2051 }
2052 while (CMD_ARGC-- > 0) {
2053 unsigned i;
2054 for (i = 0; i < ARRAY_SIZE(vec_ids); i++) {
2055 if (strcmp(CMD_ARGV[CMD_ARGC], vec_ids[i].name) != 0)
2056 continue;
2057 catch |= vec_ids[i].mask;
2058 break;
2059 }
2060 if (i == ARRAY_SIZE(vec_ids)) {
2061 LOG_ERROR("No CM3 vector '%s'", CMD_ARGV[CMD_ARGC]);
2062 return ERROR_COMMAND_SYNTAX_ERROR;
2063 }
2064 }
2065 write:
2066 /* For now, armv7m->demcr only stores vector catch flags. */
2067 armv7m->demcr = catch;
2068
2069 demcr &= ~0xffff;
2070 demcr |= catch;
2071
2072 /* write, but don't assume it stuck (why not??) */
2073 retval = mem_ap_write_u32(swjdp, DCB_DEMCR, demcr);
2074 if (retval != ERROR_OK)
2075 return retval;
2076 retval = mem_ap_read_atomic_u32(swjdp, DCB_DEMCR, &demcr);
2077 if (retval != ERROR_OK)
2078 return retval;
2079
2080 /* FIXME be sure to clear DEMCR on clean server shutdown.
2081 * Otherwise the vector catch hardware could fire when there's
2082 * no debugger hooked up, causing much confusion...
2083 */
2084 }
2085
2086 for (unsigned i = 0; i < ARRAY_SIZE(vec_ids); i++) {
2087 command_print(CMD_CTX, "%9s: %s", vec_ids[i].name,
2088 (demcr & vec_ids[i].mask) ? "catch" : "ignore");
2089 }
2090
2091 return ERROR_OK;
2092 }
2093
2094 COMMAND_HANDLER(handle_cortex_m3_mask_interrupts_command)
2095 {
2096 struct target *target = get_current_target(CMD_CTX);
2097 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
2098 int retval;
2099
2100 static const Jim_Nvp nvp_maskisr_modes[] = {
2101 { .name = "auto", .value = CORTEX_M3_ISRMASK_AUTO },
2102 { .name = "off", .value = CORTEX_M3_ISRMASK_OFF },
2103 { .name = "on", .value = CORTEX_M3_ISRMASK_ON },
2104 { .name = NULL, .value = -1 },
2105 };
2106 const Jim_Nvp *n;
2107
2108
2109 retval = cortex_m3_verify_pointer(CMD_CTX, cortex_m3);
2110 if (retval != ERROR_OK)
2111 return retval;
2112
2113 if (target->state != TARGET_HALTED) {
2114 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
2115 return ERROR_OK;
2116 }
2117
2118 if (CMD_ARGC > 0) {
2119 n = Jim_Nvp_name2value_simple(nvp_maskisr_modes, CMD_ARGV[0]);
2120 if (n->name == NULL)
2121 return ERROR_COMMAND_SYNTAX_ERROR;
2122 cortex_m3->isrmasking_mode = n->value;
2123
2124
2125 if (cortex_m3->isrmasking_mode == CORTEX_M3_ISRMASK_ON)
2126 cortex_m3_write_debug_halt_mask(target, C_HALT | C_MASKINTS, 0);
2127 else
2128 cortex_m3_write_debug_halt_mask(target, C_HALT, C_MASKINTS);
2129 }
2130
2131 n = Jim_Nvp_value2name_simple(nvp_maskisr_modes, cortex_m3->isrmasking_mode);
2132 command_print(CMD_CTX, "cortex_m interrupt mask %s", n->name);
2133
2134 return ERROR_OK;
2135 }
2136
2137 COMMAND_HANDLER(handle_cortex_m3_reset_config_command)
2138 {
2139 struct target *target = get_current_target(CMD_CTX);
2140 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
2141 int retval;
2142 char *reset_config;
2143
2144 retval = cortex_m3_verify_pointer(CMD_CTX, cortex_m3);
2145 if (retval != ERROR_OK)
2146 return retval;
2147
2148 if (CMD_ARGC > 0) {
2149 if (strcmp(*CMD_ARGV, "sysresetreq") == 0)
2150 cortex_m3->soft_reset_config = CORTEX_M3_RESET_SYSRESETREQ;
2151 else if (strcmp(*CMD_ARGV, "vectreset") == 0)
2152 cortex_m3->soft_reset_config = CORTEX_M3_RESET_VECTRESET;
2153 }
2154
2155 switch (cortex_m3->soft_reset_config) {
2156 case CORTEX_M3_RESET_SYSRESETREQ:
2157 reset_config = "sysresetreq";
2158 break;
2159
2160 case CORTEX_M3_RESET_VECTRESET:
2161 reset_config = "vectreset";
2162 break;
2163
2164 default:
2165 reset_config = "unknown";
2166 break;
2167 }
2168
2169 command_print(CMD_CTX, "cortex_m reset_config %s", reset_config);
2170
2171 return ERROR_OK;
2172 }
2173
2174 static const struct command_registration cortex_m3_exec_command_handlers[] = {
2175 {
2176 .name = "maskisr",
2177 .handler = handle_cortex_m3_mask_interrupts_command,
2178 .mode = COMMAND_EXEC,
2179 .help = "mask cortex_m interrupts",
2180 .usage = "['auto'|'on'|'off']",
2181 },
2182 {
2183 .name = "vector_catch",
2184 .handler = handle_cortex_m3_vector_catch_command,
2185 .mode = COMMAND_EXEC,
2186 .help = "configure hardware vectors to trigger debug entry",
2187 .usage = "['all'|'none'|('bus_err'|'chk_err'|...)*]",
2188 },
2189 {
2190 .name = "reset_config",
2191 .handler = handle_cortex_m3_reset_config_command,
2192 .mode = COMMAND_ANY,
2193 .help = "configure software reset handling",
2194 .usage = "['srst'|'sysresetreq'|'vectreset']",
2195 },
2196 COMMAND_REGISTRATION_DONE
2197 };
2198 static const struct command_registration cortex_m3_command_handlers[] = {
2199 {
2200 .chain = armv7m_command_handlers,
2201 },
2202 {
2203 .name = "cortex_m",
2204 .mode = COMMAND_EXEC,
2205 .help = "Cortex-M command group",
2206 .usage = "",
2207 .chain = cortex_m3_exec_command_handlers,
2208 },
2209 COMMAND_REGISTRATION_DONE
2210 };
2211
2212 struct target_type cortexm3_target = {
2213 .name = "cortex_m",
2214 .deprecated_name = "cortex_m3",
2215
2216 .poll = cortex_m3_poll,
2217 .arch_state = armv7m_arch_state,
2218
2219 .target_request_data = cortex_m3_target_request_data,
2220
2221 .halt = cortex_m3_halt,
2222 .resume = cortex_m3_resume,
2223 .step = cortex_m3_step,
2224
2225 .assert_reset = cortex_m3_assert_reset,
2226 .deassert_reset = cortex_m3_deassert_reset,
2227 .soft_reset_halt = cortex_m3_soft_reset_halt,
2228
2229 .get_gdb_reg_list = armv7m_get_gdb_reg_list,
2230
2231 .read_memory = cortex_m3_read_memory,
2232 .write_memory = cortex_m3_write_memory,
2233 .checksum_memory = armv7m_checksum_memory,
2234 .blank_check_memory = armv7m_blank_check_memory,
2235
2236 .run_algorithm = armv7m_run_algorithm,
2237 .start_algorithm = armv7m_start_algorithm,
2238 .wait_algorithm = armv7m_wait_algorithm,
2239
2240 .add_breakpoint = cortex_m3_add_breakpoint,
2241 .remove_breakpoint = cortex_m3_remove_breakpoint,
2242 .add_watchpoint = cortex_m3_add_watchpoint,
2243 .remove_watchpoint = cortex_m3_remove_watchpoint,
2244
2245 .commands = cortex_m3_command_handlers,
2246 .target_create = cortex_m3_target_create,
2247 .init_target = cortex_m3_init_target,
2248 .examine = cortex_m3_examine,
2249 };
