[openocd.git] / src / target / cortex_m.c
1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2006 by Magnus Lundin *
6 * lundin@mlu.mine.nu *
7 * *
8 * Copyright (C) 2008 by Spencer Oliver *
9 * spen@spen-soft.co.uk *
10 * *
11 * This program is free software; you can redistribute it and/or modify *
12 * it under the terms of the GNU General Public License as published by *
13 * the Free Software Foundation; either version 2 of the License, or *
14 * (at your option) any later version. *
15 * *
16 * This program is distributed in the hope that it will be useful, *
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
19 * GNU General Public License for more details. *
20 * *
21 * You should have received a copy of the GNU General Public License *
22 * along with this program; if not, write to the *
23 * Free Software Foundation, Inc., *
24 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
25 * *
26 * *
27 * Cortex-M3(tm) TRM, ARM DDI 0337E (r1p1) and 0337G (r2p0) *
28 * *
29 ***************************************************************************/
30 #ifdef HAVE_CONFIG_H
31 #include "config.h"
32 #endif
33
34 #include "breakpoints.h"
35 #include "cortex_m.h"
36 #include "target_request.h"
37 #include "target_type.h"
38 #include "arm_disassembler.h"
39 #include "register.h"
40 #include "arm_opcodes.h"
41 #include "arm_semihosting.h"
42 #include <helper/time_support.h>
43
44 /* NOTE: most of this should work fine for the Cortex-M1 and
45 * Cortex-M0 cores too, although they're ARMv6-M not ARMv7-M.
46 * Some differences: M0/M1 don't have FPB remapping or the
47 * DWT tracing/profiling support. (So the cycle counter will
48 * not be usable; the other stuff isn't currently used here.)
49 *
50 * Although there are some workarounds for errata seen only in r0p0
51 * silicon, such old parts are hard to find and thus not much tested
52 * any longer.
53 */
54
55 /**
56 * Returns the type of breakpoint required by the address location: hardware in the code region (below 0x20000000), software elsewhere
57 */
58 #define BKPT_TYPE_BY_ADDR(addr) ((addr) < 0x20000000 ? BKPT_HARD : BKPT_SOFT)
59
60
61 /* forward declarations */
62 static int cortex_m3_store_core_reg_u32(struct target *target,
63 enum armv7m_regtype type, uint32_t num, uint32_t value);
64
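/* Read a core register through the Debug Core Register interface: the DCRSR
 * select and the DCRDR read are queued as banked AP accesses and issued in a
 * single DAP transaction, then DCRDR is restored for the emulated DCC channel.
 */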
65 static int cortexm3_dap_read_coreregister_u32(struct adiv5_dap *swjdp,
66 uint32_t *value, int regnum)
67 {
68 int retval;
69 uint32_t dcrdr;
70
71 /* because the DCB_DCRDR is used for the emulated dcc channel
72 * we have to save/restore the DCB_DCRDR when used */
73
74 retval = mem_ap_read_u32(swjdp, DCB_DCRDR, &dcrdr);
75 if (retval != ERROR_OK)
76 return retval;
77
78 /* mem_ap_write_u32(swjdp, DCB_DCRSR, regnum); */
79 retval = dap_setup_accessport(swjdp, CSW_32BIT | CSW_ADDRINC_OFF, DCB_DCRSR & 0xFFFFFFF0);
80 if (retval != ERROR_OK)
81 return retval;
82 retval = dap_queue_ap_write(swjdp, AP_REG_BD0 | (DCB_DCRSR & 0xC), regnum);
83 if (retval != ERROR_OK)
84 return retval;
85
86 /* mem_ap_read_u32(swjdp, DCB_DCRDR, value); */
87 retval = dap_setup_accessport(swjdp, CSW_32BIT | CSW_ADDRINC_OFF, DCB_DCRDR & 0xFFFFFFF0);
88 if (retval != ERROR_OK)
89 return retval;
90 retval = dap_queue_ap_read(swjdp, AP_REG_BD0 | (DCB_DCRDR & 0xC), value);
91 if (retval != ERROR_OK)
92 return retval;
93
94 retval = dap_run(swjdp);
95 if (retval != ERROR_OK)
96 return retval;
97
98 /* restore DCB_DCRDR - this needs to be in a separate
99 * transaction otherwise the emulated DCC channel breaks */
100 if (retval == ERROR_OK)
101 retval = mem_ap_write_atomic_u32(swjdp, DCB_DCRDR, dcrdr);
102
103 return retval;
104 }
105
106 static int cortexm3_dap_write_coreregister_u32(struct adiv5_dap *swjdp,
107 uint32_t value, int regnum)
108 {
109 int retval;
110 uint32_t dcrdr;
111
112 /* because the DCB_DCRDR is used for the emulated dcc channel
113 * we have to save/restore the DCB_DCRDR when used */
114
115 retval = mem_ap_read_u32(swjdp, DCB_DCRDR, &dcrdr);
116 if (retval != ERROR_OK)
117 return retval;
118
119 /* mem_ap_write_u32(swjdp, DCB_DCRDR, core_regs[i]); */
120 retval = dap_setup_accessport(swjdp, CSW_32BIT | CSW_ADDRINC_OFF, DCB_DCRDR & 0xFFFFFFF0);
121 if (retval != ERROR_OK)
122 return retval;
123 retval = dap_queue_ap_write(swjdp, AP_REG_BD0 | (DCB_DCRDR & 0xC), value);
124 if (retval != ERROR_OK)
125 return retval;
126
127 /* mem_ap_write_u32(swjdp, DCB_DCRSR, i | DCRSR_WnR); */
128 retval = dap_setup_accessport(swjdp, CSW_32BIT | CSW_ADDRINC_OFF, DCB_DCRSR & 0xFFFFFFF0);
129 if (retval != ERROR_OK)
130 return retval;
131 retval = dap_queue_ap_write(swjdp, AP_REG_BD0 | (DCB_DCRSR & 0xC), regnum | DCRSR_WnR);
132 if (retval != ERROR_OK)
133 return retval;
134
135 retval = dap_run(swjdp);
136 if (retval != ERROR_OK)
137 return retval;
138
139 /* restore DCB_DCRDR - this needs to be in a separate
140 * transaction otherwise the emulated DCC channel breaks */
141 if (retval == ERROR_OK)
142 retval = mem_ap_write_atomic_u32(swjdp, DCB_DCRDR, dcrdr);
143
144 return retval;
145 }
146
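/* Update the run/halt control bits in DHCSR: writes DBGKEY, keeps C_DEBUGEN
 * set, sets the bits in mask_on and clears those in mask_off, masking the
 * read-only status bits out of the cached register value.
 */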
147 static int cortex_m3_write_debug_halt_mask(struct target *target,
148 uint32_t mask_on, uint32_t mask_off)
149 {
150 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
151 struct adiv5_dap *swjdp = &cortex_m3->armv7m.dap;
152
153 /* mask off status bits */
154 cortex_m3->dcb_dhcsr &= ~((0xFFFF << 16) | mask_off);
155 /* create new register mask */
156 cortex_m3->dcb_dhcsr |= DBGKEY | C_DEBUGEN | mask_on;
157
158 return mem_ap_write_atomic_u32(swjdp, DCB_DHCSR, cortex_m3->dcb_dhcsr);
159 }
160
161 static int cortex_m3_clear_halt(struct target *target)
162 {
163 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
164 struct adiv5_dap *swjdp = &cortex_m3->armv7m.dap;
165 int retval;
166
167 /* clear step if any */
168 cortex_m3_write_debug_halt_mask(target, C_HALT, C_STEP);
169
170 /* Read Debug Fault Status Register */
171 retval = mem_ap_read_atomic_u32(swjdp, NVIC_DFSR, &cortex_m3->nvic_dfsr);
172 if (retval != ERROR_OK)
173 return retval;
174
175 /* Clear Debug Fault Status */
176 retval = mem_ap_write_atomic_u32(swjdp, NVIC_DFSR, cortex_m3->nvic_dfsr);
177 if (retval != ERROR_OK)
178 return retval;
179 LOG_DEBUG(" NVIC_DFSR 0x%" PRIx32 "", cortex_m3->nvic_dfsr);
180
181 return ERROR_OK;
182 }
183
184 static int cortex_m3_single_step_core(struct target *target)
185 {
186 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
187 struct adiv5_dap *swjdp = &cortex_m3->armv7m.dap;
188 uint32_t dhcsr_save;
189 int retval;
190
191 /* backup dhcsr reg */
192 dhcsr_save = cortex_m3->dcb_dhcsr;
193
194 /* Mask interrupts, if not done already, before clearing halt. This avoids
195 * Erratum 377497 (fixed in r1p0) where setting MASKINTS while clearing
196 * HALT can put the core into an unknown state.
197 */
198 if (!(cortex_m3->dcb_dhcsr & C_MASKINTS))
199 {
200 retval = mem_ap_write_atomic_u32(swjdp, DCB_DHCSR,
201 DBGKEY | C_MASKINTS | C_HALT | C_DEBUGEN);
202 if (retval != ERROR_OK)
203 return retval;
204 }
205 retval = mem_ap_write_atomic_u32(swjdp, DCB_DHCSR,
206 DBGKEY | C_MASKINTS | C_STEP | C_DEBUGEN);
207 if (retval != ERROR_OK)
208 return retval;
209 LOG_DEBUG(" ");
210
211 /* restore dhcsr reg */
212 cortex_m3->dcb_dhcsr = dhcsr_save;
213 cortex_m3_clear_halt(target);
214
215 return ERROR_OK;
216 }
217
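/* Re-arm the debug logic after the core comes out of reset: re-enable debug
 * requests, restore the DEMCR vector catch settings, and re-program the FPB
 * and DWT comparators, since some silicon does not preserve them across reset.
 */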
218 static int cortex_m3_endreset_event(struct target *target)
219 {
220 int i;
221 int retval;
222 uint32_t dcb_demcr;
223 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
224 struct armv7m_common *armv7m = &cortex_m3->armv7m;
225 struct adiv5_dap *swjdp = &cortex_m3->armv7m.dap;
226 struct cortex_m3_fp_comparator *fp_list = cortex_m3->fp_comparator_list;
227 struct cortex_m3_dwt_comparator *dwt_list = cortex_m3->dwt_comparator_list;
228
229 /* REVISIT The four debug monitor bits are currently ignored... */
230 retval = mem_ap_read_atomic_u32(swjdp, DCB_DEMCR, &dcb_demcr);
231 if (retval != ERROR_OK)
232 return retval;
233 LOG_DEBUG("DCB_DEMCR = 0x%8.8" PRIx32 "",dcb_demcr);
234
235 /* this register is used for emulated dcc channel */
236 retval = mem_ap_write_u32(swjdp, DCB_DCRDR, 0);
237 if (retval != ERROR_OK)
238 return retval;
239
240 /* Enable debug requests */
241 retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &cortex_m3->dcb_dhcsr);
242 if (retval != ERROR_OK)
243 return retval;
244 if (!(cortex_m3->dcb_dhcsr & C_DEBUGEN))
245 {
246 retval = mem_ap_write_u32(swjdp, DCB_DHCSR, DBGKEY | C_DEBUGEN);
247 if (retval != ERROR_OK)
248 return retval;
249 }
250
251 /* clear any interrupt masking */
252 cortex_m3_write_debug_halt_mask(target, 0, C_MASKINTS);
253
254 /* Enable features controlled by ITM and DWT blocks, and catch only
255 * the vectors we were told to pay attention to.
256 *
257 * Target firmware is responsible for all fault handling policy
258 * choices *EXCEPT* explicitly scripted overrides like "vector_catch"
259 * or manual updates to the NVIC SHCSR and CCR registers.
260 */
261 retval = mem_ap_write_u32(swjdp, DCB_DEMCR, TRCENA | armv7m->demcr);
262 if (retval != ERROR_OK)
263 return retval;
264
265 /* Paranoia: evidently some (early?) chips don't preserve all the
266 * debug state (including FBP, DWT, etc) across reset...
267 */
268
269 /* Enable FPB */
270 retval = target_write_u32(target, FP_CTRL, 3);
271 if (retval != ERROR_OK)
272 return retval;
273
274 cortex_m3->fpb_enabled = 1;
275
276 /* Restore FPB registers */
277 for (i = 0; i < cortex_m3->fp_num_code + cortex_m3->fp_num_lit; i++)
278 {
279 retval = target_write_u32(target, fp_list[i].fpcr_address, fp_list[i].fpcr_value);
280 if (retval != ERROR_OK)
281 return retval;
282 }
283
284 /* Restore DWT registers */
285 for (i = 0; i < cortex_m3->dwt_num_comp; i++)
286 {
287 retval = target_write_u32(target, dwt_list[i].dwt_comparator_address + 0,
288 dwt_list[i].comp);
289 if (retval != ERROR_OK)
290 return retval;
291 retval = target_write_u32(target, dwt_list[i].dwt_comparator_address + 4,
292 dwt_list[i].mask);
293 if (retval != ERROR_OK)
294 return retval;
295 retval = target_write_u32(target, dwt_list[i].dwt_comparator_address + 8,
296 dwt_list[i].function);
297 if (retval != ERROR_OK)
298 return retval;
299 }
300 retval = dap_run(swjdp);
301 if (retval != ERROR_OK)
302 return retval;
303
304 register_cache_invalidate(cortex_m3->armv7m.core_cache);
305
306 /* make sure we have latest dhcsr flags */
307 retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &cortex_m3->dcb_dhcsr);
308
309 return retval;
310 }
311
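/* Derive target->debug_reason from the DFSR bits captured by clear_halt(),
 * unless the reason (debug request or single step) is already known.
 */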
312 static int cortex_m3_examine_debug_reason(struct target *target)
313 {
314 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
315
316 /* THIS IS NOT GOOD, TODO - better logic for detection of debug state reason */
317 /* only check the debug reason if we don't know it already */
318
319 if ((target->debug_reason != DBG_REASON_DBGRQ)
320 && (target->debug_reason != DBG_REASON_SINGLESTEP))
321 {
322 if (cortex_m3->nvic_dfsr & DFSR_BKPT)
323 {
324 target->debug_reason = DBG_REASON_BREAKPOINT;
325 if (cortex_m3->nvic_dfsr & DFSR_DWTTRAP)
326 target->debug_reason = DBG_REASON_WPTANDBKPT;
327 }
328 else if (cortex_m3->nvic_dfsr & DFSR_DWTTRAP)
329 target->debug_reason = DBG_REASON_WATCHPOINT;
330 else if (cortex_m3->nvic_dfsr & DFSR_VCATCH)
331 target->debug_reason = DBG_REASON_BREAKPOINT;
332 else /* EXTERNAL, HALTED */
333 target->debug_reason = DBG_REASON_UNDEFINED;
334 }
335
336 return ERROR_OK;
337 }
338
339 static int cortex_m3_examine_exception_reason(struct target *target)
340 {
341 uint32_t shcsr = 0, except_sr = 0, cfsr = -1, except_ar = -1;
342 struct armv7m_common *armv7m = target_to_armv7m(target);
343 struct adiv5_dap *swjdp = &armv7m->dap;
344 int retval;
345
346 retval = mem_ap_read_u32(swjdp, NVIC_SHCSR, &shcsr);
347 if (retval != ERROR_OK)
348 return retval;
349 switch (armv7m->exception_number)
350 {
351 case 2: /* NMI */
352 break;
353 case 3: /* Hard Fault */
354 retval = mem_ap_read_atomic_u32(swjdp, NVIC_HFSR, &except_sr);
355 if (retval != ERROR_OK)
356 return retval;
357 if (except_sr & 0x40000000)
358 {
359 retval = mem_ap_read_u32(swjdp, NVIC_CFSR, &cfsr);
360 if (retval != ERROR_OK)
361 return retval;
362 }
363 break;
364 case 4: /* Memory Management */
365 retval = mem_ap_read_u32(swjdp, NVIC_CFSR, &except_sr);
366 if (retval != ERROR_OK)
367 return retval;
368 retval = mem_ap_read_u32(swjdp, NVIC_MMFAR, &except_ar);
369 if (retval != ERROR_OK)
370 return retval;
371 break;
372 case 5: /* Bus Fault */
373 retval = mem_ap_read_u32(swjdp, NVIC_CFSR, &except_sr);
374 if (retval != ERROR_OK)
375 return retval;
376 retval = mem_ap_read_u32(swjdp, NVIC_BFAR, &except_ar);
377 if (retval != ERROR_OK)
378 return retval;
379 break;
380 case 6: /* Usage Fault */
381 retval = mem_ap_read_u32(swjdp, NVIC_CFSR, &except_sr);
382 if (retval != ERROR_OK)
383 return retval;
384 break;
385 case 11: /* SVCall */
386 break;
387 case 12: /* Debug Monitor */
388 retval = mem_ap_read_u32(swjdp, NVIC_DFSR, &except_sr);
389 if (retval != ERROR_OK)
390 return retval;
391 break;
392 case 14: /* PendSV */
393 break;
394 case 15: /* SysTick */
395 break;
396 default:
397 except_sr = 0;
398 break;
399 }
400 retval = dap_run(swjdp);
401 if (retval == ERROR_OK)
402 LOG_DEBUG("%s SHCSR 0x%" PRIx32 ", SR 0x%" PRIx32
403 ", CFSR 0x%" PRIx32 ", AR 0x%" PRIx32,
404 armv7m_exception_string(armv7m->exception_number),
405 shcsr, except_sr, cfsr, except_ar);
406 return retval;
407 }
408
409 static int cortex_m3_debug_entry(struct target *target)
410 {
411 int i;
412 uint32_t xPSR;
413 int retval;
414 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
415 struct armv7m_common *armv7m = &cortex_m3->armv7m;
416 struct arm *arm = &armv7m->arm;
417 struct adiv5_dap *swjdp = &armv7m->dap;
418 struct reg *r;
419
420 LOG_DEBUG(" ");
421
422 cortex_m3_clear_halt(target);
423 retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &cortex_m3->dcb_dhcsr);
424 if (retval != ERROR_OK)
425 return retval;
426
427 if ((retval = armv7m->examine_debug_reason(target)) != ERROR_OK)
428 return retval;
429
430 /* Examine target state and mode */
431 /* First load the registers accessible through the core debug port */
432 int num_regs = armv7m->core_cache->num_regs;
433
434 for (i = 0; i < num_regs; i++)
435 {
436 if (!armv7m->core_cache->reg_list[i].valid)
437 armv7m->read_core_reg(target, i);
438 }
439
440 r = armv7m->core_cache->reg_list + ARMV7M_xPSR;
441 xPSR = buf_get_u32(r->value, 0, 32);
442
443 #ifdef ARMV7_GDB_HACKS
444 /* FIXME this breaks on scan chains with more than one Cortex-M3.
445 * Instead, each CM3 should have its own dummy value...
446 */
447 /* copy real xpsr reg for gdb, setting thumb bit */
448 buf_set_u32(armv7m_gdb_dummy_cpsr_value, 0, 32, xPSR);
449 buf_set_u32(armv7m_gdb_dummy_cpsr_value, 5, 1, 1);
450 armv7m_gdb_dummy_cpsr_reg.valid = r->valid;
451 armv7m_gdb_dummy_cpsr_reg.dirty = r->dirty;
452 #endif
453
454 /* For IT instructions xPSR must be reloaded on resume and cleared on debug exec */
455 if (xPSR & 0xf00)
456 {
457 r->dirty = r->valid;
458 cortex_m3_store_core_reg_u32(target, ARMV7M_REGISTER_CORE_GP, 16, xPSR &~ 0xff);
459 }
460
461 /* Are we in an exception handler */
462 if (xPSR & 0x1FF)
463 {
464 armv7m->core_mode = ARMV7M_MODE_HANDLER;
465 armv7m->exception_number = (xPSR & 0x1FF);
466
467 arm->core_mode = ARM_MODE_HANDLER;
468 arm->map = armv7m_msp_reg_map;
469 }
470 else
471 {
472 unsigned control = buf_get_u32(armv7m->core_cache
473 ->reg_list[ARMV7M_CONTROL].value, 0, 2);
474
475 /* is this thread privileged? */
476 armv7m->core_mode = control & 1;
477 arm->core_mode = armv7m->core_mode
478 ? ARM_MODE_USER_THREAD
479 : ARM_MODE_THREAD;
480
481 /* which stack is it using? */
482 if (control & 2)
483 arm->map = armv7m_psp_reg_map;
484 else
485 arm->map = armv7m_msp_reg_map;
486
487 armv7m->exception_number = 0;
488 }
489
490 if (armv7m->exception_number)
491 {
492 cortex_m3_examine_exception_reason(target);
493 }
494
495 LOG_DEBUG("entered debug state in core mode: %s at PC 0x%" PRIx32 ", target->state: %s",
496 armv7m_mode_strings[armv7m->core_mode],
497 *(uint32_t*)(arm->pc->value),
498 target_state_name(target));
499
500 if (armv7m->post_debug_entry)
501 {
502 retval = armv7m->post_debug_entry(target);
503 if (retval != ERROR_OK)
504 return retval;
505 }
506
507 return ERROR_OK;
508 }
509
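/* Track target state from DHCSR: recover from lockup after a double fault,
 * detect reset entry/exit, and enter debug state when the core halts.
 */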
510 static int cortex_m3_poll(struct target *target)
511 {
512 int detected_failure = ERROR_OK;
513 int retval = ERROR_OK;
514 enum target_state prev_target_state = target->state;
515 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
516 struct adiv5_dap *swjdp = &cortex_m3->armv7m.dap;
517
518 /* Read from Debug Halting Control and Status Register */
519 retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &cortex_m3->dcb_dhcsr);
520 if (retval != ERROR_OK)
521 {
522 target->state = TARGET_UNKNOWN;
523 return retval;
524 }
525
526 /* Recover from lockup. See ARMv7-M architecture spec,
527 * section B1.5.15 "Unrecoverable exception cases".
528 */
529 if (cortex_m3->dcb_dhcsr & S_LOCKUP) {
530 LOG_ERROR("%s -- clearing lockup after double fault",
531 target_name(target));
532 cortex_m3_write_debug_halt_mask(target, C_HALT, 0);
533 target->debug_reason = DBG_REASON_DBGRQ;
534
535 /* We have to execute the rest (the "finally" equivalent, but
536 * still throw this exception again).
537 */
538 detected_failure = ERROR_FAIL;
539
540 /* refresh status bits */
541 retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &cortex_m3->dcb_dhcsr);
542 if (retval != ERROR_OK)
543 return retval;
544 }
545
546 if (cortex_m3->dcb_dhcsr & S_RESET_ST)
547 {
548 /* check if still in reset */
549 retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &cortex_m3->dcb_dhcsr);
550 if (retval != ERROR_OK)
551 return retval;
552
553 if (cortex_m3->dcb_dhcsr & S_RESET_ST)
554 {
555 target->state = TARGET_RESET;
556 return ERROR_OK;
557 }
558 }
559
560 if (target->state == TARGET_RESET)
561 {
562 /* Cannot switch context while running so endreset is
563 * called with target->state == TARGET_RESET
564 */
565 LOG_DEBUG("Exit from reset with dcb_dhcsr 0x%" PRIx32,
566 cortex_m3->dcb_dhcsr);
567 cortex_m3_endreset_event(target);
568 target->state = TARGET_RUNNING;
569 prev_target_state = TARGET_RUNNING;
570 }
571
572 if (cortex_m3->dcb_dhcsr & S_HALT)
573 {
574 target->state = TARGET_HALTED;
575
576 if ((prev_target_state == TARGET_RUNNING) || (prev_target_state == TARGET_RESET))
577 {
578 if ((retval = cortex_m3_debug_entry(target)) != ERROR_OK)
579 return retval;
580
581 if (arm_semihosting(target, &retval) != 0)
582 return retval;
583
584 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
585 }
586 if (prev_target_state == TARGET_DEBUG_RUNNING)
587 {
588 LOG_DEBUG(" ");
589 if ((retval = cortex_m3_debug_entry(target)) != ERROR_OK)
590 return retval;
591
592 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
593 }
594 }
595
596 /* REVISIT when S_SLEEP is set, it's in a Sleep or DeepSleep state.
597 * How best to model low power modes?
598 */
599
600 if (target->state == TARGET_UNKNOWN)
601 {
602 /* check if processor is retiring instructions */
603 if (cortex_m3->dcb_dhcsr & S_RETIRE_ST)
604 {
605 target->state = TARGET_RUNNING;
606 retval = ERROR_OK;
607 }
608 }
609
610 /* Did we detect a failure condition that we cleared? */
611 if (detected_failure != ERROR_OK)
612 retval = detected_failure;
613 return retval;
614 }
615
616 static int cortex_m3_halt(struct target *target)
617 {
618 LOG_DEBUG("target->state: %s",
619 target_state_name(target));
620
621 if (target->state == TARGET_HALTED)
622 {
623 LOG_DEBUG("target was already halted");
624 return ERROR_OK;
625 }
626
627 if (target->state == TARGET_UNKNOWN)
628 {
629 LOG_WARNING("target was in unknown state when halt was requested");
630 }
631
632 if (target->state == TARGET_RESET)
633 {
634 if ((jtag_get_reset_config() & RESET_SRST_PULLS_TRST) && jtag_get_srst())
635 {
636 LOG_ERROR("can't request a halt while in reset if nSRST pulls nTRST");
637 return ERROR_TARGET_FAILURE;
638 }
639 else
640 {
641 /* we came here in a reset_halt or reset_init sequence
642 * debug entry was already prepared in cortex_m3_prepare_reset_halt()
643 */
644 target->debug_reason = DBG_REASON_DBGRQ;
645
646 return ERROR_OK;
647 }
648 }
649
650 /* Write to Debug Halting Control and Status Register */
651 cortex_m3_write_debug_halt_mask(target, C_HALT, 0);
652
653 target->debug_reason = DBG_REASON_DBGRQ;
654
655 return ERROR_OK;
656 }
657
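/* Reset only the core via AIRCR VECTRESET with the reset vector catch armed,
 * then poll DHCSR/DFSR for up to ~100 ms waiting for the halt at the reset
 * vector.
 */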
658 static int cortex_m3_soft_reset_halt(struct target *target)
659 {
660 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
661 struct adiv5_dap *swjdp = &cortex_m3->armv7m.dap;
662 uint32_t dcb_dhcsr = 0;
663 int retval, timeout = 0;
664
665 /* Enter debug state on reset; restore DEMCR in endreset_event() */
666 retval = mem_ap_write_u32(swjdp, DCB_DEMCR,
667 TRCENA | VC_HARDERR | VC_BUSERR | VC_CORERESET);
668 if (retval != ERROR_OK)
669 return retval;
670
671 /* Request a core-only reset */
672 retval = mem_ap_write_atomic_u32(swjdp, NVIC_AIRCR,
673 AIRCR_VECTKEY | AIRCR_VECTRESET);
674 if (retval != ERROR_OK)
675 return retval;
676 target->state = TARGET_RESET;
677
678 /* registers are now invalid */
679 register_cache_invalidate(cortex_m3->armv7m.core_cache);
680
681 while (timeout < 100)
682 {
683 retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &dcb_dhcsr);
684 if (retval == ERROR_OK)
685 {
686 retval = mem_ap_read_atomic_u32(swjdp, NVIC_DFSR,
687 &cortex_m3->nvic_dfsr);
688 if (retval != ERROR_OK)
689 return retval;
690 if ((dcb_dhcsr & S_HALT)
691 && (cortex_m3->nvic_dfsr & DFSR_VCATCH))
692 {
693 LOG_DEBUG("system reset-halted, DHCSR 0x%08x, "
694 "DFSR 0x%08x",
695 (unsigned) dcb_dhcsr,
696 (unsigned) cortex_m3->nvic_dfsr);
697 cortex_m3_poll(target);
698 /* FIXME restore user's vector catch config */
699 return ERROR_OK;
700 }
701 else
702 LOG_DEBUG("waiting for system reset-halt, "
703 "DHCSR 0x%08x, %d ms",
704 (unsigned) dcb_dhcsr, timeout);
705 }
706 timeout++;
707 alive_sleep(1);
708 }
709
710 return ERROR_OK;
711 }
712
713 static void cortex_m3_enable_breakpoints(struct target *target)
714 {
715 struct breakpoint *breakpoint = target->breakpoints;
716
717 /* set any pending breakpoints */
718 while (breakpoint)
719 {
720 if (!breakpoint->set)
721 cortex_m3_set_breakpoint(target, breakpoint);
722 breakpoint = breakpoint->next;
723 }
724 }
725
726 static int cortex_m3_resume(struct target *target, int current,
727 uint32_t address, int handle_breakpoints, int debug_execution)
728 {
729 struct armv7m_common *armv7m = target_to_armv7m(target);
730 struct breakpoint *breakpoint = NULL;
731 uint32_t resume_pc;
732 struct reg *r;
733
734 if (target->state != TARGET_HALTED)
735 {
736 LOG_WARNING("target not halted");
737 return ERROR_TARGET_NOT_HALTED;
738 }
739
740 if (!debug_execution)
741 {
742 target_free_all_working_areas(target);
743 cortex_m3_enable_breakpoints(target);
744 cortex_m3_enable_watchpoints(target);
745 }
746
747 if (debug_execution)
748 {
749 r = armv7m->core_cache->reg_list + ARMV7M_PRIMASK;
750
751 /* Disable interrupts */
752 /* We disable interrupts in the PRIMASK register instead of
753 * masking with C_MASKINTS. This is probably the same issue
754 * as Cortex-M3 Erratum 377493 (fixed in r1p0): C_MASKINTS
755 * in parallel with disabled interrupts can cause local faults
756 * to not be taken.
757 *
758 * REVISIT this clearly breaks non-debug execution, since the
759 * PRIMASK register state isn't saved/restored... workaround
760 * by never resuming app code after debug execution.
761 */
762 buf_set_u32(r->value, 0, 1, 1);
763 r->dirty = true;
764 r->valid = true;
765
766 /* Make sure we are in Thumb mode */
767 r = armv7m->core_cache->reg_list + ARMV7M_xPSR;
768 buf_set_u32(r->value, 24, 1, 1);
769 r->dirty = true;
770 r->valid = true;
771 }
772
773 /* current = 1: continue on current pc, otherwise continue at <address> */
774 r = armv7m->arm.pc;
775 if (!current)
776 {
777 buf_set_u32(r->value, 0, 32, address);
778 r->dirty = true;
779 r->valid = true;
780 }
781
782 /* if we halted last time due to a bkpt instruction
783 * then we have to manually step over it, otherwise
784 * the core will break again */
785
786 if (!breakpoint_find(target, buf_get_u32(r->value, 0, 32))
787 && !debug_execution)
788 {
789 armv7m_maybe_skip_bkpt_inst(target, NULL);
790 }
791
792 resume_pc = buf_get_u32(r->value, 0, 32);
793
794 armv7m_restore_context(target);
795
796 /* the front-end may request us not to handle breakpoints */
797 if (handle_breakpoints)
798 {
799 /* Single step past breakpoint at current address */
800 if ((breakpoint = breakpoint_find(target, resume_pc)))
801 {
802 LOG_DEBUG("unset breakpoint at 0x%8.8" PRIx32 " (ID: %d)",
803 breakpoint->address,
804 breakpoint->unique_id);
805 cortex_m3_unset_breakpoint(target, breakpoint);
806 cortex_m3_single_step_core(target);
807 cortex_m3_set_breakpoint(target, breakpoint);
808 }
809 }
810
811 /* Restart core */
812 cortex_m3_write_debug_halt_mask(target, 0, C_HALT);
813
814 target->debug_reason = DBG_REASON_NOTHALTED;
815
816 /* registers are now invalid */
817 register_cache_invalidate(armv7m->core_cache);
818
819 if (!debug_execution)
820 {
821 target->state = TARGET_RUNNING;
822 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
823 LOG_DEBUG("target resumed at 0x%" PRIx32 "", resume_pc);
824 }
825 else
826 {
827 target->state = TARGET_DEBUG_RUNNING;
828 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
829 LOG_DEBUG("target debug resumed at 0x%" PRIx32 "", resume_pc);
830 }
831
832 return ERROR_OK;
833 }
834
835 /* int irqstepcount = 0; */
836 static int cortex_m3_step(struct target *target, int current,
837 uint32_t address, int handle_breakpoints)
838 {
839 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
840 struct armv7m_common *armv7m = &cortex_m3->armv7m;
841 struct adiv5_dap *swjdp = &armv7m->dap;
842 struct breakpoint *breakpoint = NULL;
843 struct reg *pc = armv7m->arm.pc;
844 bool bkpt_inst_found = false;
845 int retval;
846 bool isr_timed_out = false;
847
848 if (target->state != TARGET_HALTED)
849 {
850 LOG_WARNING("target not halted");
851 return ERROR_TARGET_NOT_HALTED;
852 }
853
854 /* current = 1: continue on current pc, otherwise continue at <address> */
855 if (!current)
856 buf_set_u32(pc->value, 0, 32, address);
857
858 uint32_t pc_value = buf_get_u32(pc->value, 0, 32);
859
860 /* the front-end may request us not to handle breakpoints */
861 if (handle_breakpoints) {
862 breakpoint = breakpoint_find(target, pc_value);
863 if (breakpoint)
864 cortex_m3_unset_breakpoint(target, breakpoint);
865 }
866
867 armv7m_maybe_skip_bkpt_inst(target, &bkpt_inst_found);
868
869 target->debug_reason = DBG_REASON_SINGLESTEP;
870
871 armv7m_restore_context(target);
872
873 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
874
875 /* if no bkpt instruction is found at pc then we can perform
876 * a normal step, otherwise we have to manually step over the bkpt
877 * instruction - as such simulate a step */
878 if (bkpt_inst_found == false)
879 {
880 /* Automatic ISR masking mode off: Just step over the next instruction */
881 if (cortex_m3->isrmasking_mode != CORTEX_M3_ISRMASK_AUTO)
882 {
883 cortex_m3_write_debug_halt_mask(target, C_STEP, C_HALT);
884 }
885 else
886 {
887 /* Process interrupts during stepping in a way that they don't interfere
888 * with debugging.
889 *
890 * Principle:
891 *
892 * Set a temporary break point at the current pc and let the core run
893 * with interrupts enabled. Pending interrupts get served and we run
894 * into the breakpoint again afterwards. Then we step over the next
895 * instruction with interrupts disabled.
896 *
897 * If the pending interrupts don't complete within time, we leave the
898 * core running. This may happen if the interrupts trigger faster
899 * than the core can process them or the handler doesn't return.
900 *
901 * If no more breakpoints are available we simply do a step with
902 * interrupts enabled.
903 *
904 */
905
906 /* Set a temporary break point */
907 retval = breakpoint_add(target, pc_value, 2, BKPT_TYPE_BY_ADDR(pc_value));
908 bool tmp_bp_set = (retval == ERROR_OK);
909
910 /* No more breakpoints left, just do a step */
911 if (!tmp_bp_set)
912 {
913 cortex_m3_write_debug_halt_mask(target, C_STEP, C_HALT);
914 }
915 else
916 {
917 /* Start the core */
918 LOG_DEBUG("Starting core to serve pending interrupts");
919 int64_t t_start = timeval_ms();
920 cortex_m3_write_debug_halt_mask(target, 0, C_HALT | C_STEP);
921
922 /* Wait for pending handlers to complete or timeout */
923 do {
924 retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &cortex_m3->dcb_dhcsr);
925 if (retval != ERROR_OK)
926 {
927 target->state = TARGET_UNKNOWN;
928 return retval;
929 }
930 isr_timed_out = ((timeval_ms() - t_start) > 500);
931 } while (!((cortex_m3->dcb_dhcsr & S_HALT) || isr_timed_out));
932
933 /* Remove the temporary breakpoint */
934 breakpoint_remove(target, pc_value);
935
936 if (isr_timed_out)
937 {
938 LOG_DEBUG("Interrupt handlers didn't complete within time, "
939 "leaving target running");
940 }
941 else
942 {
943 /* Step over next instruction with interrupts disabled */
944 cortex_m3_write_debug_halt_mask(target, C_HALT | C_MASKINTS, 0);
945 cortex_m3_write_debug_halt_mask(target, C_STEP, C_HALT);
946 /* Re-enable interrupts */
947 cortex_m3_write_debug_halt_mask(target, C_HALT, C_MASKINTS);
948 }
949 }
950 }
951 }
952
953 retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &cortex_m3->dcb_dhcsr);
954 if (retval != ERROR_OK)
955 return retval;
956
957 /* registers are now invalid */
958 register_cache_invalidate(cortex_m3->armv7m.core_cache);
959
960 if (breakpoint)
961 cortex_m3_set_breakpoint(target, breakpoint);
962
963 if (isr_timed_out) {
964 /* Leave the core running. The user has to stop execution manually. */
965 target->debug_reason = DBG_REASON_NOTHALTED;
966 target->state = TARGET_RUNNING;
967 return ERROR_OK;
968 }
969
970 LOG_DEBUG("target stepped dcb_dhcsr = 0x%" PRIx32
971 " nvic_icsr = 0x%" PRIx32,
972 cortex_m3->dcb_dhcsr, cortex_m3->nvic_icsr);
973
974 retval = cortex_m3_debug_entry(target);
975 if (retval != ERROR_OK)
976 return retval;
977 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
978
979 LOG_DEBUG("target stepped dcb_dhcsr = 0x%" PRIx32
980 " nvic_icsr = 0x%" PRIx32,
981 cortex_m3->dcb_dhcsr, cortex_m3->nvic_icsr);
982
983 return ERROR_OK;
984 }
985
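/* Assert reset: prefer SRST when the adapter provides it; otherwise fall back
 * to an AIRCR software reset (SYSRESETREQ or VECTRESET, per configuration),
 * arming vector catch first when a reset-halt was requested.
 */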
986 static int cortex_m3_assert_reset(struct target *target)
987 {
988 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
989 struct adiv5_dap *swjdp = &cortex_m3->armv7m.dap;
990 enum cortex_m3_soft_reset_config reset_config = cortex_m3->soft_reset_config;
991
992 LOG_DEBUG("target->state: %s",
993 target_state_name(target));
994
995 enum reset_types jtag_reset_config = jtag_get_reset_config();
996
997 if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT)) {
998 /* allow scripts to override the reset event */
999
1000 target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
1001 register_cache_invalidate(cortex_m3->armv7m.core_cache);
1002 target->state = TARGET_RESET;
1003
1004 return ERROR_OK;
1005 }
1006
1007 /* Enable debug requests */
1008 int retval;
1009 retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &cortex_m3->dcb_dhcsr);
1010 if (retval != ERROR_OK)
1011 return retval;
1012 if (!(cortex_m3->dcb_dhcsr & C_DEBUGEN))
1013 {
1014 retval = mem_ap_write_u32(swjdp, DCB_DHCSR, DBGKEY | C_DEBUGEN);
1015 if (retval != ERROR_OK)
1016 return retval;
1017 }
1018
1019 retval = mem_ap_write_u32(swjdp, DCB_DCRDR, 0);
1020 if (retval != ERROR_OK)
1021 return retval;
1022
1023 if (!target->reset_halt)
1024 {
1025 /* Set/Clear C_MASKINTS in a separate operation */
1026 if (cortex_m3->dcb_dhcsr & C_MASKINTS)
1027 {
1028 retval = mem_ap_write_atomic_u32(swjdp, DCB_DHCSR,
1029 DBGKEY | C_DEBUGEN | C_HALT);
1030 if (retval != ERROR_OK)
1031 return retval;
1032 }
1033
1034 /* clear any debug flags before resuming */
1035 cortex_m3_clear_halt(target);
1036
1037 /* clear C_HALT in dhcsr reg */
1038 cortex_m3_write_debug_halt_mask(target, 0, C_HALT);
1039 }
1040 else
1041 {
1042 /* Halt in debug on reset; endreset_event() restores DEMCR.
1043 *
1044 * REVISIT catching BUSERR presumably helps to defend against
1045 * bad vector table entries. Should this include MMERR or
1046 * other flags too?
1047 */
1048 retval = mem_ap_write_atomic_u32(swjdp, DCB_DEMCR,
1049 TRCENA | VC_HARDERR | VC_BUSERR | VC_CORERESET);
1050 if (retval != ERROR_OK)
1051 return retval;
1052 }
1053
1054 if (jtag_reset_config & RESET_HAS_SRST)
1055 {
1056 /* default to asserting srst */
1057 if (jtag_reset_config & RESET_SRST_PULLS_TRST)
1058 {
1059 jtag_add_reset(1, 1);
1060 }
1061 else
1062 {
1063 jtag_add_reset(0, 1);
1064 }
1065 }
1066 else
1067 {
1068 /* Use a standard Cortex-M3 software reset mechanism.
1069 * We default to using VECTRESET as it is supported on all current cores.
1070 * This has the disadvantage of not resetting the peripherals, so a
1071 * reset-init event handler is needed to perform any peripheral resets.
1072 */
1073 retval = mem_ap_write_atomic_u32(swjdp, NVIC_AIRCR,
1074 AIRCR_VECTKEY | ((reset_config == CORTEX_M3_RESET_SYSRESETREQ)
1075 ? AIRCR_SYSRESETREQ : AIRCR_VECTRESET));
1076 if (retval != ERROR_OK)
1077 return retval;
1078
1079 LOG_DEBUG("Using Cortex-M3 %s", (reset_config == CORTEX_M3_RESET_SYSRESETREQ)
1080 ? "SYSRESETREQ" : "VECTRESET");
1081
1082 if (reset_config == CORTEX_M3_RESET_VECTRESET) {
1083 LOG_WARNING("Only resetting the Cortex-M3 core, use a reset-init event "
1084 "handler to reset any peripherals");
1085 }
1086
1087 {
1088 /* I do not know why this is necessary, but it
1089 * fixes strange effects (step/resume cause NMI
1090 * after reset) on LM3S6918 -- Michael Schwingen
1091 */
1092 uint32_t tmp;
1093 retval = mem_ap_read_atomic_u32(swjdp, NVIC_AIRCR, &tmp);
1094 if (retval != ERROR_OK)
1095 return retval;
1096 }
1097 }
1098
1099 target->state = TARGET_RESET;
1100 jtag_add_sleep(50000);
1101
1102 register_cache_invalidate(cortex_m3->armv7m.core_cache);
1103
1104 if (target->reset_halt)
1105 {
1106 if ((retval = target_halt(target)) != ERROR_OK)
1107 return retval;
1108 }
1109
1110 return ERROR_OK;
1111 }
1112
1113 static int cortex_m3_deassert_reset(struct target *target)
1114 {
1115 LOG_DEBUG("target->state: %s",
1116 target_state_name(target));
1117
1118 /* deassert reset lines */
1119 jtag_add_reset(0, 0);
1120
1121 return ERROR_OK;
1122 }
1123
1124 int
1125 cortex_m3_set_breakpoint(struct target *target, struct breakpoint *breakpoint)
1126 {
1127 int retval;
1128 int fp_num = 0;
1129 uint32_t hilo;
1130 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
1131 struct cortex_m3_fp_comparator *comparator_list = cortex_m3->fp_comparator_list;
1132
1133 if (breakpoint->set)
1134 {
1135 LOG_WARNING("breakpoint (BPID: %d) already set", breakpoint->unique_id);
1136 return ERROR_OK;
1137 }
1138
1139 if (cortex_m3->auto_bp_type)
1140 {
1141 breakpoint->type = BKPT_TYPE_BY_ADDR(breakpoint->address);
1142 }
1143
1144 if (breakpoint->type == BKPT_HARD)
1145 {
1146 while (comparator_list[fp_num].used && (fp_num < cortex_m3->fp_num_code))
1147 fp_num++;
1148 if (fp_num >= cortex_m3->fp_num_code)
1149 {
1150 LOG_ERROR("Can not find free FPB Comparator!");
1151 return ERROR_FAIL;
1152 }
1153 breakpoint->set = fp_num + 1;
1154 hilo = (breakpoint->address & 0x2) ? FPCR_REPLACE_BKPT_HIGH : FPCR_REPLACE_BKPT_LOW;
1155 comparator_list[fp_num].used = 1;
1156 comparator_list[fp_num].fpcr_value = (breakpoint->address & 0x1FFFFFFC) | hilo | 1;
1157 target_write_u32(target, comparator_list[fp_num].fpcr_address, comparator_list[fp_num].fpcr_value);
1158 LOG_DEBUG("fpc_num %i fpcr_value 0x%" PRIx32 "", fp_num, comparator_list[fp_num].fpcr_value);
1159 if (!cortex_m3->fpb_enabled)
1160 {
1161 LOG_DEBUG("FPB wasn't enabled, do it now");
1162 target_write_u32(target, FP_CTRL, 3);
1163 }
1164 }
1165 else if (breakpoint->type == BKPT_SOFT)
1166 {
1167 uint8_t code[4];
1168
1169 /* NOTE: on ARMv6-M and ARMv7-M, BKPT(0xab) is used for
1170 * semihosting; don't use that. Otherwise the BKPT
1171 * parameter is arbitrary.
1172 */
1173 buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1174 retval = target_read_memory(target,
1175 breakpoint->address & 0xFFFFFFFE,
1176 breakpoint->length, 1,
1177 breakpoint->orig_instr);
1178 if (retval != ERROR_OK)
1179 return retval;
1180 retval = target_write_memory(target,
1181 breakpoint->address & 0xFFFFFFFE,
1182 breakpoint->length, 1,
1183 code);
1184 if (retval != ERROR_OK)
1185 return retval;
1186 breakpoint->set = true;
1187 }
1188
1189 LOG_DEBUG("BPID: %d, Type: %d, Address: 0x%08" PRIx32 " Length: %d (set=%d)",
1190 breakpoint->unique_id,
1191 (int)(breakpoint->type),
1192 breakpoint->address,
1193 breakpoint->length,
1194 breakpoint->set);
1195
1196 return ERROR_OK;
1197 }
1198
1199 int
1200 cortex_m3_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
1201 {
1202 int retval;
1203 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
1204 struct cortex_m3_fp_comparator * comparator_list = cortex_m3->fp_comparator_list;
1205
1206 if (!breakpoint->set)
1207 {
1208 LOG_WARNING("breakpoint not set");
1209 return ERROR_OK;
1210 }
1211
1212 LOG_DEBUG("BPID: %d, Type: %d, Address: 0x%08" PRIx32 " Length: %d (set=%d)",
1213 breakpoint->unique_id,
1214 (int)(breakpoint->type),
1215 breakpoint->address,
1216 breakpoint->length,
1217 breakpoint->set);
1218
1219 if (breakpoint->type == BKPT_HARD)
1220 {
1221 int fp_num = breakpoint->set - 1;
1222 if ((fp_num < 0) || (fp_num >= cortex_m3->fp_num_code))
1223 {
1224 LOG_DEBUG("Invalid FP Comparator number in breakpoint");
1225 return ERROR_OK;
1226 }
1227 comparator_list[fp_num].used = 0;
1228 comparator_list[fp_num].fpcr_value = 0;
1229 target_write_u32(target, comparator_list[fp_num].fpcr_address, comparator_list[fp_num].fpcr_value);
1230 }
1231 else
1232 {
1233 /* restore original instruction (kept in target endianness) */
1234 if (breakpoint->length == 4)
1235 {
1236 if ((retval = target_write_memory(target, breakpoint->address & 0xFFFFFFFE, 4, 1, breakpoint->orig_instr)) != ERROR_OK)
1237 {
1238 return retval;
1239 }
1240 }
1241 else
1242 {
1243 if ((retval = target_write_memory(target, breakpoint->address & 0xFFFFFFFE, 2, 1, breakpoint->orig_instr)) != ERROR_OK)
1244 {
1245 return retval;
1246 }
1247 }
1248 }
1249 breakpoint->set = false;
1250
1251 return ERROR_OK;
1252 }
1253
1254 int
1255 cortex_m3_add_breakpoint(struct target *target, struct breakpoint *breakpoint)
1256 {
1257 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
1258
1259 if (cortex_m3->auto_bp_type)
1260 {
1261 breakpoint->type = BKPT_TYPE_BY_ADDR(breakpoint->address);
1262 #ifdef ARMV7_GDB_HACKS
1263 if (breakpoint->length != 2) {
1264 /* XXX Hack: Replace all breakpoints with length != 2 with
1265 * a hardware breakpoint. */
1266 breakpoint->type = BKPT_HARD;
1267 breakpoint->length = 2;
1268 }
1269 #endif
1270 }
1271
1272 if (breakpoint->type != BKPT_TYPE_BY_ADDR(breakpoint->address)) {
1273 if (breakpoint->type == BKPT_HARD)
1274 {
1275 LOG_INFO("flash patch comparator requested outside code memory region");
1276 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1277 }
1278
1279 if (breakpoint->type == BKPT_SOFT)
1280 {
1281 LOG_INFO("soft breakpoint requested in code (flash) memory region");
1282 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1283 }
1284 }
1285
1286 if ((breakpoint->type == BKPT_HARD) && (cortex_m3->fp_code_available < 1))
1287 {
1288 LOG_INFO("no flash patch comparator unit available for hardware breakpoint");
1289 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1290 }
1291
1292 if (breakpoint->length != 2)
1293 {
1294 LOG_INFO("only breakpoints of two bytes length supported");
1295 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1296 }
1297
1298 if (breakpoint->type == BKPT_HARD)
1299 cortex_m3->fp_code_available--;
1300
1301 return cortex_m3_set_breakpoint(target, breakpoint);
1302 }
1303
1304 int
1305 cortex_m3_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1306 {
1307 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
1308
1309 /* REVISIT why check? FPB can be updated with core running ... */
1310 if (target->state != TARGET_HALTED)
1311 {
1312 LOG_WARNING("target not halted");
1313 return ERROR_TARGET_NOT_HALTED;
1314 }
1315
1316 if (cortex_m3->auto_bp_type)
1317 {
1318 breakpoint->type = BKPT_TYPE_BY_ADDR(breakpoint->address);
1319 }
1320
1321 if (breakpoint->set)
1322 {
1323 cortex_m3_unset_breakpoint(target, breakpoint);
1324 }
1325
1326 if (breakpoint->type == BKPT_HARD)
1327 cortex_m3->fp_code_available++;
1328
1329 return ERROR_OK;
1330 }
1331
1332 int
1333 cortex_m3_set_watchpoint(struct target *target, struct watchpoint *watchpoint)
1334 {
1335 int dwt_num = 0;
1336 uint32_t mask, temp;
1337 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
1338
1339 /* watchpoint params were validated earlier */
1340 mask = 0;
1341 temp = watchpoint->length;
1342 while (temp) {
1343 temp >>= 1;
1344 mask++;
1345 }
1346 mask--;
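/* mask is now log2(length): the number of low-order address bits the DWT
 * comparator ignores when matching. */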
1347
1348 /* REVISIT Don't fully trust these "not used" records ... users
1349 * may set up breakpoints by hand, e.g. dual-address data value
1350 * watchpoint using comparator #1; comparator #0 matching cycle
1351 * count; send data trace info through ITM and TPIU; etc
1352 */
1353 struct cortex_m3_dwt_comparator *comparator;
1354
1355 for (comparator = cortex_m3->dwt_comparator_list;
1356 comparator->used && dwt_num < cortex_m3->dwt_num_comp;
1357 comparator++, dwt_num++)
1358 continue;
1359 if (dwt_num >= cortex_m3->dwt_num_comp)
1360 {
1361 LOG_ERROR("Can not find free DWT Comparator");
1362 return ERROR_FAIL;
1363 }
1364 comparator->used = 1;
1365 watchpoint->set = dwt_num + 1;
1366
1367 comparator->comp = watchpoint->address;
1368 target_write_u32(target, comparator->dwt_comparator_address + 0,
1369 comparator->comp);
1370
1371 comparator->mask = mask;
1372 target_write_u32(target, comparator->dwt_comparator_address + 4,
1373 comparator->mask);
1374
1375 switch (watchpoint->rw) {
1376 case WPT_READ:
1377 comparator->function = 5;
1378 break;
1379 case WPT_WRITE:
1380 comparator->function = 6;
1381 break;
1382 case WPT_ACCESS:
1383 comparator->function = 7;
1384 break;
1385 }
1386 target_write_u32(target, comparator->dwt_comparator_address + 8,
1387 comparator->function);
1388
1389 LOG_DEBUG("Watchpoint (ID %d) DWT%d 0x%08x 0x%x 0x%05x",
1390 watchpoint->unique_id, dwt_num,
1391 (unsigned) comparator->comp,
1392 (unsigned) comparator->mask,
1393 (unsigned) comparator->function);
1394 return ERROR_OK;
1395 }
1396
1397 int
1398 cortex_m3_unset_watchpoint(struct target *target, struct watchpoint *watchpoint)
1399 {
1400 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
1401 struct cortex_m3_dwt_comparator *comparator;
1402 int dwt_num;
1403
1404 if (!watchpoint->set)
1405 {
1406 LOG_WARNING("watchpoint (wpid: %d) not set",
1407 watchpoint->unique_id);
1408 return ERROR_OK;
1409 }
1410
1411 dwt_num = watchpoint->set - 1;
1412
1413 LOG_DEBUG("Watchpoint (ID %d) DWT%d address: 0x%08x clear",
1414 watchpoint->unique_id, dwt_num,
1415 (unsigned) watchpoint->address);
1416
1417 if ((dwt_num < 0) || (dwt_num >= cortex_m3->dwt_num_comp))
1418 {
1419 LOG_DEBUG("Invalid DWT Comparator number in watchpoint");
1420 return ERROR_OK;
1421 }
1422
1423 comparator = cortex_m3->dwt_comparator_list + dwt_num;
1424 comparator->used = 0;
1425 comparator->function = 0;
1426 target_write_u32(target, comparator->dwt_comparator_address + 8,
1427 comparator->function);
1428
1429 watchpoint->set = false;
1430
1431 return ERROR_OK;
1432 }
1433
1434 int
1435 cortex_m3_add_watchpoint(struct target *target, struct watchpoint *watchpoint)
1436 {
1437 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
1438
1439 if (cortex_m3->dwt_comp_available < 1)
1440 {
1441 LOG_DEBUG("no comparators?");
1442 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1443 }
1444
1445 /* hardware doesn't support data value masking */
1446 if (watchpoint->mask != ~(uint32_t)0) {
1447 LOG_DEBUG("watchpoint value masks not supported");
1448 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1449 }
1450
1451 /* hardware allows address masks of up to 32K */
1452 unsigned mask;
1453
1454 for (mask = 0; mask < 16; mask++) {
1455 if ((1u << mask) == watchpoint->length)
1456 break;
1457 }
1458 if (mask == 16) {
1459 LOG_DEBUG("unsupported watchpoint length");
1460 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1461 }
1462 if (watchpoint->address & ((1 << mask) - 1)) {
1463 LOG_DEBUG("watchpoint address is unaligned");
1464 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1465 }
1466
1467 /* Caller doesn't seem to be able to describe watching for data
1468 * values of zero; that flags "no value".
1469 *
1470 * REVISIT This DWT may well be able to watch for specific data
1471 * values. Requires comparator #1 to set DATAVMATCH and match
1472 * the data, and another comparator (DATAVADDR0) matching addr.
1473 */
1474 if (watchpoint->value) {
1475 LOG_DEBUG("data value watchpoint not YET supported");
1476 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1477 }
1478
1479 cortex_m3->dwt_comp_available--;
1480 LOG_DEBUG("dwt_comp_available: %d", cortex_m3->dwt_comp_available);
1481
1482 return ERROR_OK;
1483 }
1484
1485 int
1486 cortex_m3_remove_watchpoint(struct target *target, struct watchpoint *watchpoint)
1487 {
1488 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
1489
1490 /* REVISIT why check? DWT can be updated with core running ... */
1491 if (target->state != TARGET_HALTED)
1492 {
1493 LOG_WARNING("target not halted");
1494 return ERROR_TARGET_NOT_HALTED;
1495 }
1496
1497 if (watchpoint->set)
1498 {
1499 cortex_m3_unset_watchpoint(target, watchpoint);
1500 }
1501
1502 cortex_m3->dwt_comp_available++;
1503 LOG_DEBUG("dwt_comp_available: %d", cortex_m3->dwt_comp_available);
1504
1505 return ERROR_OK;
1506 }
1507
1508 void cortex_m3_enable_watchpoints(struct target *target)
1509 {
1510 struct watchpoint *watchpoint = target->watchpoints;
1511
1512 /* set any pending watchpoints */
1513 while (watchpoint)
1514 {
1515 if (!watchpoint->set)
1516 cortex_m3_set_watchpoint(target, watchpoint);
1517 watchpoint = watchpoint->next;
1518 }
1519 }
1520
1521 static int cortex_m3_load_core_reg_u32(struct target *target,
1522 enum armv7m_regtype type, uint32_t num, uint32_t * value)
1523 {
1524 int retval;
1525 struct armv7m_common *armv7m = target_to_armv7m(target);
1526 struct adiv5_dap *swjdp = &armv7m->dap;
1527
1528 /* NOTE: we "know" here that the register identifiers used
1529 * in the v7m header match the Cortex-M3 Debug Core Register
1530 * Selector values for R0..R15, xPSR, MSP, and PSP.
1531 */
1532 switch (num) {
1533 case 0 ... 18:
1534 /* read a normal core register */
1535 retval = cortexm3_dap_read_coreregister_u32(swjdp, value, num);
1536
1537 if (retval != ERROR_OK)
1538 {
1539 LOG_ERROR("JTAG failure %i",retval);
1540 return ERROR_JTAG_DEVICE_ERROR;
1541 }
1542 LOG_DEBUG("load from core reg %i value 0x%" PRIx32 "",(int)num,*value);
1543 break;
1544
1545 case ARMV7M_PRIMASK:
1546 case ARMV7M_BASEPRI:
1547 case ARMV7M_FAULTMASK:
1548 case ARMV7M_CONTROL:
1549 /* Cortex-M3 packages these four registers as bitfields
1550 * in one Debug Core register. So say r0 and r2 docs;
1551 * it was removed from r1 docs, but still works.
1552 */
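/* Layout of that packed register, as used below: PRIMASK in bit 0,
 * BASEPRI in bits [15:8], FAULTMASK in bit 16, CONTROL in bits [25:24]. */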
1553 cortexm3_dap_read_coreregister_u32(swjdp, value, 20);
1554
1555 switch (num)
1556 {
1557 case ARMV7M_PRIMASK:
1558 *value = buf_get_u32((uint8_t*)value, 0, 1);
1559 break;
1560
1561 case ARMV7M_BASEPRI:
1562 *value = buf_get_u32((uint8_t*)value, 8, 8);
1563 break;
1564
1565 case ARMV7M_FAULTMASK:
1566 *value = buf_get_u32((uint8_t*)value, 16, 1);
1567 break;
1568
1569 case ARMV7M_CONTROL:
1570 *value = buf_get_u32((uint8_t*)value, 24, 2);
1571 break;
1572 }
1573
1574 LOG_DEBUG("load from special reg %i value 0x%" PRIx32 "", (int)num, *value);
1575 break;
1576
1577 default:
1578 return ERROR_COMMAND_SYNTAX_ERROR;
1579 }
1580
1581 return ERROR_OK;
1582 }
1583
1584 static int cortex_m3_store_core_reg_u32(struct target *target,
1585 enum armv7m_regtype type, uint32_t num, uint32_t value)
1586 {
1587 int retval;
1588 uint32_t reg;
1589 struct armv7m_common *armv7m = target_to_armv7m(target);
1590 struct adiv5_dap *swjdp = &armv7m->dap;
1591
1592 #ifdef ARMV7_GDB_HACKS
1593 /* If the LR register is being modified, make sure it will put us
1594 * in "thumb" mode, or an INVSTATE exception will occur. This is a
1595 * hack to deal with the fact that gdb will sometimes "forge"
1596 * return addresses, and doesn't set the LSB correctly (i.e., when
1597 * printing expressions containing function calls, it sets LR = 0.)
1598 * Valid exception return codes have bit 0 set too.
1599 */
1600 if (num == ARMV7M_R14)
1601 value |= 0x01;
1602 #endif
1603
1604 /* NOTE: we "know" here that the register identifiers used
1605 * in the v7m header match the Cortex-M3 Debug Core Register
1606 * Selector values for R0..R15, xPSR, MSP, and PSP.
1607 */
1608 switch (num) {
1609 case 0 ... 18:
1610 retval = cortexm3_dap_write_coreregister_u32(swjdp, value, num);
1611 if (retval != ERROR_OK)
1612 {
1613 struct reg *r;
1614
1615 LOG_ERROR("JTAG failure");
1616 r = armv7m->core_cache->reg_list + num;
1617 r->dirty = r->valid;
1618 return ERROR_JTAG_DEVICE_ERROR;
1619 }
1620 LOG_DEBUG("write core reg %i value 0x%" PRIx32 "", (int)num, value);
1621 break;
1622
1623 case ARMV7M_PRIMASK:
1624 case ARMV7M_BASEPRI:
1625 case ARMV7M_FAULTMASK:
1626 case ARMV7M_CONTROL:
1627 /* Cortex-M3 packages these four registers as bitfields
1628 * in one Debug Core register. So say r0 and r2 docs;
1629 * it was removed from r1 docs, but still works.
1630 */
1631 cortexm3_dap_read_coreregister_u32(swjdp, &reg, 20);
1632
1633 switch (num)
1634 {
1635 case ARMV7M_PRIMASK:
1636 buf_set_u32((uint8_t*)&reg, 0, 1, value);
1637 break;
1638
1639 case ARMV7M_BASEPRI:
1640 buf_set_u32((uint8_t*)&reg, 8, 8, value);
1641 break;
1642
1643 case ARMV7M_FAULTMASK:
1644 buf_set_u32((uint8_t*)&reg, 16, 1, value);
1645 break;
1646
1647 case ARMV7M_CONTROL:
1648 buf_set_u32((uint8_t*)&reg, 24, 2, value);
1649 break;
1650 }
1651
1652 cortexm3_dap_write_coreregister_u32(swjdp, reg, 20);
1653
1654 LOG_DEBUG("write special reg %i value 0x%" PRIx32 " ", (int)num, value);
1655 break;
1656
1657 default:
1658 return ERROR_COMMAND_SYNTAX_ERROR;
1659 }
1660
1661 return ERROR_OK;
1662 }
1663
1664 static int cortex_m3_read_memory(struct target *target, uint32_t address,
1665 uint32_t size, uint32_t count, uint8_t *buffer)
1666 {
1667 struct armv7m_common *armv7m = target_to_armv7m(target);
1668 struct adiv5_dap *swjdp = &armv7m->dap;
1669 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1670
1671 /* cortex_m3 handles unaligned memory access */
1672 if (count && buffer) {
1673 switch (size) {
1674 case 4:
1675 retval = mem_ap_read_buf_u32(swjdp, buffer, 4 * count, address);
1676 break;
1677 case 2:
1678 retval = mem_ap_read_buf_u16(swjdp, buffer, 2 * count, address);
1679 break;
1680 case 1:
1681 retval = mem_ap_read_buf_u8(swjdp, buffer, count, address);
1682 break;
1683 }
1684 }
1685
1686 return retval;
1687 }
1688
1689 static int cortex_m3_write_memory(struct target *target, uint32_t address,
1690 uint32_t size, uint32_t count, const uint8_t *buffer)
1691 {
1692 struct armv7m_common *armv7m = target_to_armv7m(target);
1693 struct adiv5_dap *swjdp = &armv7m->dap;
1694 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1695
1696 if (count && buffer) {
1697 switch (size) {
1698 case 4:
1699 retval = mem_ap_write_buf_u32(swjdp, buffer, 4 * count, address);
1700 break;
1701 case 2:
1702 retval = mem_ap_write_buf_u16(swjdp, buffer, 2 * count, address);
1703 break;
1704 case 1:
1705 retval = mem_ap_write_buf_u8(swjdp, buffer, count, address);
1706 break;
1707 }
1708 }
1709
1710 return retval;
1711 }
1712
1713 static int cortex_m3_bulk_write_memory(struct target *target, uint32_t address,
1714 uint32_t count, const uint8_t *buffer)
1715 {
1716 return cortex_m3_write_memory(target, address, 4, count, buffer);
1717 }
1718
1719 static int cortex_m3_init_target(struct command_context *cmd_ctx,
1720 struct target *target)
1721 {
1722 armv7m_build_reg_cache(target);
1723 return ERROR_OK;
1724 }
1725
1726 /* REVISIT cache valid/dirty bits are unmaintained. We could set "valid"
1727 * on r/w if the core is not running, and clear on resume or reset ... or
1728 * at least, in a post_restore_context() method.
1729 */
1730
1731 struct dwt_reg_state {
1732 struct target *target;
1733 uint32_t addr;
1734 uint32_t value; /* scratch/cache */
1735 };
1736
1737 static int cortex_m3_dwt_get_reg(struct reg *reg)
1738 {
1739 struct dwt_reg_state *state = reg->arch_info;
1740
1741 return target_read_u32(state->target, state->addr, &state->value);
1742 }
1743
1744 static int cortex_m3_dwt_set_reg(struct reg *reg, uint8_t *buf)
1745 {
1746 struct dwt_reg_state *state = reg->arch_info;
1747
1748 return target_write_u32(state->target, state->addr,
1749 buf_get_u32(buf, 0, reg->size));
1750 }
1751
1752 struct dwt_reg {
1753 uint32_t addr;
1754 char *name;
1755 unsigned size;
1756 };
1757
1758 static struct dwt_reg dwt_base_regs[] = {
1759 { DWT_CTRL, "dwt_ctrl", 32, },
1760 * NOTE that Erratum 532314 (fixed in r2p0) affects CYCCNT: it wrongly
1761 * increments while the core is asleep.
1762 */
1763 { DWT_CYCCNT, "dwt_cyccnt", 32, },
1764 /* plus some 8 bit counters, useful for profiling with TPIU */
1765 };
1766
1767 static struct dwt_reg dwt_comp[] = {
1768 #define DWT_COMPARATOR(i) \
1769 { DWT_COMP0 + 0x10 * (i), "dwt_" #i "_comp", 32, }, \
1770 { DWT_MASK0 + 0x10 * (i), "dwt_" #i "_mask", 4, }, \
1771 { DWT_FUNCTION0 + 0x10 * (i), "dwt_" #i "_function", 32, }
1772 DWT_COMPARATOR(0),
1773 DWT_COMPARATOR(1),
1774 DWT_COMPARATOR(2),
1775 DWT_COMPARATOR(3),
1776 #undef DWT_COMPARATOR
1777 };
1778
1779 static const struct reg_arch_type dwt_reg_type = {
1780 .get = cortex_m3_dwt_get_reg,
1781 .set = cortex_m3_dwt_set_reg,
1782 };
1783
1784 static void
1785 cortex_m3_dwt_addreg(struct target *t, struct reg *r, struct dwt_reg *d)
1786 {
1787 struct dwt_reg_state *state;
1788
1789 state = calloc(1, sizeof *state);
1790 if (!state)
1791 return;
1792 state->addr = d->addr;
1793 state->target = t;
1794
1795 r->name = d->name;
1796 r->size = d->size;
1797 r->value = &state->value;
1798 r->arch_info = state;
1799 r->type = &dwt_reg_type;
1800 }
1801
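/* Probe the DWT unit: read the comparator count from DWT_CTRL, allocate the
 * comparator bookkeeping, and expose the DWT registers through their own
 * register cache on the target.
 */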
1802 void
1803 cortex_m3_dwt_setup(struct cortex_m3_common *cm3, struct target *target)
1804 {
1805 uint32_t dwtcr;
1806 struct reg_cache *cache;
1807 struct cortex_m3_dwt_comparator *comparator;
1808 int reg, i;
1809
1810 target_read_u32(target, DWT_CTRL, &dwtcr);
1811 if (!dwtcr) {
1812 LOG_DEBUG("no DWT");
1813 return;
1814 }
1815
1816 cm3->dwt_num_comp = (dwtcr >> 28) & 0xF;
1817 cm3->dwt_comp_available = cm3->dwt_num_comp;
1818 cm3->dwt_comparator_list = calloc(cm3->dwt_num_comp,
1819 sizeof(struct cortex_m3_dwt_comparator));
1820 if (!cm3->dwt_comparator_list) {
1821 fail0:
1822 cm3->dwt_num_comp = 0;
1823 LOG_ERROR("out of mem");
1824 return;
1825 }
1826
1827 cache = calloc(1, sizeof *cache);
1828 if (!cache) {
1829 fail1:
1830 free(cm3->dwt_comparator_list);
1831 goto fail0;
1832 }
1833 cache->name = "cortex-m3 dwt registers";
1834 cache->num_regs = 2 + cm3->dwt_num_comp * 3;
1835 cache->reg_list = calloc(cache->num_regs, sizeof *cache->reg_list);
1836 if (!cache->reg_list) {
1837 free(cache);
1838 goto fail1;
1839 }
1840
1841 for (reg = 0; reg < 2; reg++)
1842 cortex_m3_dwt_addreg(target, cache->reg_list + reg,
1843 dwt_base_regs + reg);
1844
1845 comparator = cm3->dwt_comparator_list;
1846 for (i = 0; i < cm3->dwt_num_comp; i++, comparator++) {
1847 int j;
1848
1849 comparator->dwt_comparator_address = DWT_COMP0 + 0x10 * i;
1850 for (j = 0; j < 3; j++, reg++)
1851 cortex_m3_dwt_addreg(target, cache->reg_list + reg,
1852 dwt_comp + 3 * i + j);
1853 }
1854
1855 *register_get_last_cache_p(&target->reg_cache) = cache;
1856 cm3->dwt_cache = cache;
1857
1858 LOG_DEBUG("DWT dwtcr 0x%" PRIx32 ", comp %d, watch%s",
1859 dwtcr, cm3->dwt_num_comp,
1860 (dwtcr & (0xf << 24)) ? " only" : "/trigger");
1861
1862 /* REVISIT: if num_comp > 1, check whether comparator #1 can
1863 * implement single-address data value watchpoints ... so we
1864 * won't need to check it later, when asked to set one up.
1865 */
1866 }
1867
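/* First examination of the target: initialise the debug port, identify the
 * core from CPUID, and probe the optional FPB and DWT units.
 */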
1868 static int cortex_m3_examine(struct target *target)
1869 {
1870 int retval;
1871 uint32_t cpuid, fpcr;
1872 int i;
1873 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
1874 struct adiv5_dap *swjdp = &cortex_m3->armv7m.dap;
1875
1876 if ((retval = ahbap_debugport_init(swjdp)) != ERROR_OK)
1877 return retval;
1878
1879 if (!target_was_examined(target))
1880 {
1881 target_set_examined(target);
1882
1883 /* Read from Device Identification Registers */
1884 retval = target_read_u32(target, CPUID, &cpuid);
1885 if (retval != ERROR_OK)
1886 return retval;
1887
1888 if (((cpuid >> 4) & 0xc3f) == 0xc23)
1889 LOG_DEBUG("Cortex-M3 r%" PRId8 "p%" PRId8 " processor detected",
1890 (uint8_t)((cpuid >> 20) & 0xf), (uint8_t)((cpuid >> 0) & 0xf));
1891 LOG_DEBUG("cpuid: 0x%8.8" PRIx32 "", cpuid);
1892
1893 /* NOTE: FPB and DWT are both optional. */
1894
1895 /* Setup FPB */
1896 target_read_u32(target, FP_CTRL, &fpcr);
1897 cortex_m3->auto_bp_type = 1;
1898 cortex_m3->fp_num_code = ((fpcr >> 8) & 0x70) | ((fpcr >> 4) & 0xF); /* bits [14:12] and [7:4] */
1899 cortex_m3->fp_num_lit = (fpcr >> 8) & 0xF;
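		/* Worked example: a typical Cortex-M3 FPB reads fpcr == 0x260
		 * when disabled, which decodes to fp_num_code == 6
		 * ((0x260 >> 4) & 0xF, bits [14:12] being zero) and
		 * fp_num_lit == 2 ((0x260 >> 8) & 0xF).
		 */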
1900 cortex_m3->fp_code_available = cortex_m3->fp_num_code;
1901 cortex_m3->fp_comparator_list = calloc(cortex_m3->fp_num_code + cortex_m3->fp_num_lit, sizeof(struct cortex_m3_fp_comparator));
1902 cortex_m3->fpb_enabled = fpcr & 1;
1903 for (i = 0; i < cortex_m3->fp_num_code + cortex_m3->fp_num_lit; i++)
1904 {
1905 cortex_m3->fp_comparator_list[i].type = (i < cortex_m3->fp_num_code) ? FPCR_CODE : FPCR_LITERAL;
1906 cortex_m3->fp_comparator_list[i].fpcr_address = FP_COMP0 + 4 * i;
1907 }
1908 LOG_DEBUG("FPB fpcr 0x%" PRIx32 ", numcode %i, numlit %i", fpcr, cortex_m3->fp_num_code, cortex_m3->fp_num_lit);
1909
1910 /* Setup DWT */
1911 cortex_m3_dwt_setup(cortex_m3, target);
1912
1913 /* These hardware breakpoints only work for code in flash! */
1914 LOG_INFO("%s: hardware has %d breakpoints, %d watchpoints",
1915 target_name(target),
1916 cortex_m3->fp_num_code,
1917 cortex_m3->dwt_num_comp);
1918 }
1919
1920 return ERROR_OK;
1921 }
1922
1923 static int cortex_m3_dcc_read(struct adiv5_dap *swjdp, uint8_t *value, uint8_t *ctrl)
1924 {
1925 uint16_t dcrdr;
1926 int retval;
1927
1928 mem_ap_read_buf_u16(swjdp, (uint8_t*)&dcrdr, 1, DCB_DCRDR);
1929 *ctrl = (uint8_t)dcrdr;
1930 *value = (uint8_t)(dcrdr >> 8);
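	/* Example: a dcrdr halfword of 0x4101 unpacks to ctrl == 0x01 and
	 * value == 0x41; since bit 0 of ctrl is set, the block below writes
	 * zero back to acknowledge that the byte has been consumed.
	 */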
1931
1932 LOG_DEBUG("data 0x%x ctrl 0x%x", *value, *ctrl);
1933
1934 /* write the ack back to the software dcc register
1935 * to signal that we have consumed the data */
1936 if (dcrdr & (1 << 0))
1937 {
1938 dcrdr = 0;
1939 retval = mem_ap_write_buf_u16(swjdp, (uint8_t*)&dcrdr, 1, DCB_DCRDR);
1940 if (retval != ERROR_OK)
1941 return retval;
1942 }
1943
1944 return ERROR_OK;
1945 }
1946
1947 static int cortex_m3_target_request_data(struct target *target,
1948 uint32_t size, uint8_t *buffer)
1949 {
1950 struct armv7m_common *armv7m = target_to_armv7m(target);
1951 struct adiv5_dap *swjdp = &armv7m->dap;
1952 uint8_t data;
1953 uint8_t ctrl;
1954 uint32_t i;
1955
1956 for (i = 0; i < (size * 4); i++)
1957 {
1958 cortex_m3_dcc_read(swjdp, &data, &ctrl);
1959 buffer[i] = data;
1960 }
1961
1962 return ERROR_OK;
1963 }
1964
1965 static int cortex_m3_handle_target_request(void *priv)
1966 {
1967 struct target *target = priv;
1968 if (!target_was_examined(target))
1969 return ERROR_OK;
1970 struct armv7m_common *armv7m = target_to_armv7m(target);
1971 struct adiv5_dap *swjdp = &armv7m->dap;
1972
1973 if (!target->dbg_msg_enabled)
1974 return ERROR_OK;
1975
1976 if (target->state == TARGET_RUNNING)
1977 {
1978 uint8_t data;
1979 uint8_t ctrl;
1980
1981 cortex_m3_dcc_read(swjdp, &data, &ctrl);
1982
1983 /* check if we have data */
1984 if (ctrl & (1 << 0))
1985 {
1986 uint32_t request;
1987
1988 /* we assume target is quick enough */
1989 request = data;
1990 cortex_m3_dcc_read(swjdp, &data, &ctrl);
1991 request |= (data << 8);
1992 cortex_m3_dcc_read(swjdp, &data, &ctrl);
1993 request |= (data << 16);
1994 cortex_m3_dcc_read(swjdp, &data, &ctrl);
1995 request |= (data << 24);
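			/* Example: DCC bytes read in the order 0x01, 0x00,
			 * 0x00, 0x80 assemble little-endian into
			 * request == 0x80000001 (first byte is the LSB).
			 */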
1996 target_request(target, request);
1997 }
1998 }
1999
2000 return ERROR_OK;
2001 }
2002
2003 static int cortex_m3_init_arch_info(struct target *target,
2004 struct cortex_m3_common *cortex_m3, struct jtag_tap *tap)
2005 {
2006 int retval;
2007 struct armv7m_common *armv7m = &cortex_m3->armv7m;
2008
2009 armv7m_init_arch_info(target, armv7m);
2010
2011 /* prepare JTAG information for the new target */
2012 cortex_m3->jtag_info.tap = tap;
2013 cortex_m3->jtag_info.scann_size = 4;
2014
2015 /* The default reset mode is to use SRST if fitted;
2016 * if not, fall back to CORTEX_M3_RESET_VECTRESET. */
2017 cortex_m3->soft_reset_config = CORTEX_M3_RESET_VECTRESET;
2018
2019 armv7m->arm.dap = &armv7m->dap;
2020
2021 /* Leave (only) generic DAP stuff for debugport_init(); */
2022 armv7m->dap.jtag_info = &cortex_m3->jtag_info;
2023 armv7m->dap.memaccess_tck = 8;
2024 /* Cortex-M3 has 4096 bytes autoincrement range */
2025 armv7m->dap.tar_autoincr_block = (1 << 12);
2026
2027 /* register arch-specific functions */
2028 armv7m->examine_debug_reason = cortex_m3_examine_debug_reason;
2029
2030 armv7m->post_debug_entry = NULL;
2031
2032 armv7m->pre_restore_context = NULL;
2033
2034 armv7m->load_core_reg_u32 = cortex_m3_load_core_reg_u32;
2035 armv7m->store_core_reg_u32 = cortex_m3_store_core_reg_u32;
2036
2037 target_register_timer_callback(cortex_m3_handle_target_request, 1, 1, target);
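	/* The callback registered above polls the DCC for target debug
	 * messages roughly once per millisecond (time_ms = 1, periodic = 1).
	 */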
2038
2039 if ((retval = arm_jtag_setup_connection(&cortex_m3->jtag_info)) != ERROR_OK)
2040 {
2041 return retval;
2042 }
2043
2044 return ERROR_OK;
2045 }
2046
2047 static int cortex_m3_target_create(struct target *target, Jim_Interp *interp)
2048 {
2049 struct cortex_m3_common *cortex_m3 = calloc(1, sizeof(struct cortex_m3_common));
2050
2051 cortex_m3->common_magic = CORTEX_M3_COMMON_MAGIC;
2052 cortex_m3_init_arch_info(target, cortex_m3, target->tap);
2053
2054 return ERROR_OK;
2055 }
2056
2057 /*--------------------------------------------------------------------------*/
2058
2059 static int cortex_m3_verify_pointer(struct command_context *cmd_ctx,
2060 struct cortex_m3_common *cm3)
2061 {
2062 if (cm3->common_magic != CORTEX_M3_COMMON_MAGIC) {
2063 command_print(cmd_ctx, "target is not a Cortex-M3");
2064 return ERROR_TARGET_INVALID;
2065 }
2066 return ERROR_OK;
2067 }
2068
2069 /*
2070 * Only stuff below this line should need to verify that its target
2071 * is a Cortex-M3. Everything else should have indirected through the
2072 * cortexm3_target structure, which is only used with CM3 targets.
2073 */
2074
2075 static const struct {
2076 char name[10];
2077 unsigned mask;
2078 } vec_ids[] = {
2079 { "hard_err", VC_HARDERR, },
2080 { "int_err", VC_INTERR, },
2081 { "bus_err", VC_BUSERR, },
2082 { "state_err", VC_STATERR, },
2083 { "chk_err", VC_CHKERR, },
2084 { "nocp_err", VC_NOCPERR, },
2085 { "mm_err", VC_MMERR, },
2086 { "reset", VC_CORERESET, },
2087 };
2088
2089 COMMAND_HANDLER(handle_cortex_m3_vector_catch_command)
2090 {
2091 struct target *target = get_current_target(CMD_CTX);
2092 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
2093 struct armv7m_common *armv7m = &cortex_m3->armv7m;
2094 struct adiv5_dap *swjdp = &armv7m->dap;
2095 uint32_t demcr = 0;
2096 int retval;
2097
2098 retval = cortex_m3_verify_pointer(CMD_CTX, cortex_m3);
2099 if (retval != ERROR_OK)
2100 return retval;
2101
2102 retval = mem_ap_read_atomic_u32(swjdp, DCB_DEMCR, &demcr);
2103 if (retval != ERROR_OK)
2104 return retval;
2105
2106 if (CMD_ARGC > 0) {
2107 unsigned catch = 0;
2108
2109 if (CMD_ARGC == 1) {
2110 if (strcmp(CMD_ARGV[0], "all") == 0) {
2111 catch = VC_HARDERR | VC_INTERR | VC_BUSERR
2112 | VC_STATERR | VC_CHKERR | VC_NOCPERR
2113 | VC_MMERR | VC_CORERESET;
2114 goto write;
2115 } else if (strcmp(CMD_ARGV[0], "none") == 0) {
2116 goto write;
2117 }
2118 }
2119 while (CMD_ARGC-- > 0) {
2120 unsigned i;
2121 for (i = 0; i < ARRAY_SIZE(vec_ids); i++) {
2122 if (strcmp(CMD_ARGV[CMD_ARGC], vec_ids[i].name) != 0)
2123 continue;
2124 catch |= vec_ids[i].mask;
2125 break;
2126 }
2127 if (i == ARRAY_SIZE(vec_ids)) {
2128 LOG_ERROR("No CM3 vector '%s'", CMD_ARGV[CMD_ARGC]);
2129 return ERROR_COMMAND_SYNTAX_ERROR;
2130 }
2131 }
2132 write:
2133 /* For now, armv7m->demcr only stores vector catch flags. */
2134 armv7m->demcr = catch;
2135
2136 demcr &= ~0xffff;
2137 demcr |= catch;
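		/* Example: "cortex_m3 vector_catch hard_err reset" yields
		 * catch == VC_HARDERR | VC_CORERESET == (1 << 10) | (1 << 0)
		 * == 0x401, so the low half of DEMCR becomes 0x0401 while
		 * TRCENA and the debug monitor bits above bit 15 are left
		 * untouched by the ~0xffff mask.
		 */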
2138
2139 /* write, but don't assume it stuck; read DEMCR back so we report what the hardware actually accepted */
2140 retval = mem_ap_write_u32(swjdp, DCB_DEMCR, demcr);
2141 if (retval != ERROR_OK)
2142 return retval;
2143 retval = mem_ap_read_atomic_u32(swjdp, DCB_DEMCR, &demcr);
2144 if (retval != ERROR_OK)
2145 return retval;
2146
2147 /* FIXME be sure to clear DEMCR on clean server shutdown.
2148 * Otherwise the vector catch hardware could fire when there's
2149 * no debugger hooked up, causing much confusion...
2150 */
2151 }
2152
2153 for (unsigned i = 0; i < ARRAY_SIZE(vec_ids); i++)
2154 {
2155 command_print(CMD_CTX, "%9s: %s", vec_ids[i].name,
2156 (demcr & vec_ids[i].mask) ? "catch" : "ignore");
2157 }
2158
2159 return ERROR_OK;
2160 }
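/* Typical usage from the OpenOCD command line:
 *	cortex_m3 vector_catch all
 *	cortex_m3 vector_catch hard_err bus_err
 *	cortex_m3 vector_catch none
 * With no arguments the handler just prints the current catch/ignore state.
 */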
2161
2162 COMMAND_HANDLER(handle_cortex_m3_mask_interrupts_command)
2163 {
2164 struct target *target = get_current_target(CMD_CTX);
2165 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
2166 int retval;
2167
2168 static const Jim_Nvp nvp_maskisr_modes[] = {
2169 { .name = "auto", .value = CORTEX_M3_ISRMASK_AUTO },
2170 { .name = "off" , .value = CORTEX_M3_ISRMASK_OFF },
2171 { .name = "on" , .value = CORTEX_M3_ISRMASK_ON },
2172 { .name = NULL , .value = -1 },
2173 };
2174 const Jim_Nvp *n;
2175
2176
2177 retval = cortex_m3_verify_pointer(CMD_CTX, cortex_m3);
2178 if (retval != ERROR_OK)
2179 return retval;
2180
2181 if (target->state != TARGET_HALTED)
2182 {
2183 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
2184 return ERROR_OK;
2185 }
2186
2187 if (CMD_ARGC > 0)
2188 {
2189 n = Jim_Nvp_name2value_simple(nvp_maskisr_modes, CMD_ARGV[0]);
2190 if (n->name == NULL)
2191 {
2192 return ERROR_COMMAND_SYNTAX_ERROR;
2193 }
2194 cortex_m3->isrmasking_mode = n->value;
2195
2196
2197 if (cortex_m3->isrmasking_mode == CORTEX_M3_ISRMASK_ON)
2198 {
2199 cortex_m3_write_debug_halt_mask(target, C_HALT | C_MASKINTS, 0);
2200 }
2201 else
2202 {
2203 cortex_m3_write_debug_halt_mask(target, C_HALT, C_MASKINTS);
2204 }
2205 }
2206
2207 n = Jim_Nvp_value2name_simple(nvp_maskisr_modes, cortex_m3->isrmasking_mode);
2208 command_print(CMD_CTX, "cortex_m3 interrupt mask %s", n->name);
2209
2210 return ERROR_OK;
2211 }
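/* Typical usage: with the target halted, "cortex_m3 maskisr on" sets
 * C_MASKINTS alongside C_HALT in DHCSR so interrupts stay disabled while
 * single stepping; "cortex_m3 maskisr off" clears C_MASKINTS again, and
 * "cortex_m3 maskisr auto" leaves masking decisions to the step code.
 */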
2212
2213 COMMAND_HANDLER(handle_cortex_m3_reset_config_command)
2214 {
2215 struct target *target = get_current_target(CMD_CTX);
2216 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
2217 int retval;
2218 char *reset_config;
2219
2220 retval = cortex_m3_verify_pointer(CMD_CTX, cortex_m3);
2221 if (retval != ERROR_OK)
2222 return retval;
2223
2224 if (CMD_ARGC > 0)
2225 {
2226 if (strcmp(*CMD_ARGV, "sysresetreq") == 0)
2227 cortex_m3->soft_reset_config = CORTEX_M3_RESET_SYSRESETREQ;
2228 else if (strcmp(*CMD_ARGV, "vectreset") == 0)
2229 cortex_m3->soft_reset_config = CORTEX_M3_RESET_VECTRESET;
2230 }
2231
2232 switch (cortex_m3->soft_reset_config)
2233 {
2234 case CORTEX_M3_RESET_SYSRESETREQ:
2235 reset_config = "sysresetreq";
2236 break;
2237
2238 case CORTEX_M3_RESET_VECTRESET:
2239 reset_config = "vectreset";
2240 break;
2241
2242 default:
2243 reset_config = "unknown";
2244 break;
2245 }
2246
2247 command_print(CMD_CTX, "cortex_m3 reset_config %s", reset_config);
2248
2249 return ERROR_OK;
2250 }
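/* Typical usage: "cortex_m3 reset_config sysresetreq" selects SYSRESETREQ
 * for soft resets, "cortex_m3 reset_config vectreset" selects VECTRESET
 * (core-only reset), and with no argument the current setting is printed.
 */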
2251
2252 static const struct command_registration cortex_m3_exec_command_handlers[] = {
2253 {
2254 .name = "maskisr",
2255 .handler = handle_cortex_m3_mask_interrupts_command,
2256 .mode = COMMAND_EXEC,
2257 .help = "mask cortex_m3 interrupts",
2258 .usage = "['auto'|'on'|'off']",
2259 },
2260 {
2261 .name = "vector_catch",
2262 .handler = handle_cortex_m3_vector_catch_command,
2263 .mode = COMMAND_EXEC,
2264 .help = "configure hardware vectors to trigger debug entry",
2265 .usage = "['all'|'none'|('bus_err'|'chk_err'|...)*]",
2266 },
2267 {
2268 .name = "reset_config",
2269 .handler = handle_cortex_m3_reset_config_command,
2270 .mode = COMMAND_ANY,
2271 .help = "configure software reset handling",
2272 .usage = "['srst'|'sysresetreq'|'vectreset']",
2273 },
2274 COMMAND_REGISTRATION_DONE
2275 };
2276 static const struct command_registration cortex_m3_command_handlers[] = {
2277 {
2278 .chain = armv7m_command_handlers,
2279 },
2280 {
2281 .name = "cortex_m3",
2282 .mode = COMMAND_EXEC,
2283 .help = "Cortex-M3 command group",
2284 .usage = "",
2285 .chain = cortex_m3_exec_command_handlers,
2286 },
2287 COMMAND_REGISTRATION_DONE
2288 };
2289
2290 struct target_type cortexm3_target =
2291 {
2292 .name = "cortex_m3",
2293
2294 .poll = cortex_m3_poll,
2295 .arch_state = armv7m_arch_state,
2296
2297 .target_request_data = cortex_m3_target_request_data,
2298
2299 .halt = cortex_m3_halt,
2300 .resume = cortex_m3_resume,
2301 .step = cortex_m3_step,
2302
2303 .assert_reset = cortex_m3_assert_reset,
2304 .deassert_reset = cortex_m3_deassert_reset,
2305 .soft_reset_halt = cortex_m3_soft_reset_halt,
2306
2307 .get_gdb_reg_list = armv7m_get_gdb_reg_list,
2308
2309 .read_memory = cortex_m3_read_memory,
2310 .write_memory = cortex_m3_write_memory,
2311 .bulk_write_memory = cortex_m3_bulk_write_memory,
2312 .checksum_memory = armv7m_checksum_memory,
2313 .blank_check_memory = armv7m_blank_check_memory,
2314
2315 .run_algorithm = armv7m_run_algorithm,
2316 .start_algorithm = armv7m_start_algorithm,
2317 .wait_algorithm = armv7m_wait_algorithm,
2318
2319 .add_breakpoint = cortex_m3_add_breakpoint,
2320 .remove_breakpoint = cortex_m3_remove_breakpoint,
2321 .add_watchpoint = cortex_m3_add_watchpoint,
2322 .remove_watchpoint = cortex_m3_remove_watchpoint,
2323
2324 .commands = cortex_m3_command_handlers,
2325 .target_create = cortex_m3_target_create,
2326 .init_target = cortex_m3_init_target,
2327 .examine = cortex_m3_examine,
2328 };
