1 /* SPDX-License-Identifier: GPL-2.0-or-later */
2
3 /***************************************************************************
4 * Generic Xtensa target API for OpenOCD *
5 * Copyright (C) 2020-2022 Cadence Design Systems, Inc. *
6 * Copyright (C) 2016-2019 Espressif Systems Ltd. *
7 * Derived from esp108.c *
8 * Author: Angus Gratton gus@projectgus.com *
9 ***************************************************************************/
10
11 #ifdef HAVE_CONFIG_H
12 #include "config.h"
13 #endif
14
15 #include <stdlib.h>
16 #include <helper/time_support.h>
17 #include <helper/align.h>
18 #include <target/register.h>
19
20 #include "xtensa_chip.h"
21 #include "xtensa.h"
22
23 /* Swap 4-bit Xtensa opcodes and fields */
24 #define XT_NIBSWAP8(V) \
25 ((((V) & 0x0F) << 4) \
26 | (((V) & 0xF0) >> 4))
27
28 #define XT_NIBSWAP16(V) \
29 ((((V) & 0x000F) << 12) \
30 | (((V) & 0x00F0) << 4) \
31 | (((V) & 0x0F00) >> 4) \
32 | (((V) & 0xF000) >> 12))
33
34 #define XT_NIBSWAP24(V) \
35 ((((V) & 0x00000F) << 20) \
36 | (((V) & 0x0000F0) << 12) \
37 | (((V) & 0x000F00) << 4) \
38 | (((V) & 0x00F000) >> 4) \
39 | (((V) & 0x0F0000) >> 12) \
40 | (((V) & 0xF00000) >> 20))
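
/* Worked example (illustration only): XT_NIBSWAP24 reverses the nibble order of
 * a 24-bit value, e.g. XT_NIBSWAP24(0x002002) == 0x200200. The swapped opcodes
 * are what the big-endian branches of the _XT_INS_FORMAT_*() macros below
 * start from. */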
41
42 /* _XT_INS_FORMAT_*()
43 * Instruction formatting converted from little-endian inputs
44 * and shifted to the MSB-side of DIR for BE systems.
45 */
46 #define _XT_INS_FORMAT_RSR(X, OPCODE, SR, T) \
47 (XT_ISBE(X) ? (XT_NIBSWAP24(OPCODE) \
48 | (((T) & 0x0F) << 16) \
49 | (((SR) & 0xFF) << 8)) << 8 \
50 : (OPCODE) \
51 | (((SR) & 0xFF) << 8) \
52 | (((T) & 0x0F) << 4))
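
/* For illustration (little-endian case): _XT_INS_FORMAT_RSR(X, 0x030000, 0x68, 3)
 * evaluates to 0x030000 | (0x68 << 8) | (3 << 4) == 0x036830, i.e. the encoding
 * of "RSR a3, DDR" produced by XT_INS_RSR() below (DDR is SR 0x68 in
 * xtensa_regs[]). The big-endian branch nibble-swaps the opcode and shifts the
 * assembled 24-bit word into the upper three bytes of the DIR register. */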
53
54 #define _XT_INS_FORMAT_RRR(X, OPCODE, ST, R) \
55 (XT_ISBE(X) ? (XT_NIBSWAP24(OPCODE) \
56 | ((XT_NIBSWAP8((ST) & 0xFF)) << 12) \
57 | (((R) & 0x0F) << 8)) << 8 \
58 : (OPCODE) \
59 | (((ST) & 0xFF) << 4) \
60 | (((R) & 0x0F) << 12))
61
62 #define _XT_INS_FORMAT_RRRN(X, OPCODE, S, T, IMM4) \
63 (XT_ISBE(X) ? (XT_NIBSWAP16(OPCODE) \
64 | (((T) & 0x0F) << 8) \
65 | (((S) & 0x0F) << 4) \
66 | ((IMM4) & 0x0F)) << 16 \
67 : (OPCODE) \
68 | (((T) & 0x0F) << 4) \
69 | (((S) & 0x0F) << 8) \
70 | (((IMM4) & 0x0F) << 12))
71
72 #define _XT_INS_FORMAT_RRI8(X, OPCODE, R, S, T, IMM8) \
73 (XT_ISBE(X) ? (XT_NIBSWAP24(OPCODE) \
74 | (((T) & 0x0F) << 16) \
75 | (((S) & 0x0F) << 12) \
76 | (((R) & 0x0F) << 8) \
77 | ((IMM8) & 0xFF)) << 8 \
78 : (OPCODE) \
79 | (((IMM8) & 0xFF) << 16) \
80 | (((R) & 0x0F) << 12) \
81 | (((S) & 0x0F) << 8) \
82 | (((T) & 0x0F) << 4))
83
84 #define _XT_INS_FORMAT_RRI4(X, OPCODE, IMM4, R, S, T) \
85 (XT_ISBE(X) ? (XT_NIBSWAP24(OPCODE) \
86 | (((T) & 0x0F) << 16) \
87 | (((S) & 0x0F) << 12) \
88 | (((R) & 0x0F) << 8)) << 8 \
89 | ((IMM4) & 0x0F) \
90 : (OPCODE) \
91 | (((IMM4) & 0x0F) << 20) \
92 | (((R) & 0x0F) << 12) \
93 | (((S) & 0x0F) << 8) \
94 | (((T) & 0x0F) << 4))
95
96 /* Xtensa processor instruction opcodes
97 */
98 /* "Return From Debug Operation" to Normal */
99 #define XT_INS_RFDO(X) (XT_ISBE(X) ? 0x000e1f << 8 : 0xf1e000)
100 /* "Return From Debug and Dispatch" - allow sw debugging stuff to take over */
101 #define XT_INS_RFDD(X) (XT_ISBE(X) ? 0x010e1f << 8 : 0xf1e010)
102
103 /* Load to DDR register, increase addr register */
104 #define XT_INS_LDDR32P(X, S) (XT_ISBE(X) ? (0x0E0700 | ((S) << 12)) << 8 : (0x0070E0 | ((S) << 8)))
105 /* Store from DDR register, increase addr register */
106 #define XT_INS_SDDR32P(X, S) (XT_ISBE(X) ? (0x0F0700 | ((S) << 12)) << 8 : (0x0070F0 | ((S) << 8)))
107
108 /* Load 32-bit Indirect from A(S)+4*IMM8 to A(T) */
109 #define XT_INS_L32I(X, S, T, IMM8) _XT_INS_FORMAT_RRI8(X, 0x002002, 0, S, T, IMM8)
110 /* Load 16-bit Unsigned from A(S)+2*IMM8 to A(T) */
111 #define XT_INS_L16UI(X, S, T, IMM8) _XT_INS_FORMAT_RRI8(X, 0x001002, 0, S, T, IMM8)
112 /* Load 8-bit Unsigned from A(S)+IMM8 to A(T) */
113 #define XT_INS_L8UI(X, S, T, IMM8) _XT_INS_FORMAT_RRI8(X, 0x000002, 0, S, T, IMM8)
114
115 /* Store 32-bit Indirect to A(S)+4*IMM8 from A(T) */
116 #define XT_INS_S32I(X, S, T, IMM8) _XT_INS_FORMAT_RRI8(X, 0x006002, 0, S, T, IMM8)
117 /* Store 16-bit to A(S)+2*IMM8 from A(T) */
118 #define XT_INS_S16I(X, S, T, IMM8) _XT_INS_FORMAT_RRI8(X, 0x005002, 0, S, T, IMM8)
119 /* Store 8-bit to A(S)+IMM8 from A(T) */
120 #define XT_INS_S8I(X, S, T, IMM8) _XT_INS_FORMAT_RRI8(X, 0x004002, 0, S, T, IMM8)
121
122 /* Cache Instructions */
123 #define XT_INS_IHI(X, S, IMM8) _XT_INS_FORMAT_RRI8(X, 0x0070E2, 0, S, 0, IMM8)
124 #define XT_INS_DHWBI(X, S, IMM8) _XT_INS_FORMAT_RRI8(X, 0x007052, 0, S, 0, IMM8)
125 #define XT_INS_DHWB(X, S, IMM8) _XT_INS_FORMAT_RRI8(X, 0x007042, 0, S, 0, IMM8)
126 #define XT_INS_ISYNC(X) (XT_ISBE(X) ? 0x000200 << 8 : 0x002000)
127
128 /* Control Instructions */
129 #define XT_INS_JX(X, S) (XT_ISBE(X) ? (0x050000 | ((S) << 12)) : (0x0000a0 | ((S) << 8)))
130 #define XT_INS_CALL0(X, IMM18) (XT_ISBE(X) ? (0x500000 | ((IMM18) & 0x3ffff)) : (0x000005 | (((IMM18) & 0x3ffff) << 6)))
131
132 /* Read Special Register */
133 #define XT_INS_RSR(X, SR, T) _XT_INS_FORMAT_RSR(X, 0x030000, SR, T)
134 /* Write Special Register */
135 #define XT_INS_WSR(X, SR, T) _XT_INS_FORMAT_RSR(X, 0x130000, SR, T)
136 /* Swap Special Register */
137 #define XT_INS_XSR(X, SR, T) _XT_INS_FORMAT_RSR(X, 0x610000, SR, T)
138
139 /* Rotate Window by (-8..7) */
140 #define XT_INS_ROTW(X, N) (XT_ISBE(X) ? ((0x000804) | (((N) & 15) << 16)) << 8 : ((0x408000) | (((N) & 15) << 4)))
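
/* Note: the ROTW immediate is added to WINDOWBASE, so each step of N moves the
 * visible a0..a15 window by 4 AR registers; XT_INS_ROTW(X, 4), as used in the
 * AR save/restore loops below, advances the window by 16 ARs. */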
141
142 /* Read User Register */
143 #define XT_INS_RUR(X, UR, T) _XT_INS_FORMAT_RRR(X, 0xE30000, UR, T)
144 /* Write User Register */
145 #define XT_INS_WUR(X, UR, T) _XT_INS_FORMAT_RSR(X, 0xF30000, UR, T)
146
147 /* Read Floating-Point Register */
148 #define XT_INS_RFR(X, FR, T) _XT_INS_FORMAT_RRR(X, 0xFA0000, ((FR << 4) | 0x4), T)
149 /* Write Floating-Point Register */
150 #define XT_INS_WFR(X, FR, T) _XT_INS_FORMAT_RRR(X, 0xFA0000, ((T << 4) | 0x5), FR)
151
152 #define XT_INS_L32E(X, R, S, T) _XT_INS_FORMAT_RRI4(X, 0x090000, 0, R, S, T)
153 #define XT_INS_S32E(X, R, S, T) _XT_INS_FORMAT_RRI4(X, 0x490000, 0, R, S, T)
154 #define XT_INS_L32E_S32E_MASK(X) (XT_ISBE(X) ? 0xF000FF << 8 : 0xFF000F)
155
156 #define XT_INS_RFWO(X) (XT_ISBE(X) ? 0x004300 << 8 : 0x003400)
157 #define XT_INS_RFWU(X) (XT_ISBE(X) ? 0x005300 << 8 : 0x003500)
158 #define XT_INS_RFWO_RFWU_MASK(X) (XT_ISBE(X) ? 0xFFFFFF << 8 : 0xFFFFFF)
159
160 #define XT_WATCHPOINTS_NUM_MAX 2
161
162 /* Special register number macros for DDR, PS, WB, A3, and A4 registers.
163 * These get used a lot, so shortcuts are useful.
164 */
165 #define XT_SR_DDR (xtensa_regs[XT_REG_IDX_DDR].reg_num)
166 #define XT_SR_PS (xtensa_regs[XT_REG_IDX_PS].reg_num)
167 #define XT_SR_WB (xtensa_regs[XT_REG_IDX_WINDOWBASE].reg_num)
168 #define XT_REG_A3 (xtensa_regs[XT_REG_IDX_AR3].reg_num)
169 #define XT_REG_A4 (xtensa_regs[XT_REG_IDX_AR4].reg_num)
170
171 #define XT_PS_REG_NUM_BASE (0xc0U) /* (EPS2 - 2), for adding DBGLEVEL */
172 #define XT_PC_REG_NUM_BASE (0xb0U) /* (EPC1 - 1), for adding DBGLEVEL */
173 #define XT_PC_REG_NUM_VIRTUAL (0xffU) /* Marker for computing PC (EPC[DBGLEVEL]) */
174 #define XT_PC_DBREG_NUM_BASE (0x20U) /* External (i.e., GDB) access */
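
/* Example of how the bases are used (assuming a debug interrupt level of 6, as
 * on many LX configurations): the PC at the debug exception is accessed as
 * EPC6 = XT_PC_REG_NUM_BASE + 6 = 0xB6 and the corresponding PS as
 * EPS6 = XT_PS_REG_NUM_BASE + 6 = 0xC6. XT_PC_REG_NUM_VIRTUAL is only a
 * placeholder that gets resolved to such an EPC number at run time. */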
175
176 #define XT_SW_BREAKPOINTS_MAX_NUM 32
177 #define XT_HW_IBREAK_MAX_NUM 2
178 #define XT_HW_DBREAK_MAX_NUM 2
179
180 struct xtensa_reg_desc xtensa_regs[XT_NUM_REGS] = {
181 XT_MK_REG_DESC("pc", XT_PC_REG_NUM_VIRTUAL, XT_REG_SPECIAL, 0),
182 XT_MK_REG_DESC("ar0", 0x00, XT_REG_GENERAL, 0),
183 XT_MK_REG_DESC("ar1", 0x01, XT_REG_GENERAL, 0),
184 XT_MK_REG_DESC("ar2", 0x02, XT_REG_GENERAL, 0),
185 XT_MK_REG_DESC("ar3", 0x03, XT_REG_GENERAL, 0),
186 XT_MK_REG_DESC("ar4", 0x04, XT_REG_GENERAL, 0),
187 XT_MK_REG_DESC("ar5", 0x05, XT_REG_GENERAL, 0),
188 XT_MK_REG_DESC("ar6", 0x06, XT_REG_GENERAL, 0),
189 XT_MK_REG_DESC("ar7", 0x07, XT_REG_GENERAL, 0),
190 XT_MK_REG_DESC("ar8", 0x08, XT_REG_GENERAL, 0),
191 XT_MK_REG_DESC("ar9", 0x09, XT_REG_GENERAL, 0),
192 XT_MK_REG_DESC("ar10", 0x0A, XT_REG_GENERAL, 0),
193 XT_MK_REG_DESC("ar11", 0x0B, XT_REG_GENERAL, 0),
194 XT_MK_REG_DESC("ar12", 0x0C, XT_REG_GENERAL, 0),
195 XT_MK_REG_DESC("ar13", 0x0D, XT_REG_GENERAL, 0),
196 XT_MK_REG_DESC("ar14", 0x0E, XT_REG_GENERAL, 0),
197 XT_MK_REG_DESC("ar15", 0x0F, XT_REG_GENERAL, 0),
198 XT_MK_REG_DESC("ar16", 0x10, XT_REG_GENERAL, 0),
199 XT_MK_REG_DESC("ar17", 0x11, XT_REG_GENERAL, 0),
200 XT_MK_REG_DESC("ar18", 0x12, XT_REG_GENERAL, 0),
201 XT_MK_REG_DESC("ar19", 0x13, XT_REG_GENERAL, 0),
202 XT_MK_REG_DESC("ar20", 0x14, XT_REG_GENERAL, 0),
203 XT_MK_REG_DESC("ar21", 0x15, XT_REG_GENERAL, 0),
204 XT_MK_REG_DESC("ar22", 0x16, XT_REG_GENERAL, 0),
205 XT_MK_REG_DESC("ar23", 0x17, XT_REG_GENERAL, 0),
206 XT_MK_REG_DESC("ar24", 0x18, XT_REG_GENERAL, 0),
207 XT_MK_REG_DESC("ar25", 0x19, XT_REG_GENERAL, 0),
208 XT_MK_REG_DESC("ar26", 0x1A, XT_REG_GENERAL, 0),
209 XT_MK_REG_DESC("ar27", 0x1B, XT_REG_GENERAL, 0),
210 XT_MK_REG_DESC("ar28", 0x1C, XT_REG_GENERAL, 0),
211 XT_MK_REG_DESC("ar29", 0x1D, XT_REG_GENERAL, 0),
212 XT_MK_REG_DESC("ar30", 0x1E, XT_REG_GENERAL, 0),
213 XT_MK_REG_DESC("ar31", 0x1F, XT_REG_GENERAL, 0),
214 XT_MK_REG_DESC("ar32", 0x20, XT_REG_GENERAL, 0),
215 XT_MK_REG_DESC("ar33", 0x21, XT_REG_GENERAL, 0),
216 XT_MK_REG_DESC("ar34", 0x22, XT_REG_GENERAL, 0),
217 XT_MK_REG_DESC("ar35", 0x23, XT_REG_GENERAL, 0),
218 XT_MK_REG_DESC("ar36", 0x24, XT_REG_GENERAL, 0),
219 XT_MK_REG_DESC("ar37", 0x25, XT_REG_GENERAL, 0),
220 XT_MK_REG_DESC("ar38", 0x26, XT_REG_GENERAL, 0),
221 XT_MK_REG_DESC("ar39", 0x27, XT_REG_GENERAL, 0),
222 XT_MK_REG_DESC("ar40", 0x28, XT_REG_GENERAL, 0),
223 XT_MK_REG_DESC("ar41", 0x29, XT_REG_GENERAL, 0),
224 XT_MK_REG_DESC("ar42", 0x2A, XT_REG_GENERAL, 0),
225 XT_MK_REG_DESC("ar43", 0x2B, XT_REG_GENERAL, 0),
226 XT_MK_REG_DESC("ar44", 0x2C, XT_REG_GENERAL, 0),
227 XT_MK_REG_DESC("ar45", 0x2D, XT_REG_GENERAL, 0),
228 XT_MK_REG_DESC("ar46", 0x2E, XT_REG_GENERAL, 0),
229 XT_MK_REG_DESC("ar47", 0x2F, XT_REG_GENERAL, 0),
230 XT_MK_REG_DESC("ar48", 0x30, XT_REG_GENERAL, 0),
231 XT_MK_REG_DESC("ar49", 0x31, XT_REG_GENERAL, 0),
232 XT_MK_REG_DESC("ar50", 0x32, XT_REG_GENERAL, 0),
233 XT_MK_REG_DESC("ar51", 0x33, XT_REG_GENERAL, 0),
234 XT_MK_REG_DESC("ar52", 0x34, XT_REG_GENERAL, 0),
235 XT_MK_REG_DESC("ar53", 0x35, XT_REG_GENERAL, 0),
236 XT_MK_REG_DESC("ar54", 0x36, XT_REG_GENERAL, 0),
237 XT_MK_REG_DESC("ar55", 0x37, XT_REG_GENERAL, 0),
238 XT_MK_REG_DESC("ar56", 0x38, XT_REG_GENERAL, 0),
239 XT_MK_REG_DESC("ar57", 0x39, XT_REG_GENERAL, 0),
240 XT_MK_REG_DESC("ar58", 0x3A, XT_REG_GENERAL, 0),
241 XT_MK_REG_DESC("ar59", 0x3B, XT_REG_GENERAL, 0),
242 XT_MK_REG_DESC("ar60", 0x3C, XT_REG_GENERAL, 0),
243 XT_MK_REG_DESC("ar61", 0x3D, XT_REG_GENERAL, 0),
244 XT_MK_REG_DESC("ar62", 0x3E, XT_REG_GENERAL, 0),
245 XT_MK_REG_DESC("ar63", 0x3F, XT_REG_GENERAL, 0),
246 XT_MK_REG_DESC("windowbase", 0x48, XT_REG_SPECIAL, 0),
247 XT_MK_REG_DESC("windowstart", 0x49, XT_REG_SPECIAL, 0),
248 XT_MK_REG_DESC("ps", 0xE6, XT_REG_SPECIAL, 0), /* PS (not mapped through EPS[]) */
249 XT_MK_REG_DESC("ibreakenable", 0x60, XT_REG_SPECIAL, 0),
250 XT_MK_REG_DESC("ddr", 0x68, XT_REG_DEBUG, XT_REGF_NOREAD),
251 XT_MK_REG_DESC("ibreaka0", 0x80, XT_REG_SPECIAL, 0),
252 XT_MK_REG_DESC("ibreaka1", 0x81, XT_REG_SPECIAL, 0),
253 XT_MK_REG_DESC("dbreaka0", 0x90, XT_REG_SPECIAL, 0),
254 XT_MK_REG_DESC("dbreaka1", 0x91, XT_REG_SPECIAL, 0),
255 XT_MK_REG_DESC("dbreakc0", 0xA0, XT_REG_SPECIAL, 0),
256 XT_MK_REG_DESC("dbreakc1", 0xA1, XT_REG_SPECIAL, 0),
257 XT_MK_REG_DESC("cpenable", 0xE0, XT_REG_SPECIAL, 0),
258 XT_MK_REG_DESC("exccause", 0xE8, XT_REG_SPECIAL, 0),
259 XT_MK_REG_DESC("debugcause", 0xE9, XT_REG_SPECIAL, 0),
260 XT_MK_REG_DESC("icount", 0xEC, XT_REG_SPECIAL, 0),
261 XT_MK_REG_DESC("icountlevel", 0xED, XT_REG_SPECIAL, 0),
262
263 /* WARNING: For these registers, regnum points to the
264 * index of the corresponding ARx registers, NOT to
265 * the processor register number! */
266 XT_MK_REG_DESC("a0", XT_REG_IDX_AR0, XT_REG_RELGEN, 0),
267 XT_MK_REG_DESC("a1", XT_REG_IDX_AR1, XT_REG_RELGEN, 0),
268 XT_MK_REG_DESC("a2", XT_REG_IDX_AR2, XT_REG_RELGEN, 0),
269 XT_MK_REG_DESC("a3", XT_REG_IDX_AR3, XT_REG_RELGEN, 0),
270 XT_MK_REG_DESC("a4", XT_REG_IDX_AR4, XT_REG_RELGEN, 0),
271 XT_MK_REG_DESC("a5", XT_REG_IDX_AR5, XT_REG_RELGEN, 0),
272 XT_MK_REG_DESC("a6", XT_REG_IDX_AR6, XT_REG_RELGEN, 0),
273 XT_MK_REG_DESC("a7", XT_REG_IDX_AR7, XT_REG_RELGEN, 0),
274 XT_MK_REG_DESC("a8", XT_REG_IDX_AR8, XT_REG_RELGEN, 0),
275 XT_MK_REG_DESC("a9", XT_REG_IDX_AR9, XT_REG_RELGEN, 0),
276 XT_MK_REG_DESC("a10", XT_REG_IDX_AR10, XT_REG_RELGEN, 0),
277 XT_MK_REG_DESC("a11", XT_REG_IDX_AR11, XT_REG_RELGEN, 0),
278 XT_MK_REG_DESC("a12", XT_REG_IDX_AR12, XT_REG_RELGEN, 0),
279 XT_MK_REG_DESC("a13", XT_REG_IDX_AR13, XT_REG_RELGEN, 0),
280 XT_MK_REG_DESC("a14", XT_REG_IDX_AR14, XT_REG_RELGEN, 0),
281 XT_MK_REG_DESC("a15", XT_REG_IDX_AR15, XT_REG_RELGEN, 0),
282 };
283
284 /**
285 * Types of memory used at xtensa target
286 */
287 enum xtensa_mem_region_type {
288 XTENSA_MEM_REG_IROM = 0x0,
289 XTENSA_MEM_REG_IRAM,
290 XTENSA_MEM_REG_DROM,
291 XTENSA_MEM_REG_DRAM,
292 XTENSA_MEM_REG_SRAM,
293 XTENSA_MEM_REG_SROM,
294 XTENSA_MEM_REGS_NUM
295 };
296
297 /* Register definition as union for list allocation */
298 union xtensa_reg_val_u {
299 xtensa_reg_val_t val;
300 uint8_t buf[4];
301 };
302
303 const struct xtensa_keyval_info_s xt_qerr[XT_QERR_NUM] = {
304 { .chrval = "E00", .intval = ERROR_FAIL },
305 { .chrval = "E01", .intval = ERROR_FAIL },
306 { .chrval = "E02", .intval = ERROR_COMMAND_ARGUMENT_INVALID },
307 { .chrval = "E03", .intval = ERROR_FAIL },
308 };
309
310 /* Set to true for extra debug logging */
311 static const bool xtensa_extra_debug_log;
312
313 /**
314 * Gets a config for the specific mem type
315 */
316 static inline const struct xtensa_local_mem_config *xtensa_get_mem_config(
317 struct xtensa *xtensa,
318 enum xtensa_mem_region_type type)
319 {
320 switch (type) {
321 case XTENSA_MEM_REG_IROM:
322 return &xtensa->core_config->irom;
323 case XTENSA_MEM_REG_IRAM:
324 return &xtensa->core_config->iram;
325 case XTENSA_MEM_REG_DROM:
326 return &xtensa->core_config->drom;
327 case XTENSA_MEM_REG_DRAM:
328 return &xtensa->core_config->dram;
329 case XTENSA_MEM_REG_SRAM:
330 return &xtensa->core_config->sram;
331 case XTENSA_MEM_REG_SROM:
332 return &xtensa->core_config->srom;
333 default:
334 return NULL;
335 }
336 }
337
338 /**
339 * Extracts an exact xtensa_local_mem_region_config from xtensa_local_mem_config
340 * for a given address
341 * Returns NULL if nothing found
342 */
343 static inline const struct xtensa_local_mem_region_config *xtensa_memory_region_find(
344 const struct xtensa_local_mem_config *mem,
345 target_addr_t address)
346 {
347 for (unsigned int i = 0; i < mem->count; i++) {
348 const struct xtensa_local_mem_region_config *region = &mem->regions[i];
349 if (address >= region->base && address < (region->base + region->size))
350 return region;
351 }
352 return NULL;
353 }
354
355 /**
356 * Returns a corresponding xtensa_local_mem_region_config from the xtensa target
357 * for a given address
358 * Returns NULL if nothing found
359 */
360 static inline const struct xtensa_local_mem_region_config *xtensa_target_memory_region_find(
361 struct xtensa *xtensa,
362 target_addr_t address)
363 {
364 const struct xtensa_local_mem_region_config *result;
365 const struct xtensa_local_mem_config *mcgf;
366 for (unsigned int mtype = 0; mtype < XTENSA_MEM_REGS_NUM; mtype++) {
367 mcgf = xtensa_get_mem_config(xtensa, mtype);
368 result = xtensa_memory_region_find(mcgf, address);
369 if (result)
370 return result;
371 }
372 return NULL;
373 }
374
375 static inline bool xtensa_is_cacheable(const struct xtensa_cache_config *cache,
376 const struct xtensa_local_mem_config *mem,
377 target_addr_t address)
378 {
379 if (!cache->size)
380 return false;
381 return xtensa_memory_region_find(mem, address);
382 }
383
384 static inline bool xtensa_is_icacheable(struct xtensa *xtensa, target_addr_t address)
385 {
386 return xtensa_is_cacheable(&xtensa->core_config->icache, &xtensa->core_config->iram, address) ||
387 xtensa_is_cacheable(&xtensa->core_config->icache, &xtensa->core_config->irom, address) ||
388 xtensa_is_cacheable(&xtensa->core_config->icache, &xtensa->core_config->sram, address) ||
389 xtensa_is_cacheable(&xtensa->core_config->icache, &xtensa->core_config->srom, address);
390 }
391
392 static inline bool xtensa_is_dcacheable(struct xtensa *xtensa, target_addr_t address)
393 {
394 return xtensa_is_cacheable(&xtensa->core_config->dcache, &xtensa->core_config->dram, address) ||
395 xtensa_is_cacheable(&xtensa->core_config->dcache, &xtensa->core_config->drom, address) ||
396 xtensa_is_cacheable(&xtensa->core_config->dcache, &xtensa->core_config->sram, address) ||
397 xtensa_is_cacheable(&xtensa->core_config->dcache, &xtensa->core_config->srom, address);
398 }
399
400 static int xtensa_core_reg_get(struct reg *reg)
401 {
402 /* We don't need this because we read all registers on halt anyway. */
403 struct xtensa *xtensa = (struct xtensa *)reg->arch_info;
404 struct target *target = xtensa->target;
405
406 if (target->state != TARGET_HALTED)
407 return ERROR_TARGET_NOT_HALTED;
408 if (!reg->exist) {
409 if (strncmp(reg->name, "?0x", 3) == 0) {
410 unsigned int regnum = strtoul(reg->name + 1, 0, 0);
411 LOG_WARNING("Read unknown register 0x%04x ignored", regnum);
412 return ERROR_OK;
413 }
414 return ERROR_COMMAND_ARGUMENT_INVALID;
415 }
416 return ERROR_OK;
417 }
418
419 static int xtensa_core_reg_set(struct reg *reg, uint8_t *buf)
420 {
421 struct xtensa *xtensa = (struct xtensa *)reg->arch_info;
422 struct target *target = xtensa->target;
423
424 assert(reg->size <= 64 && "only registers up to 64 bits are supported!");
425 if (target->state != TARGET_HALTED)
426 return ERROR_TARGET_NOT_HALTED;
427
428 if (!reg->exist) {
429 if (strncmp(reg->name, "?0x", 3) == 0) {
430 unsigned int regnum = strtoul(reg->name + 1, 0, 0);
431 LOG_WARNING("Write unknown register 0x%04x ignored", regnum);
432 return ERROR_OK;
433 }
434 return ERROR_COMMAND_ARGUMENT_INVALID;
435 }
436
437 buf_cpy(buf, reg->value, reg->size);
438
439 if (xtensa->core_config->windowed) {
440 /* If the user updates a potential scratch register, track for conflicts */
441 for (enum xtensa_ar_scratch_set_e s = 0; s < XT_AR_SCRATCH_NUM; s++) {
442 if (strcmp(reg->name, xtensa->scratch_ars[s].chrval) == 0) {
443 LOG_DEBUG("Scratch reg %s [0x%08" PRIx32 "] set from gdb", reg->name,
444 buf_get_u32(reg->value, 0, 32));
445 LOG_DEBUG("scratch_ars mapping: a3/%s, a4/%s",
446 xtensa->scratch_ars[XT_AR_SCRATCH_AR3].chrval,
447 xtensa->scratch_ars[XT_AR_SCRATCH_AR4].chrval);
448 xtensa->scratch_ars[s].intval = true;
449 break;
450 }
451 }
452 }
453 reg->dirty = true;
454 reg->valid = true;
455
456 return ERROR_OK;
457 }
458
459 static const struct reg_arch_type xtensa_reg_type = {
460 .get = xtensa_core_reg_get,
461 .set = xtensa_core_reg_set,
462 };
463
464 /* Convert a register index that's indexed relative to windowbase, to the real address. */
465 static enum xtensa_reg_id xtensa_windowbase_offset_to_canonical(struct xtensa *xtensa,
466 enum xtensa_reg_id reg_idx,
467 int windowbase)
468 {
469 unsigned int idx;
470 if (reg_idx >= XT_REG_IDX_AR0 && reg_idx <= XT_REG_IDX_ARLAST) {
471 idx = reg_idx - XT_REG_IDX_AR0;
472 } else if (reg_idx >= XT_REG_IDX_A0 && reg_idx <= XT_REG_IDX_A15) {
473 idx = reg_idx - XT_REG_IDX_A0;
474 } else {
475 LOG_ERROR("Error: can't convert register %d to non-windowbased register!", reg_idx);
476 return -1;
477 }
478 return ((idx + windowbase * 4) & (xtensa->core_config->aregs_num - 1)) + XT_REG_IDX_AR0;
479 }
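
/* Worked example (assuming a 64-entry AR file): with WINDOWBASE == 2, windowed
 * register a3 maps to physical AR(3 + 2 * 4), i.e. ar11. The companion
 * xtensa_canonical_to_windowbase_offset() below applies the same rotation in
 * the opposite direction. */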
480
481 static enum xtensa_reg_id xtensa_canonical_to_windowbase_offset(struct xtensa *xtensa,
482 enum xtensa_reg_id reg_idx,
483 int windowbase)
484 {
485 return xtensa_windowbase_offset_to_canonical(xtensa, reg_idx, -windowbase);
486 }
487
488 static void xtensa_mark_register_dirty(struct xtensa *xtensa, enum xtensa_reg_id reg_idx)
489 {
490 struct reg *reg_list = xtensa->core_cache->reg_list;
491 reg_list[reg_idx].dirty = true;
492 }
493
494 static void xtensa_queue_exec_ins(struct xtensa *xtensa, uint32_t ins)
495 {
496 xtensa_queue_dbg_reg_write(xtensa, NARADR_DIR0EXEC, ins);
497 }
498
499 static void xtensa_queue_exec_ins_wide(struct xtensa *xtensa, uint8_t *ops, uint8_t oplen)
500 {
501 if ((oplen > 0) && (oplen <= 64)) {
502 uint32_t opsw[8] = { 0, 0, 0, 0, 0, 0, 0, 0 }; /* 8 DIRx regs: max width 64B */
503 uint8_t oplenw = (oplen + 3) / 4;
504 if (xtensa->target->endianness == TARGET_BIG_ENDIAN)
505 buf_bswap32((uint8_t *)opsw, ops, oplenw * 4);
506 else
507 memcpy(opsw, ops, oplen);
508 for (int32_t i = oplenw - 1; i > 0; i--)
509 xtensa_queue_dbg_reg_write(xtensa, NARADR_DIR0 + i, opsw[i]);
510 /* Write DIR0EXEC last */
511 xtensa_queue_dbg_reg_write(xtensa, NARADR_DIR0EXEC, opsw[0]);
512 }
513 }
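
/* Sketch of the DIR usage: a plain 24-bit opcode goes through
 * xtensa_queue_exec_ins() and touches only DIR0EXEC, while e.g. an 8-byte wide
 * instruction yields oplenw == 2, so DIR1 is queued first and DIR0EXEC last;
 * the DIR0EXEC write is what triggers execution in the debug module. */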
514
515 static int xtensa_queue_pwr_reg_write(struct xtensa *xtensa, unsigned int reg, uint32_t data)
516 {
517 struct xtensa_debug_module *dm = &xtensa->dbg_mod;
518 return dm->pwr_ops->queue_reg_write(dm, reg, data);
519 }
520
521 /* NOTE: Assumes A3 has already been saved */
522 int xtensa_window_state_save(struct target *target, uint32_t *woe)
523 {
524 struct xtensa *xtensa = target_to_xtensa(target);
525 int woe_dis;
526 uint8_t woe_buf[4];
527
528 if (xtensa->core_config->windowed) {
529 /* Save PS (LX) and disable window overflow exceptions prior to AR save */
530 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_PS, XT_REG_A3));
531 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A3));
532 xtensa_queue_dbg_reg_read(xtensa, NARADR_DDR, woe_buf);
533 int res = jtag_execute_queue();
534 if (res != ERROR_OK) {
535 LOG_ERROR("Failed to read PS (%d)!", res);
536 return res;
537 }
538 xtensa_core_status_check(target);
539 *woe = buf_get_u32(woe_buf, 0, 32);
540 woe_dis = *woe & ~XT_PS_WOE_MSK;
541 LOG_DEBUG("Clearing PS.WOE (0x%08" PRIx32 " -> 0x%08" PRIx32 ")", *woe, woe_dis);
542 xtensa_queue_dbg_reg_write(xtensa, NARADR_DDR, woe_dis);
543 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
544 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_PS, XT_REG_A3));
545 }
546 return ERROR_OK;
547 }
548
549 /* NOTE: Assumes A3 has already been saved */
550 void xtensa_window_state_restore(struct target *target, uint32_t woe)
551 {
552 struct xtensa *xtensa = target_to_xtensa(target);
553 if (xtensa->core_config->windowed) {
554 /* Restore window overflow exception state */
555 xtensa_queue_dbg_reg_write(xtensa, NARADR_DDR, woe);
556 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
557 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_PS, XT_REG_A3));
558 LOG_DEBUG("Restored PS.WOE (0x%08" PRIx32 ")", woe);
559 }
560 }
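
/* Typical call pattern (a sketch; mirrors xtensa_write_dirty_registers() and
 * xtensa_fetch_all_regs() below): the caller saves A3 first, then wraps any AR
 * window manipulation in a save/restore pair:
 *
 *     uint32_t woe;
 *     xtensa_window_state_save(target, &woe);    // clears PS.WOE
 *     ...queue AR accesses / XT_INS_ROTW(xtensa, 4)...
 *     xtensa_window_state_restore(target, woe);  // restores PS.WOE
 */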
561
562 static bool xtensa_reg_is_readable(int flags, int cpenable)
563 {
564 if (flags & XT_REGF_NOREAD)
565 return false;
566 if ((flags & XT_REGF_COPROC0) && (cpenable & BIT(0)) == 0)
567 return false;
568 return true;
569 }
570
571 static bool xtensa_scratch_regs_fixup(struct xtensa *xtensa, struct reg *reg_list, int i, int j, int a_idx, int ar_idx)
572 {
573 int a_name = (a_idx == XT_AR_SCRATCH_A3) ? 3 : 4;
574 if (xtensa->scratch_ars[a_idx].intval && !xtensa->scratch_ars[ar_idx].intval) {
575 LOG_DEBUG("AR conflict: a%d -> ar%d", a_name, j - XT_REG_IDX_AR0);
576 memcpy(reg_list[j].value, reg_list[i].value, sizeof(xtensa_reg_val_t));
577 } else {
578 LOG_DEBUG("AR conflict: ar%d -> a%d", j - XT_REG_IDX_AR0, a_name);
579 memcpy(reg_list[i].value, reg_list[j].value, sizeof(xtensa_reg_val_t));
580 }
581 return xtensa->scratch_ars[a_idx].intval && xtensa->scratch_ars[ar_idx].intval;
582 }
583
584 static int xtensa_write_dirty_registers(struct target *target)
585 {
586 struct xtensa *xtensa = target_to_xtensa(target);
587 int res;
588 xtensa_reg_val_t regval, windowbase = 0;
589 bool scratch_reg_dirty = false, delay_cpenable = false;
590 struct reg *reg_list = xtensa->core_cache->reg_list;
591 unsigned int reg_list_size = xtensa->core_cache->num_regs;
592 bool preserve_a3 = false;
593 uint8_t a3_buf[4];
594 xtensa_reg_val_t a3 = 0, woe;
595
596 LOG_TARGET_DEBUG(target, "start");
597
598 /* We need to write the dirty registers in the cache list back to the processor.
599 * Start by writing the SFR/user registers. */
600 for (unsigned int i = 0; i < reg_list_size; i++) {
601 struct xtensa_reg_desc *rlist = (i < XT_NUM_REGS) ? xtensa_regs : xtensa->optregs;
602 unsigned int ridx = (i < XT_NUM_REGS) ? i : i - XT_NUM_REGS;
603 if (reg_list[i].dirty) {
604 if (rlist[ridx].type == XT_REG_SPECIAL ||
605 rlist[ridx].type == XT_REG_USER ||
606 rlist[ridx].type == XT_REG_FR) {
607 scratch_reg_dirty = true;
608 if (i == XT_REG_IDX_CPENABLE) {
609 delay_cpenable = true;
610 continue;
611 }
612 regval = xtensa_reg_get(target, i);
613 LOG_TARGET_DEBUG(target, "Writing back reg %s (%d) val %08" PRIX32,
614 reg_list[i].name,
615 rlist[ridx].reg_num,
616 regval);
617 xtensa_queue_dbg_reg_write(xtensa, NARADR_DDR, regval);
618 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
619 if (reg_list[i].exist) {
620 unsigned int reg_num = rlist[ridx].reg_num;
621 if (rlist[ridx].type == XT_REG_USER) {
622 xtensa_queue_exec_ins(xtensa, XT_INS_WUR(xtensa, reg_num, XT_REG_A3));
623 } else if (rlist[ridx].type == XT_REG_FR) {
624 xtensa_queue_exec_ins(xtensa, XT_INS_WFR(xtensa, reg_num, XT_REG_A3));
625 } else { /* SFR */
626 if (reg_num == XT_PC_REG_NUM_VIRTUAL)
627 /* The register number of PC for the debug interrupt
628 * depends on NDEBUGLEVEL */
629 reg_num =
630 (XT_PC_REG_NUM_BASE +
631 xtensa->core_config->debug.irq_level);
632 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, reg_num, XT_REG_A3));
633 }
634 }
635 reg_list[i].dirty = false;
636 }
637 }
638 }
639 if (scratch_reg_dirty)
640 xtensa_mark_register_dirty(xtensa, XT_REG_IDX_A3);
641 if (delay_cpenable) {
642 regval = xtensa_reg_get(target, XT_REG_IDX_CPENABLE);
643 LOG_TARGET_DEBUG(target, "Writing back reg cpenable (224) val %08" PRIX32, regval);
644 xtensa_queue_dbg_reg_write(xtensa, NARADR_DDR, regval);
645 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
646 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa,
647 xtensa_regs[XT_REG_IDX_CPENABLE].reg_num,
648 XT_REG_A3));
649 reg_list[XT_REG_IDX_CPENABLE].dirty = false;
650 }
651
652 preserve_a3 = (xtensa->core_config->windowed);
653 if (preserve_a3) {
654 /* Save (windowed) A3 for scratch use */
655 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A3));
656 xtensa_queue_dbg_reg_read(xtensa, NARADR_DDR, a3_buf);
657 res = jtag_execute_queue();
658 if (res != ERROR_OK)
659 return res;
660 xtensa_core_status_check(target);
661 a3 = buf_get_u32(a3_buf, 0, 32);
662 }
663
664 if (xtensa->core_config->windowed) {
665 res = xtensa_window_state_save(target, &woe);
666 if (res != ERROR_OK)
667 return res;
668 /* Grab the windowbase, we need it. */
669 windowbase = xtensa_reg_get(target, XT_REG_IDX_WINDOWBASE);
670 /* Check if there are mismatches between the ARx and corresponding Ax registers.
671 * When the user sets a register on a windowed config, xt-gdb may set the ARx
672 * register directly. Thus we take ARx as priority over Ax if both are dirty
673 * and it's unclear if the user set one over the other explicitly.
674 */
675 for (unsigned int i = XT_REG_IDX_A0; i <= XT_REG_IDX_A15; i++) {
676 unsigned int j = xtensa_windowbase_offset_to_canonical(xtensa, i, windowbase);
677 if (reg_list[i].dirty && reg_list[j].dirty) {
678 if (memcmp(reg_list[i].value, reg_list[j].value, sizeof(xtensa_reg_val_t)) != 0) {
679 bool show_warning = true;
680 if (i == XT_REG_IDX_A3)
681 show_warning = xtensa_scratch_regs_fixup(xtensa,
682 reg_list, i, j, XT_AR_SCRATCH_A3, XT_AR_SCRATCH_AR3);
683 else if (i == XT_REG_IDX_A4)
684 show_warning = xtensa_scratch_regs_fixup(xtensa,
685 reg_list, i, j, XT_AR_SCRATCH_A4, XT_AR_SCRATCH_AR4);
686 if (show_warning)
687 LOG_WARNING(
688 "Warning: Both A%d [0x%08" PRIx32
689 "] as well as its underlying physical register "
690 "(AR%d) [0x%08" PRIx32 "] are dirty and differ in value",
691 i - XT_REG_IDX_A0,
692 buf_get_u32(reg_list[i].value, 0, 32),
693 j - XT_REG_IDX_AR0,
694 buf_get_u32(reg_list[j].value, 0, 32));
695 }
696 }
697 }
698 }
699
700 /* Write A0-A15. */
701 for (unsigned int i = 0; i < 16; i++) {
702 if (reg_list[XT_REG_IDX_A0 + i].dirty) {
703 regval = xtensa_reg_get(target, XT_REG_IDX_A0 + i);
704 LOG_TARGET_DEBUG(target, "Writing back reg %s value %08" PRIX32 ", num =%i",
705 xtensa_regs[XT_REG_IDX_A0 + i].name,
706 regval,
707 xtensa_regs[XT_REG_IDX_A0 + i].reg_num);
708 xtensa_queue_dbg_reg_write(xtensa, NARADR_DDR, regval);
709 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, i));
710 reg_list[XT_REG_IDX_A0 + i].dirty = false;
711 if (i == 3) {
712 /* Avoid stomping A3 during restore at end of function */
713 a3 = regval;
714 }
715 }
716 }
717
718 if (xtensa->core_config->windowed) {
719 /* Now write AR registers */
720 for (unsigned int j = 0; j < XT_REG_IDX_ARLAST; j += 16) {
721 /* Write the 16 registers we can see */
722 for (unsigned int i = 0; i < 16; i++) {
723 if (i + j < xtensa->core_config->aregs_num) {
724 enum xtensa_reg_id realadr =
725 xtensa_windowbase_offset_to_canonical(xtensa, XT_REG_IDX_AR0 + i + j,
726 windowbase);
727 /* Write back any dirty un-windowed registers */
728 if (reg_list[realadr].dirty) {
729 regval = xtensa_reg_get(target, realadr);
730 LOG_TARGET_DEBUG(
731 target,
732 "Writing back reg %s value %08" PRIX32 ", num =%i",
733 xtensa_regs[realadr].name,
734 regval,
735 xtensa_regs[realadr].reg_num);
736 xtensa_queue_dbg_reg_write(xtensa, NARADR_DDR, regval);
737 xtensa_queue_exec_ins(xtensa,
738 XT_INS_RSR(xtensa, XT_SR_DDR,
739 xtensa_regs[XT_REG_IDX_AR0 + i].reg_num));
740 reg_list[realadr].dirty = false;
741 if ((i + j) == 3)
742 /* Avoid stomping AR during A3 restore at end of function */
743 a3 = regval;
744 }
745 }
746 }
747 /* Now rotate the window so we'll see the next 16 registers.
748 * The final rotate will wrap around,
749 * leaving us in the state we were in. */
750 xtensa_queue_exec_ins(xtensa, XT_INS_ROTW(xtensa, 4));
751 }
752
753 xtensa_window_state_restore(target, woe);
754
755 for (enum xtensa_ar_scratch_set_e s = 0; s < XT_AR_SCRATCH_NUM; s++)
756 xtensa->scratch_ars[s].intval = false;
757 }
758
759 if (preserve_a3) {
760 xtensa_queue_dbg_reg_write(xtensa, NARADR_DDR, a3);
761 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
762 }
763
764 res = jtag_execute_queue();
765 xtensa_core_status_check(target);
766
767 return res;
768 }
769
770 static inline bool xtensa_is_stopped(struct target *target)
771 {
772 struct xtensa *xtensa = target_to_xtensa(target);
773 return xtensa->dbg_mod.core_status.dsr & OCDDSR_STOPPED;
774 }
775
776 int xtensa_examine(struct target *target)
777 {
778 struct xtensa *xtensa = target_to_xtensa(target);
779 unsigned int cmd = PWRCTL_DEBUGWAKEUP | PWRCTL_MEMWAKEUP | PWRCTL_COREWAKEUP;
780
781 LOG_DEBUG("coreid = %d", target->coreid);
782
783 if (xtensa->core_config->core_type == XT_UNDEF) {
784 LOG_ERROR("XTensa core not configured; is xtensa-core-openocd.cfg missing?");
785 return ERROR_FAIL;
786 }
787
788 xtensa_queue_pwr_reg_write(xtensa, DMREG_PWRCTL, cmd);
789 xtensa_queue_pwr_reg_write(xtensa, DMREG_PWRCTL, cmd | PWRCTL_JTAGDEBUGUSE);
790 xtensa_dm_queue_enable(&xtensa->dbg_mod);
791 xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
792 int res = jtag_execute_queue();
793 if (res != ERROR_OK)
794 return res;
795 if (!xtensa_dm_is_online(&xtensa->dbg_mod)) {
796 LOG_ERROR("Unexpected OCD_ID = %08" PRIx32, xtensa->dbg_mod.device_id);
797 return ERROR_TARGET_FAILURE;
798 }
799 LOG_DEBUG("OCD_ID = %08" PRIx32, xtensa->dbg_mod.device_id);
800 if (!target_was_examined(target))
801 target_set_examined(target);
802 xtensa_smpbreak_write(xtensa, xtensa->smp_break);
803 return ERROR_OK;
804 }
805
806 int xtensa_wakeup(struct target *target)
807 {
808 struct xtensa *xtensa = target_to_xtensa(target);
809 unsigned int cmd = PWRCTL_DEBUGWAKEUP | PWRCTL_MEMWAKEUP | PWRCTL_COREWAKEUP;
810
811 if (xtensa->reset_asserted)
812 cmd |= PWRCTL_CORERESET;
813 xtensa_queue_pwr_reg_write(xtensa, DMREG_PWRCTL, cmd);
814 /* TODO: can we join this with the write above? */
815 xtensa_queue_pwr_reg_write(xtensa, DMREG_PWRCTL, cmd | PWRCTL_JTAGDEBUGUSE);
816 xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
817 return jtag_execute_queue();
818 }
819
820 int xtensa_smpbreak_write(struct xtensa *xtensa, uint32_t set)
821 {
822 uint32_t dsr_data = 0x00110000;
823 uint32_t clear = (set | OCDDCR_ENABLEOCD) ^
824 (OCDDCR_BREAKINEN | OCDDCR_BREAKOUTEN | OCDDCR_RUNSTALLINEN |
825 OCDDCR_DEBUGMODEOUTEN | OCDDCR_ENABLEOCD);
826
827 LOG_TARGET_DEBUG(xtensa->target, "write smpbreak set=0x%" PRIx32 " clear=0x%" PRIx32, set, clear);
828 xtensa_queue_dbg_reg_write(xtensa, NARADR_DCRSET, set | OCDDCR_ENABLEOCD);
829 xtensa_queue_dbg_reg_write(xtensa, NARADR_DCRCLR, clear);
830 xtensa_queue_dbg_reg_write(xtensa, NARADR_DSR, dsr_data);
831 xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
832 return jtag_execute_queue();
833 }
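
/* Worked example (assuming 'set' contains only the cross-trigger bits listed
 * above): set == (OCDDCR_BREAKINEN | OCDDCR_BREAKOUTEN) writes those two bits
 * plus OCDDCR_ENABLEOCD to DCRSET, while DCRCLR clears the remaining
 * OCDDCR_RUNSTALLINEN | OCDDCR_DEBUGMODEOUTEN bits; OCDDCR_ENABLEOCD itself is
 * never cleared. */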
834
835 int xtensa_smpbreak_set(struct target *target, uint32_t set)
836 {
837 struct xtensa *xtensa = target_to_xtensa(target);
838 int res = ERROR_OK;
839
840 xtensa->smp_break = set;
841 if (target_was_examined(target))
842 res = xtensa_smpbreak_write(xtensa, xtensa->smp_break);
843 LOG_TARGET_DEBUG(target, "set smpbreak=%" PRIx32 ", state=%i", set, target->state);
844 return res;
845 }
846
847 int xtensa_smpbreak_read(struct xtensa *xtensa, uint32_t *val)
848 {
849 uint8_t dcr_buf[sizeof(uint32_t)];
850
851 xtensa_queue_dbg_reg_read(xtensa, NARADR_DCRSET, dcr_buf);
852 xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
853 int res = jtag_execute_queue();
854 *val = buf_get_u32(dcr_buf, 0, 32);
855
856 return res;
857 }
858
859 int xtensa_smpbreak_get(struct target *target, uint32_t *val)
860 {
861 struct xtensa *xtensa = target_to_xtensa(target);
862 *val = xtensa->smp_break;
863 return ERROR_OK;
864 }
865
866 static inline xtensa_reg_val_t xtensa_reg_get_value(struct reg *reg)
867 {
868 return buf_get_u32(reg->value, 0, 32);
869 }
870
871 static inline void xtensa_reg_set_value(struct reg *reg, xtensa_reg_val_t value)
872 {
873 buf_set_u32(reg->value, 0, 32, value);
874 reg->dirty = true;
875 }
876
877 int xtensa_core_status_check(struct target *target)
878 {
879 struct xtensa *xtensa = target_to_xtensa(target);
880 int res, needclear = 0;
881
882 xtensa_dm_core_status_read(&xtensa->dbg_mod);
883 xtensa_dsr_t dsr = xtensa_dm_core_status_get(&xtensa->dbg_mod);
884 LOG_TARGET_DEBUG(target, "DSR (%08" PRIX32 ")", dsr);
885 if (dsr & OCDDSR_EXECBUSY) {
886 if (!xtensa->suppress_dsr_errors)
887 LOG_TARGET_ERROR(target, "DSR (%08" PRIX32 ") indicates target still busy!", dsr);
888 needclear = 1;
889 }
890 if (dsr & OCDDSR_EXECEXCEPTION) {
891 if (!xtensa->suppress_dsr_errors)
892 LOG_TARGET_ERROR(target,
893 "DSR (%08" PRIX32 ") indicates DIR instruction generated an exception!",
894 dsr);
895 needclear = 1;
896 }
897 if (dsr & OCDDSR_EXECOVERRUN) {
898 if (!xtensa->suppress_dsr_errors)
899 LOG_TARGET_ERROR(target,
900 "DSR (%08" PRIX32 ") indicates DIR instruction generated an overrun!",
901 dsr);
902 needclear = 1;
903 }
904 if (needclear) {
905 res = xtensa_dm_core_status_clear(&xtensa->dbg_mod,
906 OCDDSR_EXECEXCEPTION | OCDDSR_EXECOVERRUN);
907 if (res != ERROR_OK && !xtensa->suppress_dsr_errors)
908 LOG_TARGET_ERROR(target, "clearing DSR failed!");
909 return ERROR_FAIL;
910 }
911 return ERROR_OK;
912 }
913
914 xtensa_reg_val_t xtensa_reg_get(struct target *target, enum xtensa_reg_id reg_id)
915 {
916 struct xtensa *xtensa = target_to_xtensa(target);
917 struct reg *reg = &xtensa->core_cache->reg_list[reg_id];
918 return xtensa_reg_get_value(reg);
919 }
920
921 void xtensa_reg_set(struct target *target, enum xtensa_reg_id reg_id, xtensa_reg_val_t value)
922 {
923 struct xtensa *xtensa = target_to_xtensa(target);
924 struct reg *reg = &xtensa->core_cache->reg_list[reg_id];
925 if (xtensa_reg_get_value(reg) == value)
926 return;
927 xtensa_reg_set_value(reg, value);
928 }
929
930 /* Set Ax (XT_REG_RELGEN) register along with its underlying ARx (XT_REG_GENERAL) */
931 void xtensa_reg_set_deep_relgen(struct target *target, enum xtensa_reg_id a_idx, xtensa_reg_val_t value)
932 {
933 struct xtensa *xtensa = target_to_xtensa(target);
934 uint32_t windowbase = (xtensa->core_config->windowed ?
935 xtensa_reg_get(target, XT_REG_IDX_WINDOWBASE) : 0);
936 int ar_idx = xtensa_windowbase_offset_to_canonical(xtensa, a_idx, windowbase);
937 xtensa_reg_set(target, a_idx, value);
938 xtensa_reg_set(target, ar_idx, value);
939 }
940
941 /* Read cause for entering halted state; return bitmask in DEBUGCAUSE_* format */
942 uint32_t xtensa_cause_get(struct target *target)
943 {
944 return xtensa_reg_get(target, XT_REG_IDX_DEBUGCAUSE);
945 }
946
947 void xtensa_cause_clear(struct target *target)
948 {
949 struct xtensa *xtensa = target_to_xtensa(target);
950 xtensa_reg_set(target, XT_REG_IDX_DEBUGCAUSE, 0);
951 xtensa->core_cache->reg_list[XT_REG_IDX_DEBUGCAUSE].dirty = false;
952 }
953
954 int xtensa_assert_reset(struct target *target)
955 {
956 struct xtensa *xtensa = target_to_xtensa(target);
957
958 LOG_TARGET_DEBUG(target, "target_number=%i, begin", target->target_number);
959 target->state = TARGET_RESET;
960 xtensa_queue_pwr_reg_write(xtensa,
961 DMREG_PWRCTL,
962 PWRCTL_JTAGDEBUGUSE | PWRCTL_DEBUGWAKEUP | PWRCTL_MEMWAKEUP | PWRCTL_COREWAKEUP |
963 PWRCTL_CORERESET);
964 xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
965 int res = jtag_execute_queue();
966 if (res != ERROR_OK)
967 return res;
968 xtensa->reset_asserted = true;
969 return res;
970 }
971
972 int xtensa_deassert_reset(struct target *target)
973 {
974 struct xtensa *xtensa = target_to_xtensa(target);
975
976 LOG_TARGET_DEBUG(target, "halt=%d", target->reset_halt);
977 if (target->reset_halt)
978 xtensa_queue_dbg_reg_write(xtensa,
979 NARADR_DCRSET,
980 OCDDCR_ENABLEOCD | OCDDCR_DEBUGINTERRUPT);
981 xtensa_queue_pwr_reg_write(xtensa,
982 DMREG_PWRCTL,
983 PWRCTL_JTAGDEBUGUSE | PWRCTL_DEBUGWAKEUP | PWRCTL_MEMWAKEUP | PWRCTL_COREWAKEUP);
984 xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
985 int res = jtag_execute_queue();
986 if (res != ERROR_OK)
987 return res;
988 target->state = TARGET_RUNNING;
989 xtensa->reset_asserted = false;
990 return res;
991 }
992
993 int xtensa_soft_reset_halt(struct target *target)
994 {
995 LOG_TARGET_DEBUG(target, "begin");
996 return xtensa_assert_reset(target);
997 }
998
999 int xtensa_fetch_all_regs(struct target *target)
1000 {
1001 struct xtensa *xtensa = target_to_xtensa(target);
1002 struct reg *reg_list = xtensa->core_cache->reg_list;
1003 unsigned int reg_list_size = xtensa->core_cache->num_regs;
1004 xtensa_reg_val_t cpenable = 0, windowbase = 0, a3;
1005 uint32_t woe;
1006 uint8_t a3_buf[4];
1007 bool debug_dsrs = !xtensa->regs_fetched || LOG_LEVEL_IS(LOG_LVL_DEBUG);
1008
1009 union xtensa_reg_val_u *regvals = calloc(reg_list_size, sizeof(*regvals));
1010 if (!regvals) {
1011 LOG_TARGET_ERROR(target, "unable to allocate memory for regvals!");
1012 return ERROR_FAIL;
1013 }
1014 union xtensa_reg_val_u *dsrs = calloc(reg_list_size, sizeof(*dsrs));
1015 if (!dsrs) {
1016 LOG_TARGET_ERROR(target, "unable to allocate memory for dsrs!");
1017 free(regvals);
1018 return ERROR_FAIL;
1019 }
1020
1021 LOG_TARGET_DEBUG(target, "start");
1022
1023 /* Save (windowed) A3 so cache matches physical AR3; A3 usable as scratch */
1024 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A3));
1025 xtensa_queue_dbg_reg_read(xtensa, NARADR_DDR, a3_buf);
1026 int res = xtensa_window_state_save(target, &woe);
1027 if (res != ERROR_OK)
1028 goto xtensa_fetch_all_regs_done;
1029
1030 /* Assume the CPU has just halted. We now want to fill the register cache with all the
1031 * register contents GDB needs. For speed, we pipeline all the read operations, execute them
1032 * in one go, then sort everything out from the regvals variable. */
1033
1034 /* Start out with AREGS; we can reach those immediately. Grab them per 16 registers. */
1035 for (unsigned int j = 0; j < XT_AREGS_NUM_MAX; j += 16) {
1036 /*Grab the 16 registers we can see */
1037 for (unsigned int i = 0; i < 16; i++) {
1038 if (i + j < xtensa->core_config->aregs_num) {
1039 xtensa_queue_exec_ins(xtensa,
1040 XT_INS_WSR(xtensa, XT_SR_DDR, xtensa_regs[XT_REG_IDX_AR0 + i].reg_num));
1041 xtensa_queue_dbg_reg_read(xtensa, NARADR_DDR,
1042 regvals[XT_REG_IDX_AR0 + i + j].buf);
1043 if (debug_dsrs)
1044 xtensa_queue_dbg_reg_read(xtensa, NARADR_DSR,
1045 dsrs[XT_REG_IDX_AR0 + i + j].buf);
1046 }
1047 }
1048 if (xtensa->core_config->windowed)
1049 /* Now rotate the window so we'll see the next 16 registers.
1050 * The final rotate will wrap around,
1051 * leaving us in the state we were in. */
1052 xtensa_queue_exec_ins(xtensa, XT_INS_ROTW(xtensa, 4));
1053 }
1054 xtensa_window_state_restore(target, woe);
1055
1056 if (xtensa->core_config->coproc) {
1057 /* As the very first thing after AREGS, go grab CPENABLE */
1058 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, xtensa_regs[XT_REG_IDX_CPENABLE].reg_num, XT_REG_A3));
1059 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A3));
1060 xtensa_queue_dbg_reg_read(xtensa, NARADR_DDR, regvals[XT_REG_IDX_CPENABLE].buf);
1061 }
1062 res = jtag_execute_queue();
1063 if (res != ERROR_OK) {
1064 LOG_ERROR("Failed to read ARs (%d)!", res);
1065 goto xtensa_fetch_all_regs_done;
1066 }
1067 xtensa_core_status_check(target);
1068
1069 a3 = buf_get_u32(a3_buf, 0, 32);
1070
1071 if (xtensa->core_config->coproc) {
1072 cpenable = buf_get_u32(regvals[XT_REG_IDX_CPENABLE].buf, 0, 32);
1073
1074 /* Enable all coprocessors (by setting all bits in CPENABLE) so we can read FP and user registers. */
1075 xtensa_queue_dbg_reg_write(xtensa, NARADR_DDR, 0xffffffff);
1076 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
1077 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, xtensa_regs[XT_REG_IDX_CPENABLE].reg_num, XT_REG_A3));
1078
1079 /* Save CPENABLE; flag dirty later (when regcache updated) so original value is always restored */
1080 LOG_TARGET_DEBUG(target, "CPENABLE: was 0x%" PRIx32 ", all enabled", cpenable);
1081 xtensa_reg_set(target, XT_REG_IDX_CPENABLE, cpenable);
1082 }
1083 /* We're now free to use any of A0-A15 as scratch registers.
1084 * Grab the SFRs and user registers first. We use A3 as a scratch register. */
1085 for (unsigned int i = 0; i < reg_list_size; i++) {
1086 struct xtensa_reg_desc *rlist = (i < XT_NUM_REGS) ? xtensa_regs : xtensa->optregs;
1087 unsigned int ridx = (i < XT_NUM_REGS) ? i : i - XT_NUM_REGS;
1088 if (xtensa_reg_is_readable(rlist[ridx].flags, cpenable) && rlist[ridx].exist) {
1089 bool reg_fetched = true;
1090 unsigned int reg_num = rlist[ridx].reg_num;
1091 switch (rlist[ridx].type) {
1092 case XT_REG_USER:
1093 xtensa_queue_exec_ins(xtensa, XT_INS_RUR(xtensa, reg_num, XT_REG_A3));
1094 break;
1095 case XT_REG_FR:
1096 xtensa_queue_exec_ins(xtensa, XT_INS_RFR(xtensa, reg_num, XT_REG_A3));
1097 break;
1098 case XT_REG_SPECIAL:
1099 if (reg_num == XT_PC_REG_NUM_VIRTUAL) {
1100 /* reg number of PC for debug interrupt depends on NDEBUGLEVEL */
1101 reg_num = (XT_PC_REG_NUM_BASE + xtensa->core_config->debug.irq_level);
1102 } else if (reg_num == xtensa_regs[XT_REG_IDX_CPENABLE].reg_num) {
1103 /* CPENABLE already read/updated; don't re-read */
1104 reg_fetched = false;
1105 break;
1106 }
1107 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, reg_num, XT_REG_A3));
1108 break;
1109 default:
1110 reg_fetched = false;
1111 }
1112 if (reg_fetched) {
1113 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A3));
1114 xtensa_queue_dbg_reg_read(xtensa, NARADR_DDR, regvals[i].buf);
1115 if (debug_dsrs)
1116 xtensa_queue_dbg_reg_read(xtensa, NARADR_DSR, dsrs[i].buf);
1117 }
1118 }
1119 }
1120 /* Ok, send the whole mess to the CPU. */
1121 res = jtag_execute_queue();
1122 if (res != ERROR_OK) {
1123 LOG_ERROR("Failed to fetch AR regs!");
1124 goto xtensa_fetch_all_regs_done;
1125 }
1126 xtensa_core_status_check(target);
1127
1128 if (debug_dsrs) {
1129 /* DSR checking: follows order in which registers are requested. */
1130 for (unsigned int i = 0; i < reg_list_size; i++) {
1131 struct xtensa_reg_desc *rlist = (i < XT_NUM_REGS) ? xtensa_regs : xtensa->optregs;
1132 unsigned int ridx = (i < XT_NUM_REGS) ? i : i - XT_NUM_REGS;
1133 if (xtensa_reg_is_readable(rlist[ridx].flags, cpenable) && rlist[ridx].exist &&
1134 (rlist[ridx].type != XT_REG_DEBUG) &&
1135 (rlist[ridx].type != XT_REG_RELGEN) &&
1136 (rlist[ridx].type != XT_REG_TIE) &&
1137 (rlist[ridx].type != XT_REG_OTHER)) {
1138 if (buf_get_u32(dsrs[i].buf, 0, 32) & OCDDSR_EXECEXCEPTION) {
1139 LOG_ERROR("Exception reading %s!", reg_list[i].name);
1140 res = ERROR_FAIL;
1141 goto xtensa_fetch_all_regs_done;
1142 }
1143 }
1144 }
1145 }
1146
1147 if (xtensa->core_config->windowed)
1148 /* We need the windowbase to decode the general addresses. */
1149 windowbase = buf_get_u32(regvals[XT_REG_IDX_WINDOWBASE].buf, 0, 32);
1150 /* Decode the result and update the cache. */
1151 for (unsigned int i = 0; i < reg_list_size; i++) {
1152 struct xtensa_reg_desc *rlist = (i < XT_NUM_REGS) ? xtensa_regs : xtensa->optregs;
1153 unsigned int ridx = (i < XT_NUM_REGS) ? i : i - XT_NUM_REGS;
1154 if (xtensa_reg_is_readable(rlist[ridx].flags, cpenable) && rlist[ridx].exist) {
1155 if ((xtensa->core_config->windowed) && (rlist[ridx].type == XT_REG_GENERAL)) {
1156 /* The 64-value general register set is read from (windowbase) on down.
1157 * We need to get the real register address by subtracting windowbase and
1158 * wrapping around. */
1159 enum xtensa_reg_id realadr = xtensa_canonical_to_windowbase_offset(xtensa, i,
1160 windowbase);
1161 buf_cpy(regvals[realadr].buf, reg_list[i].value, reg_list[i].size);
1162 } else if (rlist[ridx].type == XT_REG_RELGEN) {
1163 buf_cpy(regvals[rlist[ridx].reg_num].buf, reg_list[i].value, reg_list[i].size);
1164 if (xtensa_extra_debug_log) {
1165 xtensa_reg_val_t regval = buf_get_u32(regvals[rlist[ridx].reg_num].buf, 0, 32);
1166 LOG_DEBUG("%s = 0x%x", rlist[ridx].name, regval);
1167 }
1168 } else {
1169 xtensa_reg_val_t regval = buf_get_u32(regvals[i].buf, 0, 32);
1170 bool is_dirty = (i == XT_REG_IDX_CPENABLE);
1171 if (xtensa_extra_debug_log)
1172 LOG_INFO("Register %s: 0x%X", reg_list[i].name, regval);
1173 xtensa_reg_set(target, i, regval);
1174 reg_list[i].dirty = is_dirty; /*always do this _after_ xtensa_reg_set! */
1175 }
1176 reg_list[i].valid = true;
1177 } else {
1178 if ((rlist[ridx].flags & XT_REGF_MASK) == XT_REGF_NOREAD) {
1179 /* Report read-only registers all-zero but valid */
1180 reg_list[i].valid = true;
1181 xtensa_reg_set(target, i, 0);
1182 } else {
1183 reg_list[i].valid = false;
1184 }
1185 }
1186 }
1187
1188 if (xtensa->core_config->windowed) {
1189 /* We have used A3 as a scratch register.
1190 * Windowed configs: restore A3's AR (XT_REG_GENERAL) and flag it for write-back.
1191 */
1192 enum xtensa_reg_id ar3_idx = xtensa_windowbase_offset_to_canonical(xtensa, XT_REG_IDX_A3, windowbase);
1193 xtensa_reg_set(target, ar3_idx, a3);
1194 xtensa_mark_register_dirty(xtensa, ar3_idx);
1195
1196 /* Reset scratch_ars[] on fetch. .chrval tracks AR mapping and changes w/ window */
1197 sprintf(xtensa->scratch_ars[XT_AR_SCRATCH_AR3].chrval, "ar%d", ar3_idx - XT_REG_IDX_AR0);
1198 enum xtensa_reg_id ar4_idx = xtensa_windowbase_offset_to_canonical(xtensa, XT_REG_IDX_A4, windowbase);
1199 sprintf(xtensa->scratch_ars[XT_AR_SCRATCH_AR4].chrval, "ar%d", ar4_idx - XT_REG_IDX_AR0);
1200 for (enum xtensa_ar_scratch_set_e s = 0; s < XT_AR_SCRATCH_NUM; s++)
1201 xtensa->scratch_ars[s].intval = false;
1202 }
1203
1204 /* We have used A3 (XT_REG_RELGEN) as a scratch register. Restore and flag for write-back. */
1205 xtensa_reg_set(target, XT_REG_IDX_A3, a3);
1206 xtensa_mark_register_dirty(xtensa, XT_REG_IDX_A3);
1207 xtensa->regs_fetched = true;
1208 xtensa_fetch_all_regs_done:
1209 free(regvals);
1210 free(dsrs);
1211 return res;
1212 }
1213
1214 int xtensa_get_gdb_reg_list(struct target *target,
1215 struct reg **reg_list[],
1216 int *reg_list_size,
1217 enum target_register_class reg_class)
1218 {
1219 struct xtensa *xtensa = target_to_xtensa(target);
1220 unsigned int num_regs;
1221
1222 if (reg_class == REG_CLASS_GENERAL) {
1223 if ((xtensa->genpkt_regs_num == 0) || !xtensa->contiguous_regs_list) {
1224 LOG_ERROR("reg_class %d unhandled; 'xtgregs' not found", reg_class);
1225 return ERROR_FAIL;
1226 }
1227 num_regs = xtensa->genpkt_regs_num;
1228 } else {
1229 /* Determine whether to return a contiguous or sparse register map */
1230 num_regs = xtensa->regmap_contiguous ? xtensa->total_regs_num : xtensa->dbregs_num;
1231 }
1232
1233 LOG_DEBUG("reg_class=%i, num_regs=%d", (int)reg_class, num_regs);
1234
1235 *reg_list = calloc(num_regs, sizeof(struct reg *));
1236 if (!*reg_list)
1237 return ERROR_FAIL;
1238
1239 *reg_list_size = num_regs;
1240 if (xtensa->regmap_contiguous) {
1241 assert((num_regs <= xtensa->total_regs_num) && "contiguous regmap size internal error!");
1242 for (unsigned int i = 0; i < num_regs; i++)
1243 (*reg_list)[i] = xtensa->contiguous_regs_list[i];
1244 return ERROR_OK;
1245 }
1246
1247 for (unsigned int i = 0; i < num_regs; i++)
1248 (*reg_list)[i] = (struct reg *)&xtensa->empty_regs[i];
1249 unsigned int k = 0;
1250 for (unsigned int i = 0; i < xtensa->core_cache->num_regs && k < num_regs; i++) {
1251 if (xtensa->core_cache->reg_list[i].exist) {
1252 struct xtensa_reg_desc *rlist = (i < XT_NUM_REGS) ? xtensa_regs : xtensa->optregs;
1253 unsigned int ridx = (i < XT_NUM_REGS) ? i : i - XT_NUM_REGS;
1254 int sparse_idx = rlist[ridx].dbreg_num;
1255 if (i == XT_REG_IDX_PS) {
1256 if (xtensa->eps_dbglevel_idx == 0) {
1257 LOG_ERROR("eps_dbglevel_idx not set\n");
1258 return ERROR_FAIL;
1259 }
1260 (*reg_list)[sparse_idx] = &xtensa->core_cache->reg_list[xtensa->eps_dbglevel_idx];
1261 if (xtensa_extra_debug_log)
1262 LOG_DEBUG("SPARSE GDB reg 0x%x getting EPS%d 0x%x",
1263 sparse_idx, xtensa->core_config->debug.irq_level,
1264 xtensa_reg_get_value((*reg_list)[sparse_idx]));
1265 } else if (rlist[ridx].type == XT_REG_RELGEN) {
1266 (*reg_list)[sparse_idx - XT_REG_IDX_ARFIRST] = &xtensa->core_cache->reg_list[i];
1267 } else {
1268 (*reg_list)[sparse_idx] = &xtensa->core_cache->reg_list[i];
1269 }
1270 if (i == XT_REG_IDX_PC)
1271 /* Make a duplicate copy of PC for external access */
1272 (*reg_list)[XT_PC_DBREG_NUM_BASE] = &xtensa->core_cache->reg_list[i];
1273 k++;
1274 }
1275 }
1276
1277 if (k == num_regs)
1278 LOG_ERROR("SPARSE GDB reg list full (size %d)", k);
1279
1280 return ERROR_OK;
1281 }
1282
1283 int xtensa_mmu_is_enabled(struct target *target, int *enabled)
1284 {
1285 struct xtensa *xtensa = target_to_xtensa(target);
1286 *enabled = xtensa->core_config->mmu.itlb_entries_count > 0 ||
1287 xtensa->core_config->mmu.dtlb_entries_count > 0;
1288 return ERROR_OK;
1289 }
1290
1291 int xtensa_halt(struct target *target)
1292 {
1293 struct xtensa *xtensa = target_to_xtensa(target);
1294
1295 LOG_TARGET_DEBUG(target, "start");
1296 if (target->state == TARGET_HALTED) {
1297 LOG_TARGET_DEBUG(target, "target was already halted");
1298 return ERROR_OK;
1299 }
1300 /* First we have to read dsr and check if the target stopped */
1301 int res = xtensa_dm_core_status_read(&xtensa->dbg_mod);
1302 if (res != ERROR_OK) {
1303 LOG_TARGET_ERROR(target, "Failed to read core status!");
1304 return res;
1305 }
1306 LOG_TARGET_DEBUG(target, "Core status 0x%" PRIx32, xtensa_dm_core_status_get(&xtensa->dbg_mod));
1307 if (!xtensa_is_stopped(target)) {
1308 xtensa_queue_dbg_reg_write(xtensa, NARADR_DCRSET, OCDDCR_ENABLEOCD | OCDDCR_DEBUGINTERRUPT);
1309 xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
1310 res = jtag_execute_queue();
1311 if (res != ERROR_OK)
1312 LOG_TARGET_ERROR(target, "Failed to set OCDDCR_DEBUGINTERRUPT. Can't halt.");
1313 }
1314
1315 return res;
1316 }
1317
1318 int xtensa_prepare_resume(struct target *target,
1319 int current,
1320 target_addr_t address,
1321 int handle_breakpoints,
1322 int debug_execution)
1323 {
1324 struct xtensa *xtensa = target_to_xtensa(target);
1325 uint32_t bpena = 0;
1326
1327 LOG_TARGET_DEBUG(target,
1328 "current=%d address=" TARGET_ADDR_FMT ", handle_breakpoints=%i, debug_execution=%i)",
1329 current,
1330 address,
1331 handle_breakpoints,
1332 debug_execution);
1333
1334 if (target->state != TARGET_HALTED) {
1335 LOG_TARGET_WARNING(target, "target not halted");
1336 return ERROR_TARGET_NOT_HALTED;
1337 }
1338
1339 if (address && !current) {
1340 xtensa_reg_set(target, XT_REG_IDX_PC, address);
1341 } else {
1342 uint32_t cause = xtensa_cause_get(target);
1343 LOG_TARGET_DEBUG(target, "DEBUGCAUSE 0x%x (watchpoint %lu) (break %lu)",
1344 cause, (cause & DEBUGCAUSE_DB), (cause & (DEBUGCAUSE_BI | DEBUGCAUSE_BN)));
1345 if (cause & DEBUGCAUSE_DB)
1346 /* We stopped due to a watchpoint. We can't just resume executing the
1347 * instruction again, because that would trigger the watchpoint again.
1348 * To fix this, we single-step, which ignores
1349 * watchpoints. */
1350 xtensa_do_step(target, current, address, handle_breakpoints);
1351 if (cause & (DEBUGCAUSE_BI | DEBUGCAUSE_BN))
1352 /* We stopped due to a break instruction. We can't just resume executing the
1353 * instruction again, because that would trigger the break again.
1354 * To fix this, we single-step, which ignores
1355 * the break. */
1356 xtensa_do_step(target, current, address, handle_breakpoints);
1357 }
1358
1359 /* Write back hw breakpoints. Current FreeRTOS SMP code can set a hw breakpoint on an
1360 * exception; we need to clear that and return to the breakpoints gdb has set on resume. */
1361 for (unsigned int slot = 0; slot < xtensa->core_config->debug.ibreaks_num; slot++) {
1362 if (xtensa->hw_brps[slot]) {
1363 /* Write IBREAKA[slot] and set bit #slot in IBREAKENABLE */
1364 xtensa_reg_set(target, XT_REG_IDX_IBREAKA0 + slot, xtensa->hw_brps[slot]->address);
1365 bpena |= BIT(slot);
1366 }
1367 }
1368 xtensa_reg_set(target, XT_REG_IDX_IBREAKENABLE, bpena);
1369
1370 /* Here we write all registers to the targets */
1371 int res = xtensa_write_dirty_registers(target);
1372 if (res != ERROR_OK)
1373 LOG_TARGET_ERROR(target, "Failed to write back register cache.");
1374 return res;
1375 }
1376
1377 int xtensa_do_resume(struct target *target)
1378 {
1379 struct xtensa *xtensa = target_to_xtensa(target);
1380
1381 LOG_TARGET_DEBUG(target, "start");
1382
1383 xtensa_queue_exec_ins(xtensa, XT_INS_RFDO(xtensa));
1384 int res = jtag_execute_queue();
1385 if (res != ERROR_OK) {
1386 LOG_TARGET_ERROR(target, "Failed to exec RFDO %d!", res);
1387 return res;
1388 }
1389 xtensa_core_status_check(target);
1390 return ERROR_OK;
1391 }
1392
1393 int xtensa_resume(struct target *target,
1394 int current,
1395 target_addr_t address,
1396 int handle_breakpoints,
1397 int debug_execution)
1398 {
1399 LOG_TARGET_DEBUG(target, "start");
1400 int res = xtensa_prepare_resume(target, current, address, handle_breakpoints, debug_execution);
1401 if (res != ERROR_OK) {
1402 LOG_TARGET_ERROR(target, "Failed to prepare for resume!");
1403 return res;
1404 }
1405 res = xtensa_do_resume(target);
1406 if (res != ERROR_OK) {
1407 LOG_TARGET_ERROR(target, "Failed to resume!");
1408 return res;
1409 }
1410
1411 target->debug_reason = DBG_REASON_NOTHALTED;
1412 if (!debug_execution)
1413 target->state = TARGET_RUNNING;
1414 else
1415 target->state = TARGET_DEBUG_RUNNING;
1416
1417 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1418
1419 return ERROR_OK;
1420 }
1421
1422 static bool xtensa_pc_in_winexc(struct target *target, target_addr_t pc)
1423 {
1424 struct xtensa *xtensa = target_to_xtensa(target);
1425 uint8_t insn_buf[XT_ISNS_SZ_MAX];
1426 int err = xtensa_read_buffer(target, pc, sizeof(insn_buf), insn_buf);
1427 if (err != ERROR_OK)
1428 return false;
1429
1430 xtensa_insn_t insn = buf_get_u32(insn_buf, 0, 24);
1431 xtensa_insn_t masked = insn & XT_INS_L32E_S32E_MASK(xtensa);
1432 if (masked == XT_INS_L32E(xtensa, 0, 0, 0) || masked == XT_INS_S32E(xtensa, 0, 0, 0))
1433 return true;
1434
1435 masked = insn & XT_INS_RFWO_RFWU_MASK(xtensa);
1436 if (masked == XT_INS_RFWO(xtensa) || masked == XT_INS_RFWU(xtensa))
1437 return true;
1438
1439 return false;
1440 }
1441
1442 int xtensa_do_step(struct target *target, int current, target_addr_t address, int handle_breakpoints)
1443 {
1444 struct xtensa *xtensa = target_to_xtensa(target);
1445 int res;
1446 const uint32_t icount_val = -2; /* ICOUNT value to load for 1 step */
1447 xtensa_reg_val_t dbreakc[XT_WATCHPOINTS_NUM_MAX];
1448 xtensa_reg_val_t icountlvl, cause;
1449 xtensa_reg_val_t oldps, oldpc, cur_pc;
1450 bool ps_lowered = false;
1451
1452 LOG_TARGET_DEBUG(target, "current=%d, address=" TARGET_ADDR_FMT ", handle_breakpoints=%i",
1453 current, address, handle_breakpoints);
1454
1455 if (target->state != TARGET_HALTED) {
1456 LOG_TARGET_WARNING(target, "target not halted");
1457 return ERROR_TARGET_NOT_HALTED;
1458 }
1459
1460 if (xtensa->eps_dbglevel_idx == 0) {
1461 LOG_ERROR("eps_dbglevel_idx not set\n");
1462 return ERROR_FAIL;
1463 }
1464
1465 /* Save old ps (EPS[dbglvl] on LX), pc */
1466 oldps = xtensa_reg_get(target, xtensa->eps_dbglevel_idx);
1467 oldpc = xtensa_reg_get(target, XT_REG_IDX_PC);
1468
1469 cause = xtensa_cause_get(target);
1470 LOG_TARGET_DEBUG(target, "oldps=%" PRIx32 ", oldpc=%" PRIx32 " dbg_cause=%" PRIx32 " exc_cause=%" PRIx32,
1471 oldps,
1472 oldpc,
1473 cause,
1474 xtensa_reg_get(target, XT_REG_IDX_EXCCAUSE));
1475 if (handle_breakpoints && (cause & (DEBUGCAUSE_BI | DEBUGCAUSE_BN))) {
1476 /* handle hard-coded SW breakpoints (e.g. syscalls) */
1477 LOG_TARGET_DEBUG(target, "Increment PC to pass break instruction...");
1478 xtensa_cause_clear(target); /* so we don't recurse into the same routine */
1479 /* pretend that we have stepped */
1480 if (cause & DEBUGCAUSE_BI)
1481 xtensa_reg_set(target, XT_REG_IDX_PC, oldpc + 3); /* PC = PC+3 */
1482 else
1483 xtensa_reg_set(target, XT_REG_IDX_PC, oldpc + 2); /* PC = PC+2 */
1484 return ERROR_OK;
1485 }
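/* Illustrative sketch, compiled out and not part of the driver: BREAK is a
 * 24-bit (3-byte) instruction and BREAK.N a 16-bit (2-byte) density
 * instruction, which is why the hard-coded breakpoint above is skipped by
 * advancing PC by 3 or 2 bytes respectively. */
#if 0
static target_addr_t example_skip_break(target_addr_t pc, bool was_break_n)
{
	/* BREAK.N occupies 2 bytes, BREAK occupies 3 bytes */
	return pc + (was_break_n ? 2 : 3);
}
#endif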
1486
1487 /* Xtensa LX has an ICOUNTLEVEL register which sets the maximum interrupt level
1488 * at which the instructions are to be counted while stepping.
1489 *
1490 * For example, if we need to step by 2 instructions, and an interrupt occurs
1491 * in between, the processor will trigger the interrupt and halt after the 2nd
1492 * instruction within the interrupt vector and/or handler.
1493 *
1494 * However, sometimes we don't want the interrupt handlers to be executed at all
1495 * while stepping through the code. In this case (XT_STEPPING_ISR_OFF),
1496 * ICOUNTLEVEL can be lowered to the executing code's (level + 1) to prevent ISR
1497 * code from being counted during stepping. Note that C exception handlers must
1498 * run at level 0 and hence will be counted and stepped into, should one occur.
1499 *
1500 * TODO: Certain instructions should never be single-stepped and should instead
1501 * be emulated (per DUG): RSIL >= DBGLEVEL, RSR/WSR [ICOUNT|ICOUNTLEVEL], and
1502 * RFI >= DBGLEVEL.
1503 */
1504 if (xtensa->stepping_isr_mode == XT_STEPPING_ISR_OFF) {
1505 if (!xtensa->core_config->high_irq.enabled) {
1506 LOG_TARGET_WARNING(
1507 target,
1508 "disabling IRQs while stepping is not implemented w/o high prio IRQs option!");
1509 return ERROR_FAIL;
1510 }
1511 /* Update ICOUNTLEVEL accordingly */
1512 icountlvl = MIN((oldps & 0xF) + 1, xtensa->core_config->debug.irq_level);
1513 } else {
1514 icountlvl = xtensa->core_config->debug.irq_level;
1515 }
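/* Illustrative sketch, compiled out and not part of the driver, using
 * hypothetical values: with ISRs masked (XT_STEPPING_ISR_OFF), code running
 * at PS.INTLEVEL = 2 on a core whose debug interrupt level is 6 steps with
 * ICOUNTLEVEL = MIN(2 + 1, 6) = 3, so only instructions executing below
 * level 3 are counted and higher-priority ISR code is not stepped into. */
#if 0
static void example_icountlvl(void)
{
	xtensa_reg_val_t oldps = 0x2;	/* hypothetical PS; only INTLEVEL (bits 3..0) matters here */
	uint8_t debug_irq_level = 6;	/* stands in for core_config->debug.irq_level */
	xtensa_reg_val_t icountlvl = MIN((oldps & 0xF) + 1, debug_irq_level);
	assert(icountlvl == 3);
}
#endif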
1516
1517 if (cause & DEBUGCAUSE_DB) {
1518 /* We stopped due to a watchpoint. We can't just resume executing the instruction again because
1519 * that would trigger the watchpoint again. To fix this, we remove the watchpoints, single-step,
1520 * and then re-enable the watchpoints. */
1521 LOG_TARGET_DEBUG(
1522 target,
1523 "Single-stepping to get past instruction that triggered the watchpoint...");
1524 xtensa_cause_clear(target); /* so we don't recurse into the same routine */
1525 /* Save all DBREAKCx registers and set to 0 to disable watchpoints */
1526 for (unsigned int slot = 0; slot < xtensa->core_config->debug.dbreaks_num; slot++) {
1527 dbreakc[slot] = xtensa_reg_get(target, XT_REG_IDX_DBREAKC0 + slot);
1528 xtensa_reg_set(target, XT_REG_IDX_DBREAKC0 + slot, 0);
1529 }
1530 }
1531
1532 if (!handle_breakpoints && (cause & (DEBUGCAUSE_BI | DEBUGCAUSE_BN)))
1533 /* handle normal SW breakpoint */
1534 xtensa_cause_clear(target); /* so we don't recurse into the same routine */
1535 if ((oldps & 0xf) >= icountlvl) {
1536 /* Lower interrupt level to allow stepping, but flag eps[dbglvl] to be restored */
1537 ps_lowered = true;
1538 uint32_t newps = (oldps & ~0xf) | (icountlvl - 1);
1539 xtensa_reg_set(target, xtensa->eps_dbglevel_idx, newps);
1540 LOG_TARGET_DEBUG(target,
1541 "Lowering PS.INTLEVEL to allow stepping: %s <- 0x%08" PRIx32 " (was 0x%08" PRIx32 ")",
1542 xtensa->core_cache->reg_list[xtensa->eps_dbglevel_idx].name,
1543 newps,
1544 oldps);
1545 }
1546 do {
1547 xtensa_reg_set(target, XT_REG_IDX_ICOUNTLEVEL, icountlvl);
1548 xtensa_reg_set(target, XT_REG_IDX_ICOUNT, icount_val);
1549
1550 /* Now ICOUNT is set, we can resume as if we were going to run */
1551 res = xtensa_prepare_resume(target, current, address, 0, 0);
1552 if (res != ERROR_OK) {
1553 LOG_TARGET_ERROR(target, "Failed to prepare resume for single step");
1554 return res;
1555 }
1556 res = xtensa_do_resume(target);
1557 if (res != ERROR_OK) {
1558 LOG_TARGET_ERROR(target, "Failed to resume after setting up single step");
1559 return res;
1560 }
1561
1562 /* Wait for stepping to complete */
1563 long long start = timeval_ms();
1564 while (timeval_ms() < start + 500) {
1565 /* Do not use target_poll here, it also triggers other things... just manually read the DSR
1566 * until stepping is complete. */
1567 usleep(1000);
1568 res = xtensa_dm_core_status_read(&xtensa->dbg_mod);
1569 if (res != ERROR_OK) {
1570 LOG_TARGET_ERROR(target, "Failed to read core status!");
1571 return res;
1572 }
1573 if (xtensa_is_stopped(target))
1574 break;
1575 usleep(1000);
1576 }
1577 LOG_TARGET_DEBUG(target, "Finish stepping. dsr=0x%08" PRIx32,
1578 xtensa_dm_core_status_get(&xtensa->dbg_mod));
1579 if (!xtensa_is_stopped(target)) {
1580 LOG_TARGET_WARNING(
1581 target,
1582 "Timed out waiting for target to finish stepping. dsr=0x%08" PRIx32,
1583 xtensa_dm_core_status_get(&xtensa->dbg_mod));
1584 target->debug_reason = DBG_REASON_NOTHALTED;
1585 target->state = TARGET_RUNNING;
1586 return ERROR_FAIL;
1587 }
1588 target->debug_reason = DBG_REASON_SINGLESTEP;
1589 target->state = TARGET_HALTED;
1590
1591 xtensa_fetch_all_regs(target);
1592
1593 cur_pc = xtensa_reg_get(target, XT_REG_IDX_PC);
1594
1595 LOG_TARGET_DEBUG(target,
1596 "cur_ps=%" PRIx32 ", cur_pc=%" PRIx32 " dbg_cause=%" PRIx32 " exc_cause=%" PRIx32,
1597 xtensa_reg_get(target, XT_REG_IDX_PS),
1598 cur_pc,
1599 xtensa_cause_get(target),
1600 xtensa_reg_get(target, XT_REG_IDX_EXCCAUSE));
1601
1602 /* Do not step into WindowOverflow if ISRs are masked.
1603 If we stop in WindowOverflow at a breakpoint with masked ISRs and
1604 try to do a step, it will get us out of that handler. */
1605 if (xtensa->core_config->windowed &&
1606 xtensa->stepping_isr_mode == XT_STEPPING_ISR_OFF &&
1607 xtensa_pc_in_winexc(target, cur_pc)) {
1608 /* isrmask = on, need to step out of the window exception handler */
1609 LOG_DEBUG("Stepping out of window exception, PC=%" PRIX32, cur_pc);
1610 oldpc = cur_pc;
1611 address = oldpc + 3;
1612 continue;
1613 }
1614
1615 if (oldpc == cur_pc)
1616 LOG_TARGET_WARNING(target, "Stepping doesn't seem to change PC! dsr=0x%08" PRIx32,
1617 xtensa_dm_core_status_get(&xtensa->dbg_mod));
1618 else
1619 LOG_DEBUG("Stepped from %" PRIX32 " to %" PRIX32, oldpc, cur_pc);
1620 break;
1621 } while (true);
1622 LOG_DEBUG("Done stepping, PC=%" PRIX32, cur_pc);
1623
1624 if (cause & DEBUGCAUSE_DB) {
1625 LOG_TARGET_DEBUG(target, "...Done, re-installing watchpoints.");
1626 /* Restore the DBREAKCx registers */
1627 for (unsigned int slot = 0; slot < xtensa->core_config->debug.dbreaks_num; slot++)
1628 xtensa_reg_set(target, XT_REG_IDX_DBREAKC0 + slot, dbreakc[slot]);
1629 }
1630
1631 /* Restore int level */
1632 if (ps_lowered) {
1633 LOG_DEBUG("Restoring %s after stepping: 0x%08" PRIx32,
1634 xtensa->core_cache->reg_list[xtensa->eps_dbglevel_idx].name,
1635 oldps);
1636 xtensa_reg_set(target, xtensa->eps_dbglevel_idx, oldps);
1637 }
1638
1639 /* write ICOUNTLEVEL back to zero */
1640 xtensa_reg_set(target, XT_REG_IDX_ICOUNTLEVEL, 0);
1641 /* TODO: can we skip writing dirty registers and re-fetching them? */
1642 res = xtensa_write_dirty_registers(target);
1643 xtensa_fetch_all_regs(target);
1644 return res;
1645 }
1646
1647 int xtensa_step(struct target *target, int current, target_addr_t address, int handle_breakpoints)
1648 {
1649 return xtensa_do_step(target, current, address, handle_breakpoints);
1650 }
1651
1652 /**
1653 * Returns true if two ranges are overlapping
1654 */
1655 static inline bool xtensa_memory_regions_overlap(target_addr_t r1_start,
1656 target_addr_t r1_end,
1657 target_addr_t r2_start,
1658 target_addr_t r2_end)
1659 {
1660 if ((r2_start >= r1_start) && (r2_start < r1_end))
1661 return true; /* r2_start is in r1 region */
1662 if ((r2_end > r1_start) && (r2_end <= r1_end))
1663 return true; /* r2_end is in r1 region */
1664 return false;
1665 }
1666
1667 /**
1668 * Returns the size of the overlapping region of two ranges.
1669 */
1670 static inline target_addr_t xtensa_get_overlap_size(target_addr_t r1_start,
1671 target_addr_t r1_end,
1672 target_addr_t r2_start,
1673 target_addr_t r2_end)
1674 {
1675 if (xtensa_memory_regions_overlap(r1_start, r1_end, r2_start, r2_end)) {
1676 target_addr_t ov_start = r1_start < r2_start ? r2_start : r1_start;
1677 target_addr_t ov_end = r1_end > r2_end ? r2_end : r1_end;
1678 return ov_end - ov_start;
1679 }
1680 return 0;
1681 }
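/* Illustrative sketch, compiled out and not part of the driver: with
 * r1 = [0x100, 0x200) and r2 = [0x180, 0x280) the ranges overlap and share
 * 0x80 bytes; disjoint ranges report no overlap. */
#if 0
static void example_overlap(void)
{
	assert(xtensa_memory_regions_overlap(0x100, 0x200, 0x180, 0x280));
	assert(xtensa_get_overlap_size(0x100, 0x200, 0x180, 0x280) == 0x80);
	assert(xtensa_get_overlap_size(0x100, 0x200, 0x200, 0x300) == 0);
}
#endif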
1682
1683 /**
1684 * Check whether the whole address range lies within configured memory regions that allow the requested access mode.
1685 */
1686 static bool xtensa_memory_op_validate_range(struct xtensa *xtensa, target_addr_t address, size_t size, int access)
1687 {
1688 target_addr_t adr_pos = address; /* address cursor, starting at the beginning of the range */
1689 target_addr_t adr_end = address + size; /* region end */
1690 target_addr_t overlap_size;
1691 const struct xtensa_local_mem_region_config *cm; /* current mem region */
1692
1693 while (adr_pos < adr_end) {
1694 cm = xtensa_target_memory_region_find(xtensa, adr_pos);
1695 if (!cm) /* address does not belong to any configured region */
1696 return false;
1697 if ((cm->access & access) != access) /* access check */
1698 return false;
1699 overlap_size = xtensa_get_overlap_size(cm->base, (cm->base + cm->size), adr_pos, adr_end);
1700 assert(overlap_size != 0);
1701 adr_pos += overlap_size;
1702 }
1703 return true;
1704 }
1705
1706 int xtensa_read_memory(struct target *target, target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
1707 {
1708 struct xtensa *xtensa = target_to_xtensa(target);
1709 /* We are going to read memory in 32-bit increments. This may not be what the calling
1710 * function expects, so we may need to allocate a temp buffer and read into that first. */
1711 target_addr_t addrstart_al = ALIGN_DOWN(address, 4);
1712 target_addr_t addrend_al = ALIGN_UP(address + size * count, 4);
1713 target_addr_t adr = addrstart_al;
1714 uint8_t *albuff;
1715 bool bswap = xtensa->target->endianness == TARGET_BIG_ENDIAN;
1716
1717 if (target->state != TARGET_HALTED) {
1718 LOG_TARGET_WARNING(target, "target not halted");
1719 return ERROR_TARGET_NOT_HALTED;
1720 }
1721
1722 if (!xtensa->permissive_mode) {
1723 if (!xtensa_memory_op_validate_range(xtensa, address, (size * count),
1724 XT_MEM_ACCESS_READ)) {
1725 LOG_DEBUG("address " TARGET_ADDR_FMT " not readable", address);
1726 return ERROR_FAIL;
1727 }
1728 }
1729
1730 if (addrstart_al == address && addrend_al == address + (size * count)) {
1731 albuff = buffer;
1732 } else {
1733 albuff = malloc(addrend_al - addrstart_al);
1734 if (!albuff) {
1735 LOG_TARGET_ERROR(target, "Out of memory allocating %" TARGET_PRIdADDR " bytes!",
1736 addrend_al - addrstart_al);
1737 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1738 }
1739 }
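/* Illustrative sketch, compiled out and not part of the driver: how the
 * aligned window is derived for an unaligned read, assuming ALIGN_DOWN/
 * ALIGN_UP round to the given power-of-two boundary. For a 6-byte read at
 * 0x60000003 the window is [0x60000000, 0x6000000c), i.e. three 32-bit
 * words, and the requested bytes sit at offset (address & 3) inside albuff. */
#if 0
static void example_aligned_read_window(void)
{
	target_addr_t address = 0x60000003;	/* hypothetical unaligned request */
	uint32_t size = 1, count = 6;
	target_addr_t addrstart_al = ALIGN_DOWN(address, 4);			/* 0x60000000 */
	target_addr_t addrend_al = ALIGN_UP(address + size * count, 4);	/* 0x6000000c */
	assert(addrend_al - addrstart_al == 12);	/* three aligned words are transferred */
	assert((address & 3) == 3);			/* copy-out offset into albuff */
}
#endif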
1740
1741 /* We're going to use A3 here */
1742 xtensa_mark_register_dirty(xtensa, XT_REG_IDX_A3);
1743 /* Write start address to A3 */
1744 xtensa_queue_dbg_reg_write(xtensa, NARADR_DDR, addrstart_al);
1745 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
1746 /* Now we can safely read data from addrstart_al up to addrend_al into albuff */
1747 if (xtensa->probe_lsddr32p != 0) {
1748 xtensa_queue_exec_ins(xtensa, XT_INS_LDDR32P(xtensa, XT_REG_A3));
1749 for (unsigned int i = 0; adr != addrend_al; i += sizeof(uint32_t), adr += sizeof(uint32_t))
1750 xtensa_queue_dbg_reg_read(xtensa,
1751 (adr + sizeof(uint32_t) == addrend_al) ? NARADR_DDR : NARADR_DDREXEC,
1752 &albuff[i]);
1753 } else {
1754 xtensa_mark_register_dirty(xtensa, XT_REG_IDX_A4);
1755 for (unsigned int i = 0; adr != addrend_al; i += sizeof(uint32_t), adr += sizeof(uint32_t)) {
1756 xtensa_queue_exec_ins(xtensa, XT_INS_L32I(xtensa, XT_REG_A3, XT_REG_A4, 0));
1757 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A4));
1758 xtensa_queue_dbg_reg_read(xtensa, NARADR_DDR, &albuff[i]);
1759 xtensa_queue_dbg_reg_write(xtensa, NARADR_DDR, adr + sizeof(uint32_t));
1760 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
1761 }
1762 }
1763 int res = jtag_execute_queue();
1764 if (res == ERROR_OK) {
1765 bool prev_suppress = xtensa->suppress_dsr_errors;
1766 xtensa->suppress_dsr_errors = true;
1767 res = xtensa_core_status_check(target);
1768 if (xtensa->probe_lsddr32p == -1)
1769 xtensa->probe_lsddr32p = 1;
1770 xtensa->suppress_dsr_errors = prev_suppress;
1771 }
1772 if (res != ERROR_OK) {
1773 if (xtensa->probe_lsddr32p != 0) {
1774 /* Disable fast memory access instructions and retry before reporting an error */
1775 LOG_TARGET_INFO(target, "Disabling LDDR32.P/SDDR32.P");
1776 xtensa->probe_lsddr32p = 0;
1777 res = xtensa_read_memory(target, address, size, count, buffer);
1778 bswap = false;
1779 } else {
1780 LOG_TARGET_WARNING(target, "Failed reading %d bytes at address "TARGET_ADDR_FMT,
1781 count * size, address);
1782 }
1783 }
1784
1785 if (bswap)
1786 buf_bswap32(albuff, albuff, addrend_al - addrstart_al);
1787 if (albuff != buffer) {
1788 memcpy(buffer, albuff + (address & 3), (size * count));
1789 free(albuff);
1790 }
1791
1792 return res;
1793 }
1794
1795 int xtensa_read_buffer(struct target *target, target_addr_t address, uint32_t count, uint8_t *buffer)
1796 {
1797 /* xtensa_read_memory can also read unaligned stuff. Just pass through to that routine. */
1798 return xtensa_read_memory(target, address, 1, count, buffer);
1799 }
1800
1801 int xtensa_write_memory(struct target *target,
1802 target_addr_t address,
1803 uint32_t size,
1804 uint32_t count,
1805 const uint8_t *buffer)
1806 {
1807 /* This memory write function can be handed nearly anything, from aligned
1808 * 32-bit writes to unaligned single bytes. The Xtensa memory interface,
1809 * however, only reliably accepts aligned 32-bit writes, so everything is
1810 * converted to that form here. */
1811 struct xtensa *xtensa = target_to_xtensa(target);
1812 target_addr_t addrstart_al = ALIGN_DOWN(address, 4);
1813 target_addr_t addrend_al = ALIGN_UP(address + size * count, 4);
1814 target_addr_t adr = addrstart_al;
1815 int res;
1816 uint8_t *albuff;
1817 bool fill_head_tail = false;
1818
1819 if (target->state != TARGET_HALTED) {
1820 LOG_TARGET_WARNING(target, "target not halted");
1821 return ERROR_TARGET_NOT_HALTED;
1822 }
1823
1824 if (!xtensa->permissive_mode) {
1825 if (!xtensa_memory_op_validate_range(xtensa, address, (size * count), XT_MEM_ACCESS_WRITE)) {
1826 LOG_WARNING("address " TARGET_ADDR_FMT " not writable", address);
1827 return ERROR_FAIL;
1828 }
1829 }
1830
1831 if (size == 0 || count == 0 || !buffer)
1832 return ERROR_COMMAND_SYNTAX_ERROR;
1833
1834 /* Allocate a temporary buffer to put the aligned bytes in, if needed. */
1835 if (addrstart_al == address && addrend_al == address + (size * count)) {
1836 if (xtensa->target->endianness == TARGET_BIG_ENDIAN)
1837 /* Need a buffer for byte-swapping */
1838 albuff = malloc(addrend_al - addrstart_al);
1839 else
1840 /* We discard the const here because albuff can also be non-const */
1841 albuff = (uint8_t *)buffer;
1842 } else {
1843 fill_head_tail = true;
1844 albuff = malloc(addrend_al - addrstart_al);
1845 }
1846 if (!albuff) {
1847 LOG_TARGET_ERROR(target, "Out of memory allocating %" TARGET_PRIdADDR " bytes!",
1848 addrend_al - addrstart_al);
1849 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1850 }
1851
1852 /* We're going to use A3 here */
1853 xtensa_mark_register_dirty(xtensa, XT_REG_IDX_A3);
1854
1855 /* If we're using a temp aligned buffer, we need to fill in its head and/or tail from target memory. */
1856 if (fill_head_tail) {
1857 /* See if we need to read the first and/or last word. */
1858 if (address & 3) {
1859 xtensa_queue_dbg_reg_write(xtensa, NARADR_DDR, addrstart_al);
1860 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
1861 if (xtensa->probe_lsddr32p == 1) {
1862 xtensa_queue_exec_ins(xtensa, XT_INS_LDDR32P(xtensa, XT_REG_A3));
1863 } else {
1864 xtensa_queue_exec_ins(xtensa, XT_INS_L32I(xtensa, XT_REG_A3, XT_REG_A3, 0));
1865 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A3));
1866 }
1867 xtensa_queue_dbg_reg_read(xtensa, NARADR_DDR, &albuff[0]);
1868 }
1869 if ((address + (size * count)) & 3) {
1870 xtensa_queue_dbg_reg_write(xtensa, NARADR_DDR, addrend_al - 4);
1871 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
1872 if (xtensa->probe_lsddr32p == 1) {
1873 xtensa_queue_exec_ins(xtensa, XT_INS_LDDR32P(xtensa, XT_REG_A3));
1874 } else {
1875 xtensa_queue_exec_ins(xtensa, XT_INS_L32I(xtensa, XT_REG_A3, XT_REG_A3, 0));
1876 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A3));
1877 }
1878 xtensa_queue_dbg_reg_read(xtensa, NARADR_DDR,
1879 &albuff[addrend_al - addrstart_al - 4]);
1880 }
1881 /* Grab bytes */
1882 res = jtag_execute_queue();
1883 if (res != ERROR_OK) {
1884 LOG_ERROR("Error issuing unaligned memory write context instruction(s): %d", res);
1885 if (albuff != buffer)
1886 free(albuff);
1887 return res;
1888 }
1889 xtensa_core_status_check(target);
1890 if (xtensa->target->endianness == TARGET_BIG_ENDIAN) {
1891 bool swapped_w0 = false;
1892 if (address & 3) {
1893 buf_bswap32(&albuff[0], &albuff[0], 4);
1894 swapped_w0 = true;
1895 }
1896 if ((address + (size * count)) & 3) {
1897 if ((addrend_al - addrstart_al - 4 == 0) && swapped_w0) {
1898 /* Don't double-swap if buffer start/end are within the same word */
1899 } else {
1900 buf_bswap32(&albuff[addrend_al - addrstart_al - 4],
1901 &albuff[addrend_al - addrstart_al - 4], 4);
1902 }
1903 }
1904 }
1905 /* Copy data to be written into the aligned buffer (in host-endianness) */
1906 memcpy(&albuff[address & 3], buffer, size * count);
1907 /* Now we can write albuff in aligned uint32s. */
1908 }
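/* Illustrative sketch, compiled out and not part of the driver: a 5-byte
 * write at 0x60000002 spans the two words [0x60000000, 0x60000008). Both the
 * head and the tail word are read back above so that bytes outside the
 * requested range keep their previous values before the caller's data is
 * merged in at offset (address & 3) and written out as aligned 32-bit stores. */
#if 0
static void example_head_tail_merge(void)
{
	target_addr_t address = 0x60000002;	/* hypothetical unaligned request */
	uint32_t len = 5;
	target_addr_t addrstart_al = ALIGN_DOWN(address, 4);		/* 0x60000000 */
	target_addr_t addrend_al = ALIGN_UP(address + len, 4);		/* 0x60000008 */
	assert(addrend_al - addrstart_al == 8);				/* two aligned words */
	assert((address & 3) != 0 && ((address + len) & 3) != 0);	/* head and tail both partial */
}
#endif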
1909
1910 if (xtensa->target->endianness == TARGET_BIG_ENDIAN)
1911 buf_bswap32(albuff, fill_head_tail ? albuff : buffer, addrend_al - addrstart_al);
1912
1913 /* Write start address to A3 */
1914 xtensa_queue_dbg_reg_write(xtensa, NARADR_DDR, addrstart_al);
1915 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
1916 /* Write the aligned buffer */
1917 if (xtensa->probe_lsddr32p != 0) {
1918 for (unsigned int i = 0; adr != addrend_al; i += sizeof(uint32_t), adr += sizeof(uint32_t)) {
1919 if (i == 0) {
1920 xtensa_queue_dbg_reg_write(xtensa, NARADR_DDR, buf_get_u32(&albuff[i], 0, 32));
1921 xtensa_queue_exec_ins(xtensa, XT_INS_SDDR32P(xtensa, XT_REG_A3));
1922 } else {
1923 xtensa_queue_dbg_reg_write(xtensa, NARADR_DDREXEC, buf_get_u32(&albuff[i], 0, 32));
1924 }
1925 }
1926 } else {
1927 xtensa_mark_register_dirty(xtensa, XT_REG_IDX_A4);
1928 for (unsigned int i = 0; adr != addrend_al; i += sizeof(uint32_t), adr += sizeof(uint32_t)) {
1929 xtensa_queue_dbg_reg_write(xtensa, NARADR_DDR, buf_get_u32(&albuff[i], 0, 32));
1930 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A4));
1931 xtensa_queue_exec_ins(xtensa, XT_INS_S32I(xtensa, XT_REG_A3, XT_REG_A4, 0));
1932 xtensa_queue_dbg_reg_write(xtensa, NARADR_DDR, adr + sizeof(uint32_t));
1933 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
1934 }
1935 }
1936
1937 res = jtag_execute_queue();
1938 if (res == ERROR_OK) {
1939 bool prev_suppress = xtensa->suppress_dsr_errors;
1940 xtensa->suppress_dsr_errors = true;
1941 res = xtensa_core_status_check(target);
1942 if (xtensa->probe_lsddr32p == -1)
1943 xtensa->probe_lsddr32p = 1;
1944 xtensa->suppress_dsr_errors = prev_suppress;
1945 }
1946 if (res != ERROR_OK) {
1947 if (xtensa->probe_lsddr32p != 0) {
1948 /* Disable fast memory access instructions and retry before reporting an error */
1949 LOG_TARGET_INFO(target, "Disabling LDDR32.P/SDDR32.P");
1950 xtensa->probe_lsddr32p = 0;
1951 res = xtensa_write_memory(target, address, size, count, buffer);
1952 } else {
1953 LOG_TARGET_WARNING(target, "Failed writing %d bytes at address "TARGET_ADDR_FMT,
1954 count * size, address);
1955 }
1956 } else {
1957 /* Invalidate ICACHE, writeback DCACHE if present */
1958 uint32_t issue_ihi = xtensa_is_icacheable(xtensa, address);
1959 uint32_t issue_dhwb = xtensa_is_dcacheable(xtensa, address);
1960 if (issue_ihi || issue_dhwb) {
1961 uint32_t ilinesize = issue_ihi ? xtensa->core_config->icache.line_size : UINT32_MAX;
1962 uint32_t dlinesize = issue_dhwb ? xtensa->core_config->dcache.line_size : UINT32_MAX;
1963 uint32_t linesize = MIN(ilinesize, dlinesize);
1964 uint32_t off = 0;
1965 adr = addrstart_al;
1966
1967 while ((adr + off) < addrend_al) {
1968 if (off == 0) {
1969 /* Write start address to A3 */
1970 xtensa_queue_dbg_reg_write(xtensa, NARADR_DDR, adr);
1971 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
1972 }
1973 if (issue_ihi)
1974 xtensa_queue_exec_ins(xtensa, XT_INS_IHI(xtensa, XT_REG_A3, off));
1975 if (issue_dhwb)
1976 xtensa_queue_exec_ins(xtensa, XT_INS_DHWBI(xtensa, XT_REG_A3, off));
1977 off += linesize;
1978 if (off > 1020) {
1979 /* IHI, DHWB have 8-bit immediate operands (0..1020) */
1980 adr += off;
1981 off = 0;
1982 }
1983 }
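/* Illustrative sketch, compiled out and not part of the driver: per the
 * comment above, IHI/DHWB take an 8-bit immediate offset in 4-byte steps
 * (0..1020), so offsets from one base register can reach at most 1020 bytes;
 * beyond that the loop re-loads A3 with a new base address before queuing
 * further cache-line operations. */
#if 0
static void example_cache_offset_window(void)
{
	const uint32_t linesize = 32;	/* hypothetical cache line size */
	target_addr_t adr = 0, off = 0;
	while (adr + off < 0x1000) {
		/* an IHI/DHWBI at (adr + off) would be queued here */
		off += linesize;
		if (off > 1020) {	/* largest encodable immediate offset */
			adr += off;	/* advance the base ... */
			off = 0;	/* ... and restart offsets from zero */
		}
	}
}
#endif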
1984
1985 /* Execute cache WB/INV instructions */
1986 res = jtag_execute_queue();
1987 xtensa_core_status_check(target);
1988 if (res != ERROR_OK)
1989 LOG_TARGET_ERROR(target,
1990 "Error issuing cache writeback/invaldate instruction(s): %d",
1991 res);
1992 }
1993 }
1994 if (albuff != buffer)
1995 free(albuff);
1996
1997 return res;
1998 }
1999
2000 int xtensa_write_buffer(struct target *target, target_addr_t address, uint32_t count, const uint8_t *buffer)
2001 {
2002 /* xtensa_write_memory can handle everything. Just pass on to that. */
2003 return xtensa_write_memory(target, address, 1, count, buffer);
2004 }
2005
2006 int xtensa_checksum_memory(struct target *target, target_addr_t address, uint32_t count, uint32_t *checksum)
2007 {
2008 LOG_WARNING("not implemented yet");
2009 return ERROR_FAIL;
2010 }
2011
2012 int xtensa_poll(struct target *target)
2013 {
2014 struct xtensa *xtensa = target_to_xtensa(target);
2015
2016 int res = xtensa_dm_power_status_read(&xtensa->dbg_mod, PWRSTAT_DEBUGWASRESET | PWRSTAT_COREWASRESET);
2017 if (xtensa->dbg_mod.power_status.stat != xtensa->dbg_mod.power_status.stath)
2018 LOG_TARGET_DEBUG(target, "PWRSTAT: read 0x%08" PRIx32 ", clear 0x%08lx, reread 0x%08" PRIx32,
2019 xtensa->dbg_mod.power_status.stat,
2020 PWRSTAT_DEBUGWASRESET | PWRSTAT_COREWASRESET,
2021 xtensa->dbg_mod.power_status.stath);
2022 if (res != ERROR_OK)
2023 return res;
2024
2025 if (xtensa_dm_tap_was_reset(&xtensa->dbg_mod)) {
2026 LOG_TARGET_INFO(target, "Debug controller was reset.");
2027 res = xtensa_smpbreak_write(xtensa, xtensa->smp_break);
2028 if (res != ERROR_OK)
2029 return res;
2030 }
2031 if (xtensa_dm_core_was_reset(&xtensa->dbg_mod))
2032 LOG_TARGET_INFO(target, "Core was reset.");
2033 xtensa_dm_power_status_cache(&xtensa->dbg_mod);
2034 /* Enable JTAG, set reset if needed */
2035 res = xtensa_wakeup(target);
2036 if (res != ERROR_OK)
2037 return res;
2038
2039 uint32_t prev_dsr = xtensa->dbg_mod.core_status.dsr;
2040 res = xtensa_dm_core_status_read(&xtensa->dbg_mod);
2041 if (res != ERROR_OK)
2042 return res;
2043 if (prev_dsr != xtensa->dbg_mod.core_status.dsr)
2044 LOG_TARGET_DEBUG(target,
2045 "DSR has changed: was 0x%08" PRIx32 " now 0x%08" PRIx32,
2046 prev_dsr,
2047 xtensa->dbg_mod.core_status.dsr);
2048 if (xtensa->dbg_mod.power_status.stath & PWRSTAT_COREWASRESET) {
2049 /* if RESET state is persistent */
2050 target->state = TARGET_RESET;
2051 } else if (!xtensa_dm_is_powered(&xtensa->dbg_mod)) {
2052 LOG_TARGET_DEBUG(target, "not powered 0x%" PRIX32 "%ld",
2053 xtensa->dbg_mod.core_status.dsr,
2054 xtensa->dbg_mod.core_status.dsr & OCDDSR_STOPPED);
2055 target->state = TARGET_UNKNOWN;
2056 if (xtensa->come_online_probes_num == 0)
2057 target->examined = false;
2058 else
2059 xtensa->come_online_probes_num--;
2060 } else if (xtensa_is_stopped(target)) {
2061 if (target->state != TARGET_HALTED) {
2062 enum target_state oldstate = target->state;
2063 target->state = TARGET_HALTED;
2064 /* Examine why the target has been halted */
2065 target->debug_reason = DBG_REASON_DBGRQ;
2066 xtensa_fetch_all_regs(target);
2067 /* When setting debug reason DEBUGCAUSE events have the following
2068 * priorities: watchpoint == breakpoint > single step > debug interrupt. */
2069 /* Watchpoint and breakpoint events at the same time results in special
2070 * debug reason: DBG_REASON_WPTANDBKPT. */
2071 uint32_t halt_cause = xtensa_cause_get(target);
2072 /* TODO: Add handling of DBG_REASON_EXC_CATCH */
2073 if (halt_cause & DEBUGCAUSE_IC)
2074 target->debug_reason = DBG_REASON_SINGLESTEP;
2075 if (halt_cause & (DEBUGCAUSE_IB | DEBUGCAUSE_BN | DEBUGCAUSE_BI)) {
2076 if (halt_cause & DEBUGCAUSE_DB)
2077 target->debug_reason = DBG_REASON_WPTANDBKPT;
2078 else
2079 target->debug_reason = DBG_REASON_BREAKPOINT;
2080 } else if (halt_cause & DEBUGCAUSE_DB) {
2081 target->debug_reason = DBG_REASON_WATCHPOINT;
2082 }
2083 LOG_TARGET_DEBUG(target, "Target halted, pc=0x%08" PRIx32
2084 ", debug_reason=%08" PRIx32 ", oldstate=%08" PRIx32,
2085 xtensa_reg_get(target, XT_REG_IDX_PC),
2086 target->debug_reason,
2087 oldstate);
2088 LOG_TARGET_DEBUG(target, "Halt reason=0x%08" PRIX32 ", exc_cause=%" PRId32 ", dsr=0x%08" PRIx32,
2089 halt_cause,
2090 xtensa_reg_get(target, XT_REG_IDX_EXCCAUSE),
2091 xtensa->dbg_mod.core_status.dsr);
2092 xtensa_dm_core_status_clear(
2093 &xtensa->dbg_mod,
2094 OCDDSR_DEBUGPENDBREAK | OCDDSR_DEBUGINTBREAK | OCDDSR_DEBUGPENDTRAX |
2095 OCDDSR_DEBUGINTTRAX |
2096 OCDDSR_DEBUGPENDHOST | OCDDSR_DEBUGINTHOST);
2097 }
2098 } else {
2099 target->debug_reason = DBG_REASON_NOTHALTED;
2100 if (target->state != TARGET_RUNNING && target->state != TARGET_DEBUG_RUNNING) {
2101 target->state = TARGET_RUNNING;
2102 target->debug_reason = DBG_REASON_NOTHALTED;
2103 }
2104 }
2105 if (xtensa->trace_active) {
2106 /* Detect if tracing was active but has stopped. */
2107 struct xtensa_trace_status trace_status;
2108 res = xtensa_dm_trace_status_read(&xtensa->dbg_mod, &trace_status);
2109 if (res == ERROR_OK) {
2110 if (!(trace_status.stat & TRAXSTAT_TRACT)) {
2111 LOG_INFO("Detected end of trace.");
2112 if (trace_status.stat & TRAXSTAT_PCMTG)
2113 LOG_TARGET_INFO(target, "Trace stop triggered by PC match");
2114 if (trace_status.stat & TRAXSTAT_PTITG)
2115 LOG_TARGET_INFO(target, "Trace stop triggered by Processor Trigger Input");
2116 if (trace_status.stat & TRAXSTAT_CTITG)
2117 LOG_TARGET_INFO(target, "Trace stop triggered by Cross-trigger Input");
2118 xtensa->trace_active = false;
2119 }
2120 }
2121 }
2122 return ERROR_OK;
2123 }
2124
2125 static int xtensa_update_instruction(struct target *target, target_addr_t address, uint32_t size, const uint8_t *buffer)
2126 {
2127 struct xtensa *xtensa = target_to_xtensa(target);
2128 unsigned int issue_ihi = xtensa_is_icacheable(xtensa, address);
2129 unsigned int issue_dhwbi = xtensa_is_dcacheable(xtensa, address);
2130 uint32_t icache_line_size = issue_ihi ? xtensa->core_config->icache.line_size : UINT32_MAX;
2131 uint32_t dcache_line_size = issue_dhwbi ? xtensa->core_config->dcache.line_size : UINT32_MAX;
2132 unsigned int same_ic_line = ((address & (icache_line_size - 1)) + size) <= icache_line_size;
2133 unsigned int same_dc_line = ((address & (dcache_line_size - 1)) + size) <= dcache_line_size;
2134 int ret;
2135
2136 if (size > icache_line_size)
2137 return ERROR_FAIL;
2138
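/* Illustrative sketch, compiled out and not part of the driver: whether the
 * patched instruction straddles a cache line. With a 32-byte line, a 3-byte
 * opcode starting at an address ending in 0x1e spills into the next line, so
 * both lines must be written back and invalidated. */
#if 0
static void example_same_cache_line(void)
{
	uint32_t line_size = 32, size = 3;
	target_addr_t address = 0x4000001e;	/* hypothetical patch address */
	bool same_line = ((address & (line_size - 1)) + size) <= line_size;
	assert(!same_line);	/* 0x1e + 3 = 33 > 32: spans two lines */
}
#endif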
2139 if (issue_ihi || issue_dhwbi) {
2140 /* We're going to use A3 here */
2141 xtensa_mark_register_dirty(xtensa, XT_REG_IDX_A3);
2142
2143 /* Write start address to A3 and invalidate */
2144 xtensa_queue_dbg_reg_write(xtensa, NARADR_DDR, address);
2145 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
2146 LOG_TARGET_DEBUG(target, "DHWBI, IHI for address "TARGET_ADDR_FMT, address);
2147 if (issue_dhwbi) {
2148 xtensa_queue_exec_ins(xtensa, XT_INS_DHWBI(xtensa, XT_REG_A3, 0));
2149 if (!same_dc_line) {
2150 LOG_TARGET_DEBUG(target,
2151 "DHWBI second dcache line for address "TARGET_ADDR_FMT,
2152 address + 4);
2153 xtensa_queue_exec_ins(xtensa, XT_INS_DHWBI(xtensa, XT_REG_A3, 4));
2154 }
2155 }
2156 if (issue_ihi) {
2157 xtensa_queue_exec_ins(xtensa, XT_INS_IHI(xtensa, XT_REG_A3, 0));
2158 if (!same_ic_line) {
2159 LOG_TARGET_DEBUG(target,
2160 "IHI second icache line for address "TARGET_ADDR_FMT,
2161 address + 4);
2162 xtensa_queue_exec_ins(xtensa, XT_INS_IHI(xtensa, XT_REG_A3, 4));
2163 }
2164 }
2165
2166 /* Execute invalidate instructions */
2167 ret = jtag_execute_queue();
2168 xtensa_core_status_check(target);
2169 if (ret != ERROR_OK) {
2170 LOG_ERROR("Error issuing cache invalidate instruction(s): %d", ret);
2171 return ret;
2172 }
2173 }
2174
2175 /* Write new instructions to memory */
2176 ret = target_write_buffer(target, address, size, buffer);
2177 if (ret != ERROR_OK) {
2178 LOG_TARGET_ERROR(target, "Error writing instruction to memory: %d", ret);
2179 return ret;
2180 }
2181
2182 if (issue_dhwbi) {
2183 /* Flush dcache so instruction propagates. A3 may be corrupted during memory write */
2184 xtensa_queue_dbg_reg_write(xtensa, NARADR_DDR, address);
2185 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
2186 xtensa_queue_exec_ins(xtensa, XT_INS_DHWB(xtensa, XT_REG_A3, 0));
2187 LOG_DEBUG("DHWB dcache line for address "TARGET_ADDR_FMT, address);
2188 if (!same_dc_line) {
2189 LOG_TARGET_DEBUG(target, "DHWB second dcache line for address "TARGET_ADDR_FMT, address + 4);
2190 xtensa_queue_exec_ins(xtensa, XT_INS_DHWB(xtensa, XT_REG_A3, 4));
2191 }
2192
2193 /* Execute writeback instructions */
2194 ret = jtag_execute_queue();
2195 xtensa_core_status_check(target);
2196 }
2197
2198 /* TODO: Handle L2 cache if present */
2199 return ret;
2200 }
2201
2202 static int xtensa_sw_breakpoint_add(struct target *target,
2203 struct breakpoint *breakpoint,
2204 struct xtensa_sw_breakpoint *sw_bp)
2205 {
2206 struct xtensa *xtensa = target_to_xtensa(target);
2207 int ret = target_read_buffer(target, breakpoint->address, XT_ISNS_SZ_MAX, sw_bp->insn);
2208 if (ret != ERROR_OK) {
2209 LOG_TARGET_ERROR(target, "Failed to read original instruction (%d)!", ret);
2210 return ret;
2211 }
2212
2213 sw_bp->insn_sz = MIN(XT_ISNS_SZ_MAX, breakpoint->length);
2214 sw_bp->oocd_bp = breakpoint;
2215
2216 uint32_t break_insn = sw_bp->insn_sz == XT_ISNS_SZ_MAX ? XT_INS_BREAK(xtensa, 0, 0) : XT_INS_BREAKN(xtensa, 0);
2217
2218 /* Underlying memory write will convert instruction endianness, don't do that here */
2219 ret = xtensa_update_instruction(target, breakpoint->address, sw_bp->insn_sz, (uint8_t *)&break_insn);
2220 if (ret != ERROR_OK) {
2221 LOG_TARGET_ERROR(target, "Failed to write breakpoint instruction (%d)!", ret);
2222 return ret;
2223 }
2224
2225 return ERROR_OK;
2226 }
2227
2228 static int xtensa_sw_breakpoint_remove(struct target *target, struct xtensa_sw_breakpoint *sw_bp)
2229 {
2230 int ret = xtensa_update_instruction(target, sw_bp->oocd_bp->address, sw_bp->insn_sz, sw_bp->insn);
2231 if (ret != ERROR_OK) {
2232 LOG_TARGET_ERROR(target, "Failed to write insn (%d)!", ret);
2233 return ret;
2234 }
2235 sw_bp->oocd_bp = NULL;
2236 return ERROR_OK;
2237 }
2238
2239 int xtensa_breakpoint_add(struct target *target, struct breakpoint *breakpoint)
2240 {
2241 struct xtensa *xtensa = target_to_xtensa(target);
2242 unsigned int slot;
2243
2244 if (breakpoint->type == BKPT_SOFT) {
2245 for (slot = 0; slot < XT_SW_BREAKPOINTS_MAX_NUM; slot++) {
2246 if (!xtensa->sw_brps[slot].oocd_bp ||
2247 xtensa->sw_brps[slot].oocd_bp == breakpoint)
2248 break;
2249 }
2250 if (slot == XT_SW_BREAKPOINTS_MAX_NUM) {
2251 LOG_TARGET_WARNING(target, "No free slots to add SW breakpoint!");
2252 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2253 }
2254 int ret = xtensa_sw_breakpoint_add(target, breakpoint, &xtensa->sw_brps[slot]);
2255 if (ret != ERROR_OK) {
2256 LOG_TARGET_ERROR(target, "Failed to add SW breakpoint!");
2257 return ret;
2258 }
2259 LOG_TARGET_DEBUG(target, "placed SW breakpoint %u @ " TARGET_ADDR_FMT,
2260 slot,
2261 breakpoint->address);
2262 return ERROR_OK;
2263 }
2264
2265 for (slot = 0; slot < xtensa->core_config->debug.ibreaks_num; slot++) {
2266 if (!xtensa->hw_brps[slot] || xtensa->hw_brps[slot] == breakpoint)
2267 break;
2268 }
2269 if (slot == xtensa->core_config->debug.ibreaks_num) {
2270 LOG_TARGET_ERROR(target, "No free slots to add HW breakpoint!");
2271 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2272 }
2273
2274 xtensa->hw_brps[slot] = breakpoint;
2275 /* We will actually write the breakpoints when we resume the target. */
2276 LOG_TARGET_DEBUG(target, "placed HW breakpoint %u @ " TARGET_ADDR_FMT,
2277 slot,
2278 breakpoint->address);
2279
2280 return ERROR_OK;
2281 }
2282
2283 int xtensa_breakpoint_remove(struct target *target, struct breakpoint *breakpoint)
2284 {
2285 struct xtensa *xtensa = target_to_xtensa(target);
2286 unsigned int slot;
2287
2288 if (breakpoint->type == BKPT_SOFT) {
2289 for (slot = 0; slot < XT_SW_BREAKPOINTS_MAX_NUM; slot++) {
2290 if (xtensa->sw_brps[slot].oocd_bp && xtensa->sw_brps[slot].oocd_bp == breakpoint)
2291 break;
2292 }
2293 if (slot == XT_SW_BREAKPOINTS_MAX_NUM) {
2294 LOG_TARGET_WARNING(target, "SW breakpoint not found among %u slots!", slot);
2295 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2296 }
2297 int ret = xtensa_sw_breakpoint_remove(target, &xtensa->sw_brps[slot]);
2298 if (ret != ERROR_OK) {
2299 LOG_TARGET_ERROR(target, "Failed to remove SW breakpoint (%d)!", ret);
2300 return ret;
2301 }
2302 LOG_TARGET_DEBUG(target, "cleared SW breakpoint %u @ " TARGET_ADDR_FMT, slot, breakpoint->address);
2303 return ERROR_OK;
2304 }
2305
2306 for (slot = 0; slot < xtensa->core_config->debug.ibreaks_num; slot++) {
2307 if (xtensa->hw_brps[slot] == breakpoint)
2308 break;
2309 }
2310 if (slot == xtensa->core_config->debug.ibreaks_num) {
2311 LOG_TARGET_ERROR(target, "HW breakpoint not found!");
2312 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2313 }
2314 xtensa->hw_brps[slot] = NULL;
2315 LOG_TARGET_DEBUG(target, "cleared HW breakpoint %u @ " TARGET_ADDR_FMT, slot, breakpoint->address);
2316 return ERROR_OK;
2317 }
2318
2319 int xtensa_watchpoint_add(struct target *target, struct watchpoint *watchpoint)
2320 {
2321 struct xtensa *xtensa = target_to_xtensa(target);
2322 unsigned int slot;
2323 xtensa_reg_val_t dbreakcval;
2324
2325 if (target->state != TARGET_HALTED) {
2326 LOG_TARGET_WARNING(target, "target not halted");
2327 return ERROR_TARGET_NOT_HALTED;
2328 }
2329
2330 if (watchpoint->mask != ~(uint32_t)0) {
2331 LOG_TARGET_ERROR(target, "watchpoint value masks not supported");
2332 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2333 }
2334
2335 for (slot = 0; slot < xtensa->core_config->debug.dbreaks_num; slot++) {
2336 if (!xtensa->hw_wps[slot] || xtensa->hw_wps[slot] == watchpoint)
2337 break;
2338 }
2339 if (slot == xtensa->core_config->debug.dbreaks_num) {
2340 LOG_TARGET_WARNING(target, "No free slots to add HW watchpoint!");
2341 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2342 }
2343
2344 /* Figure out the value for DBREAKC bits 5..0:
2345 * it is 0x3F with one more bit cleared from the LSB end for each doubling of the watchpoint length. */
2346 if (watchpoint->length < 1 || watchpoint->length > 64 ||
2347 !IS_PWR_OF_2(watchpoint->length) ||
2348 !IS_ALIGNED(watchpoint->address, watchpoint->length)) {
2349 LOG_TARGET_WARNING(
2350 target,
2351 "Watchpoint with length %d on address " TARGET_ADDR_FMT
2352 " not supported by hardware.",
2353 watchpoint->length,
2354 watchpoint->address);
2355 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2356 }
2357 dbreakcval = ALIGN_DOWN(0x3F, watchpoint->length);
2358
2359 if (watchpoint->rw == WPT_READ)
2360 dbreakcval |= BIT(30);
2361 if (watchpoint->rw == WPT_WRITE)
2362 dbreakcval |= BIT(31);
2363 if (watchpoint->rw == WPT_ACCESS)
2364 dbreakcval |= BIT(30) | BIT(31);
2365
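/* Illustrative sketch, compiled out and not part of the driver: DBREAKC mask
 * arithmetic, assuming ALIGN_DOWN rounds down to the given power-of-two
 * size. A 1-byte watchpoint keeps all six compare bits (0x3F), a 4-byte one
 * clears the two LSBs (0x3C), and a 64-byte one clears them all; bits 30/31
 * then enable load and/or store matching. */
#if 0
static void example_dbreakc(void)
{
	assert(ALIGN_DOWN(0x3F, 1) == 0x3F);
	assert(ALIGN_DOWN(0x3F, 4) == 0x3C);
	assert(ALIGN_DOWN(0x3F, 64) == 0x00);
	xtensa_reg_val_t val = ALIGN_DOWN(0x3F, 4) | BIT(31);	/* 4-byte write watchpoint */
	(void)val;
}
#endif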
2366 /* Write DBREAKA[slot] and DBREAKC[slot] */
2367 xtensa_reg_set(target, XT_REG_IDX_DBREAKA0 + slot, watchpoint->address);
2368 xtensa_reg_set(target, XT_REG_IDX_DBREAKC0 + slot, dbreakcval);
2369 xtensa->hw_wps[slot] = watchpoint;
2370 LOG_TARGET_DEBUG(target, "placed HW watchpoint @ " TARGET_ADDR_FMT,
2371 watchpoint->address);
2372 return ERROR_OK;
2373 }
2374
2375 int xtensa_watchpoint_remove(struct target *target, struct watchpoint *watchpoint)
2376 {
2377 struct xtensa *xtensa = target_to_xtensa(target);
2378 unsigned int slot;
2379
2380 for (slot = 0; slot < xtensa->core_config->debug.dbreaks_num; slot++) {
2381 if (xtensa->hw_wps[slot] == watchpoint)
2382 break;
2383 }
2384 if (slot == xtensa->core_config->debug.dbreaks_num) {
2385 LOG_TARGET_WARNING(target, "HW watchpoint " TARGET_ADDR_FMT " not found!", watchpoint->address);
2386 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2387 }
2388 xtensa_reg_set(target, XT_REG_IDX_DBREAKC0 + slot, 0);
2389 xtensa->hw_wps[slot] = NULL;
2390 LOG_TARGET_DEBUG(target, "cleared HW watchpoint @ " TARGET_ADDR_FMT,
2391 watchpoint->address);
2392 return ERROR_OK;
2393 }
2394
2395 static int xtensa_build_reg_cache(struct target *target)
2396 {
2397 struct xtensa *xtensa = target_to_xtensa(target);
2398 struct reg_cache **cache_p = register_get_last_cache_p(&target->reg_cache);
2399 unsigned int last_dbreg_num = 0;
2400
2401 if (xtensa->core_regs_num + xtensa->num_optregs != xtensa->total_regs_num)
2402 LOG_TARGET_WARNING(target, "Register count MISMATCH: %d core regs, %d extended regs; %d expected",
2403 xtensa->core_regs_num, xtensa->num_optregs, xtensa->total_regs_num);
2404
2405 struct reg_cache *reg_cache = calloc(1, sizeof(struct reg_cache));
2406
2407 if (!reg_cache) {
2408 LOG_ERROR("Failed to alloc reg cache!");
2409 return ERROR_FAIL;
2410 }
2411 reg_cache->name = "Xtensa registers";
2412 reg_cache->next = NULL;
2413 /* Init reglist */
2414 unsigned int reg_list_size = XT_NUM_REGS + xtensa->num_optregs;
2415 struct reg *reg_list = calloc(reg_list_size, sizeof(struct reg));
2416 if (!reg_list) {
2417 LOG_ERROR("Failed to alloc reg list!");
2418 goto fail;
2419 }
2420 xtensa->dbregs_num = 0;
2421 unsigned int didx = 0;
2422 for (unsigned int whichlist = 0; whichlist < 2; whichlist++) {
2423 struct xtensa_reg_desc *rlist = (whichlist == 0) ? xtensa_regs : xtensa->optregs;
2424 unsigned int listsize = (whichlist == 0) ? XT_NUM_REGS : xtensa->num_optregs;
2425 for (unsigned int i = 0; i < listsize; i++, didx++) {
2426 reg_list[didx].exist = rlist[i].exist;
2427 reg_list[didx].name = rlist[i].name;
2428 reg_list[didx].size = 32;
2429 reg_list[didx].value = calloc(1, 4 /*XT_REG_LEN*/); /* make Clang Static Analyzer happy */
2430 if (!reg_list[didx].value) {
2431 LOG_ERROR("Failed to alloc reg list value!");
2432 goto fail;
2433 }
2434 reg_list[didx].dirty = false;
2435 reg_list[didx].valid = false;
2436 reg_list[didx].type = &xtensa_reg_type;
2437 reg_list[didx].arch_info = xtensa;
2438 if (rlist[i].exist && (rlist[i].dbreg_num > last_dbreg_num))
2439 last_dbreg_num = rlist[i].dbreg_num;
2440
2441 if (xtensa_extra_debug_log) {
2442 LOG_TARGET_DEBUG(target,
2443 "POPULATE %-16s list %d exist %d, idx %d, type %d, dbreg_num 0x%04x",
2444 reg_list[didx].name,
2445 whichlist,
2446 reg_list[didx].exist,
2447 didx,
2448 rlist[i].type,
2449 rlist[i].dbreg_num);
2450 }
2451 }
2452 }
2453
2454 xtensa->dbregs_num = last_dbreg_num + 1;
2455 reg_cache->reg_list = reg_list;
2456 reg_cache->num_regs = reg_list_size;
2457
2458 LOG_TARGET_DEBUG(target, "xtensa->total_regs_num %d reg_list_size %d xtensa->dbregs_num %d",
2459 xtensa->total_regs_num, reg_list_size, xtensa->dbregs_num);
2460
2461 /* Construct empty-register list for handling unknown register requests */
2462 xtensa->empty_regs = calloc(xtensa->dbregs_num, sizeof(struct reg));
2463 if (!xtensa->empty_regs) {
2464 LOG_TARGET_ERROR(target, "ERROR: Out of memory");
2465 goto fail;
2466 }
2467 for (unsigned int i = 0; i < xtensa->dbregs_num; i++) {
2468 xtensa->empty_regs[i].name = calloc(8, sizeof(char));
2469 if (!xtensa->empty_regs[i].name) {
2470 LOG_TARGET_ERROR(target, "ERROR: Out of memory");
2471 goto fail;
2472 }
2473 sprintf((char *)xtensa->empty_regs[i].name, "?0x%04x", i & 0x0000FFFF);
2474 xtensa->empty_regs[i].size = 32;
2475 xtensa->empty_regs[i].type = &xtensa_reg_type;
2476 xtensa->empty_regs[i].value = calloc(1, 4 /*XT_REG_LEN*/); /* make Clang Static Analyzer happy */
2477 if (!xtensa->empty_regs[i].value) {
2478 LOG_ERROR("Failed to alloc empty reg list value!");
2479 goto fail;
2480 }
2481 xtensa->empty_regs[i].arch_info = xtensa;
2482 }
2483
2484 /* Construct contiguous register list from contiguous descriptor list */
2485 if (xtensa->regmap_contiguous && xtensa->contiguous_regs_desc) {
2486 xtensa->contiguous_regs_list = calloc(xtensa->total_regs_num, sizeof(struct reg *));
2487 if (!xtensa->contiguous_regs_list) {
2488 LOG_TARGET_ERROR(target, "ERROR: Out of memory");
2489 goto fail;
2490 }
2491 for (unsigned int i = 0; i < xtensa->total_regs_num; i++) {
2492 unsigned int j;
2493 for (j = 0; j < reg_cache->num_regs; j++) {
2494 if (!strcmp(reg_cache->reg_list[j].name, xtensa->contiguous_regs_desc[i]->name)) {
2495 xtensa->contiguous_regs_list[i] = &(reg_cache->reg_list[j]);
2496 LOG_TARGET_DEBUG(target,
2497 "POPULATE contiguous regs list: %-16s, dbreg_num 0x%04x",
2498 xtensa->contiguous_regs_list[i]->name,
2499 xtensa->contiguous_regs_desc[i]->dbreg_num);
2500 break;
2501 }
2502 }
2503 if (j == reg_cache->num_regs)
2504 LOG_TARGET_WARNING(target, "contiguous register %s not found",
2505 xtensa->contiguous_regs_desc[i]->name);
2506 }
2507 }
2508
2509 xtensa->algo_context_backup = calloc(reg_cache->num_regs, sizeof(void *));
2510 if (!xtensa->algo_context_backup) {
2511 LOG_ERROR("Failed to alloc mem for algorithm context backup!");
2512 goto fail;
2513 }
2514 for (unsigned int i = 0; i < reg_cache->num_regs; i++) {
2515 struct reg *reg = &reg_cache->reg_list[i];
2516 xtensa->algo_context_backup[i] = calloc(1, reg->size / 8);
2517 if (!xtensa->algo_context_backup[i]) {
2518 LOG_ERROR("Failed to alloc mem for algorithm context!");
2519 goto fail;
2520 }
2521 }
2522 xtensa->core_cache = reg_cache;
2523 if (cache_p)
2524 *cache_p = reg_cache;
2525 return ERROR_OK;
2526
2527 fail:
2528 if (reg_list) {
2529 for (unsigned int i = 0; i < reg_list_size; i++)
2530 free(reg_list[i].value);
2531 free(reg_list);
2532 }
2533 if (xtensa->empty_regs) {
2534 for (unsigned int i = 0; i < xtensa->dbregs_num; i++) {
2535 free((void *)xtensa->empty_regs[i].name);
2536 free(xtensa->empty_regs[i].value);
2537 }
2538 free(xtensa->empty_regs);
2539 }
2540 if (xtensa->algo_context_backup) {
2541 for (unsigned int i = 0; i < reg_cache->num_regs; i++)
2542 free(xtensa->algo_context_backup[i]);
2543 free(xtensa->algo_context_backup);
2544 }
2545 free(reg_cache);
2546
2547 return ERROR_FAIL;
2548 }
2549
2550 static int32_t xtensa_gdbqc_parse_exec_tie_ops(struct target *target, char *opstr)
2551 {
2552 struct xtensa *xtensa = target_to_xtensa(target);
2553 int32_t status = ERROR_COMMAND_ARGUMENT_INVALID;
2554 /* Process op[] list */
2555 while (opstr && (*opstr == ':')) {
2556 uint8_t ops[32];
2557 unsigned int oplen = strtoul(opstr + 1, &opstr, 16);
2558 if (oplen > 32) {
2559 LOG_TARGET_ERROR(target, "TIE access instruction too long (%d)\n", oplen);
2560 break;
2561 }
2562 unsigned int i = 0;
2563 while ((i < oplen) && opstr && (*opstr == ':'))
2564 ops[i++] = strtoul(opstr + 1, &opstr, 16);
2565 if (i != oplen) {
2566 LOG_TARGET_ERROR(target, "TIE access instruction malformed (%d)\n", i);
2567 break;
2568 }
2569
2570 char insn_buf[128];
2571 sprintf(insn_buf, "Exec %d-byte TIE sequence: ", oplen);
2572 for (i = 0; i < oplen; i++)
2573 sprintf(insn_buf + strlen(insn_buf), "%02x:", ops[i]);
2574 LOG_TARGET_DEBUG(target, "%s", insn_buf);
2575 xtensa_queue_exec_ins_wide(xtensa, ops, oplen); /* Handles endian-swap */
2576 status = ERROR_OK;
2577 }
2578 return status;
2579 }
2580
2581 static int xtensa_gdbqc_qxtreg(struct target *target, const char *packet, char **response_p)
2582 {
2583 struct xtensa *xtensa = target_to_xtensa(target);
2584 bool iswrite = (packet[0] == 'Q');
2585 enum xtensa_qerr_e error;
2586
2587 /* Read/write TIE register. Requires spill location.
2588 * qxtreg<num>:<len>:<oplen>:<op[0]>:<...>[:<oplen>:<op[0]>:<...>]
2589 * Qxtreg<num>:<len>:<oplen>:<op[0]>:<...>[:<oplen>:<op[0]>:<...>]=<value>
2590 */
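/* Illustrative sketch, compiled out and not part of the driver: header
 * parsing for a hypothetical packet "qxtreg10:8:3:aa:bb:cc" - the register
 * number and byte length are hex fields separated by ':', followed by one or
 * more TIE opcode sequences which xtensa_gdbqc_parse_exec_tie_ops() executes. */
#if 0
static void example_qxtreg_header(void)
{
	const char *packet = "qxtreg10:8:3:aa:bb:cc";	/* hypothetical packet contents */
	char *delim;
	uint32_t regnum = strtoul(packet + 6, &delim, 16);
	uint32_t reglen = strtoul(delim + 1, &delim, 16);
	assert(regnum == 0x10 && reglen == 8 && *delim == ':');
}
#endif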
2591 if (!(xtensa->spill_buf)) {
2592 LOG_ERROR("Spill location not specified. Try 'target remote <host>:3333 &spill_location0'");
2593 error = XT_QERR_FAIL;
2594 goto xtensa_gdbqc_qxtreg_fail;
2595 }
2596
2597 char *delim;
2598 uint32_t regnum = strtoul(packet + 6, &delim, 16);
2599 if (*delim != ':') {
2600 LOG_ERROR("Malformed qxtreg packet");
2601 error = XT_QERR_INVAL;
2602 goto xtensa_gdbqc_qxtreg_fail;
2603 }
2604 uint32_t reglen = strtoul(delim + 1, &delim, 16);
2605 if (*delim != ':') {
2606 LOG_ERROR("Malformed qxtreg packet");
2607 error = XT_QERR_INVAL;
2608 goto xtensa_gdbqc_qxtreg_fail;
2609 }
2610 uint8_t regbuf[XT_QUERYPKT_RESP_MAX];
2611 LOG_DEBUG("TIE reg 0x%08" PRIx32 " %s (%d bytes)", regnum, iswrite ? "write" : "read", reglen);
2612 if (reglen * 2 + 1 > XT_QUERYPKT_RESP_MAX) {
2613 LOG_ERROR("TIE register too large");
2614 error = XT_QERR_MEM;
2615 goto xtensa_gdbqc_qxtreg_fail;
2616 }
2617
2618 /* (1) Save spill memory, (1.5) [if write then store value to spill location],
2619 * (2) read old a4, (3) write spill address to a4.
2620 * NOTE: ensure a4 is restored properly by all error handling logic
2621 */
2622 unsigned int memop_size = (xtensa->spill_loc & 3) ? 1 : 4;
2623 int status = xtensa_read_memory(target, xtensa->spill_loc, memop_size,
2624 xtensa->spill_bytes / memop_size, xtensa->spill_buf);
2625 if (status != ERROR_OK) {
2626 LOG_ERROR("Spill memory save");
2627 error = XT_QERR_MEM;
2628 goto xtensa_gdbqc_qxtreg_fail;
2629 }
2630 if (iswrite) {
2631 /* Extract value and store in spill memory */
2632 unsigned int b = 0;
2633 char *valbuf = strchr(delim, '=');
2634 if (!(valbuf && (*valbuf == '='))) {
2635 LOG_ERROR("Malformed Qxtreg packet");
2636 error = XT_QERR_INVAL;
2637 goto xtensa_gdbqc_qxtreg_fail;
2638 }
2639 valbuf++;
2640 while (*valbuf && *(valbuf + 1)) {
2641 char bytestr[3] = { 0, 0, 0 };
2642 strncpy(bytestr, valbuf, 2);
2643 regbuf[b++] = strtoul(bytestr, NULL, 16);
2644 valbuf += 2;
2645 }
2646 if (b != reglen) {
2647 LOG_ERROR("Malformed Qxtreg packet");
2648 error = XT_QERR_INVAL;
2649 goto xtensa_gdbqc_qxtreg_fail;
2650 }
2651 status = xtensa_write_memory(target, xtensa->spill_loc, memop_size,
2652 reglen / memop_size, regbuf);
2653 if (status != ERROR_OK) {
2654 LOG_ERROR("TIE value store");
2655 error = XT_QERR_MEM;
2656 goto xtensa_gdbqc_qxtreg_fail;
2657 }
2658 }
2659 xtensa_reg_val_t orig_a4 = xtensa_reg_get(target, XT_REG_IDX_A4);
2660 xtensa_queue_dbg_reg_write(xtensa, NARADR_DDR, xtensa->spill_loc);
2661 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A4));
2662
2663 int32_t tieop_status = xtensa_gdbqc_parse_exec_tie_ops(target, delim);
2664
2665 /* Restore a4 but not yet spill memory. Execute it all... */
2666 xtensa_queue_dbg_reg_write(xtensa, NARADR_DDR, orig_a4);
2667 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A4));
2668 status = jtag_execute_queue();
2669 if (status != ERROR_OK) {
2670 LOG_TARGET_ERROR(target, "TIE queue execute: %d\n", status);
2671 tieop_status = status;
2672 }
2673 status = xtensa_core_status_check(target);
2674 if (status != ERROR_OK) {
2675 LOG_TARGET_ERROR(target, "TIE instr execute: %d\n", status);
2676 tieop_status = status;
2677 }
2678
2679 if (tieop_status == ERROR_OK) {
2680 if (iswrite) {
2681 /* TIE write succeeded; send OK */
2682 strcpy(*response_p, "OK");
2683 } else {
2684 /* TIE read succeeded; copy result from spill memory */
2685 status = xtensa_read_memory(target, xtensa->spill_loc, memop_size, reglen, regbuf);
2686 if (status != ERROR_OK) {
2687 LOG_TARGET_ERROR(target, "TIE result read");
2688 tieop_status = status;
2689 }
2690 unsigned int i;
2691 for (i = 0; i < reglen; i++)
2692 sprintf(*response_p + 2 * i, "%02x", regbuf[i]);
2693 *(*response_p + 2 * i) = '\0';
2694 LOG_TARGET_DEBUG(target, "TIE response: %s", *response_p);
2695 }
2696 }
2697
2698 /* Restore spill memory first, then report any previous errors */
2699 status = xtensa_write_memory(target, xtensa->spill_loc, memop_size,
2700 xtensa->spill_bytes / memop_size, xtensa->spill_buf);
2701 if (status != ERROR_OK) {
2702 LOG_ERROR("Spill memory restore");
2703 error = XT_QERR_MEM;
2704 goto xtensa_gdbqc_qxtreg_fail;
2705 }
2706 if (tieop_status != ERROR_OK) {
2707 LOG_ERROR("TIE execution");
2708 error = XT_QERR_FAIL;
2709 goto xtensa_gdbqc_qxtreg_fail;
2710 }
2711 return ERROR_OK;
2712
2713 xtensa_gdbqc_qxtreg_fail:
2714 strcpy(*response_p, xt_qerr[error].chrval);
2715 return xt_qerr[error].intval;
2716 }
2717
2718 int xtensa_gdb_query_custom(struct target *target, const char *packet, char **response_p)
2719 {
2720 struct xtensa *xtensa = target_to_xtensa(target);
2721 enum xtensa_qerr_e error;
2722 if (!packet || !response_p) {
2723 LOG_TARGET_ERROR(target, "invalid parameter: packet %p response_p %p", packet, response_p);
2724 return ERROR_FAIL;
2725 }
2726
2727 *response_p = xtensa->qpkt_resp;
2728 if (strncmp(packet, "qxtn", 4) == 0) {
2729 strcpy(*response_p, "OpenOCD");
2730 return ERROR_OK;
2731 } else if (strncasecmp(packet, "qxtgdbversion=", 14) == 0) {
2732 return ERROR_OK;
2733 } else if ((strncmp(packet, "Qxtsis=", 7) == 0) || (strncmp(packet, "Qxtsds=", 7) == 0)) {
2734 /* Confirm host cache params match core .cfg file */
2735 struct xtensa_cache_config *cachep = (packet[4] == 'i') ?
2736 &xtensa->core_config->icache : &xtensa->core_config->dcache;
2737 unsigned int line_size = 0, size = 0, way_count = 0;
2738 sscanf(&packet[7], "%x,%x,%x", &line_size, &size, &way_count);
2739 if ((cachep->line_size != line_size) ||
2740 (cachep->size != size) ||
2741 (cachep->way_count != way_count)) {
2742 LOG_TARGET_WARNING(target, "%cCache mismatch; check xtensa-core-XXX.cfg file",
2743 cachep == &xtensa->core_config->icache ? 'I' : 'D');
2744 }
2745 strcpy(*response_p, "OK");
2746 return ERROR_OK;
2747 } else if ((strncmp(packet, "Qxtiram=", 8) == 0) || (strncmp(packet, "Qxtirom=", 8) == 0)) {
2748 /* Confirm host IRAM/IROM params match core .cfg file */
2749 struct xtensa_local_mem_config *memp = (packet[5] == 'a') ?
2750 &xtensa->core_config->iram : &xtensa->core_config->irom;
2751 unsigned int base = 0, size = 0, i;
2752 char *pkt = (char *)&packet[7];
2753 do {
2754 pkt++;
2755 size = strtoul(pkt, &pkt, 16);
2756 pkt++;
2757 base = strtoul(pkt, &pkt, 16);
2758 LOG_TARGET_DEBUG(target, "memcheck: %dB @ 0x%08x", size, base);
2759 for (i = 0; i < memp->count; i++) {
2760 if ((memp->regions[i].base == base) && (memp->regions[i].size == size))
2761 break;
2762 }
2763 if (i == memp->count) {
2764 LOG_TARGET_WARNING(target, "%s mismatch; check xtensa-core-XXX.cfg file",
2765 memp == &xtensa->core_config->iram ? "IRAM" : "IROM");
2766 break;
2767 }
2768 for (i = 0; i < 11; i++) {
2769 pkt++;
2770 strtoul(pkt, &pkt, 16);
2771 }
2772 } while (pkt && (pkt[0] == ','));
2773 strcpy(*response_p, "OK");
2774 return ERROR_OK;
2775 } else if (strncmp(packet, "Qxtexcmlvl=", 11) == 0) {
2776 /* Confirm host EXCM_LEVEL matches core .cfg file */
2777 unsigned int excm_level = strtoul(&packet[11], NULL, 0);
2778 if (!xtensa->core_config->high_irq.enabled ||
2779 (excm_level != xtensa->core_config->high_irq.excm_level))
2780 LOG_TARGET_WARNING(target, "EXCM_LEVEL mismatch; check xtensa-core-XXX.cfg file");
2781 strcpy(*response_p, "OK");
2782 return ERROR_OK;
2783 } else if ((strncmp(packet, "Qxtl2cs=", 8) == 0) ||
2784 (strncmp(packet, "Qxtl2ca=", 8) == 0) ||
2785 (strncmp(packet, "Qxtdensity=", 11) == 0)) {
2786 strcpy(*response_p, "OK");
2787 return ERROR_OK;
2788 } else if (strncmp(packet, "Qxtspill=", 9) == 0) {
2789 char *delim;
2790 uint32_t spill_loc = strtoul(packet + 9, &delim, 16);
2791 if (*delim != ':') {
2792 LOG_ERROR("Malformed Qxtspill packet");
2793 error = XT_QERR_INVAL;
2794 goto xtensa_gdb_query_custom_fail;
2795 }
2796 xtensa->spill_loc = spill_loc;
2797 xtensa->spill_bytes = strtoul(delim + 1, NULL, 16);
2798 if (xtensa->spill_buf)
2799 free(xtensa->spill_buf);
2800 xtensa->spill_buf = calloc(1, xtensa->spill_bytes);
2801 if (!xtensa->spill_buf) {
2802 LOG_ERROR("Spill buf alloc");
2803 error = XT_QERR_MEM;
2804 goto xtensa_gdb_query_custom_fail;
2805 }
2806 LOG_TARGET_DEBUG(target, "Set spill 0x%08" PRIx32 " (%d)", xtensa->spill_loc, xtensa->spill_bytes);
2807 strcpy(*response_p, "OK");
2808 return ERROR_OK;
2809 } else if (strncasecmp(packet, "qxtreg", 6) == 0) {
2810 return xtensa_gdbqc_qxtreg(target, packet, response_p);
2811 } else if ((strncmp(packet, "qTStatus", 8) == 0) ||
2812 (strncmp(packet, "qxtftie", 7) == 0) ||
2813 (strncmp(packet, "qxtstie", 7) == 0)) {
2814 /* Return empty string to indicate trace, TIE wire debug are unsupported */
2815 strcpy(*response_p, "");
2816 return ERROR_OK;
2817 }
2818
2819 /* Warn for all other queries, but do not return errors */
2820 LOG_TARGET_WARNING(target, "Unknown target-specific query packet: %s", packet);
2821 strcpy(*response_p, "");
2822 return ERROR_OK;
2823
2824 xtensa_gdb_query_custom_fail:
2825 strcpy(*response_p, xt_qerr[error].chrval);
2826 return xt_qerr[error].intval;
2827 }
2828
2829 int xtensa_init_arch_info(struct target *target, struct xtensa *xtensa,
2830 const struct xtensa_debug_module_config *dm_cfg)
2831 {
2832 target->arch_info = xtensa;
2833 xtensa->common_magic = XTENSA_COMMON_MAGIC;
2834 xtensa->target = target;
2835 xtensa->stepping_isr_mode = XT_STEPPING_ISR_ON;
2836
2837 xtensa->core_config = calloc(1, sizeof(struct xtensa_config));
2838 if (!xtensa->core_config) {
2839 LOG_ERROR("Xtensa configuration alloc failed\n");
2840 return ERROR_FAIL;
2841 }
2842
2843 /* Default cache settings are disabled with 1 way */
2844 xtensa->core_config->icache.way_count = 1;
2845 xtensa->core_config->dcache.way_count = 1;
2846
2847 /* chrval: AR3/AR4 register names will change with window mapping.
2848 * intval: tracks whether scratch register was set through gdb P packet.
2849 */
2850 for (enum xtensa_ar_scratch_set_e s = 0; s < XT_AR_SCRATCH_NUM; s++) {
2851 xtensa->scratch_ars[s].chrval = calloc(8, sizeof(char));
2852 if (!xtensa->scratch_ars[s].chrval) {
2853 for (enum xtensa_ar_scratch_set_e f = 0; f < s; f++)
2854 free(xtensa->scratch_ars[f].chrval);
2855 free(xtensa->core_config);
2856 LOG_ERROR("Xtensa scratch AR alloc failed\n");
2857 return ERROR_FAIL;
2858 }
2859 xtensa->scratch_ars[s].intval = false;
2860 sprintf(xtensa->scratch_ars[s].chrval, "%s%d",
2861 ((s == XT_AR_SCRATCH_A3) || (s == XT_AR_SCRATCH_A4)) ? "a" : "ar",
2862 ((s == XT_AR_SCRATCH_A3) || (s == XT_AR_SCRATCH_AR3)) ? 3 : 4);
2863 }
2864
2865 return xtensa_dm_init(&xtensa->dbg_mod, dm_cfg);
2866 }
2867
2868 void xtensa_set_permissive_mode(struct target *target, bool state)
2869 {
2870 target_to_xtensa(target)->permissive_mode = state;
2871 }
2872
2873 int xtensa_target_init(struct command_context *cmd_ctx, struct target *target)
2874 {
2875 struct xtensa *xtensa = target_to_xtensa(target);
2876
2877 xtensa->come_online_probes_num = 3;
2878 xtensa->hw_brps = calloc(XT_HW_IBREAK_MAX_NUM, sizeof(struct breakpoint *));
2879 if (!xtensa->hw_brps) {
2880 LOG_ERROR("Failed to alloc memory for HW breakpoints!");
2881 return ERROR_FAIL;
2882 }
2883 xtensa->hw_wps = calloc(XT_HW_DBREAK_MAX_NUM, sizeof(struct watchpoint *));
2884 if (!xtensa->hw_wps) {
2885 free(xtensa->hw_brps);
2886 LOG_ERROR("Failed to alloc memory for HW watchpoints!");
2887 return ERROR_FAIL;
2888 }
2889 xtensa->sw_brps = calloc(XT_SW_BREAKPOINTS_MAX_NUM, sizeof(struct xtensa_sw_breakpoint));
2890 if (!xtensa->sw_brps) {
2891 free(xtensa->hw_brps);
2892 free(xtensa->hw_wps);
2893 LOG_ERROR("Failed to alloc memory for SW breakpoints!");
2894 return ERROR_FAIL;
2895 }
2896
2897 xtensa->spill_loc = 0xffffffff;
2898 xtensa->spill_bytes = 0;
2899 xtensa->spill_buf = NULL;
2900 xtensa->probe_lsddr32p = -1; /* Probe for fast load/store operations */
2901
2902 return xtensa_build_reg_cache(target);
2903 }
2904
2905 static void xtensa_free_reg_cache(struct target *target)
2906 {
2907 struct xtensa *xtensa = target_to_xtensa(target);
2908 struct reg_cache *cache = xtensa->core_cache;
2909
2910 if (cache) {
2911 register_unlink_cache(&target->reg_cache, cache);
2912 for (unsigned int i = 0; i < cache->num_regs; i++) {
2913 free(xtensa->algo_context_backup[i]);
2914 free(cache->reg_list[i].value);
2915 }
2916 free(xtensa->algo_context_backup);
2917 free(cache->reg_list);
2918 free(cache);
2919 }
2920 xtensa->core_cache = NULL;
2921 xtensa->algo_context_backup = NULL;
2922
2923 if (xtensa->empty_regs) {
2924 for (unsigned int i = 0; i < xtensa->dbregs_num; i++) {
2925 free((void *)xtensa->empty_regs[i].name);
2926 free(xtensa->empty_regs[i].value);
2927 }
2928 free(xtensa->empty_regs);
2929 }
2930 xtensa->empty_regs = NULL;
2931 if (xtensa->optregs) {
2932 for (unsigned int i = 0; i < xtensa->num_optregs; i++)
2933 free((void *)xtensa->optregs[i].name);
2934 free(xtensa->optregs);
2935 }
2936 xtensa->optregs = NULL;
2937 }
2938
2939 void xtensa_target_deinit(struct target *target)
2940 {
2941 struct xtensa *xtensa = target_to_xtensa(target);
2942
2943 LOG_DEBUG("start");
2944
2945 if (target_was_examined(target)) {
2946 int ret = xtensa_queue_dbg_reg_write(xtensa, NARADR_DCRCLR, OCDDCR_ENABLEOCD);
2947 if (ret != ERROR_OK) {
2948 LOG_ERROR("Failed to queue OCDDCR_ENABLEOCD clear operation!");
2949 return;
2950 }
2951 xtensa_dm_queue_tdi_idle(&xtensa->dbg_mod);
2952 ret = jtag_execute_queue();
2953 if (ret != ERROR_OK) {
2954 LOG_ERROR("Failed to clear OCDDCR_ENABLEOCD!");
2955 return;
2956 }
2957 }
2958 xtensa_free_reg_cache(target);
2959 free(xtensa->hw_brps);
2960 free(xtensa->hw_wps);
2961 free(xtensa->sw_brps);
2962 if (xtensa->spill_buf) {
2963 free(xtensa->spill_buf);
2964 xtensa->spill_buf = NULL;
2965 }
2966 for (enum xtensa_ar_scratch_set_e s = 0; s < XT_AR_SCRATCH_NUM; s++)
2967 free(xtensa->scratch_ars[s].chrval);
2968 free(xtensa->core_config);
2969 }
2970
2971 const char *xtensa_get_gdb_arch(struct target *target)
2972 {
2973 return "xtensa";
2974 }
2975
2976 /* exe <ascii-encoded hexadecimal instruction bytes> */
2977 COMMAND_HELPER(xtensa_cmd_exe_do, struct target *target)
2978 {
2979 struct xtensa *xtensa = target_to_xtensa(target);
2980
2981 if (CMD_ARGC != 1)
2982 return ERROR_COMMAND_SYNTAX_ERROR;
2983
2984 /* Process ascii-encoded hex byte string */
2985 const char *parm = CMD_ARGV[0];
2986 unsigned int parm_len = strlen(parm);
2987 if ((parm_len >= 64) || (parm_len & 1)) {
2988 LOG_ERROR("Invalid parameter length (%d): must be even, < 64 characters", parm_len);
2989 return ERROR_FAIL;
2990 }
2991
2992 uint8_t ops[32];
2993 unsigned int oplen = parm_len / 2;
2994 char encoded_byte[3] = { 0, 0, 0 };
2995 for (unsigned int i = 0; i < oplen; i++) {
2996 encoded_byte[0] = *parm++;
2997 encoded_byte[1] = *parm++;
2998 ops[i] = strtoul(encoded_byte, NULL, 16);
2999 }
3000
3001 /* GDB must handle state save/restore.
3002 * Flush reg cache in case spill location is in an AR
3003 * Update CPENABLE only for this execution; later restore cached copy
3004 * Keep a copy of exccause in case executed code triggers an exception
3005 */
3006 int status = xtensa_write_dirty_registers(target);
3007 if (status != ERROR_OK) {
3008 LOG_ERROR("%s: Failed to write back register cache.", target_name(target));
3009 return ERROR_FAIL;
3010 }
3011 xtensa_reg_val_t exccause = xtensa_reg_get(target, XT_REG_IDX_EXCCAUSE);
3012 xtensa_reg_val_t cpenable = xtensa_reg_get(target, XT_REG_IDX_CPENABLE);
3013 xtensa_reg_val_t a3 = xtensa_reg_get(target, XT_REG_IDX_A3);
3014 xtensa_queue_dbg_reg_write(xtensa, NARADR_DDR, 0xffffffff);
3015 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
3016 xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa,
3017 xtensa_regs[XT_REG_IDX_CPENABLE].reg_num, XT_REG_A3));
3018 xtensa_queue_dbg_reg_write(xtensa, NARADR_DDR, a3);
3019 xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
3020
3021 /* Queue instruction list and execute everything */
3022 LOG_TARGET_DEBUG(target, "execute stub: %s", CMD_ARGV[0]);
3023 xtensa_queue_exec_ins_wide(xtensa, ops, oplen); /* Handles endian-swap */
3024 status = jtag_execute_queue();
3025 if (status != ERROR_OK)
3026 LOG_TARGET_ERROR(target, "TIE queue execute: %d\n", status);
3027 status = xtensa_core_status_check(target);
3028 if (status != ERROR_OK)
3029 LOG_TARGET_ERROR(target, "TIE instr execute: %d\n", status);
3030
3031 /* Reread register cache and restore saved regs after instruction execution */
3032 if (xtensa_fetch_all_regs(target) != ERROR_OK)
3033 LOG_TARGET_ERROR(target, "Failed to fetch register cache (post-exec).");
3034 xtensa_reg_set(target, XT_REG_IDX_EXCCAUSE, exccause);
3035 xtensa_reg_set(target, XT_REG_IDX_CPENABLE, cpenable);
3036 return status;
3037 }
3038
3039 COMMAND_HANDLER(xtensa_cmd_exe)
3040 {
3041 return CALL_COMMAND_HANDLER(xtensa_cmd_exe_do, get_current_target(CMD_CTX));
3042 }
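/* A minimal, hypothetical usage sketch of the "xtensa exe" command registered
 * below (the byte values are placeholders, not a real Xtensa opcode): each pair
 * of hex characters encodes one instruction byte, so a 3-byte opcode is passed
 * as six characters:
 *
 *   xtensa exe 112233
 *
 * Per the length check above, at most 31 bytes (62 hex characters) may be
 * passed per invocation; GDB or the user must supply valid instruction bytes.
 */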
3043
3044 /* xtdef <name> */
3045 COMMAND_HELPER(xtensa_cmd_xtdef_do, struct xtensa *xtensa)
3046 {
3047 if (CMD_ARGC != 1)
3048 return ERROR_COMMAND_SYNTAX_ERROR;
3049
3050 const char *core_name = CMD_ARGV[0];
3051 if (strcasecmp(core_name, "LX") == 0) {
3052 xtensa->core_config->core_type = XT_LX;
3053 } else {
3054 LOG_ERROR("xtdef [LX]\n");
3055 return ERROR_COMMAND_SYNTAX_ERROR;
3056 }
3057 return ERROR_OK;
3058 }
3059
3060 COMMAND_HANDLER(xtensa_cmd_xtdef)
3061 {
3062 return CALL_COMMAND_HANDLER(xtensa_cmd_xtdef_do,
3063 target_to_xtensa(get_current_target(CMD_CTX)));
3064 }
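/* Hypothetical config-file usage of "xtensa xtdef"; only the LX core type is
 * accepted by the parser above:
 *
 *   xtensa xtdef LX
 */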
3065
3066 static inline bool xtensa_cmd_xtopt_legal_val(char *opt, int val, int min, int max)
3067 {
3068 if ((val < min) || (val > max)) {
3069 LOG_ERROR("xtopt %s (%d) out of range [%d..%d]\n", opt, val, min, max);
3070 return false;
3071 }
3072 return true;
3073 }
3074
3075 /* xtopt <name> <value> */
3076 COMMAND_HELPER(xtensa_cmd_xtopt_do, struct xtensa *xtensa)
3077 {
3078 if (CMD_ARGC != 2)
3079 return ERROR_COMMAND_SYNTAX_ERROR;
3080
3081 const char *opt_name = CMD_ARGV[0];
3082 int opt_val = strtol(CMD_ARGV[1], NULL, 0);
3083 if (strcasecmp(opt_name, "arnum") == 0) {
3084 if (!xtensa_cmd_xtopt_legal_val("arnum", opt_val, 0, 64))
3085 return ERROR_COMMAND_ARGUMENT_INVALID;
3086 xtensa->core_config->aregs_num = opt_val;
3087 } else if (strcasecmp(opt_name, "windowed") == 0) {
3088 if (!xtensa_cmd_xtopt_legal_val("windowed", opt_val, 0, 1))
3089 return ERROR_COMMAND_ARGUMENT_INVALID;
3090 xtensa->core_config->windowed = opt_val;
3091 } else if (strcasecmp(opt_name, "cpenable") == 0) {
3092 if (!xtensa_cmd_xtopt_legal_val("cpenable", opt_val, 0, 1))
3093 return ERROR_COMMAND_ARGUMENT_INVALID;
3094 xtensa->core_config->coproc = opt_val;
3095 } else if (strcasecmp(opt_name, "exceptions") == 0) {
3096 if (!xtensa_cmd_xtopt_legal_val("exceptions", opt_val, 0, 1))
3097 return ERROR_COMMAND_ARGUMENT_INVALID;
3098 xtensa->core_config->exceptions = opt_val;
3099 } else if (strcasecmp(opt_name, "intnum") == 0) {
3100 if (!xtensa_cmd_xtopt_legal_val("intnum", opt_val, 0, 32))
3101 return ERROR_COMMAND_ARGUMENT_INVALID;
3102 xtensa->core_config->irq.enabled = (opt_val > 0);
3103 xtensa->core_config->irq.irq_num = opt_val;
3104 } else if (strcasecmp(opt_name, "hipriints") == 0) {
3105 if (!xtensa_cmd_xtopt_legal_val("hipriints", opt_val, 0, 1))
3106 return ERROR_COMMAND_ARGUMENT_INVALID;
3107 xtensa->core_config->high_irq.enabled = opt_val;
3108 } else if (strcasecmp(opt_name, "excmlevel") == 0) {
3109 if (!xtensa_cmd_xtopt_legal_val("excmlevel", opt_val, 1, 6))
3110 return ERROR_COMMAND_ARGUMENT_INVALID;
3111 if (!xtensa->core_config->high_irq.enabled) {
3112 LOG_ERROR("xtopt excmlevel requires hipriints\n");
3113 return ERROR_COMMAND_ARGUMENT_INVALID;
3114 }
3115 xtensa->core_config->high_irq.excm_level = opt_val;
3116 } else if (strcasecmp(opt_name, "intlevels") == 0) {
3117 if (xtensa->core_config->core_type == XT_LX) {
3118 if (!xtensa_cmd_xtopt_legal_val("intlevels", opt_val, 2, 6))
3119 return ERROR_COMMAND_ARGUMENT_INVALID;
3120 } else {
3121 if (!xtensa_cmd_xtopt_legal_val("intlevels", opt_val, 1, 255))
3122 return ERROR_COMMAND_ARGUMENT_INVALID;
3123 }
3124 if (!xtensa->core_config->high_irq.enabled) {
3125 LOG_ERROR("xtopt intlevels requires hipriints\n");
3126 return ERROR_COMMAND_ARGUMENT_INVALID;
3127 }
3128 xtensa->core_config->high_irq.level_num = opt_val;
3129 } else if (strcasecmp(opt_name, "debuglevel") == 0) {
3130 if (xtensa->core_config->core_type == XT_LX) {
3131 if (!xtensa_cmd_xtopt_legal_val("debuglevel", opt_val, 2, 6))
3132 return ERROR_COMMAND_ARGUMENT_INVALID;
3133 } else {
3134 if (!xtensa_cmd_xtopt_legal_val("debuglevel", opt_val, 0, 0))
3135 return ERROR_COMMAND_ARGUMENT_INVALID;
3136 }
3137 xtensa->core_config->debug.enabled = 1;
3138 xtensa->core_config->debug.irq_level = opt_val;
3139 } else if (strcasecmp(opt_name, "ibreaknum") == 0) {
3140 if (!xtensa_cmd_xtopt_legal_val("ibreaknum", opt_val, 0, 2))
3141 return ERROR_COMMAND_ARGUMENT_INVALID;
3142 xtensa->core_config->debug.ibreaks_num = opt_val;
3143 } else if (strcasecmp(opt_name, "dbreaknum") == 0) {
3144 if (!xtensa_cmd_xtopt_legal_val("dbreaknum", opt_val, 0, 2))
3145 return ERROR_COMMAND_ARGUMENT_INVALID;
3146 xtensa->core_config->debug.dbreaks_num = opt_val;
3147 } else if (strcasecmp(opt_name, "tracemem") == 0) {
3148 if (!xtensa_cmd_xtopt_legal_val("tracemem", opt_val, 0, 256 * 1024))
3149 return ERROR_COMMAND_ARGUMENT_INVALID;
3150 xtensa->core_config->trace.mem_sz = opt_val;
3151 xtensa->core_config->trace.enabled = (opt_val > 0);
3152 } else if (strcasecmp(opt_name, "tracememrev") == 0) {
3153 if (!xtensa_cmd_xtopt_legal_val("tracememrev", opt_val, 0, 1))
3154 return ERROR_COMMAND_ARGUMENT_INVALID;
3155 xtensa->core_config->trace.reversed_mem_access = opt_val;
3156 } else if (strcasecmp(opt_name, "perfcount") == 0) {
3157 if (!xtensa_cmd_xtopt_legal_val("perfcount", opt_val, 0, 8))
3158 return ERROR_COMMAND_ARGUMENT_INVALID;
3159 xtensa->core_config->debug.perfcount_num = opt_val;
3160 } else {
3161 LOG_WARNING("Unknown xtensa command ignored: \"xtopt %s %s\"", CMD_ARGV[0], CMD_ARGV[1]);
3162 return ERROR_OK;
3163 }
3164
3165 return ERROR_OK;
3166 }
3167
3168 COMMAND_HANDLER(xtensa_cmd_xtopt)
3169 {
3170 return CALL_COMMAND_HANDLER(xtensa_cmd_xtopt_do,
3171 target_to_xtensa(get_current_target(CMD_CTX)));
3172 }
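/* Illustrative "xtensa xtopt" settings as they might appear in a generated core
 * config file. Option names and ranges follow the parser above; the values are
 * placeholders for a hypothetical windowed LX core, not a real configuration:
 *
 *   xtensa xtopt arnum 64
 *   xtensa xtopt windowed 1
 *   xtensa xtopt hipriints 1
 *   xtensa xtopt excmlevel 3
 *   xtensa xtopt debuglevel 6
 *   xtensa xtopt ibreaknum 2
 *   xtensa xtopt dbreaknum 2
 *
 * Note that excmlevel and intlevels are rejected unless hipriints was set first.
 */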
3173
3174 /* xtmem <type> [parameters] */
3175 COMMAND_HELPER(xtensa_cmd_xtmem_do, struct xtensa *xtensa)
3176 {
3177 struct xtensa_cache_config *cachep = NULL;
3178 struct xtensa_local_mem_config *memp = NULL;
3179 int mem_access = 0;
3180 bool is_dcache = false;
3181
3182 if (CMD_ARGC == 0) {
3183 LOG_ERROR("xtmem <type> [parameters]\n");
3184 return ERROR_COMMAND_SYNTAX_ERROR;
3185 }
3186
3187 const char *mem_name = CMD_ARGV[0];
3188 if (strcasecmp(mem_name, "icache") == 0) {
3189 cachep = &xtensa->core_config->icache;
3190 } else if (strcasecmp(mem_name, "dcache") == 0) {
3191 cachep = &xtensa->core_config->dcache;
3192 is_dcache = true;
3193 } else if (strcasecmp(mem_name, "l2cache") == 0) {
3194 /* TODO: support L2 cache */
3195 } else if (strcasecmp(mem_name, "l2addr") == 0) {
3196 /* TODO: support L2 cache */
3197 } else if (strcasecmp(mem_name, "iram") == 0) {
3198 memp = &xtensa->core_config->iram;
3199 mem_access = XT_MEM_ACCESS_READ | XT_MEM_ACCESS_WRITE;
3200 } else if (strcasecmp(mem_name, "dram") == 0) {
3201 memp = &xtensa->core_config->dram;
3202 mem_access = XT_MEM_ACCESS_READ | XT_MEM_ACCESS_WRITE;
3203 } else if (strcasecmp(mem_name, "sram") == 0) {
3204 memp = &xtensa->core_config->sram;
3205 mem_access = XT_MEM_ACCESS_READ | XT_MEM_ACCESS_WRITE;
3206 } else if (strcasecmp(mem_name, "irom") == 0) {
3207 memp = &xtensa->core_config->irom;
3208 mem_access = XT_MEM_ACCESS_READ;
3209 } else if (strcasecmp(mem_name, "drom") == 0) {
3210 memp = &xtensa->core_config->drom;
3211 mem_access = XT_MEM_ACCESS_READ;
3212 } else if (strcasecmp(mem_name, "srom") == 0) {
3213 memp = &xtensa->core_config->srom;
3214 mem_access = XT_MEM_ACCESS_READ;
3215 } else {
3216 LOG_ERROR("xtmem types: <icache|dcache|l2cache|l2addr|iram|irom|dram|drom|sram|srom>\n");
3217 return ERROR_COMMAND_ARGUMENT_INVALID;
3218 }
3219
3220 if (cachep) {
3221 if ((CMD_ARGC != 4) && (CMD_ARGC != 5)) {
3222 LOG_ERROR("xtmem <cachetype> <linebytes> <cachebytes> <ways> [writeback]\n");
3223 return ERROR_COMMAND_SYNTAX_ERROR;
3224 }
3225 cachep->line_size = strtoul(CMD_ARGV[1], NULL, 0);
3226 cachep->size = strtoul(CMD_ARGV[2], NULL, 0);
3227 cachep->way_count = strtoul(CMD_ARGV[3], NULL, 0);
3228 cachep->writeback = ((CMD_ARGC == 5) && is_dcache) ?
3229 strtoul(CMD_ARGV[4], NULL, 0) : 0;
3230 } else if (memp) {
3231 if (CMD_ARGC != 3) {
3232 LOG_ERROR("xtmem <memtype> <baseaddr> <bytes>\n");
3233 return ERROR_COMMAND_SYNTAX_ERROR;
3234 }
3235 struct xtensa_local_mem_region_config *memcfgp = &memp->regions[memp->count];
3236 memcfgp->base = strtoul(CMD_ARGV[1], NULL, 0);
3237 memcfgp->size = strtoul(CMD_ARGV[2], NULL, 0);
3238 memcfgp->access = mem_access;
3239 memp->count++;
3240 }
3241
3242 return ERROR_OK;
3243 }
3244
3245 COMMAND_HANDLER(xtensa_cmd_xtmem)
3246 {
3247 return CALL_COMMAND_HANDLER(xtensa_cmd_xtmem_do,
3248 target_to_xtensa(get_current_target(CMD_CTX)));
3249 }
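/* Hypothetical "xtensa xtmem" lines matching the argument forms parsed above;
 * the sizes and addresses are placeholders, not a specific Xtensa configuration:
 *
 *   xtensa xtmem icache 32 16384 4
 *   xtensa xtmem dcache 32 16384 4 1
 *   xtensa xtmem iram 0x40000000 0x20000
 *   xtensa xtmem drom 0x3f400000 0x400000
 */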
3250
3251 /* xtmpu <num FG seg> <min seg size> <lockable> <executeonly> */
3252 COMMAND_HELPER(xtensa_cmd_xtmpu_do, struct xtensa *xtensa)
3253 {
3254 if (CMD_ARGC != 4) {
3255 LOG_ERROR("xtmpu <num FG seg> <min seg size> <lockable> <executeonly>\n");
3256 return ERROR_COMMAND_SYNTAX_ERROR;
3257 }
3258
3259 unsigned int nfgseg = strtoul(CMD_ARGV[0], NULL, 0);
3260 unsigned int minsegsize = strtoul(CMD_ARGV[1], NULL, 0);
3261 unsigned int lockable = strtoul(CMD_ARGV[2], NULL, 0);
3262 unsigned int execonly = strtoul(CMD_ARGV[3], NULL, 0);
3263
3264 if (nfgseg > 32) {
3265 LOG_ERROR("<nfgseg> must be within [0..32]\n");
3266 return ERROR_COMMAND_ARGUMENT_INVALID;
3267 } else if ((minsegsize < 32) || (minsegsize & (minsegsize - 1))) {
3268 LOG_ERROR("<minsegsize> must be a power of 2 >= 32\n");
3269 return ERROR_COMMAND_ARGUMENT_INVALID;
3270 } else if (lockable > 1) {
3271 LOG_ERROR("<lockable> must be 0 or 1\n");
3272 return ERROR_COMMAND_ARGUMENT_INVALID;
3273 } else if (execonly > 1) {
3274 LOG_ERROR("<execonly> must be 0 or 1\n");
3275 return ERROR_COMMAND_ARGUMENT_INVALID;
3276 }
3277
3278 xtensa->core_config->mpu.enabled = true;
3279 xtensa->core_config->mpu.nfgseg = nfgseg;
3280 xtensa->core_config->mpu.minsegsize = minsegsize;
3281 xtensa->core_config->mpu.lockable = lockable;
3282 xtensa->core_config->mpu.execonly = execonly;
3283 return ERROR_OK;
3284 }
3285
3286 COMMAND_HANDLER(xtensa_cmd_xtmpu)
3287 {
3288 return CALL_COMMAND_HANDLER(xtensa_cmd_xtmpu_do,
3289 target_to_xtensa(get_current_target(CMD_CTX)));
3290 }
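/* Hypothetical "xtensa xtmpu" example with the four arguments validated above:
 * 8 foreground segments, 4 KB minimum segment size, lockable, no execute-only:
 *
 *   xtensa xtmpu 8 0x1000 1 0
 */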
3291
3292 /* xtmmu <NIREFILLENTRIES> <NDREFILLENTRIES> */
3293 COMMAND_HELPER(xtensa_cmd_xtmmu_do, struct xtensa *xtensa)
3294 {
3295 if (CMD_ARGC != 2) {
3296 LOG_ERROR("xtmmu <NIREFILLENTRIES> <NDREFILLENTRIES>\n");
3297 return ERROR_COMMAND_SYNTAX_ERROR;
3298 }
3299
3300 unsigned int nirefillentries = strtoul(CMD_ARGV[0], NULL, 0);
3301 unsigned int ndrefillentries = strtoul(CMD_ARGV[1], NULL, 0);
3302 if ((nirefillentries != 16) && (nirefillentries != 32)) {
3303 LOG_ERROR("<nirefillentries> must be 16 or 32\n");
3304 return ERROR_COMMAND_ARGUMENT_INVALID;
3305 } else if ((ndrefillentries != 16) && (ndrefillentries != 32)) {
3306 LOG_ERROR("<ndrefillentries> must be 16 or 32\n");
3307 return ERROR_COMMAND_ARGUMENT_INVALID;
3308 }
3309
3310 xtensa->core_config->mmu.enabled = true;
3311 xtensa->core_config->mmu.itlb_entries_count = nirefillentries;
3312 xtensa->core_config->mmu.dtlb_entries_count = ndrefillentries;
3313 return ERROR_OK;
3314 }
3315
3316 COMMAND_HANDLER(xtensa_cmd_xtmmu)
3317 {
3318 return CALL_COMMAND_HANDLER(xtensa_cmd_xtmmu_do,
3319 target_to_xtensa(get_current_target(CMD_CTX)));
3320 }
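/* Hypothetical "xtensa xtmmu" example; both refill entry counts must be 16 or
 * 32 per the checks above:
 *
 *   xtensa xtmmu 32 32
 */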
3321
3322 /* xtregs <numregs>
3323 * xtreg <regname> <regnum> */
3324 COMMAND_HELPER(xtensa_cmd_xtreg_do, struct xtensa *xtensa)
3325 {
3326 if (CMD_ARGC == 1) {
3327 int32_t numregs = strtoul(CMD_ARGV[0], NULL, 0);
3328 if ((numregs <= 0) || (numregs > UINT16_MAX)) {
3329 LOG_ERROR("xtreg <numregs>: Invalid 'numregs' (%d)", numregs);
3330 return ERROR_COMMAND_SYNTAX_ERROR;
3331 }
3332 if ((xtensa->genpkt_regs_num > 0) && (numregs < (int32_t)xtensa->genpkt_regs_num)) {
3333 LOG_ERROR("xtregs (%d) must be larger than numgenregs (%d) (if xtregfmt specified)",
3334 numregs, xtensa->genpkt_regs_num);
3335 return ERROR_COMMAND_SYNTAX_ERROR;
3336 }
3337 xtensa->total_regs_num = numregs;
3338 xtensa->core_regs_num = 0;
3339 xtensa->num_optregs = 0;
3340 /* A little more memory than required, but saves a second initialization pass */
3341 xtensa->optregs = calloc(xtensa->total_regs_num, sizeof(struct xtensa_reg_desc));
3342 if (!xtensa->optregs) {
3343 LOG_ERROR("Failed to allocate xtensa->optregs!");
3344 return ERROR_FAIL;
3345 }
3346 return ERROR_OK;
3347 } else if (CMD_ARGC != 2)
3348 return ERROR_COMMAND_SYNTAX_ERROR;
3349
3350 /* "xtregfmt contiguous" must be specified prior to the first "xtreg" definition
3351 * if general register (g-packet) requests or contiguous register maps are supported */
3352 if (xtensa->regmap_contiguous && !xtensa->contiguous_regs_desc) {
3353 xtensa->contiguous_regs_desc = calloc(xtensa->total_regs_num, sizeof(struct xtensa_reg_desc *));
3354 if (!xtensa->contiguous_regs_desc) {
3355 LOG_ERROR("Failed to allocate xtensa->contiguous_regs_desc!");
3356 return ERROR_FAIL;
3357 }
3358 }
3359
3360 const char *regname = CMD_ARGV[0];
3361 unsigned int regnum = strtoul(CMD_ARGV[1], NULL, 0);
3362 if (regnum > UINT16_MAX) {
3363 LOG_ERROR("<regnum> must be a 16-bit number");
3364 return ERROR_COMMAND_ARGUMENT_INVALID;
3365 }
3366
3367 if ((xtensa->num_optregs + xtensa->core_regs_num) >= xtensa->total_regs_num) {
3368 if (xtensa->total_regs_num)
3369 LOG_ERROR("'xtreg %s 0x%04x': Too many registers (%d expected, %d core %d extended)",
3370 regname, regnum,
3371 xtensa->total_regs_num, xtensa->core_regs_num, xtensa->num_optregs);
3372 else
3373 LOG_ERROR("'xtreg %s 0x%04x': Number of registers unspecified",
3374 regname, regnum);
3375 return ERROR_FAIL;
3376 }
3377
3378 /* Determine whether register belongs in xtensa_regs[] or xtensa->xtensa_spec_regs[] */
3379 struct xtensa_reg_desc *rptr = &xtensa->optregs[xtensa->num_optregs];
3380 bool is_extended_reg = true;
3381 unsigned int ridx;
3382 for (ridx = 0; ridx < XT_NUM_REGS; ridx++) {
3383 if (strcmp(CMD_ARGV[0], xtensa_regs[ridx].name) == 0) {
3384 /* Flag core register as defined */
3385 rptr = &xtensa_regs[ridx];
3386 xtensa->core_regs_num++;
3387 is_extended_reg = false;
3388 break;
3389 }
3390 }
3391
3392 rptr->exist = true;
3393 if (is_extended_reg) {
3394 /* Register ID, debugger-visible register ID */
3395 rptr->name = strdup(CMD_ARGV[0]);
3396 rptr->dbreg_num = regnum;
3397 rptr->reg_num = (regnum & XT_REG_INDEX_MASK);
3398 xtensa->num_optregs++;
3399
3400 /* Register type */
3401 if ((regnum & XT_REG_GENERAL_MASK) == XT_REG_GENERAL_VAL) {
3402 rptr->type = XT_REG_GENERAL;
3403 } else if ((regnum & XT_REG_USER_MASK) == XT_REG_USER_VAL) {
3404 rptr->type = XT_REG_USER;
3405 } else if ((regnum & XT_REG_FR_MASK) == XT_REG_FR_VAL) {
3406 rptr->type = XT_REG_FR;
3407 } else if ((regnum & XT_REG_SPECIAL_MASK) == XT_REG_SPECIAL_VAL) {
3408 rptr->type = XT_REG_SPECIAL;
3409 } else if ((regnum & XT_REG_RELGEN_MASK) == XT_REG_RELGEN_VAL) {
3410 /* WARNING: For these registers, regnum points to the
3411 * index of the corresponding ARx registers, NOT to
3412 * the processor register number! */
3413 rptr->type = XT_REG_RELGEN;
3414 rptr->reg_num += XT_REG_IDX_ARFIRST;
3415 rptr->dbreg_num += XT_REG_IDX_ARFIRST;
3416 } else if ((regnum & XT_REG_TIE_MASK) != 0) {
3417 rptr->type = XT_REG_TIE;
3418 } else {
3419 rptr->type = XT_REG_OTHER;
3420 }
3421
3422 /* Register flags */
3423 if ((strcmp(rptr->name, "mmid") == 0) || (strcmp(rptr->name, "eraccess") == 0) ||
3424 (strcmp(rptr->name, "ddr") == 0) || (strcmp(rptr->name, "intset") == 0) ||
3425 (strcmp(rptr->name, "intclear") == 0))
3426 rptr->flags = XT_REGF_NOREAD;
3427 else
3428 rptr->flags = 0;
3429
3430 if ((rptr->reg_num == (XT_PS_REG_NUM_BASE + xtensa->core_config->debug.irq_level)) &&
3431 (xtensa->core_config->core_type == XT_LX) && (rptr->type == XT_REG_SPECIAL)) {
3432 xtensa->eps_dbglevel_idx = XT_NUM_REGS + xtensa->num_optregs - 1;
3433 LOG_DEBUG("Setting PS (%s) index to %d", rptr->name, xtensa->eps_dbglevel_idx);
3434 }
3435 } else if (strcmp(rptr->name, "cpenable") == 0) {
3436 xtensa->core_config->coproc = true;
3437 }
3438
3439 /* Build out list of contiguous registers in specified order */
3440 unsigned int running_reg_count = xtensa->num_optregs + xtensa->core_regs_num;
3441 if (xtensa->contiguous_regs_desc) {
3442 assert((running_reg_count <= xtensa->total_regs_num) && "contiguous register address internal error!");
3443 xtensa->contiguous_regs_desc[running_reg_count - 1] = rptr;
3444 }
3445 if (xtensa_extra_debug_log)
3446 LOG_DEBUG("Added %s register %-16s: 0x%04x/0x%02x t%d (%d of %d)",
3447 is_extended_reg ? "config-specific" : "core",
3448 rptr->name, rptr->dbreg_num, rptr->reg_num, rptr->type,
3449 is_extended_reg ? xtensa->num_optregs : ridx,
3450 is_extended_reg ? xtensa->total_regs_num : XT_NUM_REGS);
3451 return ERROR_OK;
3452 }
3453
3454 COMMAND_HANDLER(xtensa_cmd_xtreg)
3455 {
3456 return CALL_COMMAND_HANDLER(xtensa_cmd_xtreg_do,
3457 target_to_xtensa(get_current_target(CMD_CTX)));
3458 }
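/* Illustrative fragment of a generated register map using "xtensa xtregs" and
 * "xtensa xtreg"; the total count and the register numbers below are
 * placeholders, not taken from a real core description:
 *
 *   xtensa xtregs 205
 *   xtensa xtreg pc 0x0020
 *   xtensa xtreg ar0 0x0100
 *   xtensa xtreg sar 0x0203
 *
 * Names found in xtensa_regs[] are flagged as present core registers; any other
 * name is appended to the config-specific (optional) register list.
 */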
3459
3460 /* xtregfmt <contiguous|sparse> [numgregs] */
3461 COMMAND_HELPER(xtensa_cmd_xtregfmt_do, struct xtensa *xtensa)
3462 {
3463 if ((CMD_ARGC == 1) || (CMD_ARGC == 2)) {
3464 if (!strcasecmp(CMD_ARGV[0], "sparse")) {
3465 return ERROR_OK;
3466 } else if (!strcasecmp(CMD_ARGV[0], "contiguous")) {
3467 xtensa->regmap_contiguous = true;
3468 if (CMD_ARGC == 2) {
3469 unsigned int numgregs = strtoul(CMD_ARGV[1], NULL, 0);
3470 if ((numgregs <= 0) ||
3471 ((numgregs > xtensa->total_regs_num) &&
3472 (xtensa->total_regs_num > 0))) {
3473 LOG_ERROR("xtregfmt: if specified, numgregs (%d) must be <= numregs (%d)",
3474 numgregs, xtensa->total_regs_num);
3475 return ERROR_COMMAND_SYNTAX_ERROR;
3476 }
3477 xtensa->genpkt_regs_num = numgregs;
3478 }
3479 return ERROR_OK;
3480 }
3481 }
3482 return ERROR_COMMAND_SYNTAX_ERROR;
3483 }
3484
3485 COMMAND_HANDLER(xtensa_cmd_xtregfmt)
3486 {
3487 return CALL_COMMAND_HANDLER(xtensa_cmd_xtregfmt_do,
3488 target_to_xtensa(get_current_target(CMD_CTX)));
3489 }
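/* Hypothetical "xtensa xtregfmt" usage: "contiguous" with an optional
 * general-register (g-packet) count, or "sparse" (the default behavior):
 *
 *   xtensa xtregfmt contiguous 128
 */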
3490
3491 COMMAND_HELPER(xtensa_cmd_permissive_mode_do, struct xtensa *xtensa)
3492 {
3493 return CALL_COMMAND_HANDLER(handle_command_parse_bool,
3494 &xtensa->permissive_mode, "xtensa permissive mode");
3495 }
3496
3497 COMMAND_HANDLER(xtensa_cmd_permissive_mode)
3498 {
3499 return CALL_COMMAND_HANDLER(xtensa_cmd_permissive_mode_do,
3500 target_to_xtensa(get_current_target(CMD_CTX)));
3501 }
3502
3503 /* perfmon_enable <counter_id> <select> [mask] [kernelcnt] [tracelevel] */
3504 COMMAND_HELPER(xtensa_cmd_perfmon_enable_do, struct xtensa *xtensa)
3505 {
3506 struct xtensa_perfmon_config config = {
3507 .mask = 0xffff,
3508 .kernelcnt = 0,
3509 .tracelevel = -1 /* use DEBUGLEVEL by default */
3510 };
3511
3512 if (CMD_ARGC < 2 || CMD_ARGC > 6)
3513 return ERROR_COMMAND_SYNTAX_ERROR;
3514
3515 unsigned int counter_id = strtoul(CMD_ARGV[0], NULL, 0);
3516 if (counter_id >= XTENSA_MAX_PERF_COUNTERS) {
3517 command_print(CMD, "counter_id should be < %d", XTENSA_MAX_PERF_COUNTERS);
3518 return ERROR_COMMAND_ARGUMENT_INVALID;
3519 }
3520
3521 config.select = strtoul(CMD_ARGV[1], NULL, 0);
3522 if (config.select > XTENSA_MAX_PERF_SELECT) {
3523 command_print(CMD, "select should be < %d", XTENSA_MAX_PERF_SELECT);
3524 return ERROR_COMMAND_ARGUMENT_INVALID;
3525 }
3526
3527 if (CMD_ARGC >= 3) {
3528 config.mask = strtoul(CMD_ARGV[2], NULL, 0);
3529 if (config.mask > XTENSA_MAX_PERF_MASK) {
3530 command_print(CMD, "mask should be < %d", XTENSA_MAX_PERF_MASK);
3531 return ERROR_COMMAND_ARGUMENT_INVALID;
3532 }
3533 }
3534
3535 if (CMD_ARGC >= 4) {
3536 config.kernelcnt = strtoul(CMD_ARGV[3], NULL, 0);
3537 if (config.kernelcnt > 1) {
3538 command_print(CMD, "kernelcnt should be 0 or 1");
3539 return ERROR_COMMAND_ARGUMENT_INVALID;
3540 }
3541 }
3542
3543 if (CMD_ARGC >= 5) {
3544 config.tracelevel = strtoul(CMD_ARGV[4], NULL, 0);
3545 if (config.tracelevel > 7) {
3546 command_print(CMD, "tracelevel should be <=7");
3547 return ERROR_COMMAND_ARGUMENT_INVALID;
3548 }
3549 }
3550
3551 if (config.tracelevel == -1)
3552 config.tracelevel = xtensa->core_config->debug.irq_level;
3553
3554 return xtensa_dm_perfmon_enable(&xtensa->dbg_mod, counter_id, &config);
3555 }
3556
3557 COMMAND_HANDLER(xtensa_cmd_perfmon_enable)
3558 {
3559 return CALL_COMMAND_HANDLER(xtensa_cmd_perfmon_enable_do,
3560 target_to_xtensa(get_current_target(CMD_CTX)));
3561 }
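/* Hypothetical "xtensa perfmon_enable" invocation: program counter 0 with a
 * select value of 0 (the meaning of <select> is defined by the Xtensa perfmon
 * hardware, not by this driver); mask, kernelcnt and tracelevel keep their
 * defaults:
 *
 *   xtensa perfmon_enable 0 0
 */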
3562
3563 /* perfmon_dump [counter_id] */
3564 COMMAND_HELPER(xtensa_cmd_perfmon_dump_do, struct xtensa *xtensa)
3565 {
3566 if (CMD_ARGC > 1)
3567 return ERROR_COMMAND_SYNTAX_ERROR;
3568
3569 int counter_id = -1;
3570 if (CMD_ARGC == 1) {
3571 counter_id = strtol(CMD_ARGV[0], NULL, 0);
3572 if (counter_id >= XTENSA_MAX_PERF_COUNTERS) {
3573 command_print(CMD, "counter_id should be < %d", XTENSA_MAX_PERF_COUNTERS);
3574 return ERROR_COMMAND_ARGUMENT_INVALID;
3575 }
3576 }
3577
3578 unsigned int counter_start = (counter_id < 0) ? 0 : counter_id;
3579 unsigned int counter_end = (counter_id < 0) ? XTENSA_MAX_PERF_COUNTERS : counter_id + 1;
3580 for (unsigned int counter = counter_start; counter < counter_end; ++counter) {
3581 char result_buf[128] = { 0 };
3582 size_t result_pos = snprintf(result_buf, sizeof(result_buf), "Counter %d: ", counter);
3583 struct xtensa_perfmon_result result;
3584 int res = xtensa_dm_perfmon_dump(&xtensa->dbg_mod, counter, &result);
3585 if (res != ERROR_OK)
3586 return res;
3587 snprintf(result_buf + result_pos, sizeof(result_buf) - result_pos,
3588 "%-12" PRIu64 "%s",
3589 result.value,
3590 result.overflow ? " (overflow)" : "");
3591 LOG_INFO("%s", result_buf);
3592 }
3593
3594 return ERROR_OK;
3595 }
3596
3597 COMMAND_HANDLER(xtensa_cmd_perfmon_dump)
3598 {
3599 return CALL_COMMAND_HANDLER(xtensa_cmd_perfmon_dump_do,
3600 target_to_xtensa(get_current_target(CMD_CTX)));
3601 }
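/* Hypothetical "xtensa perfmon_dump" usage: dump all counters, or counter 0
 * only:
 *
 *   xtensa perfmon_dump
 *   xtensa perfmon_dump 0
 */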
3602
3603 COMMAND_HELPER(xtensa_cmd_mask_interrupts_do, struct xtensa *xtensa)
3604 {
3605 int state = -1;
3606
3607 if (CMD_ARGC < 1) {
3608 const char *st;
3609 state = xtensa->stepping_isr_mode;
3610 if (state == XT_STEPPING_ISR_ON)
3611 st = "OFF";
3612 else if (state == XT_STEPPING_ISR_OFF)
3613 st = "ON";
3614 else
3615 st = "UNKNOWN";
3616 command_print(CMD, "Current ISR step mode: %s", st);
3617 return ERROR_OK;
3618 }
3619 /* Masking is ON -> interrupts during stepping are OFF, and vice versa */
3620 if (!strcasecmp(CMD_ARGV[0], "off"))
3621 state = XT_STEPPING_ISR_ON;
3622 else if (!strcasecmp(CMD_ARGV[0], "on"))
3623 state = XT_STEPPING_ISR_OFF;
3624
3625 if (state == -1) {
3626 command_print(CMD, "Argument unknown. Please pick one of ON, OFF");
3627 return ERROR_FAIL;
3628 }
3629 xtensa->stepping_isr_mode = state;
3630 return ERROR_OK;
3631 }
3632
3633 COMMAND_HANDLER(xtensa_cmd_mask_interrupts)
3634 {
3635 return CALL_COMMAND_HANDLER(xtensa_cmd_mask_interrupts_do,
3636 target_to_xtensa(get_current_target(CMD_CTX)));
3637 }
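/* Hypothetical "xtensa maskisr" usage: query the current mode, then mask
 * interrupts while single-stepping:
 *
 *   xtensa maskisr
 *   xtensa maskisr on
 */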
3638
3639 COMMAND_HELPER(xtensa_cmd_smpbreak_do, struct target *target)
3640 {
3641 int res;
3642 uint32_t val = 0;
3643
3644 if (CMD_ARGC >= 1) {
3645 for (unsigned int i = 0; i < CMD_ARGC; i++) {
3646 if (!strcasecmp(CMD_ARGV[i], "none")) {
3647 val = 0;
3648 } else if (!strcasecmp(CMD_ARGV[i], "BreakIn")) {
3649 val |= OCDDCR_BREAKINEN;
3650 } else if (!strcasecmp(CMD_ARGV[i], "BreakOut")) {
3651 val |= OCDDCR_BREAKOUTEN;
3652 } else if (!strcasecmp(CMD_ARGV[i], "RunStallIn")) {
3653 val |= OCDDCR_RUNSTALLINEN;
3654 } else if (!strcasecmp(CMD_ARGV[i], "DebugModeOut")) {
3655 val |= OCDDCR_DEBUGMODEOUTEN;
3656 } else if (!strcasecmp(CMD_ARGV[i], "BreakInOut")) {
3657 val |= OCDDCR_BREAKINEN | OCDDCR_BREAKOUTEN;
3658 } else if (!strcasecmp(CMD_ARGV[i], "RunStall")) {
3659 val |= OCDDCR_RUNSTALLINEN | OCDDCR_DEBUGMODEOUTEN;
3660 } else {
3661 command_print(CMD, "Unknown arg %s", CMD_ARGV[i]);
3662 command_print(
3663 CMD,
3664 "use either BreakInOut, None or RunStall as arguments, or any combination of BreakIn, BreakOut, RunStallIn and DebugModeOut.");
3665 return ERROR_OK;
3666 }
3667 }
3668 res = xtensa_smpbreak_set(target, val);
3669 if (res != ERROR_OK)
3670 command_print(CMD, "Failed to set smpbreak config %d", res);
3671 } else {
3672 struct xtensa *xtensa = target_to_xtensa(target);
3673 res = xtensa_smpbreak_read(xtensa, &val);
3674 if (res == ERROR_OK)
3675 command_print(CMD, "Current bits set:%s%s%s%s",
3676 (val & OCDDCR_BREAKINEN) ? " BreakIn" : "",
3677 (val & OCDDCR_BREAKOUTEN) ? " BreakOut" : "",
3678 (val & OCDDCR_RUNSTALLINEN) ? " RunStallIn" : "",
3679 (val & OCDDCR_DEBUGMODEOUTEN) ? " DebugModeOut" : ""
3680 );
3681 else
3682 command_print(CMD, "Failed to get smpbreak config %d", res);
3683 }
3684 return res;
3685 }
3686
3687 COMMAND_HANDLER(xtensa_cmd_smpbreak)
3688 {
3689 return CALL_COMMAND_HANDLER(xtensa_cmd_smpbreak_do,
3690 get_current_target(CMD_CTX));
3691 }
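/* Hypothetical "xtensa smpbreak" usage: enable cross-triggered break-in and
 * break-out between cores, or query the current setting when no argument is
 * given:
 *
 *   xtensa smpbreak BreakInOut
 *   xtensa smpbreak
 */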
3692
3693 COMMAND_HELPER(xtensa_cmd_tracestart_do, struct xtensa *xtensa)
3694 {
3695 struct xtensa_trace_status trace_status;
3696 struct xtensa_trace_start_config cfg = {
3697 .stoppc = 0,
3698 .stopmask = XTENSA_STOPMASK_DISABLED,
3699 .after = 0,
3700 .after_is_words = false
3701 };
3702
3703 /* Parse arguments */
3704 for (unsigned int i = 0; i < CMD_ARGC; i++) {
3705 if ((!strcasecmp(CMD_ARGV[i], "pc")) && (i + 1 < CMD_ARGC)) {
3706 char *e;
3707 i++;
3708 cfg.stoppc = strtol(CMD_ARGV[i], &e, 0);
3709 cfg.stopmask = 0;
3710 if (*e == '/')
3711 cfg.stopmask = strtol(e + 1, NULL, 0);
3712 } else if ((!strcasecmp(CMD_ARGV[i], "after")) && (i + 1 < CMD_ARGC)) {
3713 i++;
3714 cfg.after = strtol(CMD_ARGV[i], NULL, 0);
3713 i++;
3714 cfg.after = strtol(CMD_ARGV[i], NULL, 0);
3715 } else if (!strcasecmp(CMD_ARGV[i], "ins")) {
3716 cfg.after_is_words = 0;
3717 } else if (!strcasecmp(CMD_ARGV[i], "words")) {
3718 cfg.after_is_words = 1;
3719 } else {
3720 command_print(CMD, "Did not understand %s", CMD_ARGV[i]);
3721 return ERROR_FAIL;
3722 }
3723 }
3724
3725 int res = xtensa_dm_trace_status_read(&xtensa->dbg_mod, &trace_status);
3726 if (res != ERROR_OK)
3727 return res;
3728 if (trace_status.stat & TRAXSTAT_TRACT) {
3729 LOG_WARNING("Silently stop active tracing!");
3730 res = xtensa_dm_trace_stop(&xtensa->dbg_mod, false);
3731 if (res != ERROR_OK)
3732 return res;
3733 }
3734
3735 res = xtensa_dm_trace_start(&xtensa->dbg_mod, &cfg);
3736 if (res != ERROR_OK)
3737 return res;
3738
3739 xtensa->trace_active = true;
3740 command_print(CMD, "Trace started.");
3741 return ERROR_OK;
3742 }
3743
3744 COMMAND_HANDLER(xtensa_cmd_tracestart)
3745 {
3746 return CALL_COMMAND_HANDLER(xtensa_cmd_tracestart_do,
3747 target_to_xtensa(get_current_target(CMD_CTX)));
3748 }
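/* Hypothetical "xtensa tracestart" invocations matching the argument parser
 * above (the addresses and counts are placeholders):
 *
 *   xtensa tracestart pc 0x40000400/4 after 256 words
 *   xtensa tracestart after 1000 ins
 */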
3749
3750 COMMAND_HELPER(xtensa_cmd_tracestop_do, struct xtensa *xtensa)
3751 {
3752 struct xtensa_trace_status trace_status;
3753
3754 int res = xtensa_dm_trace_status_read(&xtensa->dbg_mod, &trace_status);
3755 if (res != ERROR_OK)
3756 return res;
3757
3758 if (!(trace_status.stat & TRAXSTAT_TRACT)) {
3759 command_print(CMD, "No trace is currently active.");
3760 return ERROR_FAIL;
3761 }
3762
3763 res = xtensa_dm_trace_stop(&xtensa->dbg_mod, true);
3764 if (res != ERROR_OK)
3765 return res;
3766
3767 xtensa->trace_active = false;
3768 command_print(CMD, "Trace stop triggered.");
3769 return ERROR_OK;
3770 }
3771
3772 COMMAND_HANDLER(xtensa_cmd_tracestop)
3773 {
3774 return CALL_COMMAND_HANDLER(xtensa_cmd_tracestop_do,
3775 target_to_xtensa(get_current_target(CMD_CTX)));
3776 }
3777
3778 COMMAND_HELPER(xtensa_cmd_tracedump_do, struct xtensa *xtensa, const char *fname)
3779 {
3780 struct xtensa_trace_config trace_config;
3781 struct xtensa_trace_status trace_status;
3782 uint32_t memsz, wmem;
3783
3784 int res = xtensa_dm_trace_status_read(&xtensa->dbg_mod, &trace_status);
3785 if (res != ERROR_OK)
3786 return res;
3787
3788 if (trace_status.stat & TRAXSTAT_TRACT) {
3789 command_print(CMD, "Tracing is still active. Please stop it first.");
3790 return ERROR_FAIL;
3791 }
3792
3793 res = xtensa_dm_trace_config_read(&xtensa->dbg_mod, &trace_config);
3794 if (res != ERROR_OK)
3795 return res;
3796
3797 if (!(trace_config.ctrl & TRAXCTRL_TREN)) {
3798 command_print(CMD, "No active trace found; nothing to dump.");
3799 return ERROR_FAIL;
3800 }
3801
3802 memsz = trace_config.memaddr_end - trace_config.memaddr_start + 1;
3803 LOG_INFO("Total trace memory: %d words", memsz);
3804 if ((trace_config.addr &
3805 ((TRAXADDR_TWRAP_MASK << TRAXADDR_TWRAP_SHIFT) | TRAXADDR_TWSAT)) == 0) {
3806 /* Memory hasn't overwritten itself yet. */
3807 wmem = trace_config.addr & TRAXADDR_TADDR_MASK;
3808 LOG_INFO("...but trace is only %d words", wmem);
3809 if (wmem < memsz)
3810 memsz = wmem;
3811 } else {
3812 if (trace_config.addr & TRAXADDR_TWSAT) {
3813 LOG_INFO("Real trace is many times longer than that (overflow)");
3814 } else {
3815 uint32_t trc_sz = (trace_config.addr >> TRAXADDR_TWRAP_SHIFT) & TRAXADDR_TWRAP_MASK;
3816 trc_sz = (trc_sz * memsz) + (trace_config.addr & TRAXADDR_TADDR_MASK);
3817 LOG_INFO("Real trace is %d words, but the start has been truncated.", trc_sz);
3818 }
3819 }
3820
3821 uint8_t *tracemem = malloc(memsz * 4);
3822 if (!tracemem) {
3823 command_print(CMD, "Failed to alloc memory for trace data!");
3824 return ERROR_FAIL;
3825 }
3826 res = xtensa_dm_trace_data_read(&xtensa->dbg_mod, tracemem, memsz * 4);
3827 if (res != ERROR_OK) {
3828 free(tracemem);
3829 return res;
3830 }
3831
3832 int f = open(fname, O_WRONLY | O_CREAT | O_TRUNC, 0666);
3833 if (f < 0) {
3834 free(tracemem);
3835 command_print(CMD, "Unable to open file %s", fname);
3836 return ERROR_FAIL;
3837 }
3838 if (write(f, tracemem, memsz * 4) != (int)memsz * 4)
3839 command_print(CMD, "Unable to write to file %s", fname);
3840 else
3841 command_print(CMD, "Written %d bytes of trace data to %s", memsz * 4, fname);
3842 close(f);
3843
3844 bool is_all_zeroes = true;
3845 for (unsigned int i = 0; i < memsz * 4; i++) {
3846 if (tracemem[i] != 0) {
3847 is_all_zeroes = false;
3848 break;
3849 }
3850 }
3851 free(tracemem);
3852 if (is_all_zeroes)
3853 command_print(
3854 CMD,
3855 "WARNING: File written is all zeroes. Are you sure you enabled trace memory?");
3856
3857 return ERROR_OK;
3858 }
3859
3860 COMMAND_HANDLER(xtensa_cmd_tracedump)
3861 {
3862 if (CMD_ARGC != 1) {
3863 command_print(CMD, "Command takes exactly 1 parameter.Need filename to dump to as output!");
3864 return ERROR_FAIL;
3865 }
3866
3867 return CALL_COMMAND_HANDLER(xtensa_cmd_tracedump_do,
3868 target_to_xtensa(get_current_target(CMD_CTX)), CMD_ARGV[0]);
3869 }
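/* Hypothetical "xtensa tracedump" usage, writing the raw TRAX words to a host
 * file (the path is a placeholder):
 *
 *   xtensa tracedump /tmp/xtensa-trace.bin
 */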
3870
3871 static const struct command_registration xtensa_any_command_handlers[] = {
3872 {
3873 .name = "xtdef",
3874 .handler = xtensa_cmd_xtdef,
3875 .mode = COMMAND_CONFIG,
3876 .help = "Configure Xtensa core type",
3877 .usage = "<type>",
3878 },
3879 {
3880 .name = "xtopt",
3881 .handler = xtensa_cmd_xtopt,
3882 .mode = COMMAND_CONFIG,
3883 .help = "Configure Xtensa core option",
3884 .usage = "<name> <value>",
3885 },
3886 {
3887 .name = "xtmem",
3888 .handler = xtensa_cmd_xtmem,
3889 .mode = COMMAND_CONFIG,
3890 .help = "Configure Xtensa memory/cache option",
3891 .usage = "<type> [parameters]",
3892 },
3893 {
3894 .name = "xtmmu",
3895 .handler = xtensa_cmd_xtmmu,
3896 .mode = COMMAND_CONFIG,
3897 .help = "Configure Xtensa MMU option",
3898 .usage = "<NIREFILLENTRIES> <NDREFILLENTRIES> <IVARWAY56> <DVARWAY56>",
3899 },
3900 {
3901 .name = "xtmpu",
3902 .handler = xtensa_cmd_xtmpu,
3903 .mode = COMMAND_CONFIG,
3904 .help = "Configure Xtensa MPU option",
3905 .usage = "<num FG seg> <min seg size> <lockable> <executeonly>",
3906 },
3907 {
3908 .name = "xtreg",
3909 .handler = xtensa_cmd_xtreg,
3910 .mode = COMMAND_CONFIG,
3911 .help = "Configure Xtensa register",
3912 .usage = "<regname> <regnum>",
3913 },
3914 {
3915 .name = "xtregs",
3916 .handler = xtensa_cmd_xtreg,
3917 .mode = COMMAND_CONFIG,
3918 .help = "Configure number of Xtensa registers",
3919 .usage = "<numregs>",
3920 },
3921 {
3922 .name = "xtregfmt",
3923 .handler = xtensa_cmd_xtregfmt,
3924 .mode = COMMAND_CONFIG,
3925 .help = "Configure format of Xtensa register map",
3926 .usage = "<contiguous|sparse> [numgregs]",
3927 },
3928 {
3929 .name = "set_permissive",
3930 .handler = xtensa_cmd_permissive_mode,
3931 .mode = COMMAND_ANY,
3932 .help = "When set to 1, enable Xtensa permissive mode (fewer client-side checks)",
3933 .usage = "[0|1]",
3934 },
3935 {
3936 .name = "maskisr",
3937 .handler = xtensa_cmd_mask_interrupts,
3938 .mode = COMMAND_ANY,
3939 .help = "mask Xtensa interrupts at step",
3940 .usage = "['on'|'off']",
3941 },
3942 {
3943 .name = "smpbreak",
3944 .handler = xtensa_cmd_smpbreak,
3945 .mode = COMMAND_ANY,
3946 .help = "Set the way the CPU chains OCD breaks",
3947 .usage = "[none|breakinout|runstall] | [BreakIn] [BreakOut] [RunStallIn] [DebugModeOut]",
3948 },
3949 {
3950 .name = "perfmon_enable",
3951 .handler = xtensa_cmd_perfmon_enable,
3952 .mode = COMMAND_EXEC,
3953 .help = "Enable and start performance counter",
3954 .usage = "<counter_id> <select> [mask] [kernelcnt] [tracelevel]",
3955 },
3956 {
3957 .name = "perfmon_dump",
3958 .handler = xtensa_cmd_perfmon_dump,
3959 .mode = COMMAND_EXEC,
3960 .help = "Dump performance counter value. If no argument specified, dumps all counters.",
3961 .usage = "[counter_id]",
3962 },
3963 {
3964 .name = "tracestart",
3965 .handler = xtensa_cmd_tracestart,
3966 .mode = COMMAND_EXEC,
3967 .help =
3968 "Tracing: Set up and start a trace. Optionally set stop trigger address and amount of data captured after.",
3969 .usage = "[pc <pcval>/[maskbitcount]] [after <n> [ins|words]]",
3970 },
3971 {
3972 .name = "tracestop",
3973 .handler = xtensa_cmd_tracestop,
3974 .mode = COMMAND_EXEC,
3975 .help = "Tracing: Stop current trace as started by the tracestart command",
3976 .usage = "",
3977 },
3978 {
3979 .name = "tracedump",
3980 .handler = xtensa_cmd_tracedump,
3981 .mode = COMMAND_EXEC,
3982 .help = "Tracing: Dump trace memory to a files. One file per core.",
3983 .usage = "<outfile>",
3984 },
3985 {
3986 .name = "exe",
3987 .handler = xtensa_cmd_exe,
3988 .mode = COMMAND_ANY,
3989 .help = "Xtensa stub execution",
3990 .usage = "<ascii-encoded hexadecimal instruction bytes>",
3991 },
3992 COMMAND_REGISTRATION_DONE
3993 };
3994
3995 const struct command_registration xtensa_command_handlers[] = {
3996 {
3997 .name = "xtensa",
3998 .mode = COMMAND_ANY,
3999 .help = "Xtensa command group",
4000 .usage = "",
4001 .chain = xtensa_any_command_handlers,
4002 },
4003 COMMAND_REGISTRATION_DONE
4004 };
