055d94340114e44e727ed5c7ca213d3b0b98c4c7
[openocd.git] / src / target / lakemont.c
1 /*
2 * Copyright(c) 2013 Intel Corporation.
3 *
4 * Adrian Burns (adrian.burns@intel.com)
5 * Thomas Faust (thomas.faust@intel.com)
6 * Ivan De Cesaris (ivan.de.cesaris@intel.com)
7 * Julien Carreno (julien.carreno@intel.com)
8 * Jeffrey Maxwell (jeffrey.r.maxwell@intel.com)
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
23 *
24 * Contact Information:
25 * Intel Corporation
26 */
27
/**
 * @file
 * This implements the probemode operations for Lakemont 1 (LMT1).
 */
32
33 #ifdef HAVE_CONFIG_H
34 #include "config.h"
35 #endif
36
37 #include <helper/log.h>
38
39 #include "target.h"
40 #include "target_type.h"
41 #include "lakemont.h"
42 #include "register.h"
43 #include "breakpoints.h"
44 #include "x86_32_common.h"
45
46 static int irscan(struct target *t, uint8_t *out,
47 uint8_t *in, uint8_t ir_len);
48 static int drscan(struct target *t, uint8_t *out, uint8_t *in, uint8_t len);
49 static int save_context(struct target *target);
50 static int restore_context(struct target *target);
51 static uint32_t get_tapstatus(struct target *t);
52 static int enter_probemode(struct target *t);
53 static int exit_probemode(struct target *t);
54 static int halt_prep(struct target *t);
55 static int do_halt(struct target *t);
56 static int do_resume(struct target *t);
57 static int read_all_core_hw_regs(struct target *t);
58 static int write_all_core_hw_regs(struct target *t);
59 static int read_hw_reg(struct target *t,
60 int reg, uint32_t *regval, uint8_t cache);
61 static int write_hw_reg(struct target *t,
62 int reg, uint32_t regval, uint8_t cache);
63 static struct reg_cache *lakemont_build_reg_cache
64 (struct target *target);
65 static int submit_reg_pir(struct target *t, int num);
66 static int submit_instruction_pir(struct target *t, int num);
67 static int submit_pir(struct target *t, uint64_t op);
68 static int lakemont_get_core_reg(struct reg *reg);
69 static int lakemont_set_core_reg(struct reg *reg, uint8_t *buf);
70
71 static struct scan_blk scan;
72
73 /* registers and opcodes for register access, pm_idx is used to identify the
74 * registers that are modified for lakemont probemode specific operations
75 */
76 static const struct {
77 uint8_t id;
78 const char *name;
79 uint64_t op;
80 uint8_t pm_idx;
81 unsigned bits;
82 enum reg_type type;
83 const char *group;
84 const char *feature;
85 } regs[] = {
86 /* general purpose registers */
87 { EAX, "eax", 0x000000D01D660000, 0, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
88 { ECX, "ecx", 0x000000501D660000, 1, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
89 { EDX, "edx", 0x000000901D660000, 2, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
90 { EBX, "ebx", 0x000000101D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
91 { ESP, "esp", 0x000000E01D660000, NOT_PMREG, 32, REG_TYPE_DATA_PTR, "general", "org.gnu.gdb.i386.core" },
92 { EBP, "ebp", 0x000000601D660000, NOT_PMREG, 32, REG_TYPE_DATA_PTR, "general", "org.gnu.gdb.i386.core" },
93 { ESI, "esi", 0x000000A01D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
94 { EDI, "edi", 0x000000201D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
95
96 /* instruction pointer & flags */
97 { EIP, "eip", 0x000000C01D660000, 3, 32, REG_TYPE_CODE_PTR, "general", "org.gnu.gdb.i386.core" },
98 { EFLAGS, "eflags", 0x000000401D660000, 4, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
99
100 /* segment registers */
101 { CS, "cs", 0x000000281D660000, 5, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
102 { SS, "ss", 0x000000C81D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
103 { DS, "ds", 0x000000481D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
104 { ES, "es", 0x000000A81D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
105 { FS, "fs", 0x000000881D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
106 { GS, "gs", 0x000000081D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
107
108 /* floating point unit registers - not accessible via JTAG - here to satisfy GDB */
109 { ST0, "st0", 0x0, NOT_AVAIL_REG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
110 { ST1, "st1", 0x0, NOT_AVAIL_REG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
111 { ST2, "st2", 0x0, NOT_AVAIL_REG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
112 { ST3, "st3", 0x0, NOT_AVAIL_REG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
113 { ST4, "st4", 0x0, NOT_AVAIL_REG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
114 { ST5, "st5", 0x0, NOT_AVAIL_REG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
115 { ST6, "st6", 0x0, NOT_AVAIL_REG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
116 { ST7, "st7", 0x0, NOT_AVAIL_REG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
117 { FCTRL, "fctrl", 0x0, NOT_AVAIL_REG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
118 { FSTAT, "fstat", 0x0, NOT_AVAIL_REG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
119 { FTAG, "ftag", 0x0, NOT_AVAIL_REG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
120 { FISEG, "fiseg", 0x0, NOT_AVAIL_REG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
121 { FIOFF, "fioff", 0x0, NOT_AVAIL_REG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
122 { FOSEG, "foseg", 0x0, NOT_AVAIL_REG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
123 { FOOFF, "fooff", 0x0, NOT_AVAIL_REG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
124 { FOP, "fop", 0x0, NOT_AVAIL_REG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.core" },
125
126 /* control registers */
127 { CR0, "cr0", 0x000000001D660000, 6, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
128 { CR2, "cr2", 0x000000BC1D660000, 7, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
129 { CR3, "cr3", 0x000000801D660000, 8, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
130 { CR4, "cr4", 0x0000002C1D660000, 9, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
131
132 /* debug registers */
133 { DR0, "dr0", 0x0000007C1D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
134 { DR1, "dr1", 0x000000FC1D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
135 { DR2, "dr2", 0x000000021D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
136 { DR3, "dr3", 0x000000821D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
137 { DR6, "dr6", 0x000000301D660000, 10, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
138 { DR7, "dr7", 0x000000B01D660000, 11, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
139
140 /* descriptor tables */
141 { IDTB, "idtbase", 0x000000581D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
142 { IDTL, "idtlimit", 0x000000D81D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
143 { IDTAR, "idtar", 0x000000981D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
144 { GDTB, "gdtbase", 0x000000B81D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
145 { GDTL, "gdtlimit", 0x000000781D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
146 { GDTAR, "gdtar", 0x000000381D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
147 { TR, "tr", 0x000000701D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
148 { LDTR, "ldtr", 0x000000F01D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
149 { LDTB, "ldbase", 0x000000041D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
150 { LDTL, "ldlimit", 0x000000841D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
151 { LDTAR, "ldtar", 0x000000F81D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
152
153 /* segment registers */
154 { CSB, "csbase", 0x000000F41D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
155 { CSL, "cslimit", 0x0000000C1D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
156 { CSAR, "csar", 0x000000741D660000, 12, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
157 { DSB, "dsbase", 0x000000941D660000, 13, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
158 { DSL, "dslimit", 0x000000541D660000, 14, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
159 { DSAR, "dsar", 0x000000141D660000, 15, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
160 { ESB, "esbase", 0x0000004C1D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
161 { ESL, "eslimit", 0x000000CC1D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
162 { ESAR, "esar", 0x0000008C1D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
163 { FSB, "fsbase", 0x000000641D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
164 { FSL, "fslimit", 0x000000E41D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
165 { FSAR, "fsar", 0x000000A41D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
166 { GSB, "gsbase", 0x000000C41D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
167 { GSL, "gslimit", 0x000000241D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
168 { GSAR, "gsar", 0x000000441D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
169 { SSB, "ssbase", 0x000000341D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
170 { SSL, "sslimit", 0x000000B41D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
171 { SSAR, "ssar", 0x000000D41D660000, 16, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
172 { TSSB, "tssbase", 0x000000E81D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
173 { TSSL, "tsslimit", 0x000000181D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
174 { TSSAR, "tssar", 0x000000681D660000, NOT_PMREG, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
175 /* probemode control register */
176 { PMCR, "pmcr", 0x000000421D660000, 17, 32, REG_TYPE_INT32, "general", "org.gnu.gdb.i386.sys" },
177 };
178
179 static const struct {
180 uint8_t id;
181 const char *name;
182 uint64_t op;
183 } instructions[] = {
184 /* memory read/write */
185 { MEMRDB32, "MEMRDB32", 0x0909090909090851 },
186 { MEMRDB16, "MEMRDB16", 0x09090909090851E6 },
187 { MEMRDH32, "MEMRDH32", 0x090909090908D166 },
188 { MEMRDH16, "MEMRDH16", 0x090909090908D1E6 },
189 { MEMRDW32, "MEMRDW32", 0x09090909090908D1 },
190 { MEMRDW16, "MEMRDW16", 0x0909090908D1E666 },
191 { MEMWRB32, "MEMWRB32", 0x0909090909090811 },
192 { MEMWRB16, "MEMWRB16", 0x09090909090811E6 },
193 { MEMWRH32, "MEMWRH32", 0x0909090909089166 },
194 { MEMWRH16, "MEMWRH16", 0x09090909090891E6 },
195 { MEMWRW32, "MEMWRW32", 0x0909090909090891 },
196 { MEMWRW16, "MEMWRW16", 0x090909090891E666 },
197 /* IO read/write */
198 { IORDB32, "IORDB32", 0x0909090909090937 },
199 { IORDB16, "IORDB16", 0x09090909090937E6 },
200 { IORDH32, "IORDH32", 0x090909090909B766 },
201 { IORDH16, "IORDH16", 0x090909090909B7E6 },
202 { IORDW32, "IORDW32", 0x09090909090909B7 },
203 { IORDW16, "IORDW16", 0x0909090909B7E666 },
204 { IOWRB32, "IOWRB32", 0x0909090909090977 },
205 { IOWRB16, "IOWRB16", 0x09090909090977E6 },
206 { IOWRH32, "IOWRH32", 0x090909090909F766 },
207 { IOWRH16, "IOWRH16", 0x090909090909F7E6 },
208 { IOWRW32, "IOWRW32", 0x09090909090909F7 },
209 { IOWRW16, "IOWRW16", 0x0909090909F7E666 },
210 /* lakemont1 core shadow ram access opcodes */
211 { SRAMACCESS, "SRAMACCESS", 0x0000000E9D660000 },
212 { SRAM2PDR, "SRAM2PDR", 0x4CF0000000000000 },
213 { PDR2SRAM, "PDR2SRAM", 0x0CF0000000000000 },
214 { WBINVD, "WBINVD", 0x09090909090990F0 },
215 };
216
217 bool check_not_halted(const struct target *t)
218 {
219 bool halted = t->state == TARGET_HALTED;
220 if (!halted)
221 LOG_ERROR("target running, halt it first");
222 return !halted;
223 }
224
225 static int irscan(struct target *t, uint8_t *out,
226 uint8_t *in, uint8_t ir_len)
227 {
228 int retval = ERROR_OK;
229 struct x86_32_common *x86_32 = target_to_x86_32(t);
230 if (NULL == t->tap) {
231 retval = ERROR_FAIL;
232 LOG_ERROR("%s invalid target tap", __func__);
233 return retval;
234 }
235 if (ir_len != t->tap->ir_length) {
236 retval = ERROR_FAIL;
237 if (t->tap->enabled)
238 LOG_ERROR("%s tap enabled but tap irlen=%d",
239 __func__, t->tap->ir_length);
240 else
241 LOG_ERROR("%s tap not enabled and irlen=%d",
242 __func__, t->tap->ir_length);
243 return retval;
244 }
245 struct scan_field *fields = &scan.field;
246 fields->num_bits = ir_len;
247 fields->out_value = out;
248 fields->in_value = in;
249 jtag_add_ir_scan(x86_32->curr_tap, fields, TAP_IDLE);
250 if (x86_32->flush) {
251 retval = jtag_execute_queue();
252 if (retval != ERROR_OK)
253 LOG_ERROR("%s failed to execute queue", __func__);
254 }
255 return retval;
256 }
257
258 static int drscan(struct target *t, uint8_t *out, uint8_t *in, uint8_t len)
259 {
260 int retval = ERROR_OK;
261 uint64_t data = 0;
262 struct x86_32_common *x86_32 = target_to_x86_32(t);
263 if (NULL == t->tap) {
264 retval = ERROR_FAIL;
265 LOG_ERROR("%s invalid target tap", __func__);
266 return retval;
267 }
268 if (len > MAX_SCAN_SIZE || 0 == len) {
269 retval = ERROR_FAIL;
270 LOG_ERROR("%s data len is %d bits, max is %d bits",
271 __func__, len, MAX_SCAN_SIZE);
272 return retval;
273 }
274 struct scan_field *fields = &scan.field;
275 fields->out_value = out;
276 fields->in_value = in;
277 fields->num_bits = len;
278 jtag_add_dr_scan(x86_32->curr_tap, 1, fields, TAP_IDLE);
279 if (x86_32->flush) {
280 retval = jtag_execute_queue();
281 if (retval != ERROR_OK) {
282 LOG_ERROR("%s drscan failed to execute queue", __func__);
283 return retval;
284 }
285 }
286 if (in != NULL) {
287 if (len >= 8) {
288 for (int n = (len / 8) - 1 ; n >= 0; n--)
289 data = (data << 8) + *(in+n);
290 } else
291 LOG_DEBUG("dr in 0x%02" PRIx8, *in);
292 } else {
293 LOG_ERROR("%s no drscan data", __func__);
294 retval = ERROR_FAIL;
295 }
296 return retval;
297 }
298
299 static int save_context(struct target *t)
300 {
301 int err;
302 /* read core registers from lakemont sram */
303 err = read_all_core_hw_regs(t);
304 if (err != ERROR_OK) {
305 LOG_ERROR("%s error reading regs", __func__);
306 return err;
307 }
308 return ERROR_OK;
309 }
310
311 static int restore_context(struct target *t)
312 {
313 int err = ERROR_OK;
314 uint32_t i;
315 struct x86_32_common *x86_32 = target_to_x86_32(t);
316
317 /* write core regs into the core PM SRAM from the reg_cache */
318 err = write_all_core_hw_regs(t);
319 if (err != ERROR_OK) {
320 LOG_ERROR("%s error writing regs", __func__);
321 return err;
322 }
323
324 for (i = 0; i < (x86_32->cache->num_regs); i++) {
325 x86_32->cache->reg_list[i].dirty = 0;
326 x86_32->cache->reg_list[i].valid = 0;
327 }
328 return err;
329 }
330
/*
 * we keep reg_cache in sync with hardware at halt/resume time, we avoid
 * writing to real hardware here because pm_regs reflects the hardware
 * while we are halted then reg_cache syncs with hw on resume
 * TODO - in order for "reg eip force" to work it assumes get/set reads
 * and writes from hardware, may be other reasons also because generally
 * other openocd targets read/write from hardware in get/set - watch this!
 */
339 static int lakemont_get_core_reg(struct reg *reg)
340 {
341 int retval = ERROR_OK;
342 struct lakemont_core_reg *lakemont_reg = reg->arch_info;
343 struct target *t = lakemont_reg->target;
344 if (check_not_halted(t))
345 return ERROR_TARGET_NOT_HALTED;
346 LOG_DEBUG("reg=%s, value=0x%08" PRIx32, reg->name,
347 buf_get_u32(reg->value, 0, 32));
348 return retval;
349 }
350
351 static int lakemont_set_core_reg(struct reg *reg, uint8_t *buf)
352 {
353 struct lakemont_core_reg *lakemont_reg = reg->arch_info;
354 struct target *t = lakemont_reg->target;
355 uint32_t value = buf_get_u32(buf, 0, 32);
356 LOG_DEBUG("reg=%s, newval=0x%08" PRIx32, reg->name, value);
357 if (check_not_halted(t))
358 return ERROR_TARGET_NOT_HALTED;
359 buf_set_u32(reg->value, 0, 32, value);
360 reg->dirty = 1;
361 reg->valid = 1;
362 return ERROR_OK;
363 }
364
365 static const struct reg_arch_type lakemont_reg_type = {
366 /* these get called if reg_cache doesnt have a "valid" value
367 * of an individual reg eg "reg eip" but not for "reg" block
368 */
369 .get = lakemont_get_core_reg,
370 .set = lakemont_set_core_reg,
371 };
372
373 struct reg_cache *lakemont_build_reg_cache(struct target *t)
374 {
375 struct x86_32_common *x86_32 = target_to_x86_32(t);
376 int num_regs = ARRAY_SIZE(regs);
377 struct reg_cache **cache_p = register_get_last_cache_p(&t->reg_cache);
378 struct reg_cache *cache = malloc(sizeof(struct reg_cache));
379 struct reg *reg_list = calloc(num_regs, sizeof(struct reg));
380 struct lakemont_core_reg *arch_info = malloc(sizeof(struct lakemont_core_reg) * num_regs);
381 struct reg_feature *feature;
382 int i;
383
384 if (cache == NULL || reg_list == NULL || arch_info == NULL) {
385 free(cache);
386 free(reg_list);
387 free(arch_info);
388 LOG_ERROR("%s out of memory", __func__);
389 return NULL;
390 }
391
392 /* Build the process context cache */
393 cache->name = "lakemont registers";
394 cache->next = NULL;
395 cache->reg_list = reg_list;
396 cache->num_regs = num_regs;
397 (*cache_p) = cache;
398 x86_32->cache = cache;
399
400 for (i = 0; i < num_regs; i++) {
401 arch_info[i].target = t;
402 arch_info[i].x86_32_common = x86_32;
403 arch_info[i].op = regs[i].op;
404 arch_info[i].pm_idx = regs[i].pm_idx;
405 reg_list[i].name = regs[i].name;
406 reg_list[i].size = 32;
407 reg_list[i].value = calloc(1, 4);
408 reg_list[i].dirty = 0;
409 reg_list[i].valid = 0;
410 reg_list[i].type = &lakemont_reg_type;
411 reg_list[i].arch_info = &arch_info[i];
412
413 reg_list[i].group = regs[i].group;
414 reg_list[i].number = i;
415 reg_list[i].exist = true;
416 reg_list[i].caller_save = true; /* gdb defaults to true */
417
418 feature = calloc(1, sizeof(struct reg_feature));
419 if (feature) {
420 feature->name = regs[i].feature;
421 reg_list[i].feature = feature;
422 } else
423 LOG_ERROR("%s unable to allocate feature list", __func__);
424
425 reg_list[i].reg_data_type = calloc(1, sizeof(struct reg_data_type));
426 if (reg_list[i].reg_data_type)
427 reg_list[i].reg_data_type->type = regs[i].type;
428 else
429 LOG_ERROR("%s unable to allocate reg type list", __func__);
430 }
431 return cache;
432 }
433
434 static uint32_t get_tapstatus(struct target *t)
435 {
436 scan.out[0] = TAPSTATUS;
437 if (irscan(t, scan.out, NULL, LMT_IRLEN) != ERROR_OK)
438 return 0;
439 if (drscan(t, NULL, scan.out, TS_SIZE) != ERROR_OK)
440 return 0;
441 return buf_get_u32(scan.out, 0, 32);
442 }
443
444 static int enter_probemode(struct target *t)
445 {
446 uint32_t tapstatus = 0;
447 tapstatus = get_tapstatus(t);
448 LOG_DEBUG("TS before PM enter = 0x%08" PRIx32, tapstatus);
449 if (tapstatus & TS_PM_BIT) {
450 LOG_DEBUG("core already in probemode");
451 return ERROR_OK;
452 }
453 scan.out[0] = PROBEMODE;
454 if (irscan(t, scan.out, NULL, LMT_IRLEN) != ERROR_OK)
455 return ERROR_FAIL;
456 scan.out[0] = 1;
457 if (drscan(t, scan.out, scan.in, 1) != ERROR_OK)
458 return ERROR_FAIL;
459 tapstatus = get_tapstatus(t);
460 LOG_DEBUG("TS after PM enter = 0x%08" PRIx32, tapstatus);
461 if ((tapstatus & TS_PM_BIT) && (!(tapstatus & TS_EN_PM_BIT)))
462 return ERROR_OK;
463 else {
464 LOG_ERROR("%s PM enter error, tapstatus = 0x%08" PRIx32
465 , __func__, tapstatus);
466 return ERROR_FAIL;
467 }
468 }
469
470 static int exit_probemode(struct target *t)
471 {
472 uint32_t tapstatus = get_tapstatus(t);
473 LOG_DEBUG("TS before PM exit = 0x%08" PRIx32, tapstatus);
474
475 if (!(tapstatus & TS_PM_BIT)) {
476 LOG_USER("core not in PM");
477 return ERROR_OK;
478 }
479 scan.out[0] = PROBEMODE;
480 if (irscan(t, scan.out, NULL, LMT_IRLEN) != ERROR_OK)
481 return ERROR_FAIL;
482 scan.out[0] = 0;
483 if (drscan(t, scan.out, scan.in, 1) != ERROR_OK)
484 return ERROR_FAIL;
485 return ERROR_OK;
486 }
487
/* do what's needed to properly enter probemode for debug on lakemont */
489 static int halt_prep(struct target *t)
490 {
491 struct x86_32_common *x86_32 = target_to_x86_32(t);
492 if (write_hw_reg(t, DSB, PM_DSB, 0) != ERROR_OK)
493 return ERROR_FAIL;
494 LOG_DEBUG("write %s 0x%08" PRIx32, regs[DSB].name, PM_DSB);
495 if (write_hw_reg(t, DSL, PM_DSL, 0) != ERROR_OK)
496 return ERROR_FAIL;
497 LOG_DEBUG("write %s 0x%08" PRIx32, regs[DSL].name, PM_DSL);
498 if (write_hw_reg(t, DSAR, PM_DSAR, 0) != ERROR_OK)
499 return ERROR_FAIL;
500 LOG_DEBUG("write DSAR 0x%08" PRIx32, PM_DSAR);
501 if (write_hw_reg(t, DR7, PM_DR7, 0) != ERROR_OK)
502 return ERROR_FAIL;
503 LOG_DEBUG("write DR7 0x%08" PRIx32, PM_DR7);
504
505 uint32_t eflags = buf_get_u32(x86_32->cache->reg_list[EFLAGS].value, 0, 32);
506 uint32_t csar = buf_get_u32(x86_32->cache->reg_list[CSAR].value, 0, 32);
507 uint32_t ssar = buf_get_u32(x86_32->cache->reg_list[SSAR].value, 0, 32);
508 uint32_t cr0 = buf_get_u32(x86_32->cache->reg_list[CR0].value, 0, 32);
509
510 /* clear VM86 and IF bits if they are set */
511 LOG_DEBUG("EFLAGS = 0x%08" PRIx32 ", VM86 = %d, IF = %d", eflags,
512 eflags & EFLAGS_VM86 ? 1 : 0,
513 eflags & EFLAGS_IF ? 1 : 0);
514 if (eflags & EFLAGS_VM86
515 || eflags & EFLAGS_IF) {
516 x86_32->pm_regs[I(EFLAGS)] = eflags & ~(EFLAGS_VM86 | EFLAGS_IF);
517 if (write_hw_reg(t, EFLAGS, x86_32->pm_regs[I(EFLAGS)], 0) != ERROR_OK)
518 return ERROR_FAIL;
519 LOG_DEBUG("EFLAGS now = 0x%08" PRIx32 ", VM86 = %d, IF = %d",
520 x86_32->pm_regs[I(EFLAGS)],
521 x86_32->pm_regs[I(EFLAGS)] & EFLAGS_VM86 ? 1 : 0,
522 x86_32->pm_regs[I(EFLAGS)] & EFLAGS_IF ? 1 : 0);
523 }
524
525 /* set CPL to 0 for memory access */
526 if (csar & CSAR_DPL) {
527 x86_32->pm_regs[I(CSAR)] = csar & ~CSAR_DPL;
528 if (write_hw_reg(t, CSAR, x86_32->pm_regs[I(CSAR)], 0) != ERROR_OK)
529 return ERROR_FAIL;
530 LOG_DEBUG("write CSAR_CPL to 0 0x%08" PRIx32, x86_32->pm_regs[I(CSAR)]);
531 }
532 if (ssar & SSAR_DPL) {
533 x86_32->pm_regs[I(SSAR)] = ssar & ~CSAR_DPL;
534 if (write_hw_reg(t, SSAR, x86_32->pm_regs[I(SSAR)], 0) != ERROR_OK)
535 return ERROR_FAIL;
536 LOG_DEBUG("write SSAR_CPL to 0 0x%08" PRIx32, x86_32->pm_regs[I(SSAR)]);
537 }
538
539 /* if cache's are enabled, disable and flush */
540 if (!(cr0 & CR0_CD)) {
541 LOG_DEBUG("caching enabled CR0 = 0x%08" PRIx32, cr0);
542 if (cr0 & CR0_PG) {
543 x86_32->pm_regs[I(CR0)] = cr0 & ~CR0_PG;
544 if (write_hw_reg(t, CR0, x86_32->pm_regs[I(CR0)], 0) != ERROR_OK)
545 return ERROR_FAIL;
546 LOG_DEBUG("cleared paging CR0_PG = 0x%08" PRIx32, x86_32->pm_regs[I(CR0)]);
547 /* submit wbinvd to flush cache */
548 if (submit_reg_pir(t, WBINVD) != ERROR_OK)
549 return ERROR_FAIL;
550 x86_32->pm_regs[I(CR0)] =
551 x86_32->pm_regs[I(CR0)] | (CR0_CD | CR0_NW | CR0_PG);
552 if (write_hw_reg(t, CR0, x86_32->pm_regs[I(CR0)], 0) != ERROR_OK)
553 return ERROR_FAIL;
554 LOG_DEBUG("set CD, NW and PG, CR0 = 0x%08" PRIx32, x86_32->pm_regs[I(CR0)]);
555 }
556 }
557 return ERROR_OK;
558 }
559
560 static int do_halt(struct target *t)
561 {
562 /* needs proper handling later if doing a halt errors out */
563 t->state = TARGET_DEBUG_RUNNING;
564 if (enter_probemode(t) != ERROR_OK)
565 return ERROR_FAIL;
566 if (save_context(t) != ERROR_OK)
567 return ERROR_FAIL;
568 if (halt_prep(t) != ERROR_OK)
569 return ERROR_FAIL;
570 t->state = TARGET_HALTED;
571
572 return target_call_event_callbacks(t, TARGET_EVENT_HALTED);
573 }
574
575 static int do_resume(struct target *t)
576 {
577 /* needs proper handling later */
578 t->state = TARGET_DEBUG_RUNNING;
579 if (restore_context(t) != ERROR_OK)
580 return ERROR_FAIL;
581 if (exit_probemode(t) != ERROR_OK)
582 return ERROR_FAIL;
583 t->state = TARGET_RUNNING;
584
585 t->debug_reason = DBG_REASON_NOTHALTED;
586 LOG_USER("target running");
587
588 return target_call_event_callbacks(t, TARGET_EVENT_RESUMED);
589 }
590
591 static int read_all_core_hw_regs(struct target *t)
592 {
593 int err;
594 uint32_t regval;
595 unsigned i;
596 struct x86_32_common *x86_32 = target_to_x86_32(t);
597 for (i = 0; i < (x86_32->cache->num_regs); i++) {
598 if (NOT_AVAIL_REG == regs[i].pm_idx)
599 continue;
600 err = read_hw_reg(t, regs[i].id, &regval, 1);
601 if (err != ERROR_OK) {
602 LOG_ERROR("%s error saving reg %s",
603 __func__, x86_32->cache->reg_list[i].name);
604 return err;
605 }
606 }
607 LOG_DEBUG("read_all_core_hw_regs read %u registers ok", i);
608 return ERROR_OK;
609 }
610
611 static int write_all_core_hw_regs(struct target *t)
612 {
613 int err;
614 unsigned i;
615 struct x86_32_common *x86_32 = target_to_x86_32(t);
616 for (i = 0; i < (x86_32->cache->num_regs); i++) {
617 if (NOT_AVAIL_REG == regs[i].pm_idx)
618 continue;
619 err = write_hw_reg(t, i, 0, 1);
620 if (err != ERROR_OK) {
621 LOG_ERROR("%s error restoring reg %s",
622 __func__, x86_32->cache->reg_list[i].name);
623 return err;
624 }
625 }
626 LOG_DEBUG("write_all_core_hw_regs wrote %u registers ok", i);
627 return ERROR_OK;
628 }
629
630 /* read reg from lakemont core shadow ram, update reg cache if needed */
631 static int read_hw_reg(struct target *t, int reg, uint32_t *regval, uint8_t cache)
632 {
633 struct x86_32_common *x86_32 = target_to_x86_32(t);
634 struct lakemont_core_reg *arch_info;
635 arch_info = x86_32->cache->reg_list[reg].arch_info;
636 x86_32->flush = 0; /* dont flush scans till we have a batch */
637 if (submit_reg_pir(t, reg) != ERROR_OK)
638 return ERROR_FAIL;
639 if (submit_instruction_pir(t, SRAMACCESS) != ERROR_OK)
640 return ERROR_FAIL;
641 if (submit_instruction_pir(t, SRAM2PDR) != ERROR_OK)
642 return ERROR_FAIL;
643 x86_32->flush = 1;
644 scan.out[0] = RDWRPDR;
645 if (irscan(t, scan.out, NULL, LMT_IRLEN) != ERROR_OK)
646 return ERROR_FAIL;
647 if (drscan(t, NULL, scan.out, PDR_SIZE) != ERROR_OK)
648 return ERROR_FAIL;
649
650 jtag_add_sleep(DELAY_SUBMITPIR);
651 *regval = buf_get_u32(scan.out, 0, 32);
652 if (cache) {
653 buf_set_u32(x86_32->cache->reg_list[reg].value, 0, 32, *regval);
654 x86_32->cache->reg_list[reg].valid = 1;
655 x86_32->cache->reg_list[reg].dirty = 0;
656 }
657 LOG_DEBUG("reg=%s, op=0x%016" PRIx64 ", val=0x%08" PRIx32,
658 x86_32->cache->reg_list[reg].name,
659 arch_info->op,
660 *regval);
661 return ERROR_OK;
662 }
663
664 /* write lakemont core shadow ram reg, update reg cache if needed */
665 static int write_hw_reg(struct target *t, int reg, uint32_t regval, uint8_t cache)
666 {
667 struct x86_32_common *x86_32 = target_to_x86_32(t);
668 struct lakemont_core_reg *arch_info;
669 arch_info = x86_32->cache->reg_list[reg].arch_info;
670
671 uint8_t reg_buf[4];
672 if (cache)
673 regval = buf_get_u32(x86_32->cache->reg_list[reg].value, 0, 32);
674 buf_set_u32(reg_buf, 0, 32, regval);
675 LOG_DEBUG("reg=%s, op=0x%016" PRIx64 ", val=0x%08" PRIx32,
676 x86_32->cache->reg_list[reg].name,
677 arch_info->op,
678 regval);
679
680 scan.out[0] = RDWRPDR;
681 x86_32->flush = 0; /* dont flush scans till we have a batch */
682 if (irscan(t, scan.out, NULL, LMT_IRLEN) != ERROR_OK)
683 return ERROR_FAIL;
684 if (drscan(t, reg_buf, scan.out, PDR_SIZE) != ERROR_OK)
685 return ERROR_FAIL;
686 if (submit_reg_pir(t, reg) != ERROR_OK)
687 return ERROR_FAIL;
688 if (submit_instruction_pir(t, SRAMACCESS) != ERROR_OK)
689 return ERROR_FAIL;
690 x86_32->flush = 1;
691 if (submit_instruction_pir(t, PDR2SRAM) != ERROR_OK)
692 return ERROR_FAIL;
693
694 /* we are writing from the cache so ensure we reset flags */
695 if (cache) {
696 x86_32->cache->reg_list[reg].dirty = 0;
697 x86_32->cache->reg_list[reg].valid = 0;
698 }
699 return ERROR_OK;
700 }
701
702 static bool is_paging_enabled(struct target *t)
703 {
704 struct x86_32_common *x86_32 = target_to_x86_32(t);
705 if (x86_32->pm_regs[I(CR0)] & CR0_PG)
706 return true;
707 else
708 return false;
709 }
710
711 static uint8_t get_num_user_regs(struct target *t)
712 {
713 struct x86_32_common *x86_32 = target_to_x86_32(t);
714 return x86_32->cache->num_regs;
715 }
716 /* value of the CR0.PG (paging enabled) bit influences memory reads/writes */
717 static int disable_paging(struct target *t)
718 {
719 struct x86_32_common *x86_32 = target_to_x86_32(t);
720 x86_32->pm_regs[I(CR0)] = x86_32->pm_regs[I(CR0)] & ~CR0_PG;
721 int err = x86_32->write_hw_reg(t, CR0, x86_32->pm_regs[I(CR0)], 0);
722 if (err != ERROR_OK) {
723 LOG_ERROR("%s error disabling paging", __func__);
724 return err;
725 }
726 return err;
727 }
728
729 static int enable_paging(struct target *t)
730 {
731 struct x86_32_common *x86_32 = target_to_x86_32(t);
732 x86_32->pm_regs[I(CR0)] = (x86_32->pm_regs[I(CR0)] | CR0_PG);
733 int err = x86_32->write_hw_reg(t, CR0, x86_32->pm_regs[I(CR0)], 0);
734 if (err != ERROR_OK) {
735 LOG_ERROR("%s error enabling paging", __func__);
736 return err;
737 }
738 return err;
739 }
740
741 static bool sw_bpts_supported(struct target *t)
742 {
743 uint32_t tapstatus = get_tapstatus(t);
744 if (tapstatus & TS_SBP_BIT)
745 return true;
746 else
747 return false;
748 }
749
750 static int transaction_status(struct target *t)
751 {
752 uint32_t tapstatus = get_tapstatus(t);
753 if ((TS_EN_PM_BIT | TS_PRDY_BIT) & tapstatus) {
754 LOG_ERROR("%s transaction error tapstatus = 0x%08" PRIx32
755 , __func__, tapstatus);
756 return ERROR_FAIL;
757 } else {
758 return ERROR_OK;
759 }
760 }
761
762 static int submit_instruction(struct target *t, int num)
763 {
764 int err = submit_instruction_pir(t, num);
765 if (err != ERROR_OK) {
766 LOG_ERROR("%s error submitting pir", __func__);
767 return err;
768 }
769 return err;
770 }
771
772 static int submit_reg_pir(struct target *t, int num)
773 {
774 LOG_DEBUG("reg %s op=0x%016" PRIx64, regs[num].name, regs[num].op);
775 int err = submit_pir(t, regs[num].op);
776 if (err != ERROR_OK) {
777 LOG_ERROR("%s error submitting pir", __func__);
778 return err;
779 }
780 return err;
781 }
782
783 static int submit_instruction_pir(struct target *t, int num)
784 {
785 LOG_DEBUG("%s op=0x%016" PRIx64, instructions[num].name,
786 instructions[num].op);
787 int err = submit_pir(t, instructions[num].op);
788 if (err != ERROR_OK) {
789 LOG_ERROR("%s error submitting pir", __func__);
790 return err;
791 }
792 return err;
793 }
794
/*
 * PIR (Probe Mode Instruction Register), SUBMITPIR is an "IR only" TAP
 * command; there is no corresponding data register.
 *
 * Sequence: WRPIR (IR scan) -> 64-bit opcode (DR scan) -> SUBMITPIR
 * (IR scan), followed by a delay so the CPU can execute the submitted
 * instruction.
 */
static int submit_pir(struct target *t, uint64_t op)
{
	struct x86_32_common *x86_32 = target_to_x86_32(t);

	uint8_t op_buf[8];
	buf_set_u64(op_buf, 0, 64, op);
	/* Defer the caller's JTAG queue flush policy until the final
	 * SUBMITPIR scan so the WRPIR + data + SUBMITPIR sequence is
	 * queued as one batch.
	 */
	int flush = x86_32->flush;
	x86_32->flush = 0;
	scan.out[0] = WRPIR;
	if (irscan(t, scan.out, NULL, LMT_IRLEN) != ERROR_OK)
		return ERROR_FAIL;
	/* shift the opcode into the PIR data register (PIR_SIZE bits) */
	if (drscan(t, op_buf, scan.out, PIR_SIZE) != ERROR_OK)
		return ERROR_FAIL;
	scan.out[0] = SUBMITPIR;
	/* restore the flush policy before the submitting IR scan */
	x86_32->flush = flush;
	if (irscan(t, scan.out, NULL, LMT_IRLEN) != ERROR_OK)
		return ERROR_FAIL;
	/* give the CPU time to execute the submitted instruction */
	jtag_add_sleep(DELAY_SUBMITPIR);
	return ERROR_OK;
}
819
820 int lakemont_init_target(struct command_context *cmd_ctx, struct target *t)
821 {
822 lakemont_build_reg_cache(t);
823 t->state = TARGET_RUNNING;
824 t->debug_reason = DBG_REASON_NOTHALTED;
825 return ERROR_OK;
826 }
827
828 int lakemont_init_arch_info(struct target *t, struct x86_32_common *x86_32)
829 {
830 x86_32->submit_instruction = submit_instruction;
831 x86_32->transaction_status = transaction_status;
832 x86_32->read_hw_reg = read_hw_reg;
833 x86_32->write_hw_reg = write_hw_reg;
834 x86_32->sw_bpts_supported = sw_bpts_supported;
835 x86_32->get_num_user_regs = get_num_user_regs;
836 x86_32->is_paging_enabled = is_paging_enabled;
837 x86_32->disable_paging = disable_paging;
838 x86_32->enable_paging = enable_paging;
839 return ERROR_OK;
840 }
841
842 int lakemont_poll(struct target *t)
843 {
844 /* LMT1 PMCR register currently allows code breakpoints, data breakpoints,
845 * single stepping and shutdowns to be redirected to PM but does not allow
846 * redirecting into PM as a result of SMM enter and SMM exit
847 */
848 uint32_t ts = get_tapstatus(t);
849
850 if (ts == 0xFFFFFFFF && t->state != TARGET_DEBUG_RUNNING) {
851 /* something is wrong here */
852 LOG_ERROR("tapstatus invalid - scan_chain serialization or locked JTAG access issues");
853 /* TODO: Give a hint that unlocking is wrong or maybe a
854 * 'jtag arp_init' helps
855 */
856 t->state = TARGET_DEBUG_RUNNING;
857 return ERROR_OK;
858 }
859
860 if (t->state == TARGET_HALTED && (!(ts & TS_PM_BIT))) {
861 LOG_INFO("target running for unknown reason");
862 t->state = TARGET_RUNNING;
863 }
864
865 if (t->state == TARGET_RUNNING &&
866 t->state != TARGET_DEBUG_RUNNING) {
867
868 if ((ts & TS_PM_BIT) && (ts & TS_PMCR_BIT)) {
869
870 LOG_DEBUG("redirect to PM, tapstatus=0x%08" PRIx32, get_tapstatus(t));
871
872 t->state = TARGET_DEBUG_RUNNING;
873 if (save_context(t) != ERROR_OK)
874 return ERROR_FAIL;
875 if (halt_prep(t) != ERROR_OK)
876 return ERROR_FAIL;
877 t->state = TARGET_HALTED;
878 t->debug_reason = DBG_REASON_UNDEFINED;
879
880 struct x86_32_common *x86_32 = target_to_x86_32(t);
881 uint32_t eip = buf_get_u32(x86_32->cache->reg_list[EIP].value, 0, 32);
882 uint32_t dr6 = buf_get_u32(x86_32->cache->reg_list[DR6].value, 0, 32);
883 uint32_t hwbreakpoint = (uint32_t)-1;
884
885 if (dr6 & DR6_BRKDETECT_0)
886 hwbreakpoint = 0;
887 if (dr6 & DR6_BRKDETECT_1)
888 hwbreakpoint = 1;
889 if (dr6 & DR6_BRKDETECT_2)
890 hwbreakpoint = 2;
891 if (dr6 & DR6_BRKDETECT_3)
892 hwbreakpoint = 3;
893
894 if (hwbreakpoint != (uint32_t)-1) {
895 uint32_t dr7 = buf_get_u32(x86_32->cache->reg_list[DR7].value, 0, 32);
896 uint32_t type = dr7 & (0x03 << (DR7_RW_SHIFT + hwbreakpoint*DR7_RW_LEN_SIZE));
897 if (type == DR7_BP_EXECUTE) {
898 LOG_USER("hit hardware breakpoint (hwreg=%" PRIu32 ") at 0x%08" PRIx32, hwbreakpoint, eip);
899 } else {
900 uint32_t address = 0;
901 switch (hwbreakpoint) {
902 default:
903 case 0:
904 address = buf_get_u32(x86_32->cache->reg_list[DR0].value, 0, 32);
905 break;
906 case 1:
907 address = buf_get_u32(x86_32->cache->reg_list[DR1].value, 0, 32);
908 break;
909 case 2:
910 address = buf_get_u32(x86_32->cache->reg_list[DR2].value, 0, 32);
911 break;
912 case 3:
913 address = buf_get_u32(x86_32->cache->reg_list[DR3].value, 0, 32);
914 break;
915 }
916 LOG_USER("hit '%s' watchpoint for 0x%08" PRIx32 " (hwreg=%" PRIu32 ") at 0x%08" PRIx32,
917 type == DR7_BP_WRITE ? "write" : "access", address,
918 hwbreakpoint, eip);
919 }
920 t->debug_reason = DBG_REASON_BREAKPOINT;
921 } else {
922 /* Check if the target hit a software breakpoint.
923 * ! Watch out: EIP is currently pointing after the breakpoint opcode
924 */
925 struct breakpoint *bp = NULL;
926 bp = breakpoint_find(t, eip-1);
927 if (bp != NULL) {
928 t->debug_reason = DBG_REASON_BREAKPOINT;
929 if (bp->type == BKPT_SOFT) {
930 /* The EIP is now pointing the the next byte after the
931 * breakpoint instruction. This needs to be corrected.
932 */
933 buf_set_u32(x86_32->cache->reg_list[EIP].value, 0, 32, eip-1);
934 x86_32->cache->reg_list[EIP].dirty = 1;
935 x86_32->cache->reg_list[EIP].valid = 1;
936 LOG_USER("hit software breakpoint at 0x%08" PRIx32, eip-1);
937 } else {
938 /* it's not a hardware breakpoint (checked already in DR6 state)
939 * and it's also not a software breakpoint ...
940 */
941 LOG_USER("hit unknown breakpoint at 0x%08" PRIx32, eip);
942 }
943 } else {
944
945 /* There is also the case that we hit an breakpoint instruction,
946 * which was not set by us. This needs to be handled be the
947 * application that introduced the breakpoint.
948 */
949
950 LOG_USER("unknown break reason at 0x%08" PRIx32, eip);
951 }
952 }
953
954 return target_call_event_callbacks(t, TARGET_EVENT_HALTED);
955 }
956 }
957 return ERROR_OK;
958 }
959
960 int lakemont_arch_state(struct target *t)
961 {
962 struct x86_32_common *x86_32 = target_to_x86_32(t);
963
964 LOG_USER("target halted due to %s at 0x%08" PRIx32 " in %s mode",
965 debug_reason_name(t),
966 buf_get_u32(x86_32->cache->reg_list[EIP].value, 0, 32),
967 (buf_get_u32(x86_32->cache->reg_list[CR0].value, 0, 32) & CR0_PE) ? "protected" : "real");
968
969 return ERROR_OK;
970 }
971
972 int lakemont_halt(struct target *t)
973 {
974 if (t->state == TARGET_RUNNING) {
975 t->debug_reason = DBG_REASON_DBGRQ;
976 if (do_halt(t) != ERROR_OK)
977 return ERROR_FAIL;
978 return ERROR_OK;
979 } else {
980 LOG_ERROR("%s target not running", __func__);
981 return ERROR_FAIL;
982 }
983 }
984
985 int lakemont_resume(struct target *t, int current, uint32_t address,
986 int handle_breakpoints, int debug_execution)
987 {
988 struct breakpoint *bp = NULL;
989 struct x86_32_common *x86_32 = target_to_x86_32(t);
990
991 if (check_not_halted(t))
992 return ERROR_TARGET_NOT_HALTED;
993 /* TODO lakemont_enable_breakpoints(t); */
994 if (t->state == TARGET_HALTED) {
995
996 /* running away for a software breakpoint needs some special handling */
997 uint32_t eip = buf_get_u32(x86_32->cache->reg_list[EIP].value, 0, 32);
998 bp = breakpoint_find(t, eip);
999 if (bp != NULL /*&& bp->type == BKPT_SOFT*/) {
1000 /* the step will step over the breakpoint */
1001 if (lakemont_step(t, 0, 0, 1) != ERROR_OK) {
1002 LOG_ERROR("%s stepping over a software breakpoint at 0x%08" PRIx32 " "
1003 "failed to resume the target", __func__, eip);
1004 return ERROR_FAIL;
1005 }
1006 }
1007
1008 /* if breakpoints are enabled, we need to redirect these into probe mode */
1009 struct breakpoint *activeswbp = t->breakpoints;
1010 while (activeswbp != NULL && activeswbp->set == 0)
1011 activeswbp = activeswbp->next;
1012 struct watchpoint *activehwbp = t->watchpoints;
1013 while (activehwbp != NULL && activehwbp->set == 0)
1014 activehwbp = activehwbp->next;
1015 if (activeswbp != NULL || activehwbp != NULL)
1016 buf_set_u32(x86_32->cache->reg_list[PMCR].value, 0, 32, 1);
1017 if (do_resume(t) != ERROR_OK)
1018 return ERROR_FAIL;
1019 } else {
1020 LOG_USER("target not halted");
1021 return ERROR_FAIL;
1022 }
1023 return ERROR_OK;
1024 }
1025
/*
 * Execute a single instruction on the halted target.
 *
 * If a breakpoint is set at the current EIP it is temporarily removed,
 * EFLAGS.TF (trap flag) and EFLAGS.RF (resume flag) are set together
 * with PMCR so the resulting single-step trap redirects back into
 * probemode; the context is restored and probemode exited. After the
 * step completes, the context is saved again and the breakpoint
 * re-applied.
 *
 * Returns ERROR_OK on a successful step, ERROR_FAIL if the target did
 * not stop (or a context/probemode operation failed).
 */
int lakemont_step(struct target *t, int current,
			uint32_t address, int handle_breakpoints)
{
	struct x86_32_common *x86_32 = target_to_x86_32(t);
	uint32_t eflags = buf_get_u32(x86_32->cache->reg_list[EFLAGS].value, 0, 32);
	uint32_t eip = buf_get_u32(x86_32->cache->reg_list[EIP].value, 0, 32);
	uint32_t pmcr = buf_get_u32(x86_32->cache->reg_list[PMCR].value, 0, 32);
	struct breakpoint *bp = NULL;
	int retval = ERROR_OK;
	uint32_t tapstatus = 0;

	if (check_not_halted(t))
		return ERROR_TARGET_NOT_HALTED;
	bp = breakpoint_find(t, eip);
	if (retval == ERROR_OK && bp != NULL/*&& bp->type == BKPT_SOFT*/) {
		/* TODO: This should only be done for software breakpoints.
		 * Stepping from hardware breakpoints should be possible with the resume flag
		 * Needs testing.
		 */
		retval = x86_32_common_remove_breakpoint(t, bp);
	}

	/* Set EFLAGS[TF] and PMCR[IR], exit pm and wait for PRDY# */
	LOG_DEBUG("modifying PMCR = 0x%08" PRIx32 " and EFLAGS = 0x%08" PRIx32, pmcr, eflags);
	eflags = eflags | (EFLAGS_TF | EFLAGS_RF);
	buf_set_u32(x86_32->cache->reg_list[EFLAGS].value, 0, 32, eflags);
	buf_set_u32(x86_32->cache->reg_list[PMCR].value, 0, 32, 1);
	LOG_DEBUG("EFLAGS [TF] [RF] bits set=0x%08" PRIx32 ", PMCR=0x%08" PRIx32 ", EIP=0x%08" PRIx32,
			eflags, pmcr, eip);

	/* NOTE(review): the value read here is overwritten below before
	 * use - possibly kept for a JTAG access side effect; confirm.
	 */
	tapstatus = get_tapstatus(t);

	t->debug_reason = DBG_REASON_SINGLESTEP;
	t->state = TARGET_DEBUG_RUNNING;
	/* write back the modified registers, then release from probemode */
	if (restore_context(t) != ERROR_OK)
		return ERROR_FAIL;
	if (exit_probemode(t) != ERROR_OK)
		return ERROR_FAIL;

	target_call_event_callbacks(t, TARGET_EVENT_RESUMED);

	/* any of these bits set indicates the target re-entered probemode */
	tapstatus = get_tapstatus(t);
	if (tapstatus & (TS_PM_BIT | TS_EN_PM_BIT | TS_PRDY_BIT | TS_PMCR_BIT)) {
		/* target has stopped */
		if (save_context(t) != ERROR_OK)
			return ERROR_FAIL;
		if (halt_prep(t) != ERROR_OK)
			return ERROR_FAIL;
		t->state = TARGET_HALTED;

		LOG_USER("step done from EIP 0x%08" PRIx32 " to 0x%08" PRIx32, eip,
				buf_get_u32(x86_32->cache->reg_list[EIP].value, 0, 32));
		target_call_event_callbacks(t, TARGET_EVENT_HALTED);
	} else {
		/* target didn't stop
		 * I hope the poll() will catch it, but the deleted breakpoint is gone
		 */
		LOG_ERROR("%s target didn't stop after executing a single step", __func__);
		t->state = TARGET_RUNNING;
		return ERROR_FAIL;
	}

	/* try to re-apply the breakpoint, even if the step failed
	 * TODO: When a bp was set, we should try to stop the target - fix the return above
	 */
	if (bp != NULL/*&& bp->type == BKPT_SOFT*/) {
		/* TODO: This should only be done for software breakpoints.
		 * Stepping from hardware breakpoints should be possible with the resume flag
		 * Needs testing.
		 */
		retval = x86_32_common_add_breakpoint(t, bp);
	}

	return retval;
}
1101
/* TODO - implement resetbreak fully through CLTAP registers */
/* Reset-assert callback: currently a stub with no probemode action. */
int lakemont_reset_assert(struct target *t)
{
	LOG_DEBUG("-");
	return ERROR_OK;
}
1108
/* Reset-deassert callback: currently a stub with no probemode action. */
int lakemont_reset_deassert(struct target *t)
{
	LOG_DEBUG("-");
	return ERROR_OK;
}