/***************************************************************************
 *   Copyright (C) 2007 by Dominic Rath                                    *
 *   Dominic.Rath@gmx.de                                                   *
 *                                                                         *
 *   This program is free software; you can redistribute it and/or modify  *
 *   it under the terms of the GNU General Public License as published by  *
 *   the Free Software Foundation; either version 2 of the License, or     *
 *   (at your option) any later version.                                   *
 *                                                                         *
 *   This program is distributed in the hope that it will be useful,       *
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of        *
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the         *
 *   GNU General Public License for more details.                          *
 *                                                                         *
 *   You should have received a copy of the GNU General Public License     *
 *   along with this program; if not, write to the                         *
 *   Free Software Foundation, Inc.,                                       *
 *   59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.              *
 ***************************************************************************/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include "armv4_5.h"
#include "etb.h"

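/* Human-readable names for the ETB registers, indexed by their register
 * address (0 = identification ... 8 = control); etb_build_reg_cache() below
 * assigns arch_info[i].addr = i in the same order.
 */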
static char* etb_reg_list[] =
{
	"ETB_identification",
	"ETB_ram_depth",
	"ETB_ram_width",
	"ETB_status",
	"ETB_ram_data",
	"ETB_ram_read_pointer",
	"ETB_ram_write_pointer",
	"ETB_trigger_counter",
	"ETB_control",
};

static int etb_reg_arch_type = -1;

static int etb_get_reg(struct reg *reg);

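/* Queue an IR scan that loads new_instr into the ETB TAP's instruction
 * register; the scan is skipped if that instruction is already selected.
 */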
static int etb_set_instr(struct etb *etb, uint32_t new_instr)
{
	struct jtag_tap *tap;

	tap = etb->tap;
	if (tap == NULL)
		return ERROR_FAIL;

	if (buf_get_u32(tap->cur_instr, 0, tap->ir_length) != new_instr)
	{
		struct scan_field field;

		field.tap = tap;
		field.num_bits = tap->ir_length;
		field.out_value = calloc(CEIL(field.num_bits, 8), 1);
		buf_set_u32(field.out_value, 0, field.num_bits, new_instr);

		field.in_value = NULL;

		jtag_add_ir_scan(1, &field, jtag_get_end_state());

		free(field.out_value);
	}

	return ERROR_OK;
}

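/* Select one of the ETB's internal scan chains: queue the 5-bit chain number
 * through a DR scan and cache the selection so redundant scans are avoided.
 */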
static int etb_scann(struct etb *etb, uint32_t new_scan_chain)
{
	if (etb->cur_scan_chain != new_scan_chain)
	{
		struct scan_field field;

		field.tap = etb->tap;
		field.num_bits = 5;
		field.out_value = calloc(CEIL(field.num_bits, 8), 1);
		buf_set_u32(field.out_value, 0, field.num_bits, new_scan_chain);

		field.in_value = NULL;

		/* select the scan chain via the SCAN_N (0x2) instruction;
		 * INTEST (0xc) is selected later for the actual data accesses */
		etb_set_instr(etb, 0x2);
		jtag_add_dr_scan(1, &field, jtag_get_end_state());

		etb->cur_scan_chain = new_scan_chain;

		free(field.out_value);
	}

	return ERROR_OK;
}

static int etb_read_reg_w_check(struct reg *, uint8_t *, uint8_t *);
static int etb_set_reg_w_exec(struct reg *, uint8_t *);

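/* etb_read_reg() only queues the register read; etb_get_reg() queues it and
 * then flushes the JTAG queue so the cached value is available immediately.
 */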
static int etb_read_reg(struct reg *reg)
{
	return etb_read_reg_w_check(reg, NULL, NULL);
}

static int etb_get_reg(struct reg *reg)
{
	int retval;

	if ((retval = etb_read_reg(reg)) != ERROR_OK)
	{
		LOG_ERROR("BUG: error scheduling ETB register read");
		return retval;
	}

	if ((retval = jtag_execute_queue()) != ERROR_OK)
	{
		LOG_ERROR("ETB register read failed");
		return retval;
	}

	return ERROR_OK;
}

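/* Build the register cache for the nine ETB registers listed above; each
 * entry gets a 32-bit value buffer and a struct etb_reg holding its address.
 */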
struct reg_cache* etb_build_reg_cache(struct etb *etb)
{
	struct reg_cache *reg_cache = malloc(sizeof(struct reg_cache));
	struct reg *reg_list = NULL;
	struct etb_reg *arch_info = NULL;
	int num_regs = 9;
	int i;

	/* register a register arch-type for etb registers only once */
	if (etb_reg_arch_type == -1)
		etb_reg_arch_type = register_reg_arch_type(etb_get_reg, etb_set_reg_w_exec);

	/* the actual registers are kept in two arrays */
	reg_list = calloc(num_regs, sizeof(struct reg));
	arch_info = calloc(num_regs, sizeof(struct etb_reg));

	/* fill in values for the reg cache */
	reg_cache->name = "etb registers";
	reg_cache->next = NULL;
	reg_cache->reg_list = reg_list;
	reg_cache->num_regs = num_regs;

	/* set up registers */
	for (i = 0; i < num_regs; i++)
	{
		reg_list[i].name = etb_reg_list[i];
		reg_list[i].size = 32;
		reg_list[i].dirty = 0;
		reg_list[i].valid = 0;
		reg_list[i].bitfield_desc = NULL;
		reg_list[i].num_bitfields = 0;
		reg_list[i].value = calloc(1, 4);
		reg_list[i].arch_info = &arch_info[i];
		reg_list[i].arch_type = etb_reg_arch_type;
		arch_info[i].addr = i;
		arch_info[i].etb = etb;
	}

	return reg_cache;
}

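/* JTAG callback: convert the raw bits captured for a frame into a host-order
 * uint32_t, in place.
 */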
static void etb_getbuf(jtag_callback_data_t arg)
{
	uint8_t *in = (uint8_t *)arg;

	*((uint32_t *)in) = buf_get_u32(in, 0, 32);
}

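/* Read num_frames words from the ETB trace RAM. Reads are pipelined: each DR
 * scan addresses register 4 (RAM data) and captures the word requested by the
 * previous scan, so one extra leading scan is queued and the final scan moves
 * the address away from the RAM data register.
 */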
static int etb_read_ram(struct etb *etb, uint32_t *data, int num_frames)
{
	struct scan_field fields[3];
	int i;

	jtag_set_end_state(TAP_IDLE);
	etb_scann(etb, 0x0);
	etb_set_instr(etb, 0xc);

	fields[0].tap = etb->tap;
	fields[0].num_bits = 32;
	fields[0].out_value = NULL;
	fields[0].in_value = NULL;

	fields[1].tap = etb->tap;
	fields[1].num_bits = 7;
	fields[1].out_value = malloc(1);
	buf_set_u32(fields[1].out_value, 0, 7, 4);
	fields[1].in_value = NULL;

	fields[2].tap = etb->tap;
	fields[2].num_bits = 1;
	fields[2].out_value = malloc(1);
	buf_set_u32(fields[2].out_value, 0, 1, 0);
	fields[2].in_value = NULL;

	jtag_add_dr_scan(3, fields, jtag_get_end_state());

	for (i = 0; i < num_frames; i++)
	{
		/* ensure nR/W remains set to read */
		buf_set_u32(fields[2].out_value, 0, 1, 0);

		/* address remains set to 0x4 (RAM data) until we read the last frame */
		if (i < num_frames - 1)
			buf_set_u32(fields[1].out_value, 0, 7, 4);
		else
			buf_set_u32(fields[1].out_value, 0, 7, 0);

		fields[0].in_value = (uint8_t *)(data + i);
		jtag_add_dr_scan(3, fields, jtag_get_end_state());

		jtag_add_callback(etb_getbuf, (jtag_callback_data_t)(data + i));
	}

	jtag_execute_queue();

	free(fields[1].out_value);
	free(fields[2].out_value);

	return ERROR_OK;
}

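/* Read a single ETB register. As with the RAM reads, register reads are
 * pipelined: the first DR scan sets up the address, and a second scan
 * (addressed to the identification register) captures the value, optionally
 * checking it against check_value/check_mask.
 */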
static int etb_read_reg_w_check(struct reg *reg,
		uint8_t* check_value, uint8_t* check_mask)
{
	struct etb_reg *etb_reg = reg->arch_info;
	uint8_t reg_addr = etb_reg->addr & 0x7f;
	struct scan_field fields[3];

	LOG_DEBUG("%i", (int)(etb_reg->addr));

	jtag_set_end_state(TAP_IDLE);
	etb_scann(etb_reg->etb, 0x0);
	etb_set_instr(etb_reg->etb, 0xc);

	fields[0].tap = etb_reg->etb->tap;
	fields[0].num_bits = 32;
	fields[0].out_value = reg->value;
	fields[0].in_value = NULL;
	fields[0].check_value = NULL;
	fields[0].check_mask = NULL;

	fields[1].tap = etb_reg->etb->tap;
	fields[1].num_bits = 7;
	fields[1].out_value = malloc(1);
	buf_set_u32(fields[1].out_value, 0, 7, reg_addr);
	fields[1].in_value = NULL;
	fields[1].check_value = NULL;
	fields[1].check_mask = NULL;

	fields[2].tap = etb_reg->etb->tap;
	fields[2].num_bits = 1;
	fields[2].out_value = malloc(1);
	buf_set_u32(fields[2].out_value, 0, 1, 0);
	fields[2].in_value = NULL;
	fields[2].check_value = NULL;
	fields[2].check_mask = NULL;

	jtag_add_dr_scan(3, fields, jtag_get_end_state());

	/* read the identification register in the second run, to make sure we
	 * don't read the ETB data register twice, skipping every second entry
	 */
	buf_set_u32(fields[1].out_value, 0, 7, 0x0);
	fields[0].in_value = reg->value;
	fields[0].check_value = check_value;
	fields[0].check_mask = check_mask;

	jtag_add_dr_scan_check(3, fields, jtag_get_end_state());

	free(fields[1].out_value);
	free(fields[2].out_value);

	return ERROR_OK;
}

static int etb_write_reg(struct reg *, uint32_t);

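/* etb_set_reg() queues the write and updates the cached value;
 * etb_set_reg_w_exec() additionally flushes the JTAG queue.
 */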
static int etb_set_reg(struct reg *reg, uint32_t value)
{
	int retval;

	if ((retval = etb_write_reg(reg, value)) != ERROR_OK)
	{
		LOG_ERROR("BUG: error scheduling ETB register write");
		return retval;
	}

	buf_set_u32(reg->value, 0, reg->size, value);
	reg->valid = 1;
	reg->dirty = 0;

	return ERROR_OK;
}

static int etb_set_reg_w_exec(struct reg *reg, uint8_t *buf)
{
	int retval;

	etb_set_reg(reg, buf_get_u32(buf, 0, reg->size));

	if ((retval = jtag_execute_queue()) != ERROR_OK)
	{
		LOG_ERROR("ETB: register write failed");
		return retval;
	}
	return ERROR_OK;
}

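/* Queue a write of 'value' to an ETB register: 32 data bits, 7 address bits
 * and the nR/W bit set to 1 to select a write.
 */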
static int etb_write_reg(struct reg *reg, uint32_t value)
{
	struct etb_reg *etb_reg = reg->arch_info;
	uint8_t reg_addr = etb_reg->addr & 0x7f;
	struct scan_field fields[3];

	LOG_DEBUG("%i: 0x%8.8" PRIx32 "", (int)(etb_reg->addr), value);

	jtag_set_end_state(TAP_IDLE);
	etb_scann(etb_reg->etb, 0x0);
	etb_set_instr(etb_reg->etb, 0xc);

	fields[0].tap = etb_reg->etb->tap;
	fields[0].num_bits = 32;
	fields[0].out_value = malloc(4);
	buf_set_u32(fields[0].out_value, 0, 32, value);
	fields[0].in_value = NULL;

	fields[1].tap = etb_reg->etb->tap;
	fields[1].num_bits = 7;
	fields[1].out_value = malloc(1);
	buf_set_u32(fields[1].out_value, 0, 7, reg_addr);
	fields[1].in_value = NULL;

	fields[2].tap = etb_reg->etb->tap;
	fields[2].num_bits = 1;
	fields[2].out_value = malloc(1);
	buf_set_u32(fields[2].out_value, 0, 1, 1);
	fields[2].in_value = NULL;

	/* queue the write; the scan copies out_value into the command queue,
	 * so the temporary buffers can be freed right away (same pattern as
	 * the other scans in this file) */
	jtag_add_dr_scan(3, fields, jtag_get_end_state());

	free(fields[0].out_value);
	free(fields[1].out_value);
	free(fields[2].out_value);

	return ERROR_OK;
}

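/* "etb config <target> <tap>": attach an ETB on the given TAP to the ETM of
 * the given target. Config-script example (target and TAP names are
 * illustrative only):
 *
 *   etb config arm926ejs.cpu arm926ejs.etb
 */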
COMMAND_HANDLER(handle_etb_config_command)
{
	struct target *target;
	struct jtag_tap *tap;
	struct arm *arm;

	if (argc != 2)
	{
		return ERROR_COMMAND_SYNTAX_ERROR;
	}

	target = get_target(args[0]);

	if (!target)
	{
		LOG_ERROR("ETB: target '%s' not defined", args[0]);
		return ERROR_FAIL;
	}

	arm = target_to_arm(target);
	if (!is_arm(arm))
	{
		command_print(cmd_ctx, "ETB: '%s' isn't an ARM", args[0]);
		return ERROR_FAIL;
	}

	tap = jtag_tap_by_string(args[1]);
	if (tap == NULL)
	{
		command_print(cmd_ctx, "ETB: TAP %s does not exist", args[1]);
		return ERROR_FAIL;
	}

	if (arm->etm)
	{
		struct etb *etb = malloc(sizeof(struct etb));

		arm->etm->capture_driver_priv = etb;

		etb->tap = tap;
		etb->cur_scan_chain = 0xffffffff;
		etb->reg_cache = NULL;
		etb->ram_width = 0;
		etb->ram_depth = 0;
	}
	else
	{
		LOG_ERROR("ETM: target has no ETM defined, ETB left unconfigured");
		return ERROR_FAIL;
	}

	return ERROR_OK;
}

static int etb_register_commands(struct command_context_s *cmd_ctx)
{
	command_t *etb_cmd = register_command(cmd_ctx, NULL, "etb",
			NULL, COMMAND_ANY, "Embedded Trace Buffer");

	register_command(cmd_ctx, etb_cmd, "config",
			handle_etb_config_command, COMMAND_CONFIG,
			NULL);

	return ERROR_OK;
}

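/* Capture driver init: read the ETB's RAM depth and width registers so later
 * trace reads know how much buffer memory is available.
 */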
static int etb_init(struct etm_context *etm_ctx)
{
	struct etb *etb = etm_ctx->capture_driver_priv;

	etb->etm_ctx = etm_ctx;

	/* identify ETB RAM depth and width */
	etb_read_reg(&etb->reg_cache->reg_list[ETB_RAM_DEPTH]);
	etb_read_reg(&etb->reg_cache->reg_list[ETB_RAM_WIDTH]);
	jtag_execute_queue();

	etb->ram_depth = buf_get_u32(etb->reg_cache->reg_list[ETB_RAM_DEPTH].value, 0, 32);
	etb->ram_width = buf_get_u32(etb->reg_cache->reg_list[ETB_RAM_WIDTH].value, 0, 32);

	return ERROR_OK;
}

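/* Report capture status from the ETB control and status registers:
 * control bit 0 = capture enabled; status bit 0 = Full (wrapped),
 * bit 1 = Triggered, bit 2 = AcqComp, bit 3 = DFEmpty.
 */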
static trace_status_t etb_status(struct etm_context *etm_ctx)
{
	struct etb *etb = etm_ctx->capture_driver_priv;
	struct reg *control = &etb->reg_cache->reg_list[ETB_CTRL];
	struct reg *status = &etb->reg_cache->reg_list[ETB_STATUS];
	trace_status_t retval = 0;
	int etb_timeout = 100;

	etb->etm_ctx = etm_ctx;

	/* read control and status registers */
	etb_read_reg(control);
	etb_read_reg(status);
	jtag_execute_queue();

	/* See if it's (still) active */
	retval = buf_get_u32(control->value, 0, 1) ? TRACE_RUNNING : TRACE_IDLE;

	/* check Full bit to identify wraparound/overflow */
	if (buf_get_u32(status->value, 0, 1) == 1)
		retval |= TRACE_OVERFLOWED;

	/* check Triggered bit to identify trigger condition */
	if (buf_get_u32(status->value, 1, 1) == 1)
		retval |= TRACE_TRIGGERED;

	/* check AcqComp to see if trigger counter dropped to zero */
	if (buf_get_u32(status->value, 2, 1) == 1) {
		/* wait for DFEmpty */
		while (etb_timeout-- && buf_get_u32(status->value, 3, 1) == 0)
			etb_get_reg(status);

		/* the post-decrement leaves etb_timeout at -1 once the loop gives up */
		if (etb_timeout == -1)
			LOG_ERROR("ETB: DFEmpty won't go high, status 0x%02x",
					(unsigned) buf_get_u32(status->value, 0, 4));

		if (!(etm_ctx->capture_status & TRACE_TRIGGERED))
			LOG_WARNING("ETB: trace complete without triggering?");

		retval |= TRACE_COMPLETED;
	}

	/* NOTE: using a trigger is optional; and at least ETB11 has a mode
	 * where it can ignore the trigger counter.
	 */

	/* update recorded state */
	etm_ctx->capture_status = retval;

	return retval;
}

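/* Pull the captured trace out of the ETB RAM and unpack it into ETMv1 trace
 * cycles. Each 32-bit RAM frame holds three cycles for a 4-bit trace port,
 * two for an 8-bit port, and one otherwise; every cycle carries a 3-bit
 * pipestat, a packet and a tracesync flag.
 */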
static int etb_read_trace(struct etm_context *etm_ctx)
{
	struct etb *etb = etm_ctx->capture_driver_priv;
	int first_frame = 0;
	int num_frames = etb->ram_depth;
	uint32_t *trace_data = NULL;
	int i, j;

	etb_read_reg(&etb->reg_cache->reg_list[ETB_STATUS]);
	etb_read_reg(&etb->reg_cache->reg_list[ETB_RAM_WRITE_POINTER]);
	jtag_execute_queue();

	/* check if we overflowed, and adjust first frame of the trace accordingly
	 * if we didn't overflow, read only up to the frame that would be written next,
	 * i.e. don't read invalid entries
	 */
	if (buf_get_u32(etb->reg_cache->reg_list[ETB_STATUS].value, 0, 1))
	{
		first_frame = buf_get_u32(etb->reg_cache->reg_list[ETB_RAM_WRITE_POINTER].value, 0, 32);
	}
	else
	{
		num_frames = buf_get_u32(etb->reg_cache->reg_list[ETB_RAM_WRITE_POINTER].value, 0, 32);
	}

	etb_write_reg(&etb->reg_cache->reg_list[ETB_RAM_READ_POINTER], first_frame);

	/* read data into temporary array for unpacking */
	trace_data = malloc(sizeof(uint32_t) * num_frames);
	etb_read_ram(etb, trace_data, num_frames);

	if (etm_ctx->trace_depth > 0)
	{
		free(etm_ctx->trace_data);
	}

	if ((etm_ctx->portmode & ETM_PORT_WIDTH_MASK) == ETM_PORT_4BIT)
		etm_ctx->trace_depth = num_frames * 3;
	else if ((etm_ctx->portmode & ETM_PORT_WIDTH_MASK) == ETM_PORT_8BIT)
		etm_ctx->trace_depth = num_frames * 2;
	else
		etm_ctx->trace_depth = num_frames;

	etm_ctx->trace_data = malloc(sizeof(struct etmv1_trace_data) * etm_ctx->trace_depth);

	for (i = 0, j = 0; i < num_frames; i++)
	{
		if ((etm_ctx->portmode & ETM_PORT_WIDTH_MASK) == ETM_PORT_4BIT)
		{
			/* trace word j */
			etm_ctx->trace_data[j].pipestat = trace_data[i] & 0x7;
			etm_ctx->trace_data[j].packet = (trace_data[i] & 0x78) >> 3;
			etm_ctx->trace_data[j].flags = 0;
			if ((trace_data[i] & 0x80) >> 7)
			{
				etm_ctx->trace_data[j].flags |= ETMV1_TRACESYNC_CYCLE;
			}
			if (etm_ctx->trace_data[j].pipestat == STAT_TR)
			{
				etm_ctx->trace_data[j].pipestat = etm_ctx->trace_data[j].packet & 0x7;
				etm_ctx->trace_data[j].flags |= ETMV1_TRIGGER_CYCLE;
			}

			/* trace word j + 1 */
			etm_ctx->trace_data[j + 1].pipestat = (trace_data[i] & 0x100) >> 8;
			etm_ctx->trace_data[j + 1].packet = (trace_data[i] & 0x7800) >> 11;
			etm_ctx->trace_data[j + 1].flags = 0;
			if ((trace_data[i] & 0x8000) >> 15)
			{
				etm_ctx->trace_data[j + 1].flags |= ETMV1_TRACESYNC_CYCLE;
			}
			if (etm_ctx->trace_data[j + 1].pipestat == STAT_TR)
			{
				etm_ctx->trace_data[j + 1].pipestat = etm_ctx->trace_data[j + 1].packet & 0x7;
				etm_ctx->trace_data[j + 1].flags |= ETMV1_TRIGGER_CYCLE;
			}

			/* trace word j + 2 */
			etm_ctx->trace_data[j + 2].pipestat = (trace_data[i] & 0x10000) >> 16;
			etm_ctx->trace_data[j + 2].packet = (trace_data[i] & 0x780000) >> 19;
			etm_ctx->trace_data[j + 2].flags = 0;
			if ((trace_data[i] & 0x800000) >> 23)
			{
				etm_ctx->trace_data[j + 2].flags |= ETMV1_TRACESYNC_CYCLE;
			}
			if (etm_ctx->trace_data[j + 2].pipestat == STAT_TR)
			{
				etm_ctx->trace_data[j + 2].pipestat = etm_ctx->trace_data[j + 2].packet & 0x7;
				etm_ctx->trace_data[j + 2].flags |= ETMV1_TRIGGER_CYCLE;
			}

			j += 3;
		}
		else if ((etm_ctx->portmode & ETM_PORT_WIDTH_MASK) == ETM_PORT_8BIT)
		{
			/* trace word j */
			etm_ctx->trace_data[j].pipestat = trace_data[i] & 0x7;
			etm_ctx->trace_data[j].packet = (trace_data[i] & 0x7f8) >> 3;
			etm_ctx->trace_data[j].flags = 0;
			if ((trace_data[i] & 0x800) >> 11)
			{
				etm_ctx->trace_data[j].flags |= ETMV1_TRACESYNC_CYCLE;
			}
			if (etm_ctx->trace_data[j].pipestat == STAT_TR)
			{
				etm_ctx->trace_data[j].pipestat = etm_ctx->trace_data[j].packet & 0x7;
				etm_ctx->trace_data[j].flags |= ETMV1_TRIGGER_CYCLE;
			}

			/* trace word j + 1 */
			etm_ctx->trace_data[j + 1].pipestat = (trace_data[i] & 0x7000) >> 12;
			etm_ctx->trace_data[j + 1].packet = (trace_data[i] & 0x7f8000) >> 15;
			etm_ctx->trace_data[j + 1].flags = 0;
			if ((trace_data[i] & 0x800000) >> 23)
			{
				etm_ctx->trace_data[j + 1].flags |= ETMV1_TRACESYNC_CYCLE;
			}
			if (etm_ctx->trace_data[j + 1].pipestat == STAT_TR)
			{
				etm_ctx->trace_data[j + 1].pipestat = etm_ctx->trace_data[j + 1].packet & 0x7;
				etm_ctx->trace_data[j + 1].flags |= ETMV1_TRIGGER_CYCLE;
			}

			j += 2;
		}
		else
		{
			/* trace word j */
			etm_ctx->trace_data[j].pipestat = trace_data[i] & 0x7;
			etm_ctx->trace_data[j].packet = (trace_data[i] & 0x7fff8) >> 3;
			etm_ctx->trace_data[j].flags = 0;
			if ((trace_data[i] & 0x80000) >> 19)
			{
				etm_ctx->trace_data[j].flags |= ETMV1_TRACESYNC_CYCLE;
			}
			if (etm_ctx->trace_data[j].pipestat == STAT_TR)
			{
				etm_ctx->trace_data[j].pipestat = etm_ctx->trace_data[j].packet & 0x7;
				etm_ctx->trace_data[j].flags |= ETMV1_TRIGGER_CYCLE;
			}

			j += 1;
		}
	}

	free(trace_data);

	return ERROR_OK;
}

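/* Arm the ETB: program the trigger counter as a percentage of the RAM depth,
 * reset the write pointer, and set control bit 0 (plus bit 1 for demuxed
 * 8-bit ports) to start capturing.
 */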
static int etb_start_capture(struct etm_context *etm_ctx)
{
	struct etb *etb = etm_ctx->capture_driver_priv;
	uint32_t etb_ctrl_value = 0x1;
	uint32_t trigger_count;

	if ((etm_ctx->portmode & ETM_PORT_MODE_MASK) == ETM_PORT_DEMUXED)
	{
		if ((etm_ctx->portmode & ETM_PORT_WIDTH_MASK) != ETM_PORT_8BIT)
		{
			LOG_ERROR("ETB can't run in demultiplexed mode with a 4 or 16 bit port");
			return ERROR_ETM_PORTMODE_NOT_SUPPORTED;
		}
		etb_ctrl_value |= 0x2;
	}

	if ((etm_ctx->portmode & ETM_PORT_MODE_MASK) == ETM_PORT_MUXED) {
		LOG_ERROR("ETB: can't run in multiplexed mode");
		return ERROR_ETM_PORTMODE_NOT_SUPPORTED;
	}

	trigger_count = (etb->ram_depth * etm_ctx->trigger_percent) / 100;

	etb_write_reg(&etb->reg_cache->reg_list[ETB_TRIGGER_COUNTER], trigger_count);
	etb_write_reg(&etb->reg_cache->reg_list[ETB_RAM_WRITE_POINTER], 0x0);
	etb_write_reg(&etb->reg_cache->reg_list[ETB_CTRL], etb_ctrl_value);
	jtag_execute_queue();

	/* we're starting a new trace, initialize capture status */
	etm_ctx->capture_status = TRACE_RUNNING;

	return ERROR_OK;
}

static int etb_stop_capture(struct etm_context *etm_ctx)
{
	struct etb *etb = etm_ctx->capture_driver_priv;
	struct reg *etb_ctrl_reg = &etb->reg_cache->reg_list[ETB_CTRL];

	etb_write_reg(etb_ctrl_reg, 0x0);
	jtag_execute_queue();

	/* trace stopped, just clear running flag, but preserve others */
	etm_ctx->capture_status &= ~TRACE_RUNNING;

	return ERROR_OK;
}

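/* Hooks exported to the ETM trace layer; this is the "etb" capture driver. */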
struct etm_capture_driver etb_capture_driver =
{
	.name = "etb",
	.register_commands = etb_register_commands,
	.init = etb_init,
	.status = etb_status,
	.start_capture = etb_start_capture,
	.stop_capture = etb_stop_capture,
	.read_trace = etb_read_trace,
};