ETM trigger_percent becomes an ETB command
[openocd.git] / src / target / etb.c
1 /***************************************************************************
2 * Copyright (C) 2007 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * This program is free software; you can redistribute it and/or modify *
6 * it under the terms of the GNU General Public License as published by *
7 * the Free Software Foundation; either version 2 of the License, or *
8 * (at your option) any later version. *
9 * *
10 * This program is distributed in the hope that it will be useful, *
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
13 * GNU General Public License for more details. *
14 * *
15 * You should have received a copy of the GNU General Public License *
16 * along with this program; if not, write to the *
17 * Free Software Foundation, Inc., *
18 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
19 ***************************************************************************/
20 #ifdef HAVE_CONFIG_H
21 #include "config.h"
22 #endif
23
24 #include "arm.h"
25 #include "etm.h"
26 #include "etb.h"
27 #include "register.h"
28
29
/* Human-readable names for the cached ETB registers.  The index of
 * each entry doubles as the ETB register address (see the ETB_*
 * indices used elsewhere in this file, e.g. ETB_RAM_DEPTH); the
 * order here must stay in sync with those index constants.
 */
static char* etb_reg_list[] =
{
	"ETB_identification",
	"ETB_ram_depth",
	"ETB_ram_width",
	"ETB_status",
	"ETB_ram_data",
	"ETB_ram_read_pointer",
	"ETB_ram_write_pointer",
	"ETB_trigger_counter",
	"ETB_control",
};
42
43 static int etb_get_reg(struct reg *reg);
44
45 static int etb_set_instr(struct etb *etb, uint32_t new_instr)
46 {
47 struct jtag_tap *tap;
48
49 tap = etb->tap;
50 if (tap == NULL)
51 return ERROR_FAIL;
52
53 if (buf_get_u32(tap->cur_instr, 0, tap->ir_length) != new_instr)
54 {
55 struct scan_field field;
56
57 field.tap = tap;
58 field.num_bits = tap->ir_length;
59 field.out_value = calloc(DIV_ROUND_UP(field.num_bits, 8), 1);
60 buf_set_u32(field.out_value, 0, field.num_bits, new_instr);
61
62 field.in_value = NULL;
63
64 jtag_add_ir_scan(1, &field, jtag_get_end_state());
65
66 free(field.out_value);
67 }
68
69 return ERROR_OK;
70 }
71
72 static int etb_scann(struct etb *etb, uint32_t new_scan_chain)
73 {
74 if (etb->cur_scan_chain != new_scan_chain)
75 {
76 struct scan_field field;
77
78 field.tap = etb->tap;
79 field.num_bits = 5;
80 field.out_value = calloc(DIV_ROUND_UP(field.num_bits, 8), 1);
81 buf_set_u32(field.out_value, 0, field.num_bits, new_scan_chain);
82
83 field.in_value = NULL;
84
85 /* select INTEST instruction */
86 etb_set_instr(etb, 0x2);
87 jtag_add_dr_scan(1, &field, jtag_get_end_state());
88
89 etb->cur_scan_chain = new_scan_chain;
90
91 free(field.out_value);
92 }
93
94 return ERROR_OK;
95 }
96
97 static int etb_read_reg_w_check(struct reg *, uint8_t *, uint8_t *);
98 static int etb_set_reg_w_exec(struct reg *, uint8_t *);
99
/* Queue a read of @reg with no verification value; the JTAG queue
 * still has to be executed before reg->value holds the new data.
 */
static int etb_read_reg(struct reg *reg)
{
	return etb_read_reg_w_check(reg, NULL, NULL);
}
104
105 static int etb_get_reg(struct reg *reg)
106 {
107 int retval;
108
109 if ((retval = etb_read_reg(reg)) != ERROR_OK)
110 {
111 LOG_ERROR("BUG: error scheduling ETB register read");
112 return retval;
113 }
114
115 if ((retval = jtag_execute_queue()) != ERROR_OK)
116 {
117 LOG_ERROR("ETB register read failed");
118 return retval;
119 }
120
121 return ERROR_OK;
122 }
123
/* get/set hooks shared by every register in the ETB register cache */
static const struct reg_arch_type etb_reg_type = {
	.get = etb_get_reg,
	.set = etb_set_reg_w_exec,
};
128
129 struct reg_cache* etb_build_reg_cache(struct etb *etb)
130 {
131 struct reg_cache *reg_cache = malloc(sizeof(struct reg_cache));
132 struct reg *reg_list = NULL;
133 struct etb_reg *arch_info = NULL;
134 int num_regs = 9;
135 int i;
136
137 /* the actual registers are kept in two arrays */
138 reg_list = calloc(num_regs, sizeof(struct reg));
139 arch_info = calloc(num_regs, sizeof(struct etb_reg));
140
141 /* fill in values for the reg cache */
142 reg_cache->name = "etb registers";
143 reg_cache->next = NULL;
144 reg_cache->reg_list = reg_list;
145 reg_cache->num_regs = num_regs;
146
147 /* set up registers */
148 for (i = 0; i < num_regs; i++)
149 {
150 reg_list[i].name = etb_reg_list[i];
151 reg_list[i].size = 32;
152 reg_list[i].dirty = 0;
153 reg_list[i].valid = 0;
154 reg_list[i].value = calloc(1, 4);
155 reg_list[i].arch_info = &arch_info[i];
156 reg_list[i].type = &etb_reg_type;
157 reg_list[i].size = 32;
158 arch_info[i].addr = i;
159 arch_info[i].etb = etb;
160 }
161
162 return reg_cache;
163 }
164
/* JTAG callback: convert the raw 32-bit capture stored at @arg into a
 * host-order uint32_t, in place, once the queue has executed.
 * NOTE(review): stores through a uint32_t pointer into a buffer typed
 * uint8_t — assumes the caller passed a properly aligned uint32_t slot
 * (etb_read_ram passes &data[i], which is); flag for strict-aliasing
 * review if this is ever called with anything else.
 */
static void etb_getbuf(jtag_callback_data_t arg)
{
	uint8_t *in = (uint8_t *)arg;

	*((uint32_t *)in) = buf_get_u32(in, 0, 32);
}
171
172
/* Stream @num_frames 32-bit frames out of the ETB RAM into @data.
 *
 * Each DR scan carries 32 data bits, 7 address bits and 1 nR/W bit.
 * The first scan only sets up address 4 (RAM data register) with
 * nR/W = read; each subsequent scan captures one frame while keeping
 * the address at 4, except the very last, which switches the address
 * back to 0 so the data register isn't read one frame too far.
 *
 * Executes the JTAG queue before returning; always returns ERROR_OK.
 */
static int etb_read_ram(struct etb *etb, uint32_t *data, int num_frames)
{
	struct scan_field fields[3];
	int i;

	jtag_set_end_state(TAP_IDLE);
	etb_scann(etb, 0x0);
	etb_set_instr(etb, 0xc);

	fields[0].tap = etb->tap;
	fields[0].num_bits = 32;
	fields[0].out_value = NULL;
	fields[0].in_value = NULL;

	fields[1].tap = etb->tap;
	fields[1].num_bits = 7;
	fields[1].out_value = malloc(1);
	buf_set_u32(fields[1].out_value, 0, 7, 4);
	fields[1].in_value = NULL;

	fields[2].tap = etb->tap;
	fields[2].num_bits = 1;
	fields[2].out_value = malloc(1);
	buf_set_u32(fields[2].out_value, 0, 1, 0);
	fields[2].in_value = NULL;

	/* address setup scan — no data captured yet */
	jtag_add_dr_scan(3, fields, jtag_get_end_state());

	for (i = 0; i < num_frames; i++)
	{
		/* ensure nR/W remains set to read */
		buf_set_u32(fields[2].out_value, 0, 1, 0);

		/* address remains set to 0x4 (RAM data) until we read the last frame */
		if (i < num_frames - 1)
			buf_set_u32(fields[1].out_value, 0, 7, 4);
		else
			buf_set_u32(fields[1].out_value, 0, 7, 0);

		fields[0].in_value = (uint8_t *)(data + i);
		jtag_add_dr_scan(3, fields, jtag_get_end_state());

		/* fix up byte order of the raw capture after the queue runs */
		jtag_add_callback(etb_getbuf, (jtag_callback_data_t)(data + i));
	}

	jtag_execute_queue();

	free(fields[1].out_value);
	free(fields[2].out_value);

	return ERROR_OK;
}
225
/* Queue a read of the ETB register at reg->arch_info->addr into
 * reg->value, optionally verifying the captured value against
 * @check_value/@check_mask during queue execution.
 *
 * Two DR scans are queued: the first sets up the register address
 * with nR/W = 0 (read); the second captures the value.  The second
 * scan's address field is switched to 0 (identification register) so
 * reading the RAM data register doesn't advance and skip entries (see
 * comment below).  The JTAG queue is NOT executed here.
 * Always returns ERROR_OK.
 */
static int etb_read_reg_w_check(struct reg *reg,
		uint8_t* check_value, uint8_t* check_mask)
{
	struct etb_reg *etb_reg = reg->arch_info;
	uint8_t reg_addr = etb_reg->addr & 0x7f;
	struct scan_field fields[3];

	LOG_DEBUG("%i", (int)(etb_reg->addr));

	jtag_set_end_state(TAP_IDLE);
	etb_scann(etb_reg->etb, 0x0);
	etb_set_instr(etb_reg->etb, 0xc);

	/* data field: output is don't-care on the setup scan */
	fields[0].tap = etb_reg->etb->tap;
	fields[0].num_bits = 32;
	fields[0].out_value = reg->value;
	fields[0].in_value = NULL;
	fields[0].check_value = NULL;
	fields[0].check_mask = NULL;

	/* register address field */
	fields[1].tap = etb_reg->etb->tap;
	fields[1].num_bits = 7;
	fields[1].out_value = malloc(1);
	buf_set_u32(fields[1].out_value, 0, 7, reg_addr);
	fields[1].in_value = NULL;
	fields[1].check_value = NULL;
	fields[1].check_mask = NULL;

	/* nR/W bit: 0 selects a read */
	fields[2].tap = etb_reg->etb->tap;
	fields[2].num_bits = 1;
	fields[2].out_value = malloc(1);
	buf_set_u32(fields[2].out_value, 0, 1, 0);
	fields[2].in_value = NULL;
	fields[2].check_value = NULL;
	fields[2].check_mask = NULL;

	jtag_add_dr_scan(3, fields, jtag_get_end_state());

	/* read the identification register in the second run, to make sure we
	 * don't read the ETB data register twice, skipping every second entry
	 */
	buf_set_u32(fields[1].out_value, 0, 7, 0x0);
	fields[0].in_value = reg->value;
	fields[0].check_value = check_value;
	fields[0].check_mask = check_mask;

	jtag_add_dr_scan_check(3, fields, jtag_get_end_state());

	free(fields[1].out_value);
	free(fields[2].out_value);

	return ERROR_OK;
}
279
280 static int etb_write_reg(struct reg *, uint32_t);
281
282 static int etb_set_reg(struct reg *reg, uint32_t value)
283 {
284 int retval;
285
286 if ((retval = etb_write_reg(reg, value)) != ERROR_OK)
287 {
288 LOG_ERROR("BUG: error scheduling ETB register write");
289 return retval;
290 }
291
292 buf_set_u32(reg->value, 0, reg->size, value);
293 reg->valid = 1;
294 reg->dirty = 0;
295
296 return ERROR_OK;
297 }
298
299 static int etb_set_reg_w_exec(struct reg *reg, uint8_t *buf)
300 {
301 int retval;
302
303 etb_set_reg(reg, buf_get_u32(buf, 0, reg->size));
304
305 if ((retval = jtag_execute_queue()) != ERROR_OK)
306 {
307 LOG_ERROR("ETB: register write failed");
308 return retval;
309 }
310 return ERROR_OK;
311 }
312
313 static int etb_write_reg(struct reg *reg, uint32_t value)
314 {
315 struct etb_reg *etb_reg = reg->arch_info;
316 uint8_t reg_addr = etb_reg->addr & 0x7f;
317 struct scan_field fields[3];
318
319 LOG_DEBUG("%i: 0x%8.8" PRIx32 "", (int)(etb_reg->addr), value);
320
321 jtag_set_end_state(TAP_IDLE);
322 etb_scann(etb_reg->etb, 0x0);
323 etb_set_instr(etb_reg->etb, 0xc);
324
325 fields[0].tap = etb_reg->etb->tap;
326 fields[0].num_bits = 32;
327 fields[0].out_value = malloc(4);
328 buf_set_u32(fields[0].out_value, 0, 32, value);
329 fields[0].in_value = NULL;
330
331 fields[1].tap = etb_reg->etb->tap;
332 fields[1].num_bits = 7;
333 fields[1].out_value = malloc(1);
334 buf_set_u32(fields[1].out_value, 0, 7, reg_addr);
335 fields[1].in_value = NULL;
336
337 fields[2].tap = etb_reg->etb->tap;
338 fields[2].num_bits = 1;
339 fields[2].out_value = malloc(1);
340 buf_set_u32(fields[2].out_value, 0, 1, 1);
341
342 fields[2].in_value = NULL;
343
344 free(fields[0].out_value);
345 free(fields[1].out_value);
346 free(fields[2].out_value);
347
348 return ERROR_OK;
349 }
350
351 COMMAND_HANDLER(handle_etb_config_command)
352 {
353 struct target *target;
354 struct jtag_tap *tap;
355 struct arm *arm;
356
357 if (CMD_ARGC != 2)
358 {
359 return ERROR_COMMAND_SYNTAX_ERROR;
360 }
361
362 target = get_target(CMD_ARGV[0]);
363
364 if (!target)
365 {
366 LOG_ERROR("ETB: target '%s' not defined", CMD_ARGV[0]);
367 return ERROR_FAIL;
368 }
369
370 arm = target_to_arm(target);
371 if (!is_arm(arm))
372 {
373 command_print(CMD_CTX, "ETB: '%s' isn't an ARM", CMD_ARGV[0]);
374 return ERROR_FAIL;
375 }
376
377 tap = jtag_tap_by_string(CMD_ARGV[1]);
378 if (tap == NULL)
379 {
380 command_print(CMD_CTX, "ETB: TAP %s does not exist", CMD_ARGV[1]);
381 return ERROR_FAIL;
382 }
383
384 if (arm->etm)
385 {
386 struct etb *etb = malloc(sizeof(struct etb));
387
388 arm->etm->capture_driver_priv = etb;
389
390 etb->tap = tap;
391 etb->cur_scan_chain = 0xffffffff;
392 etb->reg_cache = NULL;
393 etb->ram_width = 0;
394 etb->ram_depth = 0;
395 }
396 else
397 {
398 LOG_ERROR("ETM: target has no ETM defined, ETB left unconfigured");
399 return ERROR_FAIL;
400 }
401
402 return ERROR_OK;
403 }
404
405 COMMAND_HANDLER(handle_etb_trigger_percent_command)
406 {
407 struct target *target;
408 struct arm *arm;
409 struct etm_context *etm;
410 struct etb *etb;
411
412 target = get_current_target(CMD_CTX);
413 arm = target_to_arm(target);
414 if (!is_arm(arm))
415 {
416 command_print(CMD_CTX, "ETB: current target isn't an ARM");
417 return ERROR_FAIL;
418 }
419
420 etm = arm->etm;
421 if (!etm) {
422 command_print(CMD_CTX, "ETB: target has no ETM configured");
423 return ERROR_FAIL;
424 }
425 if (etm->capture_driver != &etb_capture_driver) {
426 command_print(CMD_CTX, "ETB: target not using ETB");
427 return ERROR_FAIL;
428 }
429 etb = arm->etm->capture_driver_priv;
430
431 if (CMD_ARGC > 0) {
432 uint32_t new_value;
433
434 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], new_value);
435 if ((new_value < 2) || (new_value > 100))
436 command_print(CMD_CTX,
437 "valid percentages are 2%% to 100%%");
438 else
439 etb->trigger_percent = (unsigned) new_value;
440 }
441
442 command_print(CMD_CTX, "%d percent of tracebuffer fills after trigger",
443 etb->trigger_percent);
444
445 return ERROR_OK;
446 }
447
/* Subcommands of "etb": "config" (config-stage binding of target and
 * TAP) and "trigger_percent" (runtime tuning of post-trigger fill).
 */
static const struct command_registration etb_config_command_handlers[] = {
	{
		.name = "config",
		.handler = &handle_etb_config_command,
		.mode = COMMAND_CONFIG,
		.usage = "target tap",
	},
	{
		.name = "trigger_percent",
		.handler = &handle_etb_trigger_percent_command,
		.mode = COMMAND_EXEC,
		.help = "percent of trace buffer to be filled "
			"after the trigger occurs",
		.usage = "[percent]",
	},
	COMMAND_REGISTRATION_DONE
};
465 static const struct command_registration etb_command_handlers[] = {
466 {
467 .name = "etb",
468 .mode = COMMAND_ANY,
469 .help = "Emebdded Trace Buffer command group",
470 .chain = etb_config_command_handlers,
471 },
472 COMMAND_REGISTRATION_DONE
473 };
474
475 static int etb_init(struct etm_context *etm_ctx)
476 {
477 struct etb *etb = etm_ctx->capture_driver_priv;
478
479 etb->etm_ctx = etm_ctx;
480
481 /* identify ETB RAM depth and width */
482 etb_read_reg(&etb->reg_cache->reg_list[ETB_RAM_DEPTH]);
483 etb_read_reg(&etb->reg_cache->reg_list[ETB_RAM_WIDTH]);
484 jtag_execute_queue();
485
486 etb->ram_depth = buf_get_u32(etb->reg_cache->reg_list[ETB_RAM_DEPTH].value, 0, 32);
487 etb->ram_width = buf_get_u32(etb->reg_cache->reg_list[ETB_RAM_WIDTH].value, 0, 32);
488
489 etb->trigger_percent = 50;
490
491 return ERROR_OK;
492 }
493
494 static trace_status_t etb_status(struct etm_context *etm_ctx)
495 {
496 struct etb *etb = etm_ctx->capture_driver_priv;
497 struct reg *control = &etb->reg_cache->reg_list[ETB_CTRL];
498 struct reg *status = &etb->reg_cache->reg_list[ETB_STATUS];
499 trace_status_t retval = 0;
500 int etb_timeout = 100;
501
502 etb->etm_ctx = etm_ctx;
503
504 /* read control and status registers */
505 etb_read_reg(control);
506 etb_read_reg(status);
507 jtag_execute_queue();
508
509 /* See if it's (still) active */
510 retval = buf_get_u32(control->value, 0, 1) ? TRACE_RUNNING : TRACE_IDLE;
511
512 /* check Full bit to identify wraparound/overflow */
513 if (buf_get_u32(status->value, 0, 1) == 1)
514 retval |= TRACE_OVERFLOWED;
515
516 /* check Triggered bit to identify trigger condition */
517 if (buf_get_u32(status->value, 1, 1) == 1)
518 retval |= TRACE_TRIGGERED;
519
520 /* check AcqComp to see if trigger counter dropped to zero */
521 if (buf_get_u32(status->value, 2, 1) == 1) {
522 /* wait for DFEmpty */
523 while (etb_timeout-- && buf_get_u32(status->value, 3, 1) == 0)
524 etb_get_reg(status);
525
526 if (etb_timeout == 0)
527 LOG_ERROR("ETB: DFEmpty won't go high, status 0x%02x",
528 (unsigned) buf_get_u32(status->value, 0, 4));
529
530 if (!(etm_ctx->capture_status & TRACE_TRIGGERED))
531 LOG_WARNING("ETB: trace complete without triggering?");
532
533 retval |= TRACE_COMPLETED;
534 }
535
536 /* NOTE: using a trigger is optional; and at least ETB11 has a mode
537 * where it can ignore the trigger counter.
538 */
539
540 /* update recorded state */
541 etm_ctx->capture_status = retval;
542
543 return retval;
544 }
545
/* Read the captured trace out of the ETB RAM and unpack it into
 * etm_ctx->trace_data as ETMv1 trace records.
 *
 * Frame count: if the status Full bit is set the buffer wrapped, so
 * all ram_depth frames are valid and the oldest one sits at the write
 * pointer; otherwise only the frames before the write pointer are
 * valid.  Each 32-bit frame then unpacks into 3, 2 or 1 records for a
 * 4-, 8- or 16-bit ETM port respectively: per record a 3-bit pipestat,
 * a packet field, and a tracesync flag bit.  A STAT_TR pipestat means
 * the real pipestat is in the packet's low 3 bits and the record is a
 * trigger cycle.
 *
 * Replaces any previous etm_ctx->trace_data (owned by etm_ctx).
 * Always returns ERROR_OK.
 */
static int etb_read_trace(struct etm_context *etm_ctx)
{
	struct etb *etb = etm_ctx->capture_driver_priv;
	int first_frame = 0;
	int num_frames = etb->ram_depth;
	uint32_t *trace_data = NULL;
	int i, j;

	etb_read_reg(&etb->reg_cache->reg_list[ETB_STATUS]);
	etb_read_reg(&etb->reg_cache->reg_list[ETB_RAM_WRITE_POINTER]);
	jtag_execute_queue();

	/* check if we overflowed, and adjust first frame of the trace accordingly
	 * if we didn't overflow, read only up to the frame that would be written next,
	 * i.e. don't read invalid entries
	 */
	if (buf_get_u32(etb->reg_cache->reg_list[ETB_STATUS].value, 0, 1))
	{
		first_frame = buf_get_u32(etb->reg_cache->reg_list[ETB_RAM_WRITE_POINTER].value, 0, 32);
	}
	else
	{
		num_frames = buf_get_u32(etb->reg_cache->reg_list[ETB_RAM_WRITE_POINTER].value, 0, 32);
	}

	etb_write_reg(&etb->reg_cache->reg_list[ETB_RAM_READ_POINTER], first_frame);

	/* read data into temporary array for unpacking */
	trace_data = malloc(sizeof(uint32_t) * num_frames);
	etb_read_ram(etb, trace_data, num_frames);

	/* discard records from any previous capture */
	if (etm_ctx->trace_depth > 0)
	{
		free(etm_ctx->trace_data);
	}

	/* records per frame depend on the configured port width */
	if ((etm_ctx->portmode & ETM_PORT_WIDTH_MASK) == ETM_PORT_4BIT)
		etm_ctx->trace_depth = num_frames * 3;
	else if ((etm_ctx->portmode & ETM_PORT_WIDTH_MASK) == ETM_PORT_8BIT)
		etm_ctx->trace_depth = num_frames * 2;
	else
		etm_ctx->trace_depth = num_frames;

	etm_ctx->trace_data = malloc(sizeof(struct etmv1_trace_data) * etm_ctx->trace_depth);

	for (i = 0, j = 0; i < num_frames; i++)
	{
		if ((etm_ctx->portmode & ETM_PORT_WIDTH_MASK) == ETM_PORT_4BIT)
		{
			/* 4-bit port: three records per frame */
			/* trace word j */
			etm_ctx->trace_data[j].pipestat = trace_data[i] & 0x7;
			etm_ctx->trace_data[j].packet = (trace_data[i] & 0x78) >> 3;
			etm_ctx->trace_data[j].flags = 0;
			if ((trace_data[i] & 0x80) >> 7)
			{
				etm_ctx->trace_data[j].flags |= ETMV1_TRACESYNC_CYCLE;
			}
			if (etm_ctx->trace_data[j].pipestat == STAT_TR)
			{
				/* real pipestat hides in the packet's low bits */
				etm_ctx->trace_data[j].pipestat = etm_ctx->trace_data[j].packet & 0x7;
				etm_ctx->trace_data[j].flags |= ETMV1_TRIGGER_CYCLE;
			}

			/* trace word j + 1 */
			etm_ctx->trace_data[j + 1].pipestat = (trace_data[i] & 0x100) >> 8;
			etm_ctx->trace_data[j + 1].packet = (trace_data[i] & 0x7800) >> 11;
			etm_ctx->trace_data[j + 1].flags = 0;
			if ((trace_data[i] & 0x8000) >> 15)
			{
				etm_ctx->trace_data[j + 1].flags |= ETMV1_TRACESYNC_CYCLE;
			}
			if (etm_ctx->trace_data[j + 1].pipestat == STAT_TR)
			{
				etm_ctx->trace_data[j + 1].pipestat = etm_ctx->trace_data[j + 1].packet & 0x7;
				etm_ctx->trace_data[j + 1].flags |= ETMV1_TRIGGER_CYCLE;
			}

			/* trace word j + 2 */
			etm_ctx->trace_data[j + 2].pipestat = (trace_data[i] & 0x10000) >> 16;
			etm_ctx->trace_data[j + 2].packet = (trace_data[i] & 0x780000) >> 19;
			etm_ctx->trace_data[j + 2].flags = 0;
			if ((trace_data[i] & 0x800000) >> 23)
			{
				etm_ctx->trace_data[j + 2].flags |= ETMV1_TRACESYNC_CYCLE;
			}
			if (etm_ctx->trace_data[j + 2].pipestat == STAT_TR)
			{
				etm_ctx->trace_data[j + 2].pipestat = etm_ctx->trace_data[j + 2].packet & 0x7;
				etm_ctx->trace_data[j + 2].flags |= ETMV1_TRIGGER_CYCLE;
			}

			j += 3;
		}
		else if ((etm_ctx->portmode & ETM_PORT_WIDTH_MASK) == ETM_PORT_8BIT)
		{
			/* 8-bit port: two records per frame */
			/* trace word j */
			etm_ctx->trace_data[j].pipestat = trace_data[i] & 0x7;
			etm_ctx->trace_data[j].packet = (trace_data[i] & 0x7f8) >> 3;
			etm_ctx->trace_data[j].flags = 0;
			if ((trace_data[i] & 0x800) >> 11)
			{
				etm_ctx->trace_data[j].flags |= ETMV1_TRACESYNC_CYCLE;
			}
			if (etm_ctx->trace_data[j].pipestat == STAT_TR)
			{
				etm_ctx->trace_data[j].pipestat = etm_ctx->trace_data[j].packet & 0x7;
				etm_ctx->trace_data[j].flags |= ETMV1_TRIGGER_CYCLE;
			}

			/* trace word j + 1 */
			etm_ctx->trace_data[j + 1].pipestat = (trace_data[i] & 0x7000) >> 12;
			etm_ctx->trace_data[j + 1].packet = (trace_data[i] & 0x7f8000) >> 15;
			etm_ctx->trace_data[j + 1].flags = 0;
			if ((trace_data[i] & 0x800000) >> 23)
			{
				etm_ctx->trace_data[j + 1].flags |= ETMV1_TRACESYNC_CYCLE;
			}
			if (etm_ctx->trace_data[j + 1].pipestat == STAT_TR)
			{
				etm_ctx->trace_data[j + 1].pipestat = etm_ctx->trace_data[j + 1].packet & 0x7;
				etm_ctx->trace_data[j + 1].flags |= ETMV1_TRIGGER_CYCLE;
			}

			j += 2;
		}
		else
		{
			/* 16-bit port: one record per frame */
			/* trace word j */
			etm_ctx->trace_data[j].pipestat = trace_data[i] & 0x7;
			etm_ctx->trace_data[j].packet = (trace_data[i] & 0x7fff8) >> 3;
			etm_ctx->trace_data[j].flags = 0;
			if ((trace_data[i] & 0x80000) >> 19)
			{
				etm_ctx->trace_data[j].flags |= ETMV1_TRACESYNC_CYCLE;
			}
			if (etm_ctx->trace_data[j].pipestat == STAT_TR)
			{
				etm_ctx->trace_data[j].pipestat = etm_ctx->trace_data[j].packet & 0x7;
				etm_ctx->trace_data[j].flags |= ETMV1_TRIGGER_CYCLE;
			}

			j += 1;
		}
	}

	free(trace_data);

	return ERROR_OK;
}
695
696 static int etb_start_capture(struct etm_context *etm_ctx)
697 {
698 struct etb *etb = etm_ctx->capture_driver_priv;
699 uint32_t etb_ctrl_value = 0x1;
700 uint32_t trigger_count;
701
702 if ((etm_ctx->portmode & ETM_PORT_MODE_MASK) == ETM_PORT_DEMUXED)
703 {
704 if ((etm_ctx->portmode & ETM_PORT_WIDTH_MASK) != ETM_PORT_8BIT)
705 {
706 LOG_ERROR("ETB can't run in demultiplexed mode with a 4 or 16 bit port");
707 return ERROR_ETM_PORTMODE_NOT_SUPPORTED;
708 }
709 etb_ctrl_value |= 0x2;
710 }
711
712 if ((etm_ctx->portmode & ETM_PORT_MODE_MASK) == ETM_PORT_MUXED) {
713 LOG_ERROR("ETB: can't run in multiplexed mode");
714 return ERROR_ETM_PORTMODE_NOT_SUPPORTED;
715 }
716
717 trigger_count = (etb->ram_depth * etb->trigger_percent) / 100;
718
719 etb_write_reg(&etb->reg_cache->reg_list[ETB_TRIGGER_COUNTER], trigger_count);
720 etb_write_reg(&etb->reg_cache->reg_list[ETB_RAM_WRITE_POINTER], 0x0);
721 etb_write_reg(&etb->reg_cache->reg_list[ETB_CTRL], etb_ctrl_value);
722 jtag_execute_queue();
723
724 /* we're starting a new trace, initialize capture status */
725 etm_ctx->capture_status = TRACE_RUNNING;
726
727 return ERROR_OK;
728 }
729
730 static int etb_stop_capture(struct etm_context *etm_ctx)
731 {
732 struct etb *etb = etm_ctx->capture_driver_priv;
733 struct reg *etb_ctrl_reg = &etb->reg_cache->reg_list[ETB_CTRL];
734
735 etb_write_reg(etb_ctrl_reg, 0x0);
736 jtag_execute_queue();
737
738 /* trace stopped, just clear running flag, but preserve others */
739 etm_ctx->capture_status &= ~TRACE_RUNNING;
740
741 return ERROR_OK;
742 }
743
/* Exported ops table binding this file's hooks to the generic ETM
 * capture-driver interface under the name "etb".
 */
struct etm_capture_driver etb_capture_driver =
{
	.name = "etb",
	.commands = etb_command_handlers,
	.init = etb_init,
	.status = etb_status,
	.start_capture = etb_start_capture,
	.stop_capture = etb_stop_capture,
	.read_trace = etb_read_trace,
};