[openocd.git] / src / target / arm_adi_v5.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2
3 /***************************************************************************
4 * Copyright (C) 2006 by Magnus Lundin *
5 * lundin@mlu.mine.nu *
6 * *
7 * Copyright (C) 2008 by Spencer Oliver *
8 * spen@spen-soft.co.uk *
9 * *
10 * Copyright (C) 2009-2010 by Oyvind Harboe *
11 * oyvind.harboe@zylin.com *
12 * *
13 * Copyright (C) 2009-2010 by David Brownell *
14 * *
15 * Copyright (C) 2013 by Andreas Fritiofson *
16 * andreas.fritiofson@gmail.com *
17 * *
18 * Copyright (C) 2019-2021, Ampere Computing LLC *
19 ***************************************************************************/
20
21 /**
22 * @file
23 * This file implements support for the ARM Debug Interface version 5 (ADIv5)
24 * debugging architecture. Compared with previous versions, this includes
25 * a low pin-count Serial Wire Debug (SWD) alternative to JTAG for message
26 * transport, and focuses on memory mapped resources as defined by the
27 * CoreSight architecture.
28 *
29 * A key concept in ADIv5 is the Debug Access Port, or DAP. A DAP has two
30 * basic components: a Debug Port (DP) transporting messages to and from a
31 * debugger, and an Access Port (AP) accessing resources. Three types of DP
32 * are defined. One uses only JTAG for communication, and is called JTAG-DP.
33 * One uses only SWD for communication, and is called SW-DP. The third can
34 * use either SWD or JTAG, and is called SWJ-DP. The most common type of AP
35 * is used to access memory mapped resources and is called a MEM-AP. A
36 * JTAG-AP is also defined, bridging to JTAG resources; those are uncommon.
37 *
38 * This programming interface allows DAP pipelined operations through a
39 * transaction queue. This primarily affects AP operations (such as using
40 * a MEM-AP to access memory or registers). If the current transaction has
41 * not finished by the time the next one must begin, and the ORUNDETECT bit
42 * is set in the DP_CTRL_STAT register, the SSTICKYORUN status is set and
43 * further AP operations will fail. There are two basic methods to avoid
44 * such overrun errors. One involves polling for status instead of using
45 * transaction pipelining. The other involves adding delays to ensure the
46 * AP has enough time to complete one operation before starting the next
47 * one. (For JTAG these delays are controlled by memaccess_tck.)
48 */
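/*
 * Illustrative usage sketch (the 'ap' handle and the Cortex-M register
 * addresses below are example values, not taken from this file): accesses
 * are only queued by the mem_ap_* helpers, and dap_run() flushes the queue,
 * so faults, including overruns, surface at the flush.
 *
 *	uint32_t cpuid, dhcsr;
 *	int retval = mem_ap_read_u32(ap, 0xe000ed00, &cpuid);	// queued only
 *	if (retval == ERROR_OK)
 *		retval = mem_ap_read_u32(ap, 0xe000edf0, &dhcsr);	// queued only
 *	if (retval == ERROR_OK)
 *		retval = dap_run(ap->dap);	// flush; values are valid only on ERROR_OK
 */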
49
50 /*
51 * Relevant specifications from ARM include:
52 *
53 * ARM(tm) Debug Interface v5 Architecture Specification ARM IHI 0031F
54 * ARM(tm) Debug Interface v6 Architecture Specification ARM IHI 0074C
55 * CoreSight(tm) v1.0 Architecture Specification ARM IHI 0029B
56 *
57 * CoreSight(tm) DAP-Lite TRM, ARM DDI 0316D
58 * Cortex-M3(tm) TRM, ARM DDI 0337G
59 */
60
61 #ifdef HAVE_CONFIG_H
62 #include "config.h"
63 #endif
64
65 #include "jtag/interface.h"
66 #include "arm.h"
67 #include "arm_adi_v5.h"
68 #include "arm_coresight.h"
69 #include "jtag/swd.h"
70 #include "transport/transport.h"
71 #include <helper/align.h>
72 #include <helper/jep106.h>
73 #include <helper/time_support.h>
74 #include <helper/list.h>
75 #include <helper/jim-nvp.h>
76
77 /* ARM ADI Specification requires at least 10 bits used for TAR autoincrement */
78
79 /*
80 uint32_t max_tar_block_size(uint32_t tar_autoincr_block, target_addr_t address)
81 Return the largest block starting at address that does not cross a TAR autoincrement block alignment boundary
82 */
83 static uint32_t max_tar_block_size(uint32_t tar_autoincr_block, target_addr_t address)
84 {
85 return tar_autoincr_block - ((tar_autoincr_block - 1) & address);
86 }
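/*
 * Worked example (values are illustrative): with tar_autoincr_block = 0x400
 * (a 1 KiB autoincrement window) and address = 0x200003f8, the result is
 * 0x400 - (0x3ff & 0x3f8) = 8 bytes left before the window boundary; at
 * address = 0x20000400 a full 0x400 bytes are available again.
 */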
87
88 /***************************************************************************
89 * *
90 * DP and MEM-AP register access through APACC and DPACC *
91 * *
92 ***************************************************************************/
93
94 static int mem_ap_setup_csw(struct adiv5_ap *ap, uint32_t csw)
95 {
96 csw |= ap->csw_default;
97
98 if (csw != ap->csw_value) {
99 /* LOG_DEBUG("DAP: Set CSW %x",csw); */
100 int retval = dap_queue_ap_write(ap, MEM_AP_REG_CSW(ap->dap), csw);
101 if (retval != ERROR_OK) {
102 ap->csw_value = 0;
103 return retval;
104 }
105 ap->csw_value = csw;
106 }
107 return ERROR_OK;
108 }
109
110 static int mem_ap_setup_tar(struct adiv5_ap *ap, target_addr_t tar)
111 {
112 if (!ap->tar_valid || tar != ap->tar_value) {
113 /* LOG_DEBUG("DAP: Set TAR %x",tar); */
114 int retval = dap_queue_ap_write(ap, MEM_AP_REG_TAR(ap->dap), (uint32_t)(tar & 0xffffffffUL));
115 if (retval == ERROR_OK && is_64bit_ap(ap)) {
116 /* See if bits 63:32 of tar are different from the last setting */
117 if (!ap->tar_valid || (ap->tar_value >> 32) != (tar >> 32))
118 retval = dap_queue_ap_write(ap, MEM_AP_REG_TAR64(ap->dap), (uint32_t)(tar >> 32));
119 }
120 if (retval != ERROR_OK) {
121 ap->tar_valid = false;
122 return retval;
123 }
124 ap->tar_value = tar;
125 ap->tar_valid = true;
126 }
127 return ERROR_OK;
128 }
129
130 static int mem_ap_read_tar(struct adiv5_ap *ap, target_addr_t *tar)
131 {
132 uint32_t lower;
133 uint32_t upper = 0;
134
135 int retval = dap_queue_ap_read(ap, MEM_AP_REG_TAR(ap->dap), &lower);
136 if (retval == ERROR_OK && is_64bit_ap(ap))
137 retval = dap_queue_ap_read(ap, MEM_AP_REG_TAR64(ap->dap), &upper);
138
139 if (retval != ERROR_OK) {
140 ap->tar_valid = false;
141 return retval;
142 }
143
144 retval = dap_run(ap->dap);
145 if (retval != ERROR_OK) {
146 ap->tar_valid = false;
147 return retval;
148 }
149
150 *tar = (((target_addr_t)upper) << 32) | (target_addr_t)lower;
151
152 ap->tar_value = *tar;
153 ap->tar_valid = true;
154 return ERROR_OK;
155 }
156
157 static uint32_t mem_ap_get_tar_increment(struct adiv5_ap *ap)
158 {
159 switch (ap->csw_value & CSW_ADDRINC_MASK) {
160 case CSW_ADDRINC_SINGLE:
161 switch (ap->csw_value & CSW_SIZE_MASK) {
162 case CSW_8BIT:
163 return 1;
164 case CSW_16BIT:
165 return 2;
166 case CSW_32BIT:
167 return 4;
168 case CSW_64BIT:
169 return 8;
170 case CSW_128BIT:
171 return 16;
172 case CSW_256BIT:
173 return 32;
174 default:
175 return 0;
176 }
177 case CSW_ADDRINC_PACKED:
178 return 4;
179 }
180 return 0;
181 }
182
183 /* mem_ap_update_tar_cache is called after an access to MEM_AP_REG_DRW
184 */
185 static void mem_ap_update_tar_cache(struct adiv5_ap *ap)
186 {
187 if (!ap->tar_valid)
188 return;
189
190 uint32_t inc = mem_ap_get_tar_increment(ap);
191 if (inc >= max_tar_block_size(ap->tar_autoincr_block, ap->tar_value))
192 ap->tar_valid = false;
193 else
194 ap->tar_value += inc;
195 }
196
197 /**
198 * Queue transactions setting up transfer parameters for the
199 * currently selected MEM-AP.
200 *
201 * Subsequent transfers using registers like MEM_AP_REG_DRW or MEM_AP_REG_BD2
202 * initiate data reads or writes using memory or peripheral addresses.
203 * If the CSW is configured for it, the TAR may be automatically
204 * incremented after each transfer.
205 *
206 * @param ap The MEM-AP.
207 * @param csw MEM-AP Control/Status Word (CSW) register to assign. If this
208 * matches the cached value, the register is not changed.
209 * @param tar MEM-AP Transfer Address Register (TAR) to assign. If this
210 * matches the cached address, the register is not changed.
211 *
212 * @return ERROR_OK if the transaction was properly queued, else a fault code.
213 */
214 static int mem_ap_setup_transfer(struct adiv5_ap *ap, uint32_t csw, target_addr_t tar)
215 {
216 int retval;
217 retval = mem_ap_setup_csw(ap, csw);
218 if (retval != ERROR_OK)
219 return retval;
220 retval = mem_ap_setup_tar(ap, tar);
221 if (retval != ERROR_OK)
222 return retval;
223 return ERROR_OK;
224 }
225
226 /**
227 * Asynchronous (queued) read of a word from memory or a system register.
228 *
229 * @param ap The MEM-AP to access.
230 * @param address Address of the 32-bit word to read; it must be
231 * readable by the currently selected MEM-AP.
232 * @param value points to where the word will be stored when the
233 * transaction queue is flushed (assuming no errors).
234 *
235 * @return ERROR_OK for success. Otherwise a fault code.
236 */
237 int mem_ap_read_u32(struct adiv5_ap *ap, target_addr_t address,
238 uint32_t *value)
239 {
240 int retval;
241
242 /* Use banked addressing (REG_BDx) to avoid some link traffic
243 * (updating TAR) when reading several consecutive addresses.
244 */
245 retval = mem_ap_setup_transfer(ap,
246 CSW_32BIT | (ap->csw_value & CSW_ADDRINC_MASK),
247 address & 0xFFFFFFFFFFFFFFF0ull);
248 if (retval != ERROR_OK)
249 return retval;
250
251 return dap_queue_ap_read(ap, MEM_AP_REG_BD0(ap->dap) | (address & 0xC), value);
252 }
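/*
 * Example (the address is illustrative): a read from 0x20001008 sets
 * TAR = 0x20001000 and then reads banked register BD2 (BD0 | 0x8), so the
 * four words at offsets 0x0/0x4/0x8/0xC of that window can be accessed
 * without rewriting TAR.
 */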
253
254 /**
255 * Synchronous read of a word from memory or a system register.
256 * As a side effect, this flushes any queued transactions.
257 *
258 * @param ap The MEM-AP to access.
259 * @param address Address of the 32-bit word to read; it must be
260 * readable by the currently selected MEM-AP.
261 * @param value points to where the result will be stored.
262 *
263 * @return ERROR_OK for success; *value holds the result.
264 * Otherwise a fault code.
265 */
266 int mem_ap_read_atomic_u32(struct adiv5_ap *ap, target_addr_t address,
267 uint32_t *value)
268 {
269 int retval;
270
271 retval = mem_ap_read_u32(ap, address, value);
272 if (retval != ERROR_OK)
273 return retval;
274
275 return dap_run(ap->dap);
276 }
277
278 /**
279 * Asynchronous (queued) write of a word to memory or a system register.
280 *
281 * @param ap The MEM-AP to access.
282 * @param address Address to be written; it must be writable by
283 * the currently selected MEM-AP.
284 * @param value Word that will be written to the address when transaction
285 * queue is flushed (assuming no errors).
286 *
287 * @return ERROR_OK for success. Otherwise a fault code.
288 */
289 int mem_ap_write_u32(struct adiv5_ap *ap, target_addr_t address,
290 uint32_t value)
291 {
292 int retval;
293
294 /* Use banked addressing (REG_BDx) to avoid some link traffic
295 * (updating TAR) when writing several consecutive addresses.
296 */
297 retval = mem_ap_setup_transfer(ap,
298 CSW_32BIT | (ap->csw_value & CSW_ADDRINC_MASK),
299 address & 0xFFFFFFFFFFFFFFF0ull);
300 if (retval != ERROR_OK)
301 return retval;
302
303 return dap_queue_ap_write(ap, MEM_AP_REG_BD0(ap->dap) | (address & 0xC),
304 value);
305 }
306
307 /**
308 * Synchronous write of a word to memory or a system register.
309 * As a side effect, this flushes any queued transactions.
310 *
311 * @param ap The MEM-AP to access.
312 * @param address Address to be written; it must be writable by
313 * the currently selected MEM-AP.
314 * @param value Word that will be written.
315 *
316 * @return ERROR_OK for success; the data was written. Otherwise a fault code.
317 */
318 int mem_ap_write_atomic_u32(struct adiv5_ap *ap, target_addr_t address,
319 uint32_t value)
320 {
321 int retval = mem_ap_write_u32(ap, address, value);
322
323 if (retval != ERROR_OK)
324 return retval;
325
326 return dap_run(ap->dap);
327 }
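/*
 * Read-modify-write sketch using the atomic helpers (the peripheral address
 * and bit are hypothetical):
 *
 *	uint32_t val;
 *	int retval = mem_ap_read_atomic_u32(ap, 0x40000000, &val);
 *	if (retval == ERROR_OK)
 *		retval = mem_ap_write_atomic_u32(ap, 0x40000000, val | BIT(0));
 *
 * Each atomic call flushes the queue; when several accesses can be batched,
 * the queued variants followed by one dap_run() generate less link traffic.
 */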
328
329 /**
330 * Queue transactions setting up transfer parameters for the
331 * currently selected MEM-AP. If transfer size or packing
332 * has not been probed, run the queue, read back CSW and check if the requested
333 * transfer mode is supported.
334 *
335 * @param ap The MEM-AP.
336 * @param size Transfer width in bytes. Corresponding CSW.Size will be set.
337 * @param address Transfer address, MEM-AP TAR will be set to this value.
338 * @param addrinc TAR will be autoincremented.
339 * @param pack Try to setup packed transfer.
340 * @param this_size Points to a variable set to the size of a single transfer,
341 * or to 4 when transferring packed bytes or halfwords.
342 *
343 * @return ERROR_OK if the transaction was properly queued, else a fault code.
344 */
345 static int mem_ap_setup_transfer_verify_size_packing(struct adiv5_ap *ap,
346 unsigned int size, target_addr_t address,
347 bool addrinc, bool pack, unsigned int *this_size)
348 {
349 int retval;
350 uint32_t csw_size;
351
352 switch (size) {
353 case 1:
354 csw_size = CSW_8BIT;
355 break;
356 case 2:
357 csw_size = CSW_16BIT;
358 break;
359 case 4:
360 csw_size = CSW_32BIT;
361 break;
362 case 8:
363 csw_size = CSW_64BIT;
364 break;
365 case 16:
366 csw_size = CSW_128BIT;
367 break;
368 case 32:
369 csw_size = CSW_256BIT;
370 break;
371 default:
372 LOG_ERROR("Size %u not supported", size);
373 return ERROR_TARGET_SIZE_NOT_SUPPORTED;
374 }
375
376 if (!addrinc || size >= 4
377 || (ap->packed_transfers_probed && !ap->packed_transfers_supported)
378 || max_tar_block_size(ap->tar_autoincr_block, address) < 4)
379 pack = false;
380
381 uint32_t csw_addrinc = pack ? CSW_ADDRINC_PACKED :
382 addrinc ? CSW_ADDRINC_SINGLE : CSW_ADDRINC_OFF;
383 retval = mem_ap_setup_csw(ap, csw_size | csw_addrinc);
384 if (retval != ERROR_OK)
385 return retval;
386
387 bool do_probe = !(ap->csw_size_probed_mask & size)
388 || (pack && !ap->packed_transfers_probed);
389 if (do_probe) {
390 uint32_t csw_readback;
391 retval = dap_queue_ap_read(ap, MEM_AP_REG_CSW(ap->dap), &csw_readback);
392 if (retval != ERROR_OK)
393 return retval;
394
395 retval = dap_run(ap->dap);
396 if (retval != ERROR_OK)
397 return retval;
398
399 bool size_supported = ((csw_readback & CSW_SIZE_MASK) == csw_size);
400 LOG_DEBUG("AP#0x%" PRIx64 " probed size %u: %s", ap->ap_num, size,
401 size_supported ? "supported" : "not supported");
402 ap->csw_size_probed_mask |= size;
403 if (size_supported) {
404 ap->csw_size_supported_mask |= size;
405 if (pack && !ap->packed_transfers_probed) {
406 ap->packed_transfers_probed = true;
407 ap->packed_transfers_supported =
408 ((csw_readback & CSW_ADDRINC_MASK) == csw_addrinc);
409 LOG_DEBUG("probed packing: %s",
410 ap->packed_transfers_supported ? "supported" : "not supported");
411 }
412 }
413 }
414
415 if (!(ap->csw_size_supported_mask & size)) {
416 LOG_ERROR("Size %u not supported", size);
417 return ERROR_TARGET_SIZE_NOT_SUPPORTED;
418 }
419
420 if (pack && !ap->packed_transfers_supported)
421 return ERROR_TARGET_PACKING_NOT_SUPPORTED;
422
423 *this_size = pack ? 4 : size;
424
425 return mem_ap_setup_tar(ap, address);
426 }
427
428 /**
429 * Queue transactions setting up transfer parameters for the
430 * currently selected MEM-AP. If transfer size or packing
431 * has not been probed, run the queue, read back CSW and check if the requested
432 * transfer mode is supported.
433 * If packing is not supported, fall back and prepare the CSW for an unpacked transfer.
434 *
435 * @param ap The MEM-AP.
436 * @param size Transfer width in bytes. Corresponding CSW.Size will be set.
437 * @param address Transfer address, MEM-AP TAR will be set to this value.
438 * @param addrinc TAR will be autoincremented.
439 * @param pack Try to setup packed transfer.
440 * @param this_size Points to a variable set to the size of a single transfer,
441 * or to 4 when transferring packed bytes or halfwords.
442 *
443 * @return ERROR_OK if the transaction was properly queued, else a fault code.
444 */
445 static int mem_ap_setup_transfer_verify_size_packing_fallback(struct adiv5_ap *ap,
446 unsigned int size, target_addr_t address,
447 bool addrinc, bool pack, unsigned int *this_size)
448 {
449 int retval = mem_ap_setup_transfer_verify_size_packing(ap,
450 size, address,
451 addrinc, pack, this_size);
452 if (retval == ERROR_TARGET_PACKING_NOT_SUPPORTED) {
453 /* Retry without packing */
454 retval = mem_ap_setup_transfer_verify_size_packing(ap,
455 size, address,
456 addrinc, false, this_size);
457 }
458 return retval;
459 }
460
461 /**
462 * Synchronous write of a block of memory, using a specific access size.
463 *
464 * @param ap The MEM-AP to access.
465 * @param buffer The data buffer to write. No particular alignment is assumed.
466 * @param size Which access size to use, in bytes. 1, 2, or 4.
467 * If large data extension is available also accepts sizes 8, 16, 32.
468 * @param count The number of writes to do (in size units, not bytes).
469 * @param address Address to be written; it must be writable by the currently selected MEM-AP.
470 * @param addrinc Whether the target address should be increased for each write or not. This
471 * should normally be true, except when writing to e.g. a FIFO.
472 * @return ERROR_OK on success, otherwise an error code.
473 */
474 static int mem_ap_write(struct adiv5_ap *ap, const uint8_t *buffer, uint32_t size, uint32_t count,
475 target_addr_t address, bool addrinc)
476 {
477 struct adiv5_dap *dap = ap->dap;
478 size_t nbytes = size * count;
479 int retval = ERROR_OK;
480
481 /* TI BE-32 Quirks mode:
482 * Writes on big-endian TMS570 behave very strangely. Observed behavior:
483 * size write address bytes written in order
484 * 4 TAR ^ 0 (val >> 24), (val >> 16), (val >> 8), (val)
485 * 2 TAR ^ 2 (val >> 8), (val)
486 * 1 TAR ^ 3 (val)
487 * For example, if you attempt to write a single byte to address 0, the processor
488 * will actually write a byte to address 3.
489 *
490 * To make writes of size < 4 work as expected, we xor a value with the address before
491 * setting the TAR, and we set the TAR after every transfer rather than relying on
492 * address increment. */
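/* Worked example of the scheme above (derived from the code below): a
 * one-byte write intended for address 0 is issued with TAR = 0 ^ 3 = 3 and
 * the data byte shifted into DRW bits [31:24], i.e. byte lane (0 & 3) ^ 3. */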
493 target_addr_t ti_be_addr_xor = 0;
494 target_addr_t ti_be_lane_xor = 0;
495 if (dap->ti_be_32_quirks) {
496 ti_be_lane_xor = 3;
497 switch (size) {
498 case 1:
499 ti_be_addr_xor = 3;
500 break;
501 case 2:
502 ti_be_addr_xor = 2;
503 break;
504 case 4:
505 break;
506 default:
507 LOG_ERROR("Write more than 32 bits not supported with ti_be_32_quirks");
508 return ERROR_TARGET_SIZE_NOT_SUPPORTED;
509 }
510 }
511
512 if (ap->unaligned_access_bad && (address % size != 0))
513 return ERROR_TARGET_UNALIGNED_ACCESS;
514
515 /* Nuvoton NPCX quirks prevent packed writes */
516 bool pack = !dap->nu_npcx_quirks;
517
518 while (nbytes > 0) {
519 unsigned int this_size;
520 retval = mem_ap_setup_transfer_verify_size_packing_fallback(ap,
521 size, address ^ ti_be_addr_xor,
522 addrinc, pack && nbytes >= 4, &this_size);
523 if (retval != ERROR_OK)
524 return retval;
525
526 /* How many source bytes each transfer will consume, and their location in the DRW,
527 * depends on the type of transfer and alignment. See ARM document IHI0031C. */
528 uint32_t drw_byte_idx = address;
529 unsigned int drw_ops = DIV_ROUND_UP(this_size, 4);
530
531 while (drw_ops--) {
532 uint32_t outvalue = 0;
533 if (dap->nu_npcx_quirks && this_size <= 2) {
534 switch (this_size) {
535 case 2:
536 {
537 /* Alternate low and high byte to all byte lanes */
538 uint32_t low = *buffer++;
539 uint32_t high = *buffer++;
540 outvalue |= low << 8 * (drw_byte_idx++ & 3);
541 outvalue |= high << 8 * (drw_byte_idx++ & 3);
542 outvalue |= low << 8 * (drw_byte_idx++ & 3);
543 outvalue |= high << 8 * (drw_byte_idx & 3);
544 }
545 break;
546 case 1:
547 {
548 /* Mirror output byte to all byte lanes */
549 uint32_t data = *buffer++;
550 outvalue |= data;
551 outvalue |= data << 8;
552 outvalue |= data << 16;
553 outvalue |= data << 24;
554 }
555 }
556 } else {
557 unsigned int drw_bytes = MIN(this_size, 4);
558 while (drw_bytes--)
559 outvalue |= (uint32_t)*buffer++ <<
560 8 * ((drw_byte_idx++ & 3) ^ ti_be_lane_xor);
561 }
562
563 retval = dap_queue_ap_write(ap, MEM_AP_REG_DRW(dap), outvalue);
564 if (retval != ERROR_OK)
565 break;
566 }
567 if (retval != ERROR_OK)
568 break;
569
570 mem_ap_update_tar_cache(ap);
571 nbytes -= this_size;
572 if (addrinc)
573 address += this_size;
574 }
575
576 /* REVISIT: Might want to have a queued version of this function that does not run. */
577 if (retval == ERROR_OK)
578 retval = dap_run(dap);
579
580 if (retval != ERROR_OK) {
581 target_addr_t tar;
582 if (mem_ap_read_tar(ap, &tar) == ERROR_OK)
583 LOG_ERROR("Failed to write memory at " TARGET_ADDR_FMT, tar);
584 else
585 LOG_ERROR("Failed to write memory and, additionally, failed to find out where");
586 }
587
588 return retval;
589 }
590
591 /**
592 * Synchronous read of a block of memory, using a specific access size.
593 *
594 * @param ap The MEM-AP to access.
595 * @param buffer The data buffer to receive the data. No particular alignment is assumed.
596 * @param size Which access size to use, in bytes. 1, 2, or 4.
597 * If large data extension is available also accepts sizes 8, 16, 32.
598 * @param count The number of reads to do (in size units, not bytes).
599 * @param adr Address to be read; it must be readable by the currently selected MEM-AP.
600 * @param addrinc Whether the target address should be increased after each read or not. This
601 * should normally be true, except when reading from e.g. a FIFO.
602 * @return ERROR_OK on success, otherwise an error code.
603 */
604 static int mem_ap_read(struct adiv5_ap *ap, uint8_t *buffer, uint32_t size, uint32_t count,
605 target_addr_t adr, bool addrinc)
606 {
607 struct adiv5_dap *dap = ap->dap;
608 size_t nbytes = size * count;
609 target_addr_t address = adr;
610 int retval = ERROR_OK;
611
612 /* TI BE-32 Quirks mode:
613 * Reads on big-endian TMS570 behave strangely, and differently from writes.
614 * They read from the physical address requested, but with DRW byte-reversed.
615 * For example, a byte read from address 0 will place the result in the high bytes of DRW.
616 * Also, packed 8-bit and 16-bit transfers seem to sometimes return garbage in some bytes,
617 * so avoid them (mem_ap_init() leaves packed transfers marked unsupported for these parts). */
618
619 if (dap->ti_be_32_quirks && size > 4) {
620 LOG_ERROR("Read more than 32 bits not supported with ti_be_32_quirks");
621 return ERROR_TARGET_SIZE_NOT_SUPPORTED;
622 }
623
624 if (ap->unaligned_access_bad && (adr % size != 0))
625 return ERROR_TARGET_UNALIGNED_ACCESS;
626
627 /* Allocate buffer to hold the sequence of DRW reads that will be made. This is a significant
628 * over-allocation if packed transfers are going to be used, but determining the real need at
629 * this point would be messy. */
630 uint32_t *read_buf = calloc(count, MAX(sizeof(uint32_t), size));
631
632 /* Multiplication count * sizeof(uint32_t) may overflow, calloc() is safe */
633 uint32_t *read_ptr = read_buf;
634 if (!read_buf) {
635 LOG_ERROR("Failed to allocate read buffer");
636 return ERROR_FAIL;
637 }
638
639 /* Queue up all reads. Each read will store the entire DRW word in the read buffer. How many
640 * useful bytes it contains, and their location in the word, depends on the type of transfer
641 * and alignment. */
642 while (nbytes > 0) {
643 unsigned int this_size;
644 retval = mem_ap_setup_transfer_verify_size_packing_fallback(ap,
645 size, address,
646 addrinc, nbytes >= 4, &this_size);
647 if (retval != ERROR_OK)
648 break;
649
650
651 unsigned int drw_ops = DIV_ROUND_UP(this_size, 4);
652 while (drw_ops--) {
653 retval = dap_queue_ap_read(ap, MEM_AP_REG_DRW(dap), read_ptr++);
654 if (retval != ERROR_OK)
655 break;
656 }
657
658 nbytes -= this_size;
659 if (addrinc)
660 address += this_size;
661
662 mem_ap_update_tar_cache(ap);
663 }
664
665 if (retval == ERROR_OK)
666 retval = dap_run(dap);
667
668 /* Restore state */
669 address = adr;
670 nbytes = size * count;
671 read_ptr = read_buf;
672
673 /* If something failed, read TAR to find out how much data was successfully read, so we can
674 * at least give the caller what we have. */
675 if (retval == ERROR_TARGET_SIZE_NOT_SUPPORTED) {
676 nbytes = 0;
677 } else if (retval != ERROR_OK) {
678 target_addr_t tar;
679 if (mem_ap_read_tar(ap, &tar) == ERROR_OK) {
680 /* TAR is incremented after a failed transfer on some devices (e.g. Cortex-M4) */
681 LOG_ERROR("Failed to read memory at " TARGET_ADDR_FMT, tar);
682 if (nbytes > tar - address)
683 nbytes = tar - address;
684 } else {
685 LOG_ERROR("Failed to read memory and, additionally, failed to find out where");
686 nbytes = 0;
687 }
688 }
689
690 target_addr_t ti_be_lane_xor = dap->ti_be_32_quirks ? 3 : 0;
691
692 /* Replay loop to populate caller's buffer from the correct word and byte lane */
693 while (nbytes > 0) {
694 /* Convert transfers longer than 32-bit on a word-at-a-time basis */
695 unsigned int this_size = MIN(size, 4);
696
697 if (size < 4 && addrinc && ap->packed_transfers_supported && nbytes >= 4
698 && max_tar_block_size(ap->tar_autoincr_block, address) >= 4) {
699 this_size = 4; /* Packed read of 4 bytes or 2 halfwords */
700 }
701
702 switch (this_size) {
703 case 4:
704 *buffer++ = *read_ptr >> 8 * ((address++ & 3) ^ ti_be_lane_xor);
705 *buffer++ = *read_ptr >> 8 * ((address++ & 3) ^ ti_be_lane_xor);
706 /* fallthrough */
707 case 2:
708 *buffer++ = *read_ptr >> 8 * ((address++ & 3) ^ ti_be_lane_xor);
709 /* fallthrough */
710 case 1:
711 *buffer++ = *read_ptr >> 8 * ((address++ & 3) ^ ti_be_lane_xor);
712 }
713
714 read_ptr++;
715 nbytes -= this_size;
716 }
717
718 free(read_buf);
719 return retval;
720 }
721
722 int mem_ap_read_buf(struct adiv5_ap *ap,
723 uint8_t *buffer, uint32_t size, uint32_t count, target_addr_t address)
724 {
725 return mem_ap_read(ap, buffer, size, count, address, true);
726 }
727
728 int mem_ap_write_buf(struct adiv5_ap *ap,
729 const uint8_t *buffer, uint32_t size, uint32_t count, target_addr_t address)
730 {
731 return mem_ap_write(ap, buffer, size, count, address, true);
732 }
733
734 int mem_ap_read_buf_noincr(struct adiv5_ap *ap,
735 uint8_t *buffer, uint32_t size, uint32_t count, target_addr_t address)
736 {
737 return mem_ap_read(ap, buffer, size, count, address, false);
738 }
739
740 int mem_ap_write_buf_noincr(struct adiv5_ap *ap,
741 const uint8_t *buffer, uint32_t size, uint32_t count, target_addr_t address)
742 {
743 return mem_ap_write(ap, buffer, size, count, address, false);
744 }
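/*
 * Example (buffer, size, count and addresses are illustrative): read 64 bytes
 * from RAM as sixteen 32-bit accesses, then drain a FIFO-style register
 * without address increment. 'count' is in units of 'size', not bytes, so
 * both calls transfer 64 bytes.
 *
 *	uint8_t buf[64];
 *	retval = mem_ap_read_buf(ap, buf, 4, 16, 0x20000000);
 *	...
 *	retval = mem_ap_read_buf_noincr(ap, buf, 4, 16, 0x40001000);
 */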
745
746 /*--------------------------------------------------------------------------*/
747
748
749 #define DAP_POWER_DOMAIN_TIMEOUT (10)
750
751 /*--------------------------------------------------------------------------*/
752
753 /**
754 * Invalidate cached DP select and cached TAR and CSW of all APs
755 */
756 void dap_invalidate_cache(struct adiv5_dap *dap)
757 {
758 dap->select = 0; /* speculate the first AP access will select AP 0, bank 0 */
759 dap->select_valid = false;
760 dap->select1_valid = false;
761 dap->select_dpbanksel_valid = false;
762
763 dap->last_read = NULL;
764
765 int i;
766 for (i = 0; i <= DP_APSEL_MAX; i++) {
767 /* force csw and tar write on the next mem-ap access */
768 dap->ap[i].tar_valid = false;
769 dap->ap[i].csw_value = 0;
770 }
771 }
772
773 /**
774 * Initialize a DAP. This sets up the power domains, prepares the DP
775 * for further use and activates overrun checking.
776 *
777 * @param dap The DAP being initialized.
778 */
779 int dap_dp_init(struct adiv5_dap *dap)
780 {
781 int retval;
782
783 LOG_DEBUG("%s", adiv5_dap_name(dap));
784
785 dap->do_reconnect = false;
786 dap_invalidate_cache(dap);
787
788 /*
789 * Early initialize dap->dp_ctrl_stat.
790 * In jtag mode only, if the following queue run (in dap_dp_poll_register)
791 * fails and sets the sticky error, it will trigger the clearing
792 * of the sticky error. Without this initialization, system and debug power
793 * would be disabled while clearing the sticky error bit.
794 */
795 dap->dp_ctrl_stat = CDBGPWRUPREQ | CSYSPWRUPREQ;
796
797 /*
798 * This write operation clears the sticky error bit in jtag mode only and
799 * is ignored in swd mode. It also powers up the system and debug domains in
800 * both jtag and swd modes, if not done before.
801 */
802 retval = dap_queue_dp_write(dap, DP_CTRL_STAT, dap->dp_ctrl_stat | SSTICKYERR);
803 if (retval != ERROR_OK)
804 return retval;
805
806 retval = dap_queue_dp_read(dap, DP_CTRL_STAT, NULL);
807 if (retval != ERROR_OK)
808 return retval;
809
810 retval = dap_queue_dp_write(dap, DP_CTRL_STAT, dap->dp_ctrl_stat);
811 if (retval != ERROR_OK)
812 return retval;
813
814 /* Check that we have debug power domains activated */
815 LOG_DEBUG("DAP: wait CDBGPWRUPACK");
816 retval = dap_dp_poll_register(dap, DP_CTRL_STAT,
817 CDBGPWRUPACK, CDBGPWRUPACK,
818 DAP_POWER_DOMAIN_TIMEOUT);
819 if (retval != ERROR_OK)
820 return retval;
821
822 if (!dap->ignore_syspwrupack) {
823 LOG_DEBUG("DAP: wait CSYSPWRUPACK");
824 retval = dap_dp_poll_register(dap, DP_CTRL_STAT,
825 CSYSPWRUPACK, CSYSPWRUPACK,
826 DAP_POWER_DOMAIN_TIMEOUT);
827 if (retval != ERROR_OK)
828 return retval;
829 }
830
831 retval = dap_queue_dp_read(dap, DP_CTRL_STAT, NULL);
832 if (retval != ERROR_OK)
833 return retval;
834
835 /* With debug power on we can activate OVERRUN checking */
836 dap->dp_ctrl_stat = CDBGPWRUPREQ | CSYSPWRUPREQ | CORUNDETECT;
837 retval = dap_queue_dp_write(dap, DP_CTRL_STAT, dap->dp_ctrl_stat);
838 if (retval != ERROR_OK)
839 return retval;
840 retval = dap_queue_dp_read(dap, DP_CTRL_STAT, NULL);
841 if (retval != ERROR_OK)
842 return retval;
843
844 retval = dap_run(dap);
845 if (retval != ERROR_OK)
846 return retval;
847
848 return retval;
849 }
850
851 /**
852 * Initialize a DAP or do reconnect if DAP is not accessible.
853 *
854 * @param dap The DAP being initialized.
855 */
856 int dap_dp_init_or_reconnect(struct adiv5_dap *dap)
857 {
858 LOG_DEBUG("%s", adiv5_dap_name(dap));
859
860 /*
861 * Early initialize dap->dp_ctrl_stat.
862 * In jtag mode only, if the following atomic reads fail and set the
863 * sticky error, it will trigger the clearing of the sticky error. Without this
864 * initialization, system and debug power would be disabled while clearing
865 * the sticky error bit.
866 */
867 dap->dp_ctrl_stat = CDBGPWRUPREQ | CSYSPWRUPREQ;
868
869 dap->do_reconnect = false;
870
871 dap_dp_read_atomic(dap, DP_CTRL_STAT, NULL);
872 if (dap->do_reconnect) {
873 /* dap connect calls dap_dp_init() after transport dependent initialization */
874 return dap->ops->connect(dap);
875 } else {
876 return dap_dp_init(dap);
877 }
878 }
879
880 /**
881 * Initialize a MEM-AP. This reads the MEM-AP CFG register, invalidates the
882 * cached CSW and TAR, and records which transfer sizes and quirks apply so
883 * later accesses reconfigure the AP correctly.
884 *
885 * @param ap The MEM-AP being initialized.
886 */
887 int mem_ap_init(struct adiv5_ap *ap)
888 {
889 /* check that we support packed transfers */
890 uint32_t cfg;
891 int retval;
892 struct adiv5_dap *dap = ap->dap;
893
894 /* Set ap->cfg_reg before calling mem_ap_setup_transfer(). */
895 /* mem_ap_setup_transfer() needs to know if the MEM_AP supports LPAE. */
896 retval = dap_queue_ap_read(ap, MEM_AP_REG_CFG(dap), &cfg);
897 if (retval != ERROR_OK)
898 return retval;
899
900 retval = dap_run(dap);
901 if (retval != ERROR_OK)
902 return retval;
903
904 ap->cfg_reg = cfg;
905 ap->tar_valid = false;
906 ap->csw_value = 0; /* force csw and tar write */
907
908 /* CSW 32-bit size must be supported (IHI 0031F and 0074D). */
909 ap->csw_size_supported_mask = BIT(CSW_32BIT);
910 ap->csw_size_probed_mask = BIT(CSW_32BIT);
911
912 /* Suppress probing sizes longer than 32 bits if the AP has no large data extension */
913 if (!(cfg & MEM_AP_REG_CFG_LD))
914 ap->csw_size_probed_mask |= BIT(CSW_64BIT) | BIT(CSW_128BIT) | BIT(CSW_256BIT);
915
916 /* Both IHI 0031F and 0074D state: Implementations that support transfers
917 * smaller than a word must support packed transfers. Unfortunately at least
918 * Cortex-M0 and Cortex-M0+ do not comply with this rule.
919 * Probe for packed transfers unless we know they are broken.
920 * Packed transfers on TI BE-32 processors do not work correctly in
921 * many cases. */
922 ap->packed_transfers_supported = false;
923 ap->packed_transfers_probed = dap->ti_be_32_quirks ? true : false;
924
925 /* The ARM ADI spec leaves implementation-defined whether unaligned
926 * memory accesses work, only work partially, or cause a sticky error.
927 * On TI BE-32 processors, reads seem to return garbage in some bytes
928 * and unaligned writes seem to cause a sticky error.
929 * TODO: it would be nice to have a way to detect whether unaligned
930 * operations are supported on other processors. */
931 ap->unaligned_access_bad = dap->ti_be_32_quirks;
932
933 LOG_DEBUG("MEM_AP CFG: large data %d, long address %d, big-endian %d",
934 !!(cfg & MEM_AP_REG_CFG_LD), !!(cfg & MEM_AP_REG_CFG_LA), !!(cfg & MEM_AP_REG_CFG_BE));
935
936 return ERROR_OK;
937 }
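/*
 * Typical bring-up order sketch using the helpers in this file (error and
 * NULL checks omitted; AP number 0 and the transfer parameters are examples):
 *
 *	retval = dap_dp_init(dap);			// power up the DP, enable overrun detection
 *	struct adiv5_ap *ap = dap_get_ap(dap, 0);	// take a reference on AP #0
 *	retval = mem_ap_init(ap);			// read CFG, reset cached CSW/TAR
 *	retval = mem_ap_read_buf(ap, buf, 4, count, address);
 *	dap_put_ap(ap);					// drop the reference when done
 */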
938
939 /**
940 * Put the debug link into SWD mode, if the target supports it.
941 * The link's initial mode may be either JTAG (for example,
942 * with SWJ-DP after reset) or SWD.
943 *
944 * Note that targets using the JTAG-DP do not support SWD, and that
945 * some targets which could otherwise support it may have been
946 * configured to disable SWD signaling.
947 *
948 * @param dap The DAP used
949 * @return ERROR_OK or else a fault code.
950 */
951 int dap_to_swd(struct adiv5_dap *dap)
952 {
953 LOG_DEBUG("Enter SWD mode");
954
955 return dap_send_sequence(dap, JTAG_TO_SWD);
956 }
957
958 /**
959 * Put the debug link into JTAG mode, if the target supports it.
960 * The link's initial mode may be either SWD or JTAG.
961 *
962 * Note that targets implemented with SW-DP do not support JTAG, and
963 * that some targets which could otherwise support it may have been
964 * configured to disable JTAG signaling.
965 *
966 * @param dap The DAP used
967 * @return ERROR_OK or else a fault code.
968 */
969 int dap_to_jtag(struct adiv5_dap *dap)
970 {
971 LOG_DEBUG("Enter JTAG mode");
972
973 return dap_send_sequence(dap, SWD_TO_JTAG);
974 }
975
976 /* CID interpretation -- see ARM IHI 0029E table B2-7
977 * and ARM IHI 0031E table D1-2.
978 *
979 * From 2009/11/25 commit 21378f58b604:
980 * "OptimoDE DESS" is ARM's semicustom DSPish stuff.
981 * Let's keep it as is, for the time being
982 */
983 static const char *class_description[16] = {
984 [0x0] = "Generic verification component",
985 [0x1] = "ROM table",
986 [0x2] = "Reserved",
987 [0x3] = "Reserved",
988 [0x4] = "Reserved",
989 [0x5] = "Reserved",
990 [0x6] = "Reserved",
991 [0x7] = "Reserved",
992 [0x8] = "Reserved",
993 [0x9] = "CoreSight component",
994 [0xA] = "Reserved",
995 [0xB] = "Peripheral Test Block",
996 [0xC] = "Reserved",
997 [0xD] = "OptimoDE DESS", /* see above */
998 [0xE] = "Generic IP component",
999 [0xF] = "CoreLink, PrimeCell or System component",
1000 };
1001
1002 #define ARCH_ID(architect, archid) ( \
1003 (((architect) << ARM_CS_C9_DEVARCH_ARCHITECT_SHIFT) & ARM_CS_C9_DEVARCH_ARCHITECT_MASK) | \
1004 (((archid) << ARM_CS_C9_DEVARCH_ARCHID_SHIFT) & ARM_CS_C9_DEVARCH_ARCHID_MASK) \
1005 )
1006
1007 static const struct {
1008 uint32_t arch_id;
1009 const char *description;
1010 } class0x9_devarch[] = {
1011 /* keep same unsorted order as in ARM IHI0029E */
1012 { ARCH_ID(ARM_ID, 0x0A00), "RAS architecture" },
1013 { ARCH_ID(ARM_ID, 0x1A01), "Instrumentation Trace Macrocell (ITM) architecture" },
1014 { ARCH_ID(ARM_ID, 0x1A02), "DWT architecture" },
1015 { ARCH_ID(ARM_ID, 0x1A03), "Flash Patch and Breakpoint unit (FPB) architecture" },
1016 { ARCH_ID(ARM_ID, 0x2A04), "Processor debug architecture (ARMv8-M)" },
1017 { ARCH_ID(ARM_ID, 0x6A05), "Processor debug architecture (ARMv8-R)" },
1018 { ARCH_ID(ARM_ID, 0x0A10), "PC sample-based profiling" },
1019 { ARCH_ID(ARM_ID, 0x4A13), "Embedded Trace Macrocell (ETM) architecture" },
1020 { ARCH_ID(ARM_ID, 0x1A14), "Cross Trigger Interface (CTI) architecture" },
1021 { ARCH_ID(ARM_ID, 0x6A15), "Processor debug architecture (v8.0-A)" },
1022 { ARCH_ID(ARM_ID, 0x7A15), "Processor debug architecture (v8.1-A)" },
1023 { ARCH_ID(ARM_ID, 0x8A15), "Processor debug architecture (v8.2-A)" },
1024 { ARCH_ID(ARM_ID, 0x2A16), "Processor Performance Monitor (PMU) architecture" },
1025 { ARCH_ID(ARM_ID, 0x0A17), "Memory Access Port v2 architecture" },
1026 { ARCH_ID(ARM_ID, 0x0A27), "JTAG Access Port v2 architecture" },
1027 { ARCH_ID(ARM_ID, 0x0A31), "Basic trace router" },
1028 { ARCH_ID(ARM_ID, 0x0A37), "Power requestor" },
1029 { ARCH_ID(ARM_ID, 0x0A47), "Unknown Access Port v2 architecture" },
1030 { ARCH_ID(ARM_ID, 0x0A50), "HSSTP architecture" },
1031 { ARCH_ID(ARM_ID, 0x0A63), "System Trace Macrocell (STM) architecture" },
1032 { ARCH_ID(ARM_ID, 0x0A75), "CoreSight ELA architecture" },
1033 { ARCH_ID(ARM_ID, 0x0AF7), "CoreSight ROM architecture" },
1034 };
1035
1036 #define DEVARCH_ID_MASK (ARM_CS_C9_DEVARCH_ARCHITECT_MASK | ARM_CS_C9_DEVARCH_ARCHID_MASK)
1037 #define DEVARCH_MEM_AP ARCH_ID(ARM_ID, 0x0A17)
1038 #define DEVARCH_ROM_C_0X9 ARCH_ID(ARM_ID, 0x0AF7)
1039 #define DEVARCH_UNKNOWN_V2 ARCH_ID(ARM_ID, 0x0A47)
1040
1041 static const char *class0x9_devarch_description(uint32_t devarch)
1042 {
1043 if (!(devarch & ARM_CS_C9_DEVARCH_PRESENT))
1044 return "not present";
1045
1046 for (unsigned int i = 0; i < ARRAY_SIZE(class0x9_devarch); i++)
1047 if ((devarch & DEVARCH_ID_MASK) == class0x9_devarch[i].arch_id)
1048 return class0x9_devarch[i].description;
1049
1050 return "unknown";
1051 }
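/*
 * Example: a Class 0x9 component whose DEVARCH has the PRESENT bit set,
 * ARCHITECT = ARM_ID and ARCHID = 0x0A17 (i.e. DEVARCH_MEM_AP) is reported
 * as "Memory Access Port v2 architecture" by the lookup above.
 */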
1052
1053 static const struct {
1054 enum ap_type type;
1055 const char *description;
1056 } ap_types[] = {
1057 { AP_TYPE_JTAG_AP, "JTAG-AP" },
1058 { AP_TYPE_COM_AP, "COM-AP" },
1059 { AP_TYPE_AHB3_AP, "MEM-AP AHB3" },
1060 { AP_TYPE_APB_AP, "MEM-AP APB2 or APB3" },
1061 { AP_TYPE_AXI_AP, "MEM-AP AXI3 or AXI4" },
1062 { AP_TYPE_AHB5_AP, "MEM-AP AHB5" },
1063 { AP_TYPE_APB4_AP, "MEM-AP APB4" },
1064 { AP_TYPE_AXI5_AP, "MEM-AP AXI5" },
1065 { AP_TYPE_AHB5H_AP, "MEM-AP AHB5 with enhanced HPROT" },
1066 };
1067
1068 static const char *ap_type_to_description(enum ap_type type)
1069 {
1070 for (unsigned int i = 0; i < ARRAY_SIZE(ap_types); i++)
1071 if (type == ap_types[i].type)
1072 return ap_types[i].description;
1073
1074 return "Unknown";
1075 }
1076
1077 bool is_ap_num_valid(struct adiv5_dap *dap, uint64_t ap_num)
1078 {
1079 if (!dap)
1080 return false;
1081
1082 /* no autodetection for now, so uninitialized is equivalent to ADIv5 for
1083 * backward compatibility */
1084 if (!is_adiv6(dap)) {
1085 if (ap_num > DP_APSEL_MAX)
1086 return false;
1087 return true;
1088 }
1089
1090 if (is_adiv6(dap)) {
1091 if (ap_num & 0x0fffULL)
1092 return false;
1093 if (dap->asize != 0)
1094 if (ap_num & ((~0ULL) << dap->asize))
1095 return false;
1096 return true;
1097 }
1098
1099 return false;
1100 }
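/*
 * Examples: on an ADIv5 DAP any ap_num from 0 to DP_APSEL_MAX (255) is valid.
 * On an ADIv6 DAP ap_num is the 4 KiB-aligned AP address, so 0x0, 0x1000,
 * 0x2000, ... are valid and 0x1004 is not; if dap->asize is, say, 16, the
 * address must also fit below 1 << 16.
 */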
1101
1102 /*
1103 * This function checks the ID of each access port to find the requested Access Port type.
1104 * It also calls dap_get_ap() to increment the AP refcount.
1105 */
1106 int dap_find_get_ap(struct adiv5_dap *dap, enum ap_type type_to_find, struct adiv5_ap **ap_out)
1107 {
1108 if (is_adiv6(dap)) {
1109 /* TODO: scan the ROM table and detect the AP available */
1110 LOG_DEBUG("On ADIv6 we cannot scan all the possible AP");
1111 return ERROR_FAIL;
1112 }
1113
1114 /* Maximum AP number is 255 since the SELECT.APSEL field is 8 bits */
1115 for (unsigned int ap_num = 0; ap_num <= DP_APSEL_MAX; ap_num++) {
1116 struct adiv5_ap *ap = dap_get_ap(dap, ap_num);
1117 if (!ap)
1118 continue;
1119
1120 /* read the IDR register of the Access Port */
1121 uint32_t id_val = 0;
1122
1123 int retval = dap_queue_ap_read(ap, AP_REG_IDR(dap), &id_val);
1124 if (retval != ERROR_OK) {
1125 dap_put_ap(ap);
1126 return retval;
1127 }
1128
1129 retval = dap_run(dap);
1130
1131 /* Reading a register of a non-existent AP should not cause an error,
1132 * but just to be sure, try to continue searching if an error does happen.
1133 */
1134 if (retval == ERROR_OK && (id_val & AP_TYPE_MASK) == type_to_find) {
1135 LOG_DEBUG("Found %s at AP index: %d (IDR=0x%08" PRIX32 ")",
1136 ap_type_to_description(type_to_find),
1137 ap_num, id_val);
1138
1139 *ap_out = ap;
1140 return ERROR_OK;
1141 }
1142 dap_put_ap(ap);
1143 }
1144
1145 LOG_DEBUG("No %s found", ap_type_to_description(type_to_find));
1146 return ERROR_FAIL;
1147 }
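/*
 * Usage sketch (the AP type is just an example): locate an AHB3 MEM-AP, use
 * it, then drop the reference taken by dap_find_get_ap():
 *
 *	struct adiv5_ap *ap;
 *	if (dap_find_get_ap(dap, AP_TYPE_AHB3_AP, &ap) != ERROR_OK)
 *		return ERROR_FAIL;
 *	int retval = mem_ap_init(ap);
 *	...
 *	dap_put_ap(ap);
 */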
1148
1149 static inline bool is_ap_in_use(struct adiv5_ap *ap)
1150 {
1151 return ap->refcount > 0 || ap->config_ap_never_release;
1152 }
1153
1154 static struct adiv5_ap *_dap_get_ap(struct adiv5_dap *dap, uint64_t ap_num)
1155 {
1156 if (!is_ap_num_valid(dap, ap_num)) {
1157 LOG_ERROR("Invalid AP#0x%" PRIx64, ap_num);
1158 return NULL;
1159 }
1160 if (is_adiv6(dap)) {
1161 for (unsigned int i = 0; i <= DP_APSEL_MAX; i++) {
1162 struct adiv5_ap *ap = &dap->ap[i];
1163 if (is_ap_in_use(ap) && ap->ap_num == ap_num) {
1164 ++ap->refcount;
1165 return ap;
1166 }
1167 }
1168 for (unsigned int i = 0; i <= DP_APSEL_MAX; i++) {
1169 struct adiv5_ap *ap = &dap->ap[i];
1170 if (!is_ap_in_use(ap)) {
1171 ap->ap_num = ap_num;
1172 ++ap->refcount;
1173 return ap;
1174 }
1175 }
1176 LOG_ERROR("No more AP available!");
1177 return NULL;
1178 }
1179
1180 /* ADIv5 */
1181 struct adiv5_ap *ap = &dap->ap[ap_num];
1182 ap->ap_num = ap_num;
1183 ++ap->refcount;
1184 return ap;
1185 }
1186
1187 /* Return AP with specified ap_num. Increment AP refcount */
1188 struct adiv5_ap *dap_get_ap(struct adiv5_dap *dap, uint64_t ap_num)
1189 {
1190 struct adiv5_ap *ap = _dap_get_ap(dap, ap_num);
1191 if (ap)
1192 LOG_DEBUG("refcount AP#0x%" PRIx64 " get %u", ap_num, ap->refcount);
1193 return ap;
1194 }
1195
1196 /* Return AP with specified ap_num. Increment AP refcount and keep it non-zero */
1197 struct adiv5_ap *dap_get_config_ap(struct adiv5_dap *dap, uint64_t ap_num)
1198 {
1199 struct adiv5_ap *ap = _dap_get_ap(dap, ap_num);
1200 if (ap) {
1201 ap->config_ap_never_release = true;
1202 LOG_DEBUG("refcount AP#0x%" PRIx64 " get_config %u", ap_num, ap->refcount);
1203 }
1204 return ap;
1205 }
1206
1207 /* Decrement AP refcount and release the AP when refcount reaches zero */
1208 int dap_put_ap(struct adiv5_ap *ap)
1209 {
1210 if (ap->refcount == 0) {
1211 LOG_ERROR("BUG: refcount AP#0x%" PRIx64 " put underflow", ap->ap_num);
1212 return ERROR_FAIL;
1213 }
1214
1215 --ap->refcount;
1216
1217 LOG_DEBUG("refcount AP#0x%" PRIx64 " put %u", ap->ap_num, ap->refcount);
1218 if (!is_ap_in_use(ap)) {
1219 /* defaults from dap_instance_init() */
1220 ap->ap_num = DP_APSEL_INVALID;
1221 ap->memaccess_tck = 255;
1222 ap->tar_autoincr_block = (1 << 10);
1223 ap->csw_default = CSW_AHB_DEFAULT;
1224 ap->cfg_reg = MEM_AP_REG_CFG_INVALID;
1225 }
1226 return ERROR_OK;
1227 }
1228
1229 static int dap_get_debugbase(struct adiv5_ap *ap,
1230 target_addr_t *dbgbase, uint32_t *apid)
1231 {
1232 struct adiv5_dap *dap = ap->dap;
1233 int retval;
1234 uint32_t baseptr_upper, baseptr_lower;
1235
1236 if (ap->cfg_reg == MEM_AP_REG_CFG_INVALID) {
1237 retval = dap_queue_ap_read(ap, MEM_AP_REG_CFG(dap), &ap->cfg_reg);
1238 if (retval != ERROR_OK)
1239 return retval;
1240 }
1241 retval = dap_queue_ap_read(ap, MEM_AP_REG_BASE(dap), &baseptr_lower);
1242 if (retval != ERROR_OK)
1243 return retval;
1244 retval = dap_queue_ap_read(ap, AP_REG_IDR(dap), apid);
1245 if (retval != ERROR_OK)
1246 return retval;
1247 /* MEM_AP_REG_BASE64 is defined as 'RES0'; it can be read and then ignored on 32-bit APs */
1248 if (ap->cfg_reg == MEM_AP_REG_CFG_INVALID || is_64bit_ap(ap)) {
1249 retval = dap_queue_ap_read(ap, MEM_AP_REG_BASE64(dap), &baseptr_upper);
1250 if (retval != ERROR_OK)
1251 return retval;
1252 }
1253
1254 retval = dap_run(dap);
1255 if (retval != ERROR_OK)
1256 return retval;
1257
1258 if (!is_64bit_ap(ap))
1259 baseptr_upper = 0;
1260 *dbgbase = (((target_addr_t)baseptr_upper) << 32) | baseptr_lower;
1261
1262 return ERROR_OK;
1263 }
1264
1265 int adiv6_dap_read_baseptr(struct command_invocation *cmd, struct adiv5_dap *dap, uint64_t *baseptr)
1266 {
1267 uint32_t baseptr_lower, baseptr_upper = 0;
1268 int retval;
1269
1270 if (dap->asize > 32) {
1271 retval = dap_queue_dp_read(dap, DP_BASEPTR1, &baseptr_upper);
1272 if (retval != ERROR_OK)
1273 return retval;
1274 }
1275
1276 retval = dap_dp_read_atomic(dap, DP_BASEPTR0, &baseptr_lower);
1277 if (retval != ERROR_OK)
1278 return retval;
1279
1280 if ((baseptr_lower & DP_BASEPTR0_VALID) != DP_BASEPTR0_VALID) {
1281 command_print(cmd, "System root table not present");
1282 return ERROR_FAIL;
1283 }
1284
1285 baseptr_lower &= ~0x0fff;
1286 *baseptr = (((uint64_t)baseptr_upper) << 32) | baseptr_lower;
1287
1288 return ERROR_OK;
1289 }
1290
1291 /**
1292 * Method to access the CoreSight component.
1293 * On ADIv5, CoreSight components are on the bus behind a MEM-AP.
1294 * On ADIv6, CoreSight components can either be on the bus behind a MEM-AP
1295 * or directly in the AP.
1296 */
1297 enum coresight_access_mode {
1298 CS_ACCESS_AP,
1299 CS_ACCESS_MEM_AP,
1300 };
1301
1302 /** Holds registers and coordinates of a CoreSight component */
1303 struct cs_component_vals {
1304 struct adiv5_ap *ap;
1305 target_addr_t component_base;
1306 uint64_t pid;
1307 uint32_t cid;
1308 uint32_t devarch;
1309 uint32_t devid;
1310 uint32_t devtype_memtype;
1311 enum coresight_access_mode mode;
1312 };
1313
1314 /**
1315 * Helper to read CoreSight component's registers, either on the bus
1316 * behind a MEM-AP or directly in the AP.
1317 *
1318 * @param mode Method to access the component (AP or MEM-AP).
1319 * @param ap Pointer to AP containing the component.
1320 * @param component_base For the MEM-AP access method, the base address of the component.
1321 * @param reg Offset of the component's register to read.
1322 * @param value Pointer to store the read value.
1323 *
1324 * @return ERROR_OK on success, else a fault code.
1325 */
1326 static int dap_queue_read_reg(enum coresight_access_mode mode, struct adiv5_ap *ap,
1327 uint64_t component_base, unsigned int reg, uint32_t *value)
1328 {
1329 if (mode == CS_ACCESS_AP)
1330 return dap_queue_ap_read(ap, reg, value);
1331
1332 /* mode == CS_ACCESS_MEM_AP */
1333 return mem_ap_read_u32(ap, component_base + reg, value);
1334 }
1335
1336 /**
1337 * Read the CoreSight registers needed during ROM Table Parsing (RTP).
1338 *
1339 * @param mode Method to access the component (AP or MEM-AP).
1340 * @param ap Pointer to AP containing the component.
1341 * @param component_base For the MEM-AP access method, the base address of the component.
1342 * @param v Pointer to the struct holding the value of registers.
1343 *
1344 * @return ERROR_OK on success, else a fault code.
1345 */
1346 static int rtp_read_cs_regs(enum coresight_access_mode mode, struct adiv5_ap *ap,
1347 target_addr_t component_base, struct cs_component_vals *v)
1348 {
1349 assert(IS_ALIGNED(component_base, ARM_CS_ALIGN));
1350 assert(ap && v);
1351
1352 uint32_t cid0, cid1, cid2, cid3;
1353 uint32_t pid0, pid1, pid2, pid3, pid4;
1354 int retval = ERROR_OK;
1355
1356 v->ap = ap;
1357 v->component_base = component_base;
1358 v->mode = mode;
1359
1360 /* sort by offset to gain speed */
1361
1362 /*
1363 * Registers DEVARCH, DEVID and DEVTYPE are valid on Class 0x9 devices
1364 * only, but they sit at offsets above 0xf00, so they can be read on any device
1365 * without triggering an error. Read them for eventual use on Class 0x9.
1366 */
1367 if (retval == ERROR_OK)
1368 retval = dap_queue_read_reg(mode, ap, component_base, ARM_CS_C9_DEVARCH, &v->devarch);
1369
1370 if (retval == ERROR_OK)
1371 retval = dap_queue_read_reg(mode, ap, component_base, ARM_CS_C9_DEVID, &v->devid);
1372
1373 /* Same address as ARM_CS_C1_MEMTYPE */
1374 if (retval == ERROR_OK)
1375 retval = dap_queue_read_reg(mode, ap, component_base, ARM_CS_C9_DEVTYPE, &v->devtype_memtype);
1376
1377 if (retval == ERROR_OK)
1378 retval = dap_queue_read_reg(mode, ap, component_base, ARM_CS_PIDR4, &pid4);
1379
1380 if (retval == ERROR_OK)
1381 retval = dap_queue_read_reg(mode, ap, component_base, ARM_CS_PIDR0, &pid0);
1382 if (retval == ERROR_OK)
1383 retval = dap_queue_read_reg(mode, ap, component_base, ARM_CS_PIDR1, &pid1);
1384 if (retval == ERROR_OK)
1385 retval = dap_queue_read_reg(mode, ap, component_base, ARM_CS_PIDR2, &pid2);
1386 if (retval == ERROR_OK)
1387 retval = dap_queue_read_reg(mode, ap, component_base, ARM_CS_PIDR3, &pid3);
1388
1389 if (retval == ERROR_OK)
1390 retval = dap_queue_read_reg(mode, ap, component_base, ARM_CS_CIDR0, &cid0);
1391 if (retval == ERROR_OK)
1392 retval = dap_queue_read_reg(mode, ap, component_base, ARM_CS_CIDR1, &cid1);
1393 if (retval == ERROR_OK)
1394 retval = dap_queue_read_reg(mode, ap, component_base, ARM_CS_CIDR2, &cid2);
1395 if (retval == ERROR_OK)
1396 retval = dap_queue_read_reg(mode, ap, component_base, ARM_CS_CIDR3, &cid3);
1397
1398 if (retval == ERROR_OK)
1399 retval = dap_run(ap->dap);
1400 if (retval != ERROR_OK) {
1401 LOG_DEBUG("Failed read CoreSight registers");
1402 return retval;
1403 }
1404
1405 v->cid = (cid3 & 0xff) << 24
1406 | (cid2 & 0xff) << 16
1407 | (cid1 & 0xff) << 8
1408 | (cid0 & 0xff);
1409 v->pid = (uint64_t)(pid4 & 0xff) << 32
1410 | (pid3 & 0xff) << 24
1411 | (pid2 & 0xff) << 16
1412 | (pid1 & 0xff) << 8
1413 | (pid0 & 0xff);
1414
1415 return ERROR_OK;
1416 }
1417
1418 /* Part number interpretations are from Cortex
1419 * core specs, the CoreSight components TRM
1420 * (ARM DDI 0314H), CoreSight System Design
1421 * Guide (ARM DGI 0012D) and ETM specs; also
1422 * from chip observation (e.g. TI SDTI).
1423 */
1424
1425 static const struct dap_part_nums {
1426 uint16_t designer_id;
1427 uint16_t part_num;
1428 const char *type;
1429 const char *full;
1430 } dap_part_nums[] = {
1431 { ARM_ID, 0x000, "Cortex-M3 SCS", "(System Control Space)", },
1432 { ARM_ID, 0x001, "Cortex-M3 ITM", "(Instrumentation Trace Module)", },
1433 { ARM_ID, 0x002, "Cortex-M3 DWT", "(Data Watchpoint and Trace)", },
1434 { ARM_ID, 0x003, "Cortex-M3 FPB", "(Flash Patch and Breakpoint)", },
1435 { ARM_ID, 0x008, "Cortex-M0 SCS", "(System Control Space)", },
1436 { ARM_ID, 0x00a, "Cortex-M0 DWT", "(Data Watchpoint and Trace)", },
1437 { ARM_ID, 0x00b, "Cortex-M0 BPU", "(Breakpoint Unit)", },
1438 { ARM_ID, 0x00c, "Cortex-M4 SCS", "(System Control Space)", },
1439 { ARM_ID, 0x00d, "CoreSight ETM11", "(Embedded Trace)", },
1440 { ARM_ID, 0x00e, "Cortex-M7 FPB", "(Flash Patch and Breakpoint)", },
1441 { ARM_ID, 0x193, "SoC-600 TSGEN", "(Timestamp Generator)", },
1442 { ARM_ID, 0x470, "Cortex-M1 ROM", "(ROM Table)", },
1443 { ARM_ID, 0x471, "Cortex-M0 ROM", "(ROM Table)", },
1444 { ARM_ID, 0x490, "Cortex-A15 GIC", "(Generic Interrupt Controller)", },
1445 { ARM_ID, 0x492, "Cortex-R52 GICD", "(Distributor)", },
1446 { ARM_ID, 0x493, "Cortex-R52 GICR", "(Redistributor)", },
1447 { ARM_ID, 0x4a1, "Cortex-A53 ROM", "(v8 Memory Map ROM Table)", },
1448 { ARM_ID, 0x4a2, "Cortex-A57 ROM", "(ROM Table)", },
1449 { ARM_ID, 0x4a3, "Cortex-A53 ROM", "(v7 Memory Map ROM Table)", },
1450 { ARM_ID, 0x4a4, "Cortex-A72 ROM", "(ROM Table)", },
1451 { ARM_ID, 0x4a9, "Cortex-A9 ROM", "(ROM Table)", },
1452 { ARM_ID, 0x4aa, "Cortex-A35 ROM", "(v8 Memory Map ROM Table)", },
1453 { ARM_ID, 0x4af, "Cortex-A15 ROM", "(ROM Table)", },
1454 { ARM_ID, 0x4b5, "Cortex-R5 ROM", "(ROM Table)", },
1455 { ARM_ID, 0x4b8, "Cortex-R52 ROM", "(ROM Table)", },
1456 { ARM_ID, 0x4c0, "Cortex-M0+ ROM", "(ROM Table)", },
1457 { ARM_ID, 0x4c3, "Cortex-M3 ROM", "(ROM Table)", },
1458 { ARM_ID, 0x4c4, "Cortex-M4 ROM", "(ROM Table)", },
1459 { ARM_ID, 0x4c7, "Cortex-M7 PPB ROM", "(Private Peripheral Bus ROM Table)", },
1460 { ARM_ID, 0x4c8, "Cortex-M7 ROM", "(ROM Table)", },
1461 { ARM_ID, 0x4e0, "Cortex-A35 ROM", "(v7 Memory Map ROM Table)", },
1462 { ARM_ID, 0x4e4, "Cortex-A76 ROM", "(ROM Table)", },
1463 { ARM_ID, 0x906, "CoreSight CTI", "(Cross Trigger)", },
1464 { ARM_ID, 0x907, "CoreSight ETB", "(Trace Buffer)", },
1465 { ARM_ID, 0x908, "CoreSight CSTF", "(Trace Funnel)", },
1466 { ARM_ID, 0x909, "CoreSight ATBR", "(Advanced Trace Bus Replicator)", },
1467 { ARM_ID, 0x910, "CoreSight ETM9", "(Embedded Trace)", },
1468 { ARM_ID, 0x912, "CoreSight TPIU", "(Trace Port Interface Unit)", },
1469 { ARM_ID, 0x913, "CoreSight ITM", "(Instrumentation Trace Macrocell)", },
1470 { ARM_ID, 0x914, "CoreSight SWO", "(Single Wire Output)", },
1471 { ARM_ID, 0x917, "CoreSight HTM", "(AHB Trace Macrocell)", },
1472 { ARM_ID, 0x920, "CoreSight ETM11", "(Embedded Trace)", },
1473 { ARM_ID, 0x921, "Cortex-A8 ETM", "(Embedded Trace)", },
1474 { ARM_ID, 0x922, "Cortex-A8 CTI", "(Cross Trigger)", },
1475 { ARM_ID, 0x923, "Cortex-M3 TPIU", "(Trace Port Interface Unit)", },
1476 { ARM_ID, 0x924, "Cortex-M3 ETM", "(Embedded Trace)", },
1477 { ARM_ID, 0x925, "Cortex-M4 ETM", "(Embedded Trace)", },
1478 { ARM_ID, 0x930, "Cortex-R4 ETM", "(Embedded Trace)", },
1479 { ARM_ID, 0x931, "Cortex-R5 ETM", "(Embedded Trace)", },
1480 { ARM_ID, 0x932, "CoreSight MTB-M0+", "(Micro Trace Buffer)", },
1481 { ARM_ID, 0x941, "CoreSight TPIU-Lite", "(Trace Port Interface Unit)", },
1482 { ARM_ID, 0x950, "Cortex-A9 PTM", "(Program Trace Macrocell)", },
1483 { ARM_ID, 0x955, "Cortex-A5 ETM", "(Embedded Trace)", },
1484 { ARM_ID, 0x95a, "Cortex-A72 ETM", "(Embedded Trace)", },
1485 { ARM_ID, 0x95b, "Cortex-A17 PTM", "(Program Trace Macrocell)", },
1486 { ARM_ID, 0x95d, "Cortex-A53 ETM", "(Embedded Trace)", },
1487 { ARM_ID, 0x95e, "Cortex-A57 ETM", "(Embedded Trace)", },
1488 { ARM_ID, 0x95f, "Cortex-A15 PTM", "(Program Trace Macrocell)", },
1489 { ARM_ID, 0x961, "CoreSight TMC", "(Trace Memory Controller)", },
1490 { ARM_ID, 0x962, "CoreSight STM", "(System Trace Macrocell)", },
1491 { ARM_ID, 0x975, "Cortex-M7 ETM", "(Embedded Trace)", },
1492 { ARM_ID, 0x9a0, "CoreSight PMU", "(Performance Monitoring Unit)", },
1493 { ARM_ID, 0x9a1, "Cortex-M4 TPIU", "(Trace Port Interface Unit)", },
1494 { ARM_ID, 0x9a4, "CoreSight GPR", "(Granular Power Requester)", },
1495 { ARM_ID, 0x9a5, "Cortex-A5 PMU", "(Performance Monitor Unit)", },
1496 { ARM_ID, 0x9a7, "Cortex-A7 PMU", "(Performance Monitor Unit)", },
1497 { ARM_ID, 0x9a8, "Cortex-A53 CTI", "(Cross Trigger)", },
1498 { ARM_ID, 0x9a9, "Cortex-M7 TPIU", "(Trace Port Interface Unit)", },
1499 { ARM_ID, 0x9ae, "Cortex-A17 PMU", "(Performance Monitor Unit)", },
1500 { ARM_ID, 0x9af, "Cortex-A15 PMU", "(Performance Monitor Unit)", },
1501 { ARM_ID, 0x9b6, "Cortex-R52 PMU/CTI/ETM", "(Performance Monitor Unit/Cross Trigger/ETM)", },
1502 { ARM_ID, 0x9b7, "Cortex-R7 PMU", "(Performance Monitor Unit)", },
1503 { ARM_ID, 0x9d3, "Cortex-A53 PMU", "(Performance Monitor Unit)", },
1504 { ARM_ID, 0x9d7, "Cortex-A57 PMU", "(Performance Monitor Unit)", },
1505 { ARM_ID, 0x9d8, "Cortex-A72 PMU", "(Performance Monitor Unit)", },
1506 { ARM_ID, 0x9da, "Cortex-A35 PMU/CTI/ETM", "(Performance Monitor Unit/Cross Trigger/ETM)", },
1507 { ARM_ID, 0x9e2, "SoC-600 APB-AP", "(APB4 Memory Access Port)", },
1508 { ARM_ID, 0x9e3, "SoC-600 AHB-AP", "(AHB5 Memory Access Port)", },
1509 { ARM_ID, 0x9e4, "SoC-600 AXI-AP", "(AXI Memory Access Port)", },
1510 { ARM_ID, 0x9e5, "SoC-600 APv1 Adapter", "(Access Port v1 Adapter)", },
1511 { ARM_ID, 0x9e6, "SoC-600 JTAG-AP", "(JTAG Access Port)", },
1512 { ARM_ID, 0x9e7, "SoC-600 TPIU", "(Trace Port Interface Unit)", },
1513 { ARM_ID, 0x9e8, "SoC-600 TMC ETR/ETS", "(Embedded Trace Router/Streamer)", },
1514 { ARM_ID, 0x9e9, "SoC-600 TMC ETB", "(Embedded Trace Buffer)", },
1515 { ARM_ID, 0x9ea, "SoC-600 TMC ETF", "(Embedded Trace FIFO)", },
1516 { ARM_ID, 0x9eb, "SoC-600 ATB Funnel", "(Trace Funnel)", },
1517 { ARM_ID, 0x9ec, "SoC-600 ATB Replicator", "(Trace Replicator)", },
1518 { ARM_ID, 0x9ed, "SoC-600 CTI", "(Cross Trigger)", },
1519 { ARM_ID, 0x9ee, "SoC-600 CATU", "(Address Translation Unit)", },
1520 { ARM_ID, 0xc05, "Cortex-A5 Debug", "(Debug Unit)", },
1521 { ARM_ID, 0xc07, "Cortex-A7 Debug", "(Debug Unit)", },
1522 { ARM_ID, 0xc08, "Cortex-A8 Debug", "(Debug Unit)", },
1523 { ARM_ID, 0xc09, "Cortex-A9 Debug", "(Debug Unit)", },
1524 { ARM_ID, 0xc0e, "Cortex-A17 Debug", "(Debug Unit)", },
1525 { ARM_ID, 0xc0f, "Cortex-A15 Debug", "(Debug Unit)", },
1526 { ARM_ID, 0xc14, "Cortex-R4 Debug", "(Debug Unit)", },
1527 { ARM_ID, 0xc15, "Cortex-R5 Debug", "(Debug Unit)", },
1528 { ARM_ID, 0xc17, "Cortex-R7 Debug", "(Debug Unit)", },
1529 { ARM_ID, 0xd03, "Cortex-A53 Debug", "(Debug Unit)", },
1530 { ARM_ID, 0xd04, "Cortex-A35 Debug", "(Debug Unit)", },
1531 { ARM_ID, 0xd07, "Cortex-A57 Debug", "(Debug Unit)", },
1532 { ARM_ID, 0xd08, "Cortex-A72 Debug", "(Debug Unit)", },
1533 { ARM_ID, 0xd0b, "Cortex-A76 Debug", "(Debug Unit)", },
1534 { ARM_ID, 0xd0c, "Neoverse N1", "(Debug Unit)", },
1535 { ARM_ID, 0xd13, "Cortex-R52 Debug", "(Debug Unit)", },
1536 { ARM_ID, 0xd49, "Neoverse N2", "(Debug Unit)", },
1537 { 0x017, 0x120, "TI SDTI", "(System Debug Trace Interface)", }, /* from OMAP3 memmap */
1538 { 0x017, 0x343, "TI DAPCTL", "", }, /* from OMAP3 memmap */
1539 { 0x017, 0x9af, "MSP432 ROM", "(ROM Table)" },
1540 { 0x01f, 0xcd0, "Atmel CPU with DSU", "(CPU)" },
1541 { 0x041, 0x1db, "XMC4500 ROM", "(ROM Table)" },
1542 { 0x041, 0x1df, "XMC4700/4800 ROM", "(ROM Table)" },
1543 { 0x041, 0x1ed, "XMC1000 ROM", "(ROM Table)" },
1544 { 0x065, 0x000, "SHARC+/Blackfin+", "", },
1545 { 0x070, 0x440, "Qualcomm QDSS Component v1", "(Qualcomm Designed CoreSight Component v1)", },
1546 { 0x0bf, 0x100, "Brahma-B53 Debug", "(Debug Unit)", },
1547 { 0x0bf, 0x9d3, "Brahma-B53 PMU", "(Performance Monitor Unit)", },
1548 { 0x0bf, 0x4a1, "Brahma-B53 ROM", "(ROM Table)", },
1549 { 0x0bf, 0x721, "Brahma-B53 ROM", "(ROM Table)", },
1550 { 0x1eb, 0x181, "Tegra 186 ROM", "(ROM Table)", },
1551 { 0x1eb, 0x202, "Denver ETM", "(Denver Embedded Trace)", },
1552 { 0x1eb, 0x211, "Tegra 210 ROM", "(ROM Table)", },
1553 { 0x1eb, 0x302, "Denver Debug", "(Debug Unit)", },
1554 { 0x1eb, 0x402, "Denver PMU", "(Performance Monitor Unit)", },
1555 };
1556
1557 static const struct dap_part_nums *pidr_to_part_num(unsigned int designer_id, unsigned int part_num)
1558 {
1559 static const struct dap_part_nums unknown = {
1560 .type = "Unrecognized",
1561 .full = "",
1562 };
1563
1564 for (unsigned int i = 0; i < ARRAY_SIZE(dap_part_nums); i++)
1565 if (dap_part_nums[i].designer_id == designer_id && dap_part_nums[i].part_num == part_num)
1566 return &dap_part_nums[i];
1567
1568 return &unknown;
1569 }
1570
1571 static int dap_devtype_display(struct command_invocation *cmd, uint32_t devtype)
1572 {
1573 const char *major = "Reserved", *subtype = "Reserved";
1574 const unsigned int minor = (devtype & ARM_CS_C9_DEVTYPE_SUB_MASK) >> ARM_CS_C9_DEVTYPE_SUB_SHIFT;
1575 const unsigned int devtype_major = (devtype & ARM_CS_C9_DEVTYPE_MAJOR_MASK) >> ARM_CS_C9_DEVTYPE_MAJOR_SHIFT;
1576 switch (devtype_major) {
1577 case 0:
1578 major = "Miscellaneous";
1579 switch (minor) {
1580 case 0:
1581 subtype = "other";
1582 break;
1583 case 4:
1584 subtype = "Validation component";
1585 break;
1586 }
1587 break;
1588 case 1:
1589 major = "Trace Sink";
1590 switch (minor) {
1591 case 0:
1592 subtype = "other";
1593 break;
1594 case 1:
1595 subtype = "Port";
1596 break;
1597 case 2:
1598 subtype = "Buffer";
1599 break;
1600 case 3:
1601 subtype = "Router";
1602 break;
1603 }
1604 break;
1605 case 2:
1606 major = "Trace Link";
1607 switch (minor) {
1608 case 0:
1609 subtype = "other";
1610 break;
1611 case 1:
1612 subtype = "Funnel, router";
1613 break;
1614 case 2:
1615 subtype = "Filter";
1616 break;
1617 case 3:
1618 subtype = "FIFO, buffer";
1619 break;
1620 }
1621 break;
1622 case 3:
1623 major = "Trace Source";
1624 switch (minor) {
1625 case 0:
1626 subtype = "other";
1627 break;
1628 case 1:
1629 subtype = "Processor";
1630 break;
1631 case 2:
1632 subtype = "DSP";
1633 break;
1634 case 3:
1635 subtype = "Engine/Coprocessor";
1636 break;
1637 case 4:
1638 subtype = "Bus";
1639 break;
1640 case 6:
1641 subtype = "Software";
1642 break;
1643 }
1644 break;
1645 case 4:
1646 major = "Debug Control";
1647 switch (minor) {
1648 case 0:
1649 subtype = "other";
1650 break;
1651 case 1:
1652 subtype = "Trigger Matrix";
1653 break;
1654 case 2:
1655 subtype = "Debug Auth";
1656 break;
1657 case 3:
1658 subtype = "Power Requestor";
1659 break;
1660 }
1661 break;
1662 case 5:
1663 major = "Debug Logic";
1664 switch (minor) {
1665 case 0:
1666 subtype = "other";
1667 break;
1668 case 1:
1669 subtype = "Processor";
1670 break;
1671 case 2:
1672 subtype = "DSP";
1673 break;
1674 case 3:
1675 subtype = "Engine/Coprocessor";
1676 break;
1677 case 4:
1678 subtype = "Bus";
1679 break;
1680 case 5:
1681 subtype = "Memory";
1682 break;
1683 }
1684 break;
1685 case 6:
1686 major = "Performance Monitor";
1687 switch (minor) {
1688 case 0:
1689 subtype = "other";
1690 break;
1691 case 1:
1692 subtype = "Processor";
1693 break;
1694 case 2:
1695 subtype = "DSP";
1696 break;
1697 case 3:
1698 subtype = "Engine/Coprocessor";
1699 break;
1700 case 4:
1701 subtype = "Bus";
1702 break;
1703 case 5:
1704 subtype = "Memory";
1705 break;
1706 }
1707 break;
1708 }
1709 command_print(cmd, "\t\tType is 0x%02x, %s, %s",
1710 devtype & ARM_CS_C9_DEVTYPE_MASK,
1711 major, subtype);
1712 return ERROR_OK;
1713 }
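
/*
 * Worked example for the decoding above (the DEVTYPE value is illustrative):
 * 0x14 gives major = 0x14 & 0xf = 4 ("Debug Control") and
 * sub = (0x14 >> 4) & 0xf = 1 ("Trigger Matrix"), i.e. a Cross Trigger
 * Interface, so dap_devtype_display() prints
 * "Type is 0x14, Debug Control, Trigger Matrix".
 */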
1714
1715 /**
1716 * Actions/operations to be executed while parsing ROM tables.
1717 */
1718 struct rtp_ops {
1719 /**
1720 * Executed at the start of a new AP, typically to print the AP header.
1721 * @param ap Pointer to AP.
1722 * @param depth The current depth level of ROM table.
1723 * @param priv Pointer to private data.
1724 * @return ERROR_OK on success, else a fault code.
1725 */
1726 int (*ap_header)(struct adiv5_ap *ap, int depth, void *priv);
1727 /**
1728 * Executed at the start of a new MEM-AP, typically to print the MEM-AP header.
1729 * @param retval Error encountered while reading AP.
1730 * @param ap Pointer to AP.
1731 * @param dbgbase Value of MEM-AP Debug Base Address register.
1732 * @param apid Value of MEM-AP IDR Identification Register.
1733 * @param depth The current depth level of ROM table.
1734 * @param priv Pointer to private data.
1735 * @return ERROR_OK on success, else a fault code.
1736 */
1737 int (*mem_ap_header)(int retval, struct adiv5_ap *ap, uint64_t dbgbase,
1738 uint32_t apid, int depth, void *priv);
1739 /**
1740 * Executed when a CoreSight component is parsed, typically to print
1741 * information on the component.
1742 * @param retval Error encountered while reading component's registers.
1743 * @param v Pointer to a container of the component's registers.
1744 * @param depth The current depth level of ROM table.
1745 * @param priv Pointer to private data.
1746 * @return ERROR_OK on success, else a fault code.
1747 */
1748 int (*cs_component)(int retval, struct cs_component_vals *v, int depth, void *priv);
1749 /**
1750 * Executed for each entry of a ROM table, typically to print the entry
1751 * and information about validity or end-of-table mark.
1752 * @param retval Error encountered while reading the ROM table entry.
1753 * @param depth The current depth level of ROM table.
1754 * @param offset The offset of the entry in the ROM table.
1755 * @param romentry The value of the ROM table entry.
1756 * @param priv Pointer to private data.
1757 * @return ERROR_OK on success, else a fault code.
1758 */
1759 int (*rom_table_entry)(int retval, int depth, unsigned int offset, uint64_t romentry,
1760 void *priv);
1761 /**
1762 * Private data
1763 */
1764 void *priv;
1765 };
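
/*
 * Minimal usage sketch for the callback interface above (illustrative only;
 * the real users are dap_info_command() and dap_lookup_cs_component() below,
 * and the callback/variable names here are hypothetical): fill only the
 * callbacks you need, leave the rest NULL, then start the walk with rtp_ap()
 * on the AP of interest.
 *
 *	static int my_cs_component(int retval, struct cs_component_vals *v,
 *		int depth, void *priv)
 *	{
 *		if (retval == ERROR_OK)
 *			LOG_INFO("component at " TARGET_ADDR_FMT, v->component_base);
 *		return retval;
 *	}
 *
 *	const struct rtp_ops my_ops = {
 *		.cs_component = my_cs_component,
 *	};
 *	int retval = rtp_ap(&my_ops, ap, 0);
 */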
1766
1767 /**
1768 * Wrapper around struct rtp_ops::ap_header.
1769 */
1770 static int rtp_ops_ap_header(const struct rtp_ops *ops,
1771 struct adiv5_ap *ap, int depth)
1772 {
1773 if (ops->ap_header)
1774 return ops->ap_header(ap, depth, ops->priv);
1775
1776 return ERROR_OK;
1777 }
1778
1779 /**
1780 * Wrapper around struct rtp_ops::mem_ap_header.
1781 * Input parameter @a retval is propagated.
1782 */
1783 static int rtp_ops_mem_ap_header(const struct rtp_ops *ops,
1784 int retval, struct adiv5_ap *ap, uint64_t dbgbase, uint32_t apid, int depth)
1785 {
1786 if (!ops->mem_ap_header)
1787 return retval;
1788
1789 int retval1 = ops->mem_ap_header(retval, ap, dbgbase, apid, depth, ops->priv);
1790 if (retval != ERROR_OK)
1791 return retval;
1792 return retval1;
1793 }
1794
1795 /**
1796 * Wrapper around struct rtp_ops::cs_component.
1797 * Input parameter @a retval is propagated.
1798 */
1799 static int rtp_ops_cs_component(const struct rtp_ops *ops,
1800 int retval, struct cs_component_vals *v, int depth)
1801 {
1802 if (!ops->cs_component)
1803 return retval;
1804
1805 int retval1 = ops->cs_component(retval, v, depth, ops->priv);
1806 if (retval != ERROR_OK)
1807 return retval;
1808 return retval1;
1809 }
1810
1811 /**
1812 * Wrapper around struct rtp_ops::rom_table_entry.
1813 * Input parameter @a retval is propagated.
1814 */
1815 static int rtp_ops_rom_table_entry(const struct rtp_ops *ops,
1816 int retval, int depth, unsigned int offset, uint64_t romentry)
1817 {
1818 if (!ops->rom_table_entry)
1819 return retval;
1820
1821 int retval1 = ops->rom_table_entry(retval, depth, offset, romentry, ops->priv);
1822 if (retval != ERROR_OK)
1823 return retval;
1824 return retval1;
1825 }
1826
1827 /* Broken ROM tables can have circular references. Stop after a while */
1828 #define ROM_TABLE_MAX_DEPTH (16)
1829
1830 /**
1831 * Value used only during lookup of a CoreSight component in ROM table.
1832 * Return CORESIGHT_COMPONENT_FOUND when component is found.
1833 * Return ERROR_OK when component is not found yet.
1834 * Return any other ERROR_* in case of error.
1835 */
1836 #define CORESIGHT_COMPONENT_FOUND (1)
1837
1838 static int rtp_ap(const struct rtp_ops *ops, struct adiv5_ap *ap, int depth);
1839 static int rtp_cs_component(enum coresight_access_mode mode, const struct rtp_ops *ops,
1840 struct adiv5_ap *ap, target_addr_t dbgbase, bool *is_mem_ap, int depth);
1841
1842 static int rtp_rom_loop(enum coresight_access_mode mode, const struct rtp_ops *ops,
1843 struct adiv5_ap *ap, target_addr_t base_address, int depth,
1844 unsigned int width, unsigned int max_entries)
1845 {
1846 	/* An ADIv6 AP ROM table provides offsets from the current AP */
1847 if (mode == CS_ACCESS_AP)
1848 base_address = ap->ap_num;
1849
1850 assert(IS_ALIGNED(base_address, ARM_CS_ALIGN));
1851
1852 unsigned int offset = 0;
1853 while (max_entries--) {
1854 uint64_t romentry;
1855 uint32_t romentry_low, romentry_high;
1856 target_addr_t component_base;
1857 unsigned int saved_offset = offset;
1858
1859 int retval = dap_queue_read_reg(mode, ap, base_address, offset, &romentry_low);
1860 offset += 4;
1861 if (retval == ERROR_OK && width == 64) {
1862 retval = dap_queue_read_reg(mode, ap, base_address, offset, &romentry_high);
1863 offset += 4;
1864 }
1865 if (retval == ERROR_OK)
1866 retval = dap_run(ap->dap);
1867 if (retval != ERROR_OK) {
1868 			LOG_DEBUG("Failed to read ROM table entry");
1869 return retval;
1870 }
1871
1872 if (width == 64) {
1873 romentry = (((uint64_t)romentry_high) << 32) | romentry_low;
1874 component_base = base_address +
1875 ((((uint64_t)romentry_high) << 32) | (romentry_low & ARM_CS_ROMENTRY_OFFSET_MASK));
1876 } else {
1877 romentry = romentry_low;
1878 /* "romentry" is signed */
1879 component_base = base_address + (int32_t)(romentry_low & ARM_CS_ROMENTRY_OFFSET_MASK);
1880 if (!is_64bit_ap(ap))
1881 component_base = (uint32_t)component_base;
1882 }
1883 retval = rtp_ops_rom_table_entry(ops, retval, depth, saved_offset, romentry);
1884 if (retval != ERROR_OK)
1885 return retval;
1886
1887 if (romentry == 0) {
1888 /* End of ROM table */
1889 break;
1890 }
1891
1892 if (!(romentry & ARM_CS_ROMENTRY_PRESENT))
1893 continue;
1894
1895 /* Recurse */
1896 if (mode == CS_ACCESS_AP) {
1897 struct adiv5_ap *next_ap = dap_get_ap(ap->dap, component_base);
1898 if (!next_ap) {
1899 LOG_DEBUG("Wrong AP # 0x%" PRIx64, component_base);
1900 continue;
1901 }
1902 retval = rtp_ap(ops, next_ap, depth + 1);
1903 dap_put_ap(next_ap);
1904 } else {
1905 /* mode == CS_ACCESS_MEM_AP */
1906 retval = rtp_cs_component(mode, ops, ap, component_base, NULL, depth + 1);
1907 }
1908 if (retval == CORESIGHT_COMPONENT_FOUND)
1909 return CORESIGHT_COMPONENT_FOUND;
1910 if (retval != ERROR_OK) {
1911 /* TODO: do we need to send an ABORT before continuing? */
1912 LOG_DEBUG("Ignore error parsing CoreSight component");
1913 continue;
1914 }
1915 }
1916
1917 return ERROR_OK;
1918 }
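
/*
 * Illustrative arithmetic for the 32-bit branch above (the values are
 * invented for the example): with base_address = 0x80000000 and
 * romentry_low = 0xFFF0E003, the PRESENT bit (bit 0) is set and the offset
 * field 0xFFF0E000 is interpreted as a signed 32-bit value (-0xF2000), so
 * component_base = 0x7FF0E000. A romentry of 0 marks the end of the table.
 */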
1919
1920 static int rtp_cs_component(enum coresight_access_mode mode, const struct rtp_ops *ops,
1921 struct adiv5_ap *ap, target_addr_t base_address, bool *is_mem_ap, int depth)
1922 {
1923 struct cs_component_vals v;
1924 int retval;
1925
1926 assert(IS_ALIGNED(base_address, ARM_CS_ALIGN));
1927
1928 if (is_mem_ap)
1929 *is_mem_ap = false;
1930
1931 if (depth > ROM_TABLE_MAX_DEPTH)
1932 retval = ERROR_FAIL;
1933 else
1934 retval = rtp_read_cs_regs(mode, ap, base_address, &v);
1935
1936 retval = rtp_ops_cs_component(ops, retval, &v, depth);
1937 if (retval == CORESIGHT_COMPONENT_FOUND)
1938 return CORESIGHT_COMPONENT_FOUND;
1939 if (retval != ERROR_OK)
1940 return ERROR_OK; /* Don't abort recursion */
1941
1942 if (!is_valid_arm_cs_cidr(v.cid))
1943 return ERROR_OK; /* Don't abort recursion */
1944
1945 const unsigned int class = ARM_CS_CIDR_CLASS(v.cid);
1946
1947 if (class == ARM_CS_CLASS_0X1_ROM_TABLE)
1948 return rtp_rom_loop(mode, ops, ap, base_address, depth, 32, 960);
1949
1950 if (class == ARM_CS_CLASS_0X9_CS_COMPONENT) {
1951 if ((v.devarch & ARM_CS_C9_DEVARCH_PRESENT) == 0)
1952 return ERROR_OK;
1953
1954 if (is_mem_ap) {
1955 if ((v.devarch & DEVARCH_ID_MASK) == DEVARCH_MEM_AP)
1956 *is_mem_ap = true;
1957
1958 /* SoC-600 APv1 Adapter */
1959 if ((v.devarch & DEVARCH_ID_MASK) == DEVARCH_UNKNOWN_V2 &&
1960 ARM_CS_PIDR_DESIGNER(v.pid) == ARM_ID &&
1961 ARM_CS_PIDR_PART(v.pid) == 0x9e5)
1962 *is_mem_ap = true;
1963 }
1964
1965 /* quit if not ROM table */
1966 if ((v.devarch & DEVARCH_ID_MASK) != DEVARCH_ROM_C_0X9)
1967 return ERROR_OK;
1968
1969 if ((v.devid & ARM_CS_C9_DEVID_FORMAT_MASK) == ARM_CS_C9_DEVID_FORMAT_64BIT)
1970 return rtp_rom_loop(mode, ops, ap, base_address, depth, 64, 256);
1971 else
1972 return rtp_rom_loop(mode, ops, ap, base_address, depth, 32, 512);
1973 }
1974
1975 /* Class other than 0x1 and 0x9 */
1976 return ERROR_OK;
1977 }
1978
1979 static int rtp_ap(const struct rtp_ops *ops, struct adiv5_ap *ap, int depth)
1980 {
1981 uint32_t apid;
1982 target_addr_t dbgbase, invalid_entry;
1983
1984 int retval = rtp_ops_ap_header(ops, ap, depth);
1985 if (retval != ERROR_OK || depth > ROM_TABLE_MAX_DEPTH)
1986 return ERROR_OK; /* Don't abort recursion */
1987
1988 if (is_adiv6(ap->dap)) {
1989 bool is_mem_ap;
1990 retval = rtp_cs_component(CS_ACCESS_AP, ops, ap, 0, &is_mem_ap, depth);
1991 if (retval == CORESIGHT_COMPONENT_FOUND)
1992 return CORESIGHT_COMPONENT_FOUND;
1993 if (retval != ERROR_OK)
1994 return ERROR_OK; /* Don't abort recursion */
1995
1996 if (!is_mem_ap)
1997 return ERROR_OK;
1998 /* Continue for an ADIv6 MEM-AP or SoC-600 APv1 Adapter */
1999 }
2000
2001 	/* Now we read ROM table ID registers, ref. ARM IHI 0029B */
2002 retval = dap_get_debugbase(ap, &dbgbase, &apid);
2003 if (retval != ERROR_OK)
2004 return retval;
2005 retval = rtp_ops_mem_ap_header(ops, retval, ap, dbgbase, apid, depth);
2006 if (retval != ERROR_OK)
2007 return retval;
2008
2009 if (apid == 0)
2010 return ERROR_FAIL;
2011
2012 /* NOTE: a MEM-AP may have a single CoreSight component that's
2013 * not a ROM table ... or have no such components at all.
2014 */
2015 const unsigned int class = (apid & AP_REG_IDR_CLASS_MASK) >> AP_REG_IDR_CLASS_SHIFT;
2016
2017 if (class == AP_REG_IDR_CLASS_MEM_AP) {
2018 if (is_64bit_ap(ap))
2019 invalid_entry = 0xFFFFFFFFFFFFFFFFull;
2020 else
2021 invalid_entry = 0xFFFFFFFFul;
2022
2023 if (dbgbase != invalid_entry && (dbgbase & 0x3) != 0x2) {
2024 retval = rtp_cs_component(CS_ACCESS_MEM_AP, ops, ap,
2025 dbgbase & 0xFFFFFFFFFFFFF000ull, NULL, depth);
2026 if (retval == CORESIGHT_COMPONENT_FOUND)
2027 return CORESIGHT_COMPONENT_FOUND;
2028 }
2029 }
2030
2031 return ERROR_OK;
2032 }
2033
2034 /* Actions for command "dap info" */
2035
2036 static int dap_info_ap_header(struct adiv5_ap *ap, int depth, void *priv)
2037 {
2038 struct command_invocation *cmd = priv;
2039
2040 if (depth > ROM_TABLE_MAX_DEPTH) {
2041 command_print(cmd, "\tTables too deep");
2042 return ERROR_FAIL;
2043 }
2044
2045 command_print(cmd, "%sAP # 0x%" PRIx64, (depth) ? "\t\t" : "", ap->ap_num);
2046 return ERROR_OK;
2047 }
2048
2049 static int dap_info_mem_ap_header(int retval, struct adiv5_ap *ap,
2050 target_addr_t dbgbase, uint32_t apid, int depth, void *priv)
2051 {
2052 struct command_invocation *cmd = priv;
2053 target_addr_t invalid_entry;
2054 char tabs[17] = "";
2055
2056 if (retval != ERROR_OK) {
2057 command_print(cmd, "\t\tCan't read MEM-AP, the corresponding core might be turned off");
2058 return retval;
2059 }
2060
2061 if (depth > ROM_TABLE_MAX_DEPTH) {
2062 command_print(cmd, "\tTables too deep");
2063 return ERROR_FAIL;
2064 }
2065
2066 if (depth)
2067 snprintf(tabs, sizeof(tabs), "\t[L%02d] ", depth);
2068
2069 command_print(cmd, "\t\tAP ID register 0x%8.8" PRIx32, apid);
2070 if (apid == 0) {
2071 command_print(cmd, "\t\tNo AP found at this AP#0x%" PRIx64, ap->ap_num);
2072 return ERROR_FAIL;
2073 }
2074
2075 command_print(cmd, "\t\tType is %s", ap_type_to_description(apid & AP_TYPE_MASK));
2076
2077 /* NOTE: a MEM-AP may have a single CoreSight component that's
2078 * not a ROM table ... or have no such components at all.
2079 */
2080 const unsigned int class = (apid & AP_REG_IDR_CLASS_MASK) >> AP_REG_IDR_CLASS_SHIFT;
2081
2082 if (class == AP_REG_IDR_CLASS_MEM_AP) {
2083 if (is_64bit_ap(ap))
2084 invalid_entry = 0xFFFFFFFFFFFFFFFFull;
2085 else
2086 invalid_entry = 0xFFFFFFFFul;
2087
2088 command_print(cmd, "%sMEM-AP BASE " TARGET_ADDR_FMT, tabs, dbgbase);
2089
2090 if (dbgbase == invalid_entry || (dbgbase & 0x3) == 0x2) {
2091 command_print(cmd, "\t\tNo ROM table present");
2092 } else {
2093 if (dbgbase & 0x01)
2094 command_print(cmd, "\t\tValid ROM table present");
2095 else
2096 command_print(cmd, "\t\tROM table in legacy format");
2097 }
2098 }
2099
2100 return ERROR_OK;
2101 }
2102
2103 static int dap_info_cs_component(int retval, struct cs_component_vals *v, int depth, void *priv)
2104 {
2105 struct command_invocation *cmd = priv;
2106
2107 if (depth > ROM_TABLE_MAX_DEPTH) {
2108 command_print(cmd, "\tTables too deep");
2109 return ERROR_FAIL;
2110 }
2111
2112 if (v->mode == CS_ACCESS_MEM_AP)
2113 command_print(cmd, "\t\tComponent base address " TARGET_ADDR_FMT, v->component_base);
2114
2115 if (retval != ERROR_OK) {
2116 command_print(cmd, "\t\tCan't read component, the corresponding core might be turned off");
2117 return retval;
2118 }
2119
2120 if (!is_valid_arm_cs_cidr(v->cid)) {
2121 command_print(cmd, "\t\tInvalid CID 0x%08" PRIx32, v->cid);
2122 return ERROR_OK; /* Don't abort recursion */
2123 }
2124
2125 /* component may take multiple 4K pages */
2126 uint32_t size = ARM_CS_PIDR_SIZE(v->pid);
2127 if (size > 0)
2128 command_print(cmd, "\t\tStart address " TARGET_ADDR_FMT, v->component_base - 0x1000 * size);
2129
2130 command_print(cmd, "\t\tPeripheral ID 0x%010" PRIx64, v->pid);
2131
2132 const unsigned int part_num = ARM_CS_PIDR_PART(v->pid);
2133 unsigned int designer_id = ARM_CS_PIDR_DESIGNER(v->pid);
2134
2135 if (v->pid & ARM_CS_PIDR_JEDEC) {
2136 /* JEP106 code */
2137 command_print(cmd, "\t\tDesigner is 0x%03x, %s",
2138 designer_id, jep106_manufacturer(designer_id));
2139 } else {
2140 /* Legacy ASCII ID, clear invalid bits */
2141 designer_id &= 0x7f;
2142 command_print(cmd, "\t\tDesigner ASCII code 0x%02x, %s",
2143 designer_id, designer_id == 0x41 ? "ARM" : "<unknown>");
2144 }
2145
2146 const struct dap_part_nums *partnum = pidr_to_part_num(designer_id, part_num);
2147 command_print(cmd, "\t\tPart is 0x%03x, %s %s", part_num, partnum->type, partnum->full);
2148
2149 const unsigned int class = ARM_CS_CIDR_CLASS(v->cid);
2150 command_print(cmd, "\t\tComponent class is 0x%x, %s", class, class_description[class]);
2151
2152 if (class == ARM_CS_CLASS_0X1_ROM_TABLE) {
2153 if (v->devtype_memtype & ARM_CS_C1_MEMTYPE_SYSMEM_MASK)
2154 command_print(cmd, "\t\tMEMTYPE system memory present on bus");
2155 else
2156 command_print(cmd, "\t\tMEMTYPE system memory not present: dedicated debug bus");
2157 return ERROR_OK;
2158 }
2159
2160 if (class == ARM_CS_CLASS_0X9_CS_COMPONENT) {
2161 dap_devtype_display(cmd, v->devtype_memtype);
2162
2163 /* REVISIT also show ARM_CS_C9_DEVID */
2164
2165 if ((v->devarch & ARM_CS_C9_DEVARCH_PRESENT) == 0)
2166 return ERROR_OK;
2167
2168 unsigned int architect_id = ARM_CS_C9_DEVARCH_ARCHITECT(v->devarch);
2169 unsigned int revision = ARM_CS_C9_DEVARCH_REVISION(v->devarch);
2170 command_print(cmd, "\t\tDev Arch is 0x%08" PRIx32 ", %s \"%s\" rev.%u", v->devarch,
2171 jep106_manufacturer(architect_id), class0x9_devarch_description(v->devarch),
2172 revision);
2173
2174 if ((v->devarch & DEVARCH_ID_MASK) == DEVARCH_ROM_C_0X9) {
2175 command_print(cmd, "\t\tType is ROM table");
2176
2177 if (v->devid & ARM_CS_C9_DEVID_SYSMEM_MASK)
2178 command_print(cmd, "\t\tMEMTYPE system memory present on bus");
2179 else
2180 command_print(cmd, "\t\tMEMTYPE system memory not present: dedicated debug bus");
2181 }
2182 return ERROR_OK;
2183 }
2184
2185 /* Class other than 0x1 and 0x9 */
2186 return ERROR_OK;
2187 }
2188
2189 static int dap_info_rom_table_entry(int retval, int depth,
2190 unsigned int offset, uint64_t romentry, void *priv)
2191 {
2192 struct command_invocation *cmd = priv;
2193 char tabs[16] = "";
2194
2195 if (depth)
2196 snprintf(tabs, sizeof(tabs), "[L%02d] ", depth);
2197
2198 if (retval != ERROR_OK) {
2199 command_print(cmd, "\t%sROMTABLE[0x%x] Read error", tabs, offset);
2200 command_print(cmd, "\t\tUnable to continue");
2201 command_print(cmd, "\t%s\tStop parsing of ROM table", tabs);
2202 return retval;
2203 }
2204
2205 command_print(cmd, "\t%sROMTABLE[0x%x] = 0x%08" PRIx64,
2206 tabs, offset, romentry);
2207
2208 if (romentry == 0) {
2209 command_print(cmd, "\t%s\tEnd of ROM table", tabs);
2210 return ERROR_OK;
2211 }
2212
2213 if (!(romentry & ARM_CS_ROMENTRY_PRESENT)) {
2214 command_print(cmd, "\t\tComponent not present");
2215 return ERROR_OK;
2216 }
2217
2218 return ERROR_OK;
2219 }
2220
2221 int dap_info_command(struct command_invocation *cmd, struct adiv5_ap *ap)
2222 {
2223 struct rtp_ops dap_info_ops = {
2224 .ap_header = dap_info_ap_header,
2225 .mem_ap_header = dap_info_mem_ap_header,
2226 .cs_component = dap_info_cs_component,
2227 .rom_table_entry = dap_info_rom_table_entry,
2228 .priv = cmd,
2229 };
2230
2231 return rtp_ap(&dap_info_ops, ap, 0);
2232 }
2233
2234 /* Actions for dap_lookup_cs_component() */
2235
2236 struct dap_lookup_data {
2237 /* input */
2238 unsigned int idx;
2239 unsigned int type;
2240 /* output */
2241 uint64_t component_base;
2242 uint64_t ap_num;
2243 };
2244
2245 static int dap_lookup_cs_component_cs_component(int retval,
2246 struct cs_component_vals *v, int depth, void *priv)
2247 {
2248 struct dap_lookup_data *lookup = priv;
2249
2250 if (retval != ERROR_OK)
2251 return retval;
2252
2253 if (!is_valid_arm_cs_cidr(v->cid))
2254 return ERROR_OK;
2255
2256 const unsigned int class = ARM_CS_CIDR_CLASS(v->cid);
2257 if (class != ARM_CS_CLASS_0X9_CS_COMPONENT)
2258 return ERROR_OK;
2259
2260 if ((v->devtype_memtype & ARM_CS_C9_DEVTYPE_MASK) != lookup->type)
2261 return ERROR_OK;
2262
2263 if (lookup->idx) {
2264 /* search for next one */
2265 --lookup->idx;
2266 return ERROR_OK;
2267 }
2268
2269 /* Found! */
2270 lookup->component_base = v->component_base;
2271 lookup->ap_num = v->ap->ap_num;
2272 return CORESIGHT_COMPONENT_FOUND;
2273 }
2274
2275 int dap_lookup_cs_component(struct adiv5_ap *ap, uint8_t type,
2276 target_addr_t *addr, int32_t core_id)
2277 {
2278 struct dap_lookup_data lookup = {
2279 .type = type,
2280 .idx = core_id,
2281 };
2282 struct rtp_ops dap_lookup_cs_component_ops = {
2283 .ap_header = NULL,
2284 .mem_ap_header = NULL,
2285 .cs_component = dap_lookup_cs_component_cs_component,
2286 .rom_table_entry = NULL,
2287 .priv = &lookup,
2288 };
2289
2290 int retval = rtp_ap(&dap_lookup_cs_component_ops, ap, 0);
2291 if (retval == CORESIGHT_COMPONENT_FOUND) {
2292 if (lookup.ap_num != ap->ap_num) {
2293 /* TODO: handle search from root ROM table */
2294 LOG_DEBUG("CS lookup ended in AP # 0x%" PRIx64 ". Ignore it", lookup.ap_num);
2295 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2296 }
2297 LOG_DEBUG("CS lookup found at 0x%" PRIx64, lookup.component_base);
2298 *addr = lookup.component_base;
2299 return ERROR_OK;
2300 }
2301 if (retval != ERROR_OK) {
2302 LOG_DEBUG("CS lookup error %d", retval);
2303 return retval;
2304 }
2305 LOG_DEBUG("CS lookup not found");
2306 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2307 }
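
/*
 * Illustrative call (caller, DEVTYPE value and variable names are examples,
 * not taken from this file): to find the second component with DEVTYPE 0x14
 * (major 4 "Debug Control", sub 1 "Trigger Matrix", i.e. a CTI, see
 * dap_devtype_display() above) below a MEM-AP:
 *
 *	target_addr_t cti_base;
 *	int retval = dap_lookup_cs_component(ap, 0x14, &cti_base, 1);
 *	if (retval == ERROR_OK)
 *		LOG_INFO("CTI at " TARGET_ADDR_FMT, cti_base);
 */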
2308
2309 enum adiv5_cfg_param {
2310 CFG_DAP,
2311 CFG_AP_NUM,
2312 CFG_BASEADDR,
2313 CFG_CTIBASE, /* DEPRECATED */
2314 };
2315
2316 static const struct jim_nvp nvp_config_opts[] = {
2317 { .name = "-dap", .value = CFG_DAP },
2318 { .name = "-ap-num", .value = CFG_AP_NUM },
2319 { .name = "-baseaddr", .value = CFG_BASEADDR },
2320 { .name = "-ctibase", .value = CFG_CTIBASE }, /* DEPRECATED */
2321 { .name = NULL, .value = -1 }
2322 };
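
/*
 * These options are parsed from Tcl configuration commands. A typical use
 * could look like the following (the instance names and the base address are
 * examples, not a recommendation):
 *
 *	dap create mychip.dap -chain-position mychip.cpu
 *	cti create mychip.cti -dap mychip.dap -ap-num 0 -baseaddr 0x80001000
 *	target create mychip.core aarch64 -dap mychip.dap -ap-num 0
 */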
2323
2324 static int adiv5_jim_spot_configure(struct jim_getopt_info *goi,
2325 struct adiv5_dap **dap_p, uint64_t *ap_num_p, uint32_t *base_p)
2326 {
2327 assert(dap_p && ap_num_p);
2328
2329 if (!goi->argc)
2330 return JIM_OK;
2331
2332 Jim_SetEmptyResult(goi->interp);
2333
2334 struct jim_nvp *n;
2335 int e = jim_nvp_name2value_obj(goi->interp, nvp_config_opts,
2336 goi->argv[0], &n);
2337 if (e != JIM_OK)
2338 return JIM_CONTINUE;
2339
2340 /* base_p can be NULL, then '-baseaddr' option is treated as unknown */
2341 if (!base_p && (n->value == CFG_BASEADDR || n->value == CFG_CTIBASE))
2342 return JIM_CONTINUE;
2343
2344 e = jim_getopt_obj(goi, NULL);
2345 if (e != JIM_OK)
2346 return e;
2347
2348 switch (n->value) {
2349 case CFG_DAP:
2350 if (goi->isconfigure) {
2351 Jim_Obj *o_t;
2352 struct adiv5_dap *dap;
2353 e = jim_getopt_obj(goi, &o_t);
2354 if (e != JIM_OK)
2355 return e;
2356 dap = dap_instance_by_jim_obj(goi->interp, o_t);
2357 if (!dap) {
2358 Jim_SetResultString(goi->interp, "DAP name invalid!", -1);
2359 return JIM_ERR;
2360 }
2361 if (*dap_p && *dap_p != dap) {
2362 Jim_SetResultString(goi->interp,
2363 "DAP assignment cannot be changed!", -1);
2364 return JIM_ERR;
2365 }
2366 *dap_p = dap;
2367 } else {
2368 if (goi->argc)
2369 goto err_no_param;
2370 if (!*dap_p) {
2371 Jim_SetResultString(goi->interp, "DAP not configured", -1);
2372 return JIM_ERR;
2373 }
2374 Jim_SetResultString(goi->interp, adiv5_dap_name(*dap_p), -1);
2375 }
2376 break;
2377
2378 case CFG_AP_NUM:
2379 if (goi->isconfigure) {
2380 /* jim_wide is a signed 64 bits int, ap_num is unsigned with max 52 bits */
2381 jim_wide ap_num;
2382 e = jim_getopt_wide(goi, &ap_num);
2383 if (e != JIM_OK)
2384 return e;
2385 /* we still don't know dap->adi_version */
2386 if (ap_num < 0 || (ap_num > DP_APSEL_MAX && (ap_num & 0xfff))) {
2387 Jim_SetResultString(goi->interp, "Invalid AP number!", -1);
2388 return JIM_ERR;
2389 }
2390 *ap_num_p = ap_num;
2391 } else {
2392 if (goi->argc)
2393 goto err_no_param;
2394 if (*ap_num_p == DP_APSEL_INVALID) {
2395 Jim_SetResultString(goi->interp, "AP number not configured", -1);
2396 return JIM_ERR;
2397 }
2398 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, *ap_num_p));
2399 }
2400 break;
2401
2402 case CFG_CTIBASE:
2403 LOG_WARNING("DEPRECATED! use \'-baseaddr' not \'-ctibase\'");
2404 /* fall through */
2405 case CFG_BASEADDR:
2406 if (goi->isconfigure) {
2407 jim_wide base;
2408 e = jim_getopt_wide(goi, &base);
2409 if (e != JIM_OK)
2410 return e;
2411 *base_p = (uint32_t)base;
2412 } else {
2413 if (goi->argc)
2414 goto err_no_param;
2415 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, *base_p));
2416 }
2417 break;
2418 	}
2419
2420 return JIM_OK;
2421
2422 err_no_param:
2423 Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "NO PARAMS");
2424 return JIM_ERR;
2425 }
2426
2427 int adiv5_jim_configure_ext(struct target *target, struct jim_getopt_info *goi,
2428 struct adiv5_private_config *pc, enum adiv5_configure_dap_optional optional)
2429 {
2430 int e;
2431
2432 if (!pc) {
2433 pc = (struct adiv5_private_config *)target->private_config;
2434 if (!pc) {
2435 pc = calloc(1, sizeof(struct adiv5_private_config));
2436 if (!pc) {
2437 LOG_ERROR("Out of memory");
2438 return JIM_ERR;
2439 }
2440 pc->ap_num = DP_APSEL_INVALID;
2441 target->private_config = pc;
2442 }
2443 }
2444
2445 if (optional == ADI_CONFIGURE_DAP_COMPULSORY)
2446 target->has_dap = true;
2447
2448 e = adiv5_jim_spot_configure(goi, &pc->dap, &pc->ap_num, NULL);
2449 if (e != JIM_OK)
2450 return e;
2451
2452 if (pc->dap && !target->dap_configured) {
2453 if (target->tap_configured) {
2454 pc->dap = NULL;
2455 Jim_SetResultString(goi->interp,
2456 "-chain-position and -dap configparams are mutually exclusive!", -1);
2457 return JIM_ERR;
2458 }
2459 target->tap = pc->dap->tap;
2460 target->dap_configured = true;
2461 target->has_dap = true;
2462 }
2463
2464 return JIM_OK;
2465 }
2466
2467 int adiv5_jim_configure(struct target *target, struct jim_getopt_info *goi)
2468 {
2469 return adiv5_jim_configure_ext(target, goi, NULL, ADI_CONFIGURE_DAP_COMPULSORY);
2470 }
2471
2472 int adiv5_verify_config(struct adiv5_private_config *pc)
2473 {
2474 if (!pc)
2475 return ERROR_FAIL;
2476
2477 if (!pc->dap)
2478 return ERROR_FAIL;
2479
2480 return ERROR_OK;
2481 }
2482
2483 int adiv5_jim_mem_ap_spot_configure(struct adiv5_mem_ap_spot *cfg,
2484 struct jim_getopt_info *goi)
2485 {
2486 return adiv5_jim_spot_configure(goi, &cfg->dap, &cfg->ap_num, &cfg->base);
2487 }
2488
2489 int adiv5_mem_ap_spot_init(struct adiv5_mem_ap_spot *p)
2490 {
2491 p->dap = NULL;
2492 p->ap_num = DP_APSEL_INVALID;
2493 p->base = 0;
2494 return ERROR_OK;
2495 }
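
/*
 * Usage sketch for the two helpers above (the embedding driver and its field
 * names are hypothetical): a driver keeps a struct adiv5_mem_ap_spot in its
 * private data, initializes it when the instance is created and routes the
 * instance's configure/cget options through adiv5_jim_mem_ap_spot_configure().
 *
 *	struct my_driver_priv {
 *		struct adiv5_mem_ap_spot spot;
 *	};
 *
 *	adiv5_mem_ap_spot_init(&priv->spot);
 *	...
 *	int e = adiv5_jim_mem_ap_spot_configure(&priv->spot, goi);
 */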
2496
2497 COMMAND_HANDLER(handle_dap_info_command)
2498 {
2499 struct adiv5_dap *dap = adiv5_get_dap(CMD_DATA);
2500 uint64_t apsel;
2501
2502 switch (CMD_ARGC) {
2503 case 0:
2504 apsel = dap->apsel;
2505 break;
2506 case 1:
2507 if (!strcmp(CMD_ARGV[0], "root")) {
2508 if (!is_adiv6(dap)) {
2509 command_print(CMD, "Option \"root\" not allowed with ADIv5 DAP");
2510 return ERROR_COMMAND_ARGUMENT_INVALID;
2511 }
2512 int retval = adiv6_dap_read_baseptr(CMD, dap, &apsel);
2513 if (retval != ERROR_OK) {
2514 command_print(CMD, "Failed reading DAP baseptr");
2515 return retval;
2516 }
2517 break;
2518 }
2519 COMMAND_PARSE_NUMBER(u64, CMD_ARGV[0], apsel);
2520 if (!is_ap_num_valid(dap, apsel)) {
2521 command_print(CMD, "Invalid AP number");
2522 return ERROR_COMMAND_ARGUMENT_INVALID;
2523 }
2524 break;
2525 default:
2526 return ERROR_COMMAND_SYNTAX_ERROR;
2527 }
2528
2529 struct adiv5_ap *ap = dap_get_ap(dap, apsel);
2530 if (!ap) {
2531 command_print(CMD, "Cannot get AP");
2532 return ERROR_FAIL;
2533 }
2534
2535 int retval = dap_info_command(CMD, ap);
2536 dap_put_ap(ap);
2537 return retval;
2538 }
2539
2540 COMMAND_HANDLER(dap_baseaddr_command)
2541 {
2542 struct adiv5_dap *dap = adiv5_get_dap(CMD_DATA);
2543 uint64_t apsel;
2544 uint32_t baseaddr_lower, baseaddr_upper;
2545 struct adiv5_ap *ap;
2546 target_addr_t baseaddr;
2547 int retval;
2548
2549 baseaddr_upper = 0;
2550
2551 switch (CMD_ARGC) {
2552 case 0:
2553 apsel = dap->apsel;
2554 break;
2555 case 1:
2556 COMMAND_PARSE_NUMBER(u64, CMD_ARGV[0], apsel);
2557 if (!is_ap_num_valid(dap, apsel)) {
2558 command_print(CMD, "Invalid AP number");
2559 return ERROR_COMMAND_ARGUMENT_INVALID;
2560 }
2561 break;
2562 default:
2563 return ERROR_COMMAND_SYNTAX_ERROR;
2564 }
2565
2566 /* NOTE: assumes we're talking to a MEM-AP, which
2567 * has a base address. There are other kinds of AP,
2568 * though they're not common for now. This should
2569 * use the ID register to verify it's a MEM-AP.
2570 */
2571
2572 ap = dap_get_ap(dap, apsel);
2573 if (!ap) {
2574 command_print(CMD, "Cannot get AP");
2575 return ERROR_FAIL;
2576 }
2577
2578 retval = dap_queue_ap_read(ap, MEM_AP_REG_BASE(dap), &baseaddr_lower);
2579
2580 if (retval == ERROR_OK && ap->cfg_reg == MEM_AP_REG_CFG_INVALID)
2581 retval = dap_queue_ap_read(ap, MEM_AP_REG_CFG(dap), &ap->cfg_reg);
2582
2583 if (retval == ERROR_OK && (ap->cfg_reg == MEM_AP_REG_CFG_INVALID || is_64bit_ap(ap))) {
2584 /* MEM_AP_REG_BASE64 is defined as 'RES0'; can be read and then ignored on 32 bits AP */
2585 retval = dap_queue_ap_read(ap, MEM_AP_REG_BASE64(dap), &baseaddr_upper);
2586 }
2587
2588 if (retval == ERROR_OK)
2589 retval = dap_run(dap);
2590 dap_put_ap(ap);
2591 if (retval != ERROR_OK)
2592 return retval;
2593
2594 if (is_64bit_ap(ap)) {
2595 baseaddr = (((target_addr_t)baseaddr_upper) << 32) | baseaddr_lower;
2596 command_print(CMD, "0x%016" PRIx64, baseaddr);
2597 } else
2598 command_print(CMD, "0x%08" PRIx32, baseaddr_lower);
2599
2600 return ERROR_OK;
2601 }
2602
2603 COMMAND_HANDLER(dap_memaccess_command)
2604 {
2605 struct adiv5_dap *dap = adiv5_get_dap(CMD_DATA);
2606 struct adiv5_ap *ap;
2607 uint32_t memaccess_tck;
2608
2609 switch (CMD_ARGC) {
2610 case 0:
2611 ap = dap_get_ap(dap, dap->apsel);
2612 if (!ap) {
2613 command_print(CMD, "Cannot get AP");
2614 return ERROR_FAIL;
2615 }
2616 memaccess_tck = ap->memaccess_tck;
2617 break;
2618 case 1:
2619 ap = dap_get_config_ap(dap, dap->apsel);
2620 if (!ap) {
2621 command_print(CMD, "Cannot get AP");
2622 return ERROR_FAIL;
2623 }
2624 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], memaccess_tck);
2625 ap->memaccess_tck = memaccess_tck;
2626 break;
2627 default:
2628 return ERROR_COMMAND_SYNTAX_ERROR;
2629 }
2630
2631 dap_put_ap(ap);
2632
2633 command_print(CMD, "memory bus access delay set to %" PRIu32 " tck",
2634 memaccess_tck);
2635
2636 return ERROR_OK;
2637 }
2638
2639 COMMAND_HANDLER(dap_apsel_command)
2640 {
2641 struct adiv5_dap *dap = adiv5_get_dap(CMD_DATA);
2642 uint64_t apsel;
2643
2644 switch (CMD_ARGC) {
2645 case 0:
2646 command_print(CMD, "0x%" PRIx64, dap->apsel);
2647 return ERROR_OK;
2648 case 1:
2649 COMMAND_PARSE_NUMBER(u64, CMD_ARGV[0], apsel);
2650 if (!is_ap_num_valid(dap, apsel)) {
2651 command_print(CMD, "Invalid AP number");
2652 return ERROR_COMMAND_ARGUMENT_INVALID;
2653 }
2654 break;
2655 default:
2656 return ERROR_COMMAND_SYNTAX_ERROR;
2657 }
2658
2659 dap->apsel = apsel;
2660 return ERROR_OK;
2661 }
2662
2663 COMMAND_HANDLER(dap_apcsw_command)
2664 {
2665 struct adiv5_dap *dap = adiv5_get_dap(CMD_DATA);
2666 struct adiv5_ap *ap;
2667 uint32_t csw_val, csw_mask;
2668
2669 switch (CMD_ARGC) {
2670 case 0:
2671 ap = dap_get_ap(dap, dap->apsel);
2672 if (!ap) {
2673 command_print(CMD, "Cannot get AP");
2674 return ERROR_FAIL;
2675 }
2676 command_print(CMD, "AP#0x%" PRIx64 " selected, csw 0x%8.8" PRIx32,
2677 dap->apsel, ap->csw_default);
2678 break;
2679 case 1:
2680 if (strcmp(CMD_ARGV[0], "default") == 0)
2681 csw_val = CSW_AHB_DEFAULT;
2682 else
2683 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], csw_val);
2684
2685 if (csw_val & (CSW_SIZE_MASK | CSW_ADDRINC_MASK)) {
2686 LOG_ERROR("CSW value cannot include 'Size' and 'AddrInc' bit-fields");
2687 return ERROR_COMMAND_ARGUMENT_INVALID;
2688 }
2689 ap = dap_get_config_ap(dap, dap->apsel);
2690 if (!ap) {
2691 command_print(CMD, "Cannot get AP");
2692 return ERROR_FAIL;
2693 }
2694 ap->csw_default = csw_val;
2695 break;
2696 case 2:
2697 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], csw_val);
2698 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], csw_mask);
2699 if (csw_mask & (CSW_SIZE_MASK | CSW_ADDRINC_MASK)) {
2700 LOG_ERROR("CSW mask cannot include 'Size' and 'AddrInc' bit-fields");
2701 return ERROR_COMMAND_ARGUMENT_INVALID;
2702 }
2703 ap = dap_get_config_ap(dap, dap->apsel);
2704 if (!ap) {
2705 command_print(CMD, "Cannot get AP");
2706 return ERROR_FAIL;
2707 }
2708 ap->csw_default = (ap->csw_default & ~csw_mask) | (csw_val & csw_mask);
2709 break;
2710 default:
2711 return ERROR_COMMAND_SYNTAX_ERROR;
2712 }
2713 dap_put_ap(ap);
2714
2715 return ERROR_OK;
2716 }
2717
2718
2719
2720 COMMAND_HANDLER(dap_apid_command)
2721 {
2722 struct adiv5_dap *dap = adiv5_get_dap(CMD_DATA);
2723 uint64_t apsel;
2724 uint32_t apid;
2725 int retval;
2726
2727 switch (CMD_ARGC) {
2728 case 0:
2729 apsel = dap->apsel;
2730 break;
2731 case 1:
2732 COMMAND_PARSE_NUMBER(u64, CMD_ARGV[0], apsel);
2733 if (!is_ap_num_valid(dap, apsel)) {
2734 command_print(CMD, "Invalid AP number");
2735 return ERROR_COMMAND_ARGUMENT_INVALID;
2736 }
2737 break;
2738 default:
2739 return ERROR_COMMAND_SYNTAX_ERROR;
2740 }
2741
2742 struct adiv5_ap *ap = dap_get_ap(dap, apsel);
2743 if (!ap) {
2744 command_print(CMD, "Cannot get AP");
2745 return ERROR_FAIL;
2746 }
2747 retval = dap_queue_ap_read(ap, AP_REG_IDR(dap), &apid);
2748 if (retval != ERROR_OK) {
2749 dap_put_ap(ap);
2750 return retval;
2751 }
2752 retval = dap_run(dap);
2753 dap_put_ap(ap);
2754 if (retval != ERROR_OK)
2755 return retval;
2756
2757 command_print(CMD, "0x%8.8" PRIx32, apid);
2758
2759 return retval;
2760 }
2761
2762 COMMAND_HANDLER(dap_apreg_command)
2763 {
2764 struct adiv5_dap *dap = adiv5_get_dap(CMD_DATA);
2765 uint64_t apsel;
2766 uint32_t reg, value;
2767 int retval;
2768
2769 if (CMD_ARGC < 2 || CMD_ARGC > 3)
2770 return ERROR_COMMAND_SYNTAX_ERROR;
2771
2772 COMMAND_PARSE_NUMBER(u64, CMD_ARGV[0], apsel);
2773 if (!is_ap_num_valid(dap, apsel)) {
2774 command_print(CMD, "Invalid AP number");
2775 return ERROR_COMMAND_ARGUMENT_INVALID;
2776 }
2777
2778 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], reg);
2779 if (is_adiv6(dap)) {
2780 if (reg >= 4096 || (reg & 3)) {
2781 command_print(CMD, "Invalid reg value (should be less than 4096 and 4 bytes aligned)");
2782 return ERROR_COMMAND_ARGUMENT_INVALID;
2783 }
2784 } else { /* ADI version 5 */
2785 if (reg >= 256 || (reg & 3)) {
2786 command_print(CMD, "Invalid reg value (should be less than 256 and 4 bytes aligned)");
2787 return ERROR_COMMAND_ARGUMENT_INVALID;
2788 }
2789 }
2790
2791 struct adiv5_ap *ap = dap_get_ap(dap, apsel);
2792 if (!ap) {
2793 command_print(CMD, "Cannot get AP");
2794 return ERROR_FAIL;
2795 }
2796
2797 if (CMD_ARGC == 3) {
2798 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], value);
2799 /* see if user supplied register address is a match for the CSW or TAR register */
2800 if (reg == MEM_AP_REG_CSW(dap)) {
2801 ap->csw_value = 0; /* invalid, in case write fails */
2802 retval = dap_queue_ap_write(ap, reg, value);
2803 if (retval == ERROR_OK)
2804 ap->csw_value = value;
2805 } else if (reg == MEM_AP_REG_TAR(dap)) {
2806 retval = dap_queue_ap_write(ap, reg, value);
2807 if (retval == ERROR_OK)
2808 ap->tar_value = (ap->tar_value & ~0xFFFFFFFFull) | value;
2809 else {
2810 /* To track independent writes to TAR and TAR64, two tar_valid flags */
2811 /* should be used. To keep it simple, tar_valid is only invalidated on a */
2812 /* write fail. This approach causes a later re-write of the TAR and TAR64 */
2813 /* if tar_valid is false. */
2814 ap->tar_valid = false;
2815 }
2816 } else if (reg == MEM_AP_REG_TAR64(dap)) {
2817 retval = dap_queue_ap_write(ap, reg, value);
2818 if (retval == ERROR_OK)
2819 ap->tar_value = (ap->tar_value & 0xFFFFFFFFull) | (((target_addr_t)value) << 32);
2820 else {
2821 /* See above comment for the MEM_AP_REG_TAR failed write case */
2822 ap->tar_valid = false;
2823 }
2824 } else {
2825 retval = dap_queue_ap_write(ap, reg, value);
2826 }
2827 } else {
2828 retval = dap_queue_ap_read(ap, reg, &value);
2829 }
2830 if (retval == ERROR_OK)
2831 retval = dap_run(dap);
2832
2833 dap_put_ap(ap);
2834
2835 if (retval != ERROR_OK)
2836 return retval;
2837
2838 if (CMD_ARGC == 2)
2839 command_print(CMD, "0x%08" PRIx32, value);
2840
2841 return retval;
2842 }
2843
2844 COMMAND_HANDLER(dap_dpreg_command)
2845 {
2846 struct adiv5_dap *dap = adiv5_get_dap(CMD_DATA);
2847 uint32_t reg, value;
2848 int retval;
2849
2850 if (CMD_ARGC < 1 || CMD_ARGC > 2)
2851 return ERROR_COMMAND_SYNTAX_ERROR;
2852
2853 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], reg);
2854 if (reg >= 256 || (reg & 3)) {
2855 command_print(CMD, "Invalid reg value (should be less than 256 and 4 bytes aligned)");
2856 return ERROR_COMMAND_ARGUMENT_INVALID;
2857 }
2858
2859 if (CMD_ARGC == 2) {
2860 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], value);
2861 retval = dap_queue_dp_write(dap, reg, value);
2862 } else {
2863 retval = dap_queue_dp_read(dap, reg, &value);
2864 }
2865 if (retval == ERROR_OK)
2866 retval = dap_run(dap);
2867
2868 if (retval != ERROR_OK)
2869 return retval;
2870
2871 if (CMD_ARGC == 1)
2872 command_print(CMD, "0x%08" PRIx32, value);
2873
2874 return retval;
2875 }
2876
2877 COMMAND_HANDLER(dap_ti_be_32_quirks_command)
2878 {
2879 struct adiv5_dap *dap = adiv5_get_dap(CMD_DATA);
2880 return CALL_COMMAND_HANDLER(handle_command_parse_bool, &dap->ti_be_32_quirks,
2881 "TI BE-32 quirks mode");
2882 }
2883
2884 COMMAND_HANDLER(dap_nu_npcx_quirks_command)
2885 {
2886 struct adiv5_dap *dap = adiv5_get_dap(CMD_DATA);
2887 return CALL_COMMAND_HANDLER(handle_command_parse_bool, &dap->nu_npcx_quirks,
2888 "Nuvoton NPCX quirks mode");
2889 }
2890
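/*
 * Illustrative interactive use of the sub-commands registered below, assuming
 * a DAP instance named "chip.dap" (instance name and values are examples):
 *
 *	chip.dap info                     ;# dump the ROM table of the selected AP
 *	chip.dap apsel 1                  ;# select AP #1
 *	chip.dap apreg 0 0x04 0x20000000  ;# write TAR of MEM-AP #0 (ADIv5)
 *	chip.dap dpreg 0x4                ;# read DP CTRL/STAT (ADIv5)
 *	chip.dap baseaddr                 ;# print the MEM-AP debug base address
 */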
2891 const struct command_registration dap_instance_commands[] = {
2892 {
2893 .name = "info",
2894 .handler = handle_dap_info_command,
2895 .mode = COMMAND_EXEC,
2896 .help = "display ROM table for specified MEM-AP (default currently selected AP) "
2897 "or the ADIv6 root ROM table",
2898 .usage = "[ap_num | 'root']",
2899 },
2900 {
2901 .name = "apsel",
2902 .handler = dap_apsel_command,
2903 .mode = COMMAND_ANY,
2904 .help = "Set the currently selected AP (default 0) "
2905 "and display the result",
2906 .usage = "[ap_num]",
2907 },
2908 {
2909 .name = "apcsw",
2910 .handler = dap_apcsw_command,
2911 .mode = COMMAND_ANY,
2912 .help = "Set CSW default bits",
2913 .usage = "[value [mask]]",
2914 },
2915
2916 {
2917 .name = "apid",
2918 .handler = dap_apid_command,
2919 .mode = COMMAND_EXEC,
2920 .help = "return ID register from AP "
2921 "(default currently selected AP)",
2922 .usage = "[ap_num]",
2923 },
2924 {
2925 .name = "apreg",
2926 .handler = dap_apreg_command,
2927 .mode = COMMAND_EXEC,
2928 .help = "read/write a register from AP "
2929 "(reg is byte address of a word register, like 0 4 8...)",
2930 .usage = "ap_num reg [value]",
2931 },
2932 {
2933 .name = "dpreg",
2934 .handler = dap_dpreg_command,
2935 .mode = COMMAND_EXEC,
2936 .help = "read/write a register from DP "
2937 "(reg is byte address (bank << 4 | reg) of a word register, like 0 4 8...)",
2938 .usage = "reg [value]",
2939 },
2940 {
2941 .name = "baseaddr",
2942 .handler = dap_baseaddr_command,
2943 .mode = COMMAND_EXEC,
2944 .help = "return debug base address from MEM-AP "
2945 "(default currently selected AP)",
2946 .usage = "[ap_num]",
2947 },
2948 {
2949 .name = "memaccess",
2950 .handler = dap_memaccess_command,
2951 .mode = COMMAND_EXEC,
2952 .help = "set/get number of extra tck for MEM-AP memory "
2953 "bus access [0-255]",
2954 .usage = "[cycles]",
2955 },
2956 {
2957 .name = "ti_be_32_quirks",
2958 .handler = dap_ti_be_32_quirks_command,
2959 .mode = COMMAND_CONFIG,
2960 .help = "set/get quirks mode for TI TMS450/TMS570 processors",
2961 .usage = "[enable]",
2962 },
2963 {
2964 .name = "nu_npcx_quirks",
2965 .handler = dap_nu_npcx_quirks_command,
2966 .mode = COMMAND_CONFIG,
2967 .help = "set/get quirks mode for Nuvoton NPCX controllers",
2968 .usage = "[enable]",
2969 },
2970 COMMAND_REGISTRATION_DONE
2971 };
