/***************************************************************************
 *   Copyright (C) 2009 by David Brownell                                  *
 *                                                                         *
 *   Copyright (C) ST-Ericsson SA 2011 michel.jaouen@stericsson.com        *
 *                                                                         *
 *   This program is free software; you can redistribute it and/or modify  *
 *   it under the terms of the GNU General Public License as published by  *
 *   the Free Software Foundation; either version 2 of the License, or     *
 *   (at your option) any later version.                                   *
 *                                                                         *
 *   This program is distributed in the hope that it will be useful,       *
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of        *
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the         *
 *   GNU General Public License for more details.                          *
 *                                                                         *
 *   You should have received a copy of the GNU General Public License     *
 *   along with this program; if not, write to the                         *
 *   Free Software Foundation, Inc.,                                       *
 *   51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.           *
 ***************************************************************************/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <helper/replacements.h>

#include "armv7a.h"
#include "arm_disassembler.h"

#include <helper/binarybuffer.h>
#include <helper/command.h>

#include <stdlib.h>

#include "arm_opcodes.h"
#include "target.h"
#include "target_type.h"
static void armv7a_show_fault_registers(struct target *target)
{
    uint32_t dfsr, ifsr, dfar, ifar;
    struct armv7a_common *armv7a = target_to_armv7a(target);
    struct arm_dpm *dpm = armv7a->arm.dpm;
    int retval;

    retval = dpm->prepare(dpm);
    if (retval != ERROR_OK)
        return;

    /* ARMV4_5_MRC(cpnum, op1, r0, CRn, CRm, op2) */

    /* c5/c0 - {data, instruction} fault status registers */
    retval = dpm->instr_read_data_r0(dpm,
            ARMV4_5_MRC(15, 0, 0, 5, 0, 0),
            &dfsr);
    if (retval != ERROR_OK)
        goto done;

    retval = dpm->instr_read_data_r0(dpm,
            ARMV4_5_MRC(15, 0, 0, 5, 0, 1),
            &ifsr);
    if (retval != ERROR_OK)
        goto done;

    /* c6/c0 - {data, instruction} fault address registers */
    retval = dpm->instr_read_data_r0(dpm,
            ARMV4_5_MRC(15, 0, 0, 6, 0, 0),
            &dfar);
    if (retval != ERROR_OK)
        goto done;

    retval = dpm->instr_read_data_r0(dpm,
            ARMV4_5_MRC(15, 0, 0, 6, 0, 2),
            &ifar);
    if (retval != ERROR_OK)
        goto done;

    LOG_USER("Data fault registers DFSR: %8.8" PRIx32
        ", DFAR: %8.8" PRIx32, dfsr, dfar);
    LOG_USER("Instruction fault registers IFSR: %8.8" PRIx32
        ", IFAR: %8.8" PRIx32, ifsr, ifar);

done:
    /* (void) */ dpm->finish(dpm);
}
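/*
 * Illustrative note (not part of the original code): ARMV4_5_MRC() builds the
 * 32-bit opcode of an "MRC p<cpnum>, <op1>, r0, c<CRn>, c<CRm>, <op2>"
 * instruction, which the DPM feeds to the halted core for execution; the
 * result lands in r0 and is read back over DCC by instr_read_data_r0().
 * For example, ARMV4_5_MRC(15, 0, 0, 5, 0, 0) should expand to 0xEE150F10,
 * i.e. "MRC p15, 0, r0, c5, c0, 0", the DFSR read used above.
 */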
/* retrieve main id register */
static int armv7a_read_midr(struct target *target)
{
    int retval = ERROR_FAIL;
    struct armv7a_common *armv7a = target_to_armv7a(target);
    struct arm_dpm *dpm = armv7a->arm.dpm;
    uint32_t midr;

    retval = dpm->prepare(dpm);
    if (retval != ERROR_OK)
        goto done;

    /* MRC p15,0,<Rd>,c0,c0,0; read main id register */
    retval = dpm->instr_read_data_r0(dpm,
            ARMV4_5_MRC(15, 0, 0, 0, 0, 0),
            &midr);
    if (retval != ERROR_OK)
        goto done;

    armv7a->rev = (midr & 0xf);
    armv7a->partnum = (midr >> 4) & 0xfff;
    armv7a->arch = (midr >> 16) & 0xf;
    armv7a->variant = (midr >> 20) & 0xf;
    armv7a->implementor = (midr >> 24) & 0xff;
    LOG_INFO("%s rev %" PRIx32 ", partnum %" PRIx32 ", arch %" PRIx32
        ", variant %" PRIx32 ", implementor %" PRIx32,
        target_name(target),
        armv7a->rev,
        armv7a->partnum,
        armv7a->arch,
        armv7a->variant,
        armv7a->implementor);

done:
    dpm->finish(dpm);
    return retval;
}
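/*
 * Illustrative example (not part of the original code): a Cortex-A9 r3p0
 * reports MIDR = 0x413FC090, which the field extraction above decodes as
 * implementor 0x41 (ARM Ltd.), variant 0x3, arch 0xF (CPUID scheme),
 * partnum 0xC09 and rev 0x0.
 */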
static int armv7a_read_ttbcr(struct target *target)
{
    struct armv7a_common *armv7a = target_to_armv7a(target);
    struct arm_dpm *dpm = armv7a->arm.dpm;
    uint32_t ttbcr, ttbcr_n;
    int retval = dpm->prepare(dpm);
    if (retval != ERROR_OK)
        goto done;

    /* MRC p15,0,<Rt>,c2,c0,2 ; Read CP15 Translation Table Base Control Register */
    retval = dpm->instr_read_data_r0(dpm,
            ARMV4_5_MRC(15, 0, 0, 2, 0, 2),
            &ttbcr);
    if (retval != ERROR_OK)
        goto done;

    LOG_DEBUG("ttbcr %" PRIx32, ttbcr);

    ttbcr_n = ttbcr & 0x7;
    armv7a->armv7a_mmu.ttbcr = ttbcr;
    armv7a->armv7a_mmu.cached = 1;

    /*
     * Determine the TTBR0/TTBR1 split from TTBCR.N, see
     * ARM Architecture Reference Manual (ARMv7-A and ARMv7-R edition),
     * document # ARM DDI 0406C
     */
    armv7a->armv7a_mmu.ttbr_range[0] = 0xffffffff >> ttbcr_n;
    armv7a->armv7a_mmu.ttbr_range[1] = 0xffffffff;
    armv7a->armv7a_mmu.ttbr_mask[0] = 0xffffffff << (14 - ttbcr_n);
    armv7a->armv7a_mmu.ttbr_mask[1] = 0xffffffff << 14;

    retval = armv7a_read_midr(target);
    if (retval != ERROR_OK)
        goto done;

    /* FIXME: why this special case based on part number? */
    if ((armv7a->partnum & 0xf) == 0) {
        /* ARM DDI 0344H, ARM DDI 0407F */
        armv7a->armv7a_mmu.ttbr_mask[0] = 7 << (32 - ttbcr_n);
    }

    LOG_DEBUG("ttbr1 %s, ttbr0_mask %" PRIx32 " ttbr1_mask %" PRIx32,
        (ttbcr_n != 0) ? "used" : "not used",
        armv7a->armv7a_mmu.ttbr_mask[0],
        armv7a->armv7a_mmu.ttbr_mask[1]);

    /* FIXME: default is hard coded LINUX border */
    armv7a->armv7a_mmu.os_border = 0xc0000000;
    if (ttbcr_n != 0) {
        LOG_INFO("SVC access above %" PRIx32,
            armv7a->armv7a_mmu.ttbr_range[0] + 1);
        armv7a->armv7a_mmu.os_border = armv7a->armv7a_mmu.ttbr_range[0] + 1;
    }

done:
    dpm->finish(dpm);
    return retval;
}
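/*
 * Illustrative example (not part of the original code), following the
 * formulas above: with TTBCR.N = 2, ttbr_range[0] = 0xffffffff >> 2 =
 * 0x3fffffff, so TTBR0 covers virtual addresses 0x00000000-0x3fffffff and
 * anything above is looked up through TTBR1; ttbr_mask[0] =
 * 0xffffffff << (14 - 2) = 0xfffff000, i.e. the TTBR0 table base is aligned
 * to 2^(14 - N) = 4 KB.  With N = 0 the whole 4 GB space goes through TTBR0
 * and the table base alignment is 16 KB (mask 0xffffc000).
 */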
/* method adapted to Cortex-A: reuses the ARMv4/v5 approach */
int armv7a_mmu_translate_va(struct target *target, uint32_t va, uint32_t *val)
{
    uint32_t first_lvl_descriptor = 0x0;
    uint32_t second_lvl_descriptor = 0x0;
    int retval;
    struct armv7a_common *armv7a = target_to_armv7a(target);
    struct arm_dpm *dpm = armv7a->arm.dpm;
    uint32_t ttbidx = 0;    /* default to ttbr0 */
    uint32_t ttb_mask;
    uint32_t va_mask;
    uint32_t ttbcr;
    uint32_t ttb;

    retval = dpm->prepare(dpm);
    if (retval != ERROR_OK)
        return retval;

    /* MRC p15,0,<Rt>,c2,c0,2 ; Read CP15 Translation Table Base Control Register */
    retval = dpm->instr_read_data_r0(dpm,
            ARMV4_5_MRC(15, 0, 0, 2, 0, 2),
            &ttbcr);
    if (retval != ERROR_OK)
        return retval;

    /* if ttbcr has changed or was not read before, re-read the information */
    if ((armv7a->armv7a_mmu.cached == 0) ||
        (armv7a->armv7a_mmu.ttbcr != ttbcr)) {
        armv7a_read_ttbcr(target);
    }

    /* if va is above the range handled by ttbr0, select ttbr1 */
    if (va > armv7a->armv7a_mmu.ttbr_range[0]) {
        /* select ttb 1 */
        ttbidx = 1;
    }

    /* MRC p15,0,<Rt>,c2,c0,ttbidx */
    retval = dpm->instr_read_data_r0(dpm,
            ARMV4_5_MRC(15, 0, 0, 2, 0, ttbidx),
            &ttb);
    if (retval != ERROR_OK)
        return retval;

    ttb_mask = armv7a->armv7a_mmu.ttbr_mask[ttbidx];
    va_mask = 0xfff00000 & armv7a->armv7a_mmu.ttbr_range[ttbidx];

    LOG_DEBUG("ttb_mask %" PRIx32 " va_mask %" PRIx32 " ttbidx %i",
        ttb_mask, va_mask, ttbidx);
    retval = armv7a->armv7a_mmu.read_physical_memory(target,
            (ttb & ttb_mask) | ((va & va_mask) >> 18),
            4, 1, (uint8_t *)&first_lvl_descriptor);
    if (retval != ERROR_OK)
        return retval;
    first_lvl_descriptor = target_buffer_get_u32(target, (uint8_t *)
            &first_lvl_descriptor);
    /* reuse armv4_5 piece of code, specific armv7a changes may come later */
    LOG_DEBUG("1st lvl desc: %8.8" PRIx32, first_lvl_descriptor);

    if ((first_lvl_descriptor & 0x3) == 0) {
        LOG_ERROR("Address translation failure");
        return ERROR_TARGET_TRANSLATION_FAULT;
    }

    if ((first_lvl_descriptor & 0x40002) == 2) {
        /* section descriptor */
        *val = (first_lvl_descriptor & 0xfff00000) | (va & 0x000fffff);
        return ERROR_OK;
    } else if ((first_lvl_descriptor & 0x40002) == 0x40002) {
        /* supersection descriptor */
        if (first_lvl_descriptor & 0x00f001e0) {
            LOG_ERROR("Physical address does not fit into 32 bits");
            return ERROR_TARGET_TRANSLATION_FAULT;
        }
        *val = (first_lvl_descriptor & 0xff000000) | (va & 0x00ffffff);
        return ERROR_OK;
    }

    /* page table descriptor: walk the second-level table */
    retval = armv7a->armv7a_mmu.read_physical_memory(target,
            (first_lvl_descriptor & 0xfffffc00) | ((va & 0x000ff000) >> 10),
            4, 1, (uint8_t *)&second_lvl_descriptor);
    if (retval != ERROR_OK)
        return retval;

    second_lvl_descriptor = target_buffer_get_u32(target, (uint8_t *)
            &second_lvl_descriptor);

    LOG_DEBUG("2nd lvl desc: %8.8" PRIx32, second_lvl_descriptor);

    if ((second_lvl_descriptor & 0x3) == 0) {
        LOG_ERROR("Address translation failure");
        return ERROR_TARGET_TRANSLATION_FAULT;
    }

    if ((second_lvl_descriptor & 0x3) == 1) {
        /* large page descriptor */
        *val = (second_lvl_descriptor & 0xffff0000) | (va & 0x0000ffff);
    } else {
        /* small page descriptor */
        *val = (second_lvl_descriptor & 0xfffff000) | (va & 0x00000fff);
    }

    return ERROR_OK;
}
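/*
 * Illustrative walk-through (not part of the original code), using a made-up
 * descriptor value: with TTBCR.N = 0 the code above computes va_mask =
 * 0xfff00000, so for va = 0xC0101234 the first-level entry sits at
 * (ttb & ttb_mask) | (0xC0100000 >> 18), i.e. table base + 0x3004
 * (entry 0xC01, four bytes each).  If that word reads e.g. 0x8000040E,
 * bits [1:0] = 0b10 with bit 18 = 0 mark it as a section descriptor, and the
 * physical address becomes (0x8000040E & 0xfff00000) | (va & 0x000fffff)
 * = 0x80001234.
 */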
/* V7 method VA TO PA */
int armv7a_mmu_translate_va_pa(struct target *target, uint32_t va,
    uint32_t *val, int meminfo)
{
    int retval = ERROR_FAIL;
    struct armv7a_common *armv7a = target_to_armv7a(target);
    struct arm_dpm *dpm = armv7a->arm.dpm;
    uint32_t virt = va & ~0xfff;
    uint32_t NOS, NS, INNER, OUTER;

    retval = dpm->prepare(dpm);
    if (retval != ERROR_OK)
        goto done;

    /* the MMU must be enabled in order to get a correct translation;
     * use the VA to PA CP15 registers for the conversion */
    retval = dpm->instr_write_data_r0(dpm,
            ARMV4_5_MCR(15, 0, 0, 7, 8, 0),
            virt);
    if (retval != ERROR_OK)
        goto done;
    retval = dpm->instr_read_data_r0(dpm,
            ARMV4_5_MRC(15, 0, 0, 7, 4, 0),
            val);
    /* decode memory attribute */
    NOS = (*val >> 10) & 1;     /* Not Outer Shareable */
    NS = (*val >> 9) & 1;       /* Non secure */
    INNER = (*val >> 4) & 0x7;
    OUTER = (*val >> 2) & 0x3;

    if (retval != ERROR_OK)
        goto done;
    *val = (*val & ~0xfff) + (va & 0xfff);
    if (*val == va)
        LOG_WARNING("virt = phys : MMU disable !!");
    if (meminfo) {
        LOG_INFO("%" PRIx32 " : %" PRIx32 " %s outer shareable %s secured",
            va, *val,
            NOS == 1 ? "not" : " ",
            NS == 1 ? "not" : "");
        switch (OUTER) {
            case 0:
                LOG_INFO("outer: Non-Cacheable");
                break;
            case 1:
                LOG_INFO("outer: Write-Back, Write-Allocate");
                break;
            case 2:
                LOG_INFO("outer: Write-Through, No Write-Allocate");
                break;
            case 3:
                LOG_INFO("outer: Write-Back, no Write-Allocate");
                break;
        }
        switch (INNER) {
            case 0:
                LOG_INFO("inner: Non-Cacheable");
                break;
            case 1:
                LOG_INFO("inner: Strongly-ordered");
                break;
            case 3:
                LOG_INFO("inner: Device");
                break;
            case 5:
                LOG_INFO("inner: Write-Back, Write-Allocate");
                break;
            case 6:
                LOG_INFO("inner: Write-Through");
                break;
            case 7:
                LOG_INFO("inner: Write-Back, no Write-Allocate");
                break;
            default:
                LOG_INFO("inner: %" PRIx32 " ???", INNER);
        }
    }

done:
    dpm->finish(dpm);
    return retval;
}
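/*
 * Illustrative example (not part of the original code), using a hypothetical
 * PAR value: after the ATS1CPR write (MCR p15,0,<Rt>,c7,c8,0) the PAR read
 * (MRC p15,0,<Rt>,c7,c4,0) might return 0x80001454.  The decode above then
 * gives NOS = 1 (not outer shareable), NS = 0 (secure), INNER = 5
 * (Write-Back, Write-Allocate) and OUTER = 1 (Write-Back, Write-Allocate),
 * and the returned physical address is (0x80001454 & ~0xfff) + (va & 0xfff)
 * = 0x80001000 plus the page offset of va.
 */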
static int armv7a_handle_inner_cache_info_command(struct command_context *cmd_ctx,
    struct armv7a_cache_common *armv7a_cache)
{
    if (armv7a_cache->ctype == -1) {
        command_print(cmd_ctx, "cache not yet identified");
        return ERROR_OK;
    }

    command_print(cmd_ctx,
        "D-Cache: linelen %" PRIi32 ", associativity %" PRIi32
        ", nsets %" PRIi32 ", cachesize %" PRId32 " KBytes",
        armv7a_cache->d_u_size.linelen,
        armv7a_cache->d_u_size.associativity,
        armv7a_cache->d_u_size.nsets,
        armv7a_cache->d_u_size.cachesize);

    command_print(cmd_ctx,
        "I-Cache: linelen %" PRIi32 ", associativity %" PRIi32
        ", nsets %" PRIi32 ", cachesize %" PRId32 " KBytes",
        armv7a_cache->i_size.linelen,
        armv7a_cache->i_size.associativity,
        armv7a_cache->i_size.nsets,
        armv7a_cache->i_size.cachesize);

    return ERROR_OK;
}
static int _armv7a_flush_all_data(struct target *target)
{
    struct armv7a_common *armv7a = target_to_armv7a(target);
    struct arm_dpm *dpm = armv7a->arm.dpm;
    struct armv7a_cachesize *d_u_size =
        &(armv7a->armv7a_mmu.armv7a_cache.d_u_size);
    int32_t c_way, c_index = d_u_size->index;
    int retval;

    /* check that the data cache is on at target halt */
    if (!armv7a->armv7a_mmu.armv7a_cache.d_u_cache_enabled) {
        LOG_INFO("flushed not performed :cache not on at target halt");
        return ERROR_OK;
    }
    retval = dpm->prepare(dpm);
    if (retval != ERROR_OK)
        goto done;
    do {
        c_way = d_u_size->way;
        do {
            uint32_t value = (c_index << d_u_size->index_shift)
                | (c_way << d_u_size->way_shift);
            /* DCCISW: clean and invalidate data cache line by set/way */
            /* LOG_INFO ("%d %d %x",c_way,c_index,value); */
            retval = dpm->instr_write_data_r0(dpm,
                    ARMV4_5_MCR(15, 0, 0, 7, 14, 2),
                    value);
            if (retval != ERROR_OK)
                goto done;
            c_way -= 1;
        } while (c_way >= 0);
        c_index -= 1;
    } while (c_index >= 0);
    dpm->finish(dpm);
    return retval;

done:
    LOG_ERROR("flushed failed");
    dpm->finish(dpm);
    return retval;
}
static int armv7a_flush_all_data(struct target *target)
{
    int retval = ERROR_FAIL;
    /* check that armv7a_cache has been correctly identified */
    struct armv7a_common *armv7a = target_to_armv7a(target);
    if (armv7a->armv7a_mmu.armv7a_cache.ctype == -1) {
        LOG_ERROR("trying to flush un-identified cache");
        return retval;
    }

    if (target->smp) {
        /* check whether all the other targets have been flushed
         * in order to flush level 2 */
        struct target_list *head;
        struct target *curr;
        head = target->head;
        while (head != (struct target_list *)NULL) {
            curr = head->target;
            if (curr->state == TARGET_HALTED) {
                LOG_INFO("Wait flushing data l1 on core %" PRId32, curr->coreid);
                retval = _armv7a_flush_all_data(curr);
            }
            head = head->next;
        }
    } else
        retval = _armv7a_flush_all_data(target);
    return retval;
}
/* L2 handling is not specific to armv7a; a dedicated file is needed */
static int armv7a_l2x_flush_all_data(struct target *target)
{
#define L2X0_CLEAN_INV_WAY 0x7FC
    int retval = ERROR_FAIL;
    struct armv7a_common *armv7a = target_to_armv7a(target);
    struct armv7a_l2x_cache *l2x_cache = (struct armv7a_l2x_cache *)
        (armv7a->armv7a_mmu.armv7a_cache.l2_cache);
    uint32_t base = l2x_cache->base;
    uint32_t l2_way = l2x_cache->way;
    uint32_t l2_way_val = (1 << l2_way) - 1;

    retval = armv7a_flush_all_data(target);
    if (retval != ERROR_OK)
        return retval;
    retval = target->type->write_phys_memory(target,
            (uint32_t)(base + (uint32_t)L2X0_CLEAN_INV_WAY),
            (uint32_t)4,
            (uint32_t)1,
            (uint8_t *)&l2_way_val);
    return retval;
}
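/*
 * Illustrative note (not part of the original code): on a PL310-style L2
 * cache controller the register at offset 0x7FC (Clean and Invalidate by
 * Way) takes a bitmask of ways to operate on, so for an 8-way L2 the code
 * above writes l2_way_val = (1 << 8) - 1 = 0xFF to base + 0x7FC once the
 * L1 flush has completed.
 */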
static int armv7a_handle_l2x_cache_info_command(struct command_context *cmd_ctx,
    struct armv7a_cache_common *armv7a_cache)
{
    struct armv7a_l2x_cache *l2x_cache = (struct armv7a_l2x_cache *)
        (armv7a_cache->l2_cache);

    if (armv7a_cache->ctype == -1) {
        command_print(cmd_ctx, "cache not yet identified");
        return ERROR_OK;
    }

    command_print(cmd_ctx,
        "L1 D-Cache: linelen %" PRIi32 ", associativity %" PRIi32
        ", nsets %" PRIi32 ", cachesize %" PRId32 " KBytes",
        armv7a_cache->d_u_size.linelen,
        armv7a_cache->d_u_size.associativity,
        armv7a_cache->d_u_size.nsets,
        armv7a_cache->d_u_size.cachesize);

    command_print(cmd_ctx,
        "L1 I-Cache: linelen %" PRIi32 ", associativity %" PRIi32
        ", nsets %" PRIi32 ", cachesize %" PRId32 " KBytes",
        armv7a_cache->i_size.linelen,
        armv7a_cache->i_size.associativity,
        armv7a_cache->i_size.nsets,
        armv7a_cache->i_size.cachesize);

    command_print(cmd_ctx, "L2 unified cache Base Address 0x%" PRIx32 ", %" PRId32 " ways",
        l2x_cache->base, l2x_cache->way);

    return ERROR_OK;
}
static int armv7a_l2x_cache_init(struct target *target, uint32_t base, uint32_t way)
{
    struct armv7a_l2x_cache *l2x_cache;
    struct target_list *head = target->head;
    struct target *curr;

    struct armv7a_common *armv7a = target_to_armv7a(target);
    l2x_cache = calloc(1, sizeof(struct armv7a_l2x_cache));
    l2x_cache->base = base;
    l2x_cache->way = way;
    /*LOG_INFO("cache l2 initialized base %x way %d",
    l2x_cache->base,l2x_cache->way);*/
    if (armv7a->armv7a_mmu.armv7a_cache.l2_cache)
        LOG_INFO("cache l2 already initialized\n");
    armv7a->armv7a_mmu.armv7a_cache.l2_cache = l2x_cache;
    /* initialize l1 / l2x cache functions */
    armv7a->armv7a_mmu.armv7a_cache.flush_all_data_cache
        = armv7a_l2x_flush_all_data;
    armv7a->armv7a_mmu.armv7a_cache.display_cache_info =
        armv7a_handle_l2x_cache_info_command;
    /* initialize all targets in this cluster (smp target);
     * the l2 cache must be configured after the smp declaration */
    while (head != (struct target_list *)NULL) {
        curr = head->target;
        if (curr != target) {
            armv7a = target_to_armv7a(curr);
            if (armv7a->armv7a_mmu.armv7a_cache.l2_cache)
                LOG_ERROR("smp target : cache l2 already initialized\n");
            armv7a->armv7a_mmu.armv7a_cache.l2_cache = l2x_cache;
            armv7a->armv7a_mmu.armv7a_cache.flush_all_data_cache =
                armv7a_l2x_flush_all_data;
            armv7a->armv7a_mmu.armv7a_cache.display_cache_info =
                armv7a_handle_l2x_cache_info_command;
        }
        head = head->next;
    }
    return ERROR_OK;
}
COMMAND_HANDLER(handle_cache_l2x)
{
    struct target *target = get_current_target(CMD_CTX);
    uint32_t base, way;

    if (CMD_ARGC != 2)
        return ERROR_COMMAND_SYNTAX_ERROR;

    /* command_print(CMD_CTX, "%s %s", CMD_ARGV[0], CMD_ARGV[1]); */
    COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], base);
    COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], way);

    armv7a_l2x_cache_init(target, base, way);

    return ERROR_OK;
}
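/*
 * Usage sketch (not part of the original code; the controller address is
 * hypothetical): this handler is reached through the "cache_config" group
 * registered at the end of this file, e.g.
 *     <target_name> cache_config l2x 0x1e00a000 8
 * which records an outer cache controller at 0x1e00a000 with 8 ways for the
 * flush and info commands above.
 */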
int armv7a_handle_cache_info_command(struct command_context *cmd_ctx,
    struct armv7a_cache_common *armv7a_cache)
{
    if (armv7a_cache->ctype == -1) {
        command_print(cmd_ctx, "cache not yet identified");
        return ERROR_OK;
    }

    if (armv7a_cache->display_cache_info)
        armv7a_cache->display_cache_info(cmd_ctx, armv7a_cache);
    return ERROR_OK;
}
/* retrieve core id and cluster id */
static int armv7a_read_mpidr(struct target *target)
{
    int retval = ERROR_FAIL;
    struct armv7a_common *armv7a = target_to_armv7a(target);
    struct arm_dpm *dpm = armv7a->arm.dpm;
    uint32_t mpidr;

    retval = dpm->prepare(dpm);
    if (retval != ERROR_OK)
        goto done;

    /* MRC p15,0,<Rd>,c0,c0,5; read Multiprocessor ID register */
    retval = dpm->instr_read_data_r0(dpm,
            ARMV4_5_MRC(15, 0, 0, 0, 0, 5),
            &mpidr);
    if (retval != ERROR_OK)
        goto done;

    /* ARMv7R uses a different format for MPIDR.
     * When configured uniprocessor (most R cores) it reads as 0.
     * This will need to be implemented for multiprocessor ARMv7R cores. */
    if (armv7a->is_armv7r) {
        if (mpidr)
            LOG_ERROR("MPIDR nonzero in ARMv7-R target");
        goto done;
    }

    if (mpidr & (1u << 31)) {
        armv7a->multi_processor_system = (mpidr >> 30) & 1;
        armv7a->cluster_id = (mpidr >> 8) & 0xf;
        armv7a->cpu_id = mpidr & 0x3;
        LOG_INFO("%s cluster %x core %x %s", target_name(target),
            armv7a->cluster_id,
            armv7a->cpu_id,
            armv7a->multi_processor_system == 0 ?
            "multi core" : "mono core");
    } else
        LOG_ERROR("MPIDR not in multiprocessor format");

done:
    dpm->finish(dpm);
    return retval;
}
int armv7a_identify_cache(struct target *target)
{
    /* read cache descriptor */
    int retval = ERROR_FAIL;
    struct armv7a_common *armv7a = target_to_armv7a(target);
    struct arm_dpm *dpm = armv7a->arm.dpm;
    uint32_t cache_selected, clidr;
    uint32_t cache_i_reg, cache_d_reg;
    struct armv7a_cache_common *cache = &(armv7a->armv7a_mmu.armv7a_cache);
    int i;

    if (!armv7a->is_armv7r)
        armv7a_read_ttbcr(target);
    retval = dpm->prepare(dpm);
    if (retval != ERROR_OK)
        goto done;

    /* retrieve CLIDR
     * mrc p15, 1, r0, c0, c0, 1 @ read clidr */
    retval = dpm->instr_read_data_r0(dpm,
            ARMV4_5_MRC(15, 1, 0, 0, 0, 1),
            &clidr);
    if (retval != ERROR_OK)
        goto done;
    clidr = (clidr & 0x7000000) >> 23;
    LOG_INFO("number of cache level %" PRIx32, (uint32_t)(clidr / 2));
    if ((clidr / 2) > 1) {
        /* FIXME: not supported; present in Cortex-A8 and later,
         * e.g. in Cortex-A7, A15 */
        LOG_ERROR("cache l2 present :not supported");
    }
    /* retrieve selected cache
     * MRC p15, 2,<Rd>, c0, c0, 0; Read CSSELR */
    retval = dpm->instr_read_data_r0(dpm,
            ARMV4_5_MRC(15, 2, 0, 0, 0, 0),
            &cache_selected);
    if (retval != ERROR_OK)
        goto done;

    retval = armv7a->arm.mrc(target, 15,
            2, 0,   /* op1, op2 */
            0, 0,   /* CRn, CRm */
            &cache_selected);
    if (retval != ERROR_OK)
        goto done;
    /* select instruction cache
     * MCR p15, 2,<Rd>, c0, c0, 0; Write CSSELR
     * [0] : 1 instruction cache selection, 0 data cache selection */
    retval = dpm->instr_write_data_r0(dpm,
            ARMV4_5_MRC(15, 2, 0, 0, 0, 0),
            1);
    if (retval != ERROR_OK)
        goto done;

    /* read CCSIDR
     * MRC P15,1,<RT>,C0, C0,0 ;on cortex A9 read CCSIDR
     * [2:0] line size 001 eight words per line
     * [27:13] NumSet 0x7f 16KB, 0xff 32KBytes, 0x1ff 64KBytes */
    retval = dpm->instr_read_data_r0(dpm,
            ARMV4_5_MRC(15, 1, 0, 0, 0, 0),
            &cache_i_reg);
    if (retval != ERROR_OK)
        goto done;

    /* select data cache */
    retval = dpm->instr_write_data_r0(dpm,
            ARMV4_5_MRC(15, 2, 0, 0, 0, 0),
            0);
    if (retval != ERROR_OK)
        goto done;

    retval = dpm->instr_read_data_r0(dpm,
            ARMV4_5_MRC(15, 1, 0, 0, 0, 0),
            &cache_d_reg);
    if (retval != ERROR_OK)
        goto done;

    /* restore selected cache */
    dpm->instr_write_data_r0(dpm,
        ARMV4_5_MRC(15, 2, 0, 0, 0, 0),
        cache_selected);

    if (retval != ERROR_OK)
        goto done;

    /* decode the CCSIDR values read above */
    cache->d_u_size.linelen = 16 << (cache_d_reg & 0x7);
    cache->d_u_size.cachesize = (((cache_d_reg >> 13) & 0x7fff)+1)/8;
    cache->d_u_size.nsets = (cache_d_reg >> 13) & 0x7fff;
    cache->d_u_size.associativity = ((cache_d_reg >> 3) & 0x3ff) + 1;
    /* compute info for set/way operations on the cache */
    cache->d_u_size.index_shift = (cache_d_reg & 0x7) + 4;
    cache->d_u_size.index = (cache_d_reg >> 13) & 0x7fff;
    cache->d_u_size.way = ((cache_d_reg >> 3) & 0x3ff);
    cache->d_u_size.way_shift = cache->d_u_size.way + 1;

    i = 0;
    while (((cache->d_u_size.way_shift >> i) & 1) != 1)
        i++;
    cache->d_u_size.way_shift = 32-i;

    LOG_INFO("data cache index %d << %d, way %d << %d",
        cache->d_u_size.index, cache->d_u_size.index_shift,
        cache->d_u_size.way,
        cache->d_u_size.way_shift);

    LOG_INFO("data cache %d bytes %d KBytes asso %d ways",
        cache->d_u_size.linelen,
        cache->d_u_size.cachesize,
        cache->d_u_size.associativity);

    cache->i_size.linelen = 16 << (cache_i_reg & 0x7);
    cache->i_size.associativity = ((cache_i_reg >> 3) & 0x3ff) + 1;
    cache->i_size.nsets = (cache_i_reg >> 13) & 0x7fff;
    cache->i_size.cachesize = (((cache_i_reg >> 13) & 0x7fff)+1)/8;
    /* compute info for set/way operations on the cache */
    cache->i_size.index_shift = (cache_i_reg & 0x7) + 4;
    cache->i_size.index = (cache_i_reg >> 13) & 0x7fff;
    cache->i_size.way = ((cache_i_reg >> 3) & 0x3ff);
    cache->i_size.way_shift = cache->i_size.way + 1;

    i = 0;
    while (((cache->i_size.way_shift >> i) & 1) != 1)
        i++;
    cache->i_size.way_shift = 32-i;

    LOG_INFO("instruction cache index %d << %d, way %d << %d",
        cache->i_size.index, cache->i_size.index_shift,
        cache->i_size.way, cache->i_size.way_shift);

    LOG_INFO("instruction cache %d bytes %d KBytes asso %d ways",
        cache->i_size.linelen,
        cache->i_size.cachesize,
        cache->i_size.associativity);

    /* if there is no l2 cache, initialize the l1 data cache flush function */
    if (armv7a->armv7a_mmu.armv7a_cache.flush_all_data_cache == NULL) {
        armv7a->armv7a_mmu.armv7a_cache.display_cache_info =
            armv7a_handle_inner_cache_info_command;
        armv7a->armv7a_mmu.armv7a_cache.flush_all_data_cache =
            armv7a_flush_all_data;
    }
    armv7a->armv7a_mmu.armv7a_cache.ctype = 0;

done:
    dpm->finish(dpm);
    armv7a_read_mpidr(target);
    return retval;
}
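/*
 * Illustrative example (not part of the original code): a 32 KB, 4-way L1
 * data cache with 32-byte lines typically reports a CCSIDR such as
 * 0x701FE019.  Decoded with the expressions above: linelen = 16 << 1 = 32
 * bytes, associativity = 3 + 1 = 4, nsets field = 0xFF (256 sets), and the
 * KByte estimate (0xFF + 1) / 8 = 32; index_shift = 1 + 4 = 5 and the way
 * field 3 gives way_shift = 30 for the set/way flush loop.
 */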
int armv7a_init_arch_info(struct target *target, struct armv7a_common *armv7a)
{
    struct arm *arm = &armv7a->arm;
    arm->arch_info = armv7a;
    target->arch_info = &armv7a->arm;
    /* target is useful in all functions; arm v4/v5 compatible */
    armv7a->arm.target = target;
    armv7a->arm.common_magic = ARM_COMMON_MAGIC;
    armv7a->common_magic = ARMV7_COMMON_MAGIC;
    armv7a->armv7a_mmu.armv7a_cache.l2_cache = NULL;
    armv7a->armv7a_mmu.armv7a_cache.ctype = -1;
    armv7a->armv7a_mmu.armv7a_cache.flush_all_data_cache = NULL;
    armv7a->armv7a_mmu.armv7a_cache.display_cache_info = NULL;
    return ERROR_OK;
}
int armv7a_arch_state(struct target *target)
{
    static const char *state[] = {
        "disabled", "enabled"
    };

    struct armv7a_common *armv7a = target_to_armv7a(target);
    struct arm *arm = &armv7a->arm;

    if (armv7a->common_magic != ARMV7_COMMON_MAGIC) {
        LOG_ERROR("BUG: called for a non-ARMv7A target");
        return ERROR_COMMAND_SYNTAX_ERROR;
    }

    arm_arch_state(target);

    if (armv7a->is_armv7r) {
        LOG_USER("D-Cache: %s, I-Cache: %s",
            state[armv7a->armv7a_mmu.armv7a_cache.d_u_cache_enabled],
            state[armv7a->armv7a_mmu.armv7a_cache.i_cache_enabled]);
    } else {
        LOG_USER("MMU: %s, D-Cache: %s, I-Cache: %s",
            state[armv7a->armv7a_mmu.mmu_enabled],
            state[armv7a->armv7a_mmu.armv7a_cache.d_u_cache_enabled],
            state[armv7a->armv7a_mmu.armv7a_cache.i_cache_enabled]);
    }

    if (arm->core_mode == ARM_MODE_ABT)
        armv7a_show_fault_registers(target);
    if (target->debug_reason == DBG_REASON_WATCHPOINT)
        LOG_USER("Watchpoint triggered at PC %#08x",
            (unsigned) armv7a->dpm.wp_pc);

    return ERROR_OK;
}
static const struct command_registration l2_cache_commands[] = {
    {
        .name = "l2x",
        .handler = handle_cache_l2x,
        .mode = COMMAND_EXEC,
        .help = "configure l2x cache",
        .usage = "[base_addr] [number_of_way]",
    },
    COMMAND_REGISTRATION_DONE
};

const struct command_registration l2x_cache_command_handlers[] = {
    {
        .name = "cache_config",
        .mode = COMMAND_EXEC,
        .help = "cache configuration for a target",
        .usage = "",
        .chain = l2_cache_commands,
    },
    COMMAND_REGISTRATION_DONE
};

const struct command_registration armv7a_command_handlers[] = {
    {
        .chain = dap_command_handlers,
    },
    {
        .chain = l2x_cache_command_handlers,
    },
    COMMAND_REGISTRATION_DONE
};