add armv7a_cache handlers
[openocd.git] / src / target / armv7a.c
1 /***************************************************************************
2 * Copyright (C) 2009 by David Brownell *
3 * *
4 * Copyright (C) ST-Ericsson SA 2011 michel.jaouen@stericsson.com *
5 * *
6 * This program is free software; you can redistribute it and/or modify *
7 * it under the terms of the GNU General Public License as published by *
8 * the Free Software Foundation; either version 2 of the License, or *
9 * (at your option) any later version. *
10 * *
11 * This program is distributed in the hope that it will be useful, *
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
14 * GNU General Public License for more details. *
15 * *
16 * You should have received a copy of the GNU General Public License *
17 * along with this program; if not, write to the *
18 * Free Software Foundation, Inc., *
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. *
20 ***************************************************************************/
21
22 #ifdef HAVE_CONFIG_H
23 #include "config.h"
24 #endif
25
26 #include <helper/replacements.h>
27
28 #include "armv7a.h"
29 #include "arm_disassembler.h"
30
31 #include "register.h"
32 #include <helper/binarybuffer.h>
33 #include <helper/command.h>
34
35 #include <stdlib.h>
36 #include <string.h>
37 #include <unistd.h>
38
39 #include "arm_opcodes.h"
40 #include "target.h"
41 #include "target_type.h"
42
43 static void armv7a_show_fault_registers(struct target *target)
44 {
45 uint32_t dfsr, ifsr, dfar, ifar;
46 struct armv7a_common *armv7a = target_to_armv7a(target);
47 struct arm_dpm *dpm = armv7a->arm.dpm;
48 int retval;
49
50 retval = dpm->prepare(dpm);
51 if (retval != ERROR_OK)
52 return;
53
54 /* ARMV4_5_MRC(cpnum, op1, r0, CRn, CRm, op2) */
55
56 /* c5/c0 - {data, instruction} fault status registers */
57 retval = dpm->instr_read_data_r0(dpm,
58 ARMV4_5_MRC(15, 0, 0, 5, 0, 0),
59 &dfsr);
60 if (retval != ERROR_OK)
61 goto done;
62
63 retval = dpm->instr_read_data_r0(dpm,
64 ARMV4_5_MRC(15, 0, 0, 5, 0, 1),
65 &ifsr);
66 if (retval != ERROR_OK)
67 goto done;
68
69 /* c6/c0 - {data, instruction} fault address registers */
70 retval = dpm->instr_read_data_r0(dpm,
71 ARMV4_5_MRC(15, 0, 0, 6, 0, 0),
72 &dfar);
73 if (retval != ERROR_OK)
74 goto done;
75
76 retval = dpm->instr_read_data_r0(dpm,
77 ARMV4_5_MRC(15, 0, 0, 6, 0, 2),
78 &ifar);
79 if (retval != ERROR_OK)
80 goto done;
81
82 LOG_USER("Data fault registers DFSR: %8.8" PRIx32
83 ", DFAR: %8.8" PRIx32, dfsr, dfar);
84 LOG_USER("Instruction fault registers IFSR: %8.8" PRIx32
85 ", IFAR: %8.8" PRIx32, ifsr, ifar);
86
87 done:
88 /* (void) */ dpm->finish(dpm);
89 }
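
/* Illustrative standalone sketch (not part of this file): decoding the DFSR
 * value logged above, assuming the short-descriptor format where the fault
 * status field is split across bits [3:0] and bit [10]. The sample value is
 * made up for illustration. */

#include <stdint.h>
#include <stdio.h>

/* Extract the 5-bit fault status (FS) field from a short-descriptor DFSR. */
static unsigned dfsr_fault_status(uint32_t dfsr)
{
	return (dfsr & 0xf) | (((dfsr >> 10) & 1) << 4);
}

int main(void)
{
	uint32_t dfsr = 0x00000805;	/* example: section translation fault on a write */

	printf("FS 0x%02x, WnR %u, domain %u\n",
		dfsr_fault_status(dfsr),
		(unsigned)((dfsr >> 11) & 1),	/* 1 = faulting access was a write */
		(unsigned)((dfsr >> 4) & 0xf));	/* faulting domain */
	return 0;
}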
90
91
92 /* retrieve main id register */
93 static int armv7a_read_midr(struct target *target)
94 {
95 int retval = ERROR_FAIL;
96 struct armv7a_common *armv7a = target_to_armv7a(target);
97 struct arm_dpm *dpm = armv7a->arm.dpm;
98 uint32_t midr;
99 retval = dpm->prepare(dpm);
100 if (retval != ERROR_OK)
101 goto done;
102 /* MRC p15,0,<Rd>,c0,c0,0; read main id register*/
103
104 retval = dpm->instr_read_data_r0(dpm,
105 ARMV4_5_MRC(15, 0, 0, 0, 0, 0),
106 &midr);
107 if (retval != ERROR_OK)
108 goto done;
109
110 armv7a->rev = (midr & 0xf);
111 armv7a->partnum = (midr >> 4) & 0xfff;
112 armv7a->arch = (midr >> 16) & 0xf;
113 armv7a->variant = (midr >> 20) & 0xf;
114 armv7a->implementor = (midr >> 24) & 0xff;
115 LOG_INFO("%s rev %" PRIx32 ", partnum %" PRIx32 ", arch %" PRIx32
116 ", variant %" PRIx32 ", implementor %" PRIx32,
117 target->cmd_name,
118 armv7a->rev,
119 armv7a->partnum,
120 armv7a->arch,
121 armv7a->variant,
122 armv7a->implementor);
123
124 done:
125 dpm->finish(dpm);
126 return retval;
127 }
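
/* Illustrative standalone sketch (not part of this file): a worked MIDR
 * decode using the same bit fields as armv7a_read_midr(). The sample value
 * 0x413fc090 is what a Cortex-A9 r3p0 typically reports. */

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t midr = 0x413fc090;	/* e.g. a Cortex-A9 r3p0 */

	printf("implementor 0x%02x, variant 0x%x, arch 0x%x, partnum 0x%03x, rev 0x%x\n",
		(unsigned)((midr >> 24) & 0xff),	/* 0x41  = ARM Ltd. */
		(unsigned)((midr >> 20) & 0xf),		/* 0x3   = major revision r3 */
		(unsigned)((midr >> 16) & 0xf),		/* 0xf   = CPUID scheme */
		(unsigned)((midr >> 4) & 0xfff),	/* 0xc09 = Cortex-A9 */
		(unsigned)(midr & 0xf));		/* 0x0   = minor revision p0 */
	return 0;
}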
128
129 static int armv7a_read_ttbcr(struct target *target)
130 {
131 struct armv7a_common *armv7a = target_to_armv7a(target);
132 struct arm_dpm *dpm = armv7a->arm.dpm;
133 uint32_t ttbcr, ttbcr_n;
134 int retval = dpm->prepare(dpm);
135 if (retval != ERROR_OK)
136 goto done;
137 /* MRC p15,0,<Rt>,c2,c0,2 ; Read CP15 Translation Table Base Control Register*/
138 retval = dpm->instr_read_data_r0(dpm,
139 ARMV4_5_MRC(15, 0, 0, 2, 0, 2),
140 &ttbcr);
141 if (retval != ERROR_OK)
142 goto done;
143
144 LOG_DEBUG("ttbcr %" PRIx32, ttbcr);
145
146 ttbcr_n = ttbcr & 0x7;
147 armv7a->armv7a_mmu.ttbcr = ttbcr;
148 armv7a->armv7a_mmu.cached = 1;
149
150 /*
151 * ARM Architecture Reference Manual (ARMv7-A and ARMv7-R edition),
152 * document # ARM DDI 0406C
153 */
154 armv7a->armv7a_mmu.ttbr_range[0] = 0xffffffff >> ttbcr_n;
155 armv7a->armv7a_mmu.ttbr_range[1] = 0xffffffff;
156 armv7a->armv7a_mmu.ttbr_mask[0] = 0xffffffff << (14 - ttbcr_n);
157 armv7a->armv7a_mmu.ttbr_mask[1] = 0xffffffff << 14;
158 armv7a->armv7a_mmu.cached = 1;
159
160 retval = armv7a_read_midr(target);
161 if (retval != ERROR_OK)
162 goto done;
163
164 /* FIXME: why this special case based on part number? */
165 if ((armv7a->partnum & 0xf) == 0) {
166 /* ARM DDI 0344H , ARM DDI 0407F */
167 armv7a->armv7a_mmu.ttbr_mask[0] = 7 << (32 - ttbcr_n);
168 }
169
170 LOG_DEBUG("ttbr1 %s, ttbr0_mask %" PRIx32 " ttbr1_mask %" PRIx32,
171 (ttbcr_n != 0) ? "used" : "not used",
172 armv7a->armv7a_mmu.ttbr_mask[0],
173 armv7a->armv7a_mmu.ttbr_mask[1]);
174
175 /* FIXME: the default is the hard-coded Linux user/kernel border */
176 armv7a->armv7a_mmu.os_border = 0xc0000000;
177 if (ttbcr_n != 0) {
178 LOG_INFO("SVC access above %" PRIx32,
179 armv7a->armv7a_mmu.ttbr_range[0] + 1);
180 armv7a->armv7a_mmu.os_border = armv7a->armv7a_mmu.ttbr_range[0] + 1;
181 }
182 done:
183 dpm->finish(dpm);
184 return retval;
185 }
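
/* Illustrative standalone sketch (not part of this file): the TTBR0/TTBR1
 * split computed above. For TTBCR.N = n, TTBR0 translates VAs up to
 * 2^(32-n) - 1 and its table base is aligned to 2^(14-n) bytes; TTBR1
 * covers the remainder with a fixed 16 KB-aligned base. The Cortex-A8
 * special case handled in armv7a_read_ttbcr() is left out. */

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	for (unsigned n = 0; n <= 7; n++) {
		uint32_t ttbr0_range = 0xffffffff >> n;		/* highest VA handled by TTBR0 */
		uint32_t ttbr0_mask = 0xffffffff << (14 - n);	/* alignment of the TTBR0 table base */

		printf("N=%u: TTBR0 covers 0x00000000-0x%08" PRIx32
			", base mask 0x%08" PRIx32 ", TTBR1 %s\n",
			n, ttbr0_range, ttbr0_mask, n ? "used" : "not used");
	}
	return 0;
}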
186
187 /* method adapted to Cortex-A: reuses the ARMv4/ARMv5 method */
188 int armv7a_mmu_translate_va(struct target *target, uint32_t va, uint32_t *val)
189 {
190 uint32_t first_lvl_descriptor = 0x0;
191 uint32_t second_lvl_descriptor = 0x0;
192 int retval;
193 struct armv7a_common *armv7a = target_to_armv7a(target);
194 struct arm_dpm *dpm = armv7a->arm.dpm;
195 uint32_t ttbidx = 0; /* default to ttbr0 */
196 uint32_t ttb_mask;
197 uint32_t va_mask;
198 uint32_t ttbcr;
199 uint32_t ttb;
200
201 retval = dpm->prepare(dpm);
202 if (retval != ERROR_OK)
203 goto done;
204
205 /* MRC p15,0,<Rt>,c2,c0,2 ; Read CP15 Translation Table Base Control Register*/
206 retval = dpm->instr_read_data_r0(dpm,
207 ARMV4_5_MRC(15, 0, 0, 2, 0, 2),
208 &ttbcr);
209 if (retval != ERROR_OK)
210 goto done;
211
212 /* if ttbcr has changed or was not read before, re-read the information */
213 if ((armv7a->armv7a_mmu.cached == 0) ||
214 (armv7a->armv7a_mmu.ttbcr != ttbcr)) {
215 armv7a_read_ttbcr(target);
216 }
217
218 /* if va is above the range handled by ttbr0, select ttbr1 */
219 if (va > armv7a->armv7a_mmu.ttbr_range[0]) {
220 /* select ttb 1 */
221 ttbidx = 1;
222 }
223 /* MRC p15,0,<Rt>,c2,c0,ttbidx */
224 retval = dpm->instr_read_data_r0(dpm,
225 ARMV4_5_MRC(15, 0, 0, 2, 0, ttbidx),
226 &ttb);
227 if (retval != ERROR_OK)
228 return retval;
229
230 ttb_mask = armv7a->armv7a_mmu.ttbr_mask[ttbidx];
231 va_mask = 0xfff00000 & armv7a->armv7a_mmu.ttbr_range[ttbidx];
232
233 LOG_DEBUG("ttb_mask %" PRIx32 " va_mask %" PRIx32 " ttbidx %i",
234 ttb_mask, va_mask, ttbidx);
235 retval = armv7a->armv7a_mmu.read_physical_memory(target,
236 (ttb & ttb_mask) | ((va & va_mask) >> 18),
237 4, 1, (uint8_t *)&first_lvl_descriptor);
238 if (retval != ERROR_OK)
239 return retval;
240 first_lvl_descriptor = target_buffer_get_u32(target, (uint8_t *)
241 &first_lvl_descriptor);
242 /* reuse the armv4_5 code path; ARMv7-A specific changes may come later */
243 LOG_DEBUG("1st lvl desc: %8.8" PRIx32 "", first_lvl_descriptor);
244
245 if ((first_lvl_descriptor & 0x3) == 0) {
246 LOG_ERROR("Address translation failure");
247 return ERROR_TARGET_TRANSLATION_FAULT;
248 }
249
250
251 if ((first_lvl_descriptor & 0x40002) == 2) {
252 /* section descriptor */
253 *val = (first_lvl_descriptor & 0xfff00000) | (va & 0x000fffff);
254 return ERROR_OK;
255 } else if ((first_lvl_descriptor & 0x40002) == 0x40002) {
256 /* supersection descriptor */
257 if (first_lvl_descriptor & 0x00f001e0) {
258 LOG_ERROR("Physical address does not fit into 32 bits");
259 return ERROR_TARGET_TRANSLATION_FAULT;
260 }
261 *val = (first_lvl_descriptor & 0xff000000) | (va & 0x00ffffff);
262 return ERROR_OK;
263 }
264
265 /* page table */
266 retval = armv7a->armv7a_mmu.read_physical_memory(target,
267 (first_lvl_descriptor & 0xfffffc00) | ((va & 0x000ff000) >> 10),
268 4, 1, (uint8_t *)&second_lvl_descriptor);
269 if (retval != ERROR_OK)
270 return retval;
271
272 second_lvl_descriptor = target_buffer_get_u32(target, (uint8_t *)
273 &second_lvl_descriptor);
274
275 LOG_DEBUG("2nd lvl desc: %8.8" PRIx32 "", second_lvl_descriptor);
276
277 if ((second_lvl_descriptor & 0x3) == 0) {
278 LOG_ERROR("Address translation failure");
279 return ERROR_TARGET_TRANSLATION_FAULT;
280 }
281
282 if ((second_lvl_descriptor & 0x3) == 1) {
283 /* large page descriptor */
284 *val = (second_lvl_descriptor & 0xffff0000) | (va & 0x0000ffff);
285 } else {
286 /* small page descriptor */
287 *val = (second_lvl_descriptor & 0xfffff000) | (va & 0x00000fff);
288 }
289
290 return ERROR_OK;
291
292 done:
293 return retval;
294 }
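
/* Illustrative standalone sketch (not part of this file): the first-level
 * lookup done above for the common "section" case, using a made-up
 * descriptor value instead of reading it through read_physical_memory().
 * The descriptor bit layout (type bits [1:0], supersection bit 18) follows
 * the short-descriptor format used by armv7a_mmu_translate_va(). */

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Translate a VA through a single 1 MB section descriptor (sketch). */
static uint32_t section_translate(uint32_t first_lvl_descriptor, uint32_t va)
{
	/* descriptor type 0b10 with bit 18 clear: a plain 1 MB section */
	if ((first_lvl_descriptor & 0x40002) == 2)
		return (first_lvl_descriptor & 0xfff00000) | (va & 0x000fffff);
	return 0xdeadbeef;	/* supersections and page tables need the full walk above */
}

int main(void)
{
	uint32_t desc = 0x80000c02;	/* hypothetical section descriptor -> PA 0x80000000 */
	uint32_t va = 0xc0001234;

	/* first-level table offset: VA bits [31:20], 4 bytes per entry */
	printf("table offset 0x%05" PRIx32 ", PA 0x%08" PRIx32 "\n",
		(uint32_t)((va >> 20) << 2), section_translate(desc, va));
	return 0;
}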
295
296 /* V7 method VA TO PA */
297 int armv7a_mmu_translate_va_pa(struct target *target, uint32_t va,
298 uint32_t *val, int meminfo)
299 {
300 int retval = ERROR_FAIL;
301 struct armv7a_common *armv7a = target_to_armv7a(target);
302 struct arm_dpm *dpm = armv7a->arm.dpm;
303 uint32_t virt = va & ~0xfff;
304 uint32_t NOS, NS, INNER, OUTER;
305 *val = 0xdeadbeef;
306 retval = dpm->prepare(dpm);
307 if (retval != ERROR_OK)
308 goto done;
309 /* the MMU must be enabled in order to get a correct translation;
310 * use the VA-to-PA CP15 operation for the conversion */
311 retval = dpm->instr_write_data_r0(dpm,
312 ARMV4_5_MCR(15, 0, 0, 7, 8, 0),
313 virt);
314 if (retval != ERROR_OK)
315 goto done;
316 retval = dpm->instr_read_data_r0(dpm,
317 ARMV4_5_MRC(15, 0, 0, 7, 4, 0),
318 val);
319 /* decode memory attribute */
320 NOS = (*val >> 10) & 1; /* Not Outer shareable */
321 NS = (*val >> 9) & 1; /* Non secure */
322 INNER = (*val >> 4) & 0x7;
323 OUTER = (*val >> 2) & 0x3;
324
325 if (retval != ERROR_OK)
326 goto done;
327 *val = (*val & ~0xfff) + (va & 0xfff);
328 if (*val == va)
329 LOG_WARNING("virt = phys: MMU is disabled");
330 if (meminfo) {
331 LOG_INFO("%" PRIx32 " : %" PRIx32 " %s outer shareable %s secured",
332 va, *val,
333 NOS == 1 ? "not" : " ",
334 NS == 1 ? "not" : "");
335 switch (OUTER) {
336 case 0:
337 LOG_INFO("outer: Non-Cacheable");
338 break;
339 case 1:
340 LOG_INFO("outer: Write-Back, Write-Allocate");
341 break;
342 case 2:
343 LOG_INFO("outer: Write-Through, No Write-Allocate");
344 break;
345 case 3:
346 LOG_INFO("outer: Write-Back, no Write-Allocate");
347 break;
348 }
349 switch (INNER) {
350 case 0:
351 LOG_INFO("inner: Non-Cacheable");
352 break;
353 case 1:
354 LOG_INFO("inner: Strongly-ordered");
355 break;
356 case 3:
357 LOG_INFO("inner: Device");
358 break;
359 case 5:
360 LOG_INFO("inner: Write-Back, Write-Allocate");
361 break;
362 case 6:
363 LOG_INFO("inner: Write-Through");
364 break;
365 case 7:
366 LOG_INFO("inner: Write-Back, no Write-Allocate");
367
368 default:
369 LOG_INFO("inner: %" PRIx32 " ???", INNER);
370 }
371 }
372
373 done:
374 dpm->finish(dpm);
375
376 return retval;
377 }
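
/* Illustrative standalone sketch (not part of this file): decoding the
 * 32-bit PAR value read back above via MRC p15,0,<Rt>,c7,c4,0. Bit 0 flags
 * a failed translation, which armv7a_mmu_translate_va_pa() does not check
 * explicitly; the NOS/NS/inner/outer fields match the decode in the
 * function. The sample value is made up for illustration. */

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t par = 0x800004d4;	/* hypothetical: PA 0x80000000, WB/WA inner, shareable */

	if (par & 1) {
		printf("translation aborted, fault status in PAR[6:1]\n");
		return 1;
	}
	printf("PA base 0x%08" PRIx32 ", NOS %u, NS %u, SH %u, inner 0x%x, outer 0x%x\n",
		(uint32_t)(par & ~0xfff),
		(unsigned)((par >> 10) & 1),	/* not outer shareable */
		(unsigned)((par >> 9) & 1),	/* non-secure */
		(unsigned)((par >> 7) & 1),	/* shareable */
		(unsigned)((par >> 4) & 0x7),	/* inner attributes */
		(unsigned)((par >> 2) & 0x3));	/* outer attributes */
	return 0;
}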
378
379 static int armv7a_handle_inner_cache_info_command(struct command_context *cmd_ctx,
380 struct armv7a_cache_common *armv7a_cache)
381 {
382 if (armv7a_cache->ctype == -1) {
383 command_print(cmd_ctx, "cache not yet identified");
384 return ERROR_OK;
385 }
386
387 command_print(cmd_ctx,
388 "D-Cache: linelen %" PRIi32 ", associativity %" PRIi32 ", nsets %" PRIi32 ", cachesize %" PRId32 " KBytes",
389 armv7a_cache->d_u_size.linelen,
390 armv7a_cache->d_u_size.associativity,
391 armv7a_cache->d_u_size.nsets,
392 armv7a_cache->d_u_size.cachesize);
393
394 command_print(cmd_ctx,
395 "I-Cache: linelen %" PRIi32 ", associativity %" PRIi32 ", nsets %" PRIi32 ", cachesize %" PRId32 " KBytes",
396 armv7a_cache->i_size.linelen,
397 armv7a_cache->i_size.associativity,
398 armv7a_cache->i_size.nsets,
399 armv7a_cache->i_size.cachesize);
400
401 return ERROR_OK;
402 }
403
404 static int _armv7a_flush_all_data(struct target *target)
405 {
406 struct armv7a_common *armv7a = target_to_armv7a(target);
407 struct arm_dpm *dpm = armv7a->arm.dpm;
408 struct armv7a_cachesize *d_u_size =
409 &(armv7a->armv7a_mmu.armv7a_cache.d_u_size);
410 int32_t c_way, c_index = d_u_size->index;
411 int retval;
412 /* check that the data cache was enabled when the target halted */
413 if (!armv7a->armv7a_mmu.armv7a_cache.d_u_cache_enabled) {
414 LOG_INFO("flush not performed: data cache not enabled at target halt");
415 return ERROR_OK;
416 }
417 retval = dpm->prepare(dpm);
418 if (retval != ERROR_OK)
419 goto done;
420 do {
421 c_way = d_u_size->way;
422 do {
423 uint32_t value = (c_index << d_u_size->index_shift)
424 | (c_way << d_u_size->way_shift);
425 /* DCCISW */
426 /* LOG_INFO ("%d %d %x",c_way,c_index,value); */
427 retval = dpm->instr_write_data_r0(dpm,
428 ARMV4_5_MCR(15, 0, 0, 7, 14, 2),
429 value);
430 if (retval != ERROR_OK)
431 goto done;
432 c_way -= 1;
433 } while (c_way >= 0);
434 c_index -= 1;
435 } while (c_index >= 0);
436 done:
437 if (retval != ERROR_OK)
438 LOG_ERROR("flush failed");
439 dpm->finish(dpm);
440 return retval;
441 }
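
/* Illustrative standalone sketch (not part of this file): how the DCCISW
 * operand written in the loop above is composed. Per the set/way format,
 * the way index sits in the top bits (way_shift = 32 - ceil(log2(ways)))
 * and the set index is shifted by log2(line length in bytes); this matches
 * the index_shift/way_shift values computed in armv7a_identify_cache() for
 * power-of-two geometries. */

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Compose a DCCISW operand from a (set, way) pair, as the loop above does.
 * Example geometry: 32-byte lines (index_shift = 5), 4 ways (way_shift = 30). */
static uint32_t set_way_operand(uint32_t set, uint32_t way,
	unsigned index_shift, unsigned way_shift)
{
	return (set << index_shift) | (way << way_shift);
}

int main(void)
{
	printf("set 255, way 3 -> 0x%08" PRIx32 "\n",
		set_way_operand(255, 3, 5, 30));
	return 0;
}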
442
443 static int armv7a_flush_all_data(struct target *target)
444 {
445 int retval = ERROR_FAIL;
446 /* check that the armv7a_cache has been correctly identified */
447 struct armv7a_common *armv7a = target_to_armv7a(target);
448 if (armv7a->armv7a_mmu.armv7a_cache.ctype == -1) {
449 LOG_ERROR("trying to flush an unidentified cache");
450 return retval;
451 }
452
453 if (target->smp) {
454 /* check whether all the other targets have been flushed
455 * in order to flush level 2 */
456 struct target_list *head;
457 struct target *curr;
458 head = target->head;
459 while (head != (struct target_list *)NULL) {
460 curr = head->target;
461 if (curr->state == TARGET_HALTED) {
462 LOG_INFO("Flushing L1 data cache on core %" PRId32, curr->coreid);
463 retval = _armv7a_flush_all_data(curr);
464 }
465 head = head->next;
466 }
467 } else
468 retval = _armv7a_flush_all_data(target);
469 return retval;
470 }
471
472 /* L2 is not specific to armv7a; a dedicated file is needed */
473 static int armv7a_l2x_flush_all_data(struct target *target)
474 {
475
476 #define L2X0_CLEAN_INV_WAY 0x7FC
477 int retval = ERROR_FAIL;
478 struct armv7a_common *armv7a = target_to_armv7a(target);
479 struct armv7a_l2x_cache *l2x_cache = (struct armv7a_l2x_cache *)
480 (armv7a->armv7a_mmu.armv7a_cache.l2_cache);
481 uint32_t base = l2x_cache->base;
482 uint32_t l2_way = l2x_cache->way;
483 uint32_t l2_way_val = (1 << l2_way) - 1;
484 retval = armv7a_flush_all_data(target);
485 if (retval != ERROR_OK)
486 return retval;
487 retval = target->type->write_phys_memory(target,
488 (uint32_t)(base+(uint32_t)L2X0_CLEAN_INV_WAY),
489 (uint32_t)4,
490 (uint32_t)1,
491 (uint8_t *)&l2_way_val);
492 return retval;
493 }
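
/* Illustrative sketch (not part of this file): the outer-cache sequence the
 * function above starts. On an L2C-310/PL310, offset 0x7FC takes a bitmask
 * of ways to clean and invalidate; a complete driver would normally also
 * poll that register until the way bits clear and then issue a cache sync
 * (offset 0x730). Those follow-up steps are assumptions, shown here as they
 * would run on the target itself rather than through write_phys_memory(). */

#include <stdint.h>

#define EX_L2X0_CLEAN_INV_WAY	0x7FC	/* clean & invalidate by way (bitmask of ways) */
#define EX_L2X0_CACHE_SYNC	0x730	/* drain the controller's buffers */

static void example_l2x0_flush_all(volatile uint32_t *l2x0_base, unsigned ways)
{
	uint32_t way_mask = (1u << ways) - 1;

	l2x0_base[EX_L2X0_CLEAN_INV_WAY / 4] = way_mask;
	while (l2x0_base[EX_L2X0_CLEAN_INV_WAY / 4] & way_mask)
		;	/* wait until every way has been processed */
	l2x0_base[EX_L2X0_CACHE_SYNC / 4] = 0;
}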
494
495 static int armv7a_handle_l2x_cache_info_command(struct command_context *cmd_ctx,
496 struct armv7a_cache_common *armv7a_cache)
497 {
498
499 struct armv7a_l2x_cache *l2x_cache = (struct armv7a_l2x_cache *)
500 (armv7a_cache->l2_cache);
501
502 if (armv7a_cache->ctype == -1) {
503 command_print(cmd_ctx, "cache not yet identified");
504 return ERROR_OK;
505 }
506
507 command_print(cmd_ctx,
508 "L1 D-Cache: linelen %" PRIi32 ", associativity %" PRIi32 ", nsets %" PRIi32 ", cachesize %" PRId32 " KBytes",
509 armv7a_cache->d_u_size.linelen,
510 armv7a_cache->d_u_size.associativity,
511 armv7a_cache->d_u_size.nsets,
512 armv7a_cache->d_u_size.cachesize);
513
514 command_print(cmd_ctx,
515 "L1 I-Cache: linelen %" PRIi32 ", associativity %" PRIi32 ", nsets %" PRIi32 ", cachesize %" PRId32 " KBytes",
516 armv7a_cache->i_size.linelen,
517 armv7a_cache->i_size.associativity,
518 armv7a_cache->i_size.nsets,
519 armv7a_cache->i_size.cachesize);
520 command_print(cmd_ctx, "L2 unified cache Base Address 0x%" PRIx32 ", %" PRId32 " ways",
521 l2x_cache->base, l2x_cache->way);
522
523
524 return ERROR_OK;
525 }
526
527
528 static int armv7a_l2x_cache_init(struct target *target, uint32_t base, uint32_t way)
529 {
530 struct armv7a_l2x_cache *l2x_cache;
531 struct target_list *head = target->head;
532 struct target *curr;
533
534 struct armv7a_common *armv7a = target_to_armv7a(target);
535 l2x_cache = calloc(1, sizeof(struct armv7a_l2x_cache));
536 l2x_cache->base = base;
537 l2x_cache->way = way;
538 /*LOG_INFO("cache l2 initialized base %x way %d",
539 l2x_cache->base,l2x_cache->way);*/
540 if (armv7a->armv7a_mmu.armv7a_cache.l2_cache)
541 LOG_INFO("L2 cache already initialized");
542 armv7a->armv7a_mmu.armv7a_cache.l2_cache = l2x_cache;
543 /* initialize l1 / l2x cache function */
544 armv7a->armv7a_mmu.armv7a_cache.flush_all_data_cache
545 = armv7a_l2x_flush_all_data;
546 armv7a->armv7a_mmu.armv7a_cache.display_cache_info =
547 armv7a_handle_l2x_cache_info_command;
548 /* initialize all targets in this cluster (SMP targets);
549 * the L2 cache must be configured after the smp declaration */
550 while (head != (struct target_list *)NULL) {
551 curr = head->target;
552 if (curr != target) {
553 armv7a = target_to_armv7a(curr);
554 if (armv7a->armv7a_mmu.armv7a_cache.l2_cache)
555 LOG_ERROR("SMP target: L2 cache already initialized");
556 armv7a->armv7a_mmu.armv7a_cache.l2_cache = l2x_cache;
557 armv7a->armv7a_mmu.armv7a_cache.flush_all_data_cache =
558 armv7a_l2x_flush_all_data;
559 armv7a->armv7a_mmu.armv7a_cache.display_cache_info =
560 armv7a_handle_l2x_cache_info_command;
561 }
562 head = head->next;
563 }
564 return JIM_OK;
565 }
566
567 COMMAND_HANDLER(handle_cache_l2x)
568 {
569 struct target *target = get_current_target(CMD_CTX);
570 uint32_t base, way;
571
572 if (CMD_ARGC != 2)
573 return ERROR_COMMAND_SYNTAX_ERROR;
574
575 /* command_print(CMD_CTX, "%s %s", CMD_ARGV[0], CMD_ARGV[1]); */
576 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], base);
577 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], way);
578
579 /* register the L2 cache controller base address and number of ways */
580 armv7a_l2x_cache_init(target, base, way);
581
582 return ERROR_OK;
583 }
584
585 int armv7a_handle_cache_info_command(struct command_context *cmd_ctx,
586 struct armv7a_cache_common *armv7a_cache)
587 {
588 if (armv7a_cache->ctype == -1) {
589 command_print(cmd_ctx, "cache not yet identified");
590 return ERROR_OK;
591 }
592
593 if (armv7a_cache->display_cache_info)
594 armv7a_cache->display_cache_info(cmd_ctx, armv7a_cache);
595 return ERROR_OK;
596 }
597
598 /* retrieve the core id and cluster id */
599 static int armv7a_read_mpidr(struct target *target)
600 {
601 int retval = ERROR_FAIL;
602 struct armv7a_common *armv7a = target_to_armv7a(target);
603 struct arm_dpm *dpm = armv7a->arm.dpm;
604 uint32_t mpidr;
605 retval = dpm->prepare(dpm);
606 if (retval != ERROR_OK)
607 goto done;
608 /* MRC p15,0,<Rd>,c0,c0,5; read Multiprocessor ID register*/
609
610 retval = dpm->instr_read_data_r0(dpm,
611 ARMV4_5_MRC(15, 0, 0, 0, 0, 5),
612 &mpidr);
613 if (retval != ERROR_OK)
614 goto done;
615
616 /* ARMv7R uses a different format for MPIDR.
617 * When configured uniprocessor (most R cores) it reads as 0.
618 * This will need to be implemented for multiprocessor ARMv7R cores. */
619 if (armv7a->is_armv7r) {
620 if (mpidr)
621 LOG_ERROR("MPIDR nonzero in ARMv7-R target");
622 goto done;
623 }
624
625 if (mpidr & 1<<31) {
626 armv7a->multi_processor_system = (mpidr >> 30) & 1;
627 armv7a->cluster_id = (mpidr >> 8) & 0xf;
628 armv7a->cpu_id = mpidr & 0x3;
629 LOG_INFO("%s cluster %x core %x %s", target_name(target),
630 armv7a->cluster_id,
631 armv7a->cpu_id,
632 armv7a->multi_processor_system == 0 ? "multi core" : "mono core");
633
634 } else
635 LOG_ERROR("MPIDR not in multiprocessor format");
636
637 done:
638 dpm->finish(dpm);
639 return retval;
640
641
642 }
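
/* Illustrative standalone sketch (not part of this file): a worked MPIDR
 * decode using the same fields as above. 0x80000001 is a typical reading
 * for core 1 of a dual-core cluster 0: bit 31 set marks the new format,
 * bit 30 clear means the core is part of a multiprocessor system. */

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t mpidr = 0x80000001;	/* e.g. core 1 of cluster 0 */

	if (!(mpidr & (1u << 31))) {
		printf("MPIDR not in multiprocessor format\n");
		return 1;
	}
	printf("uniprocessor %u, cluster %u, core %u\n",
		(unsigned)((mpidr >> 30) & 1),	/* 0: part of an MP system */
		(unsigned)((mpidr >> 8) & 0xf),	/* affinity level 1 */
		(unsigned)(mpidr & 0x3));	/* affinity level 0 */
	return 0;
}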
643
644 int armv7a_identify_cache(struct target *target)
645 {
646 /* read cache descriptor */
647 int retval = ERROR_FAIL;
648 struct armv7a_common *armv7a = target_to_armv7a(target);
649 struct arm_dpm *dpm = armv7a->arm.dpm;
650 uint32_t cache_selected, clidr, ctr;
651 uint32_t cache_i_reg, cache_d_reg;
652 struct armv7a_cache_common *cache = &(armv7a->armv7a_mmu.armv7a_cache);
653 if (!armv7a->is_armv7r)
654 armv7a_read_ttbcr(target);
655 retval = dpm->prepare(dpm);
656 if (retval != ERROR_OK)
657 goto done;
658
659 /* retrieve CTR
660 * mrc p15, 0, r0, c0, c0, 1 @ read ctr */
661 retval = dpm->instr_read_data_r0(dpm,
662 ARMV4_5_MRC(15, 0, 0, 0, 0, 1),
663 &ctr);
664 if (retval != ERROR_OK)
665 goto done;
666
667 cache->iminline = 4UL << (ctr & 0xf);
668 cache->dminline = 4UL << ((ctr & 0xf0000) >> 16);
669 LOG_DEBUG("ctr %" PRIx32 " ctr.iminline %" PRId32 " ctr.dminline %" PRId32,
670 ctr, cache->iminline, cache->dminline);
671
672 /* retrieve CLIDR
673 * mrc p15, 1, r0, c0, c0, 1 @ read clidr */
674 retval = dpm->instr_read_data_r0(dpm,
675 ARMV4_5_MRC(15, 1, 0, 0, 0, 1),
676 &clidr);
677 if (retval != ERROR_OK)
678 goto done;
679 clidr = (clidr & 0x7000000) >> 23;
680 LOG_INFO("number of cache levels %" PRIx32, (uint32_t)(clidr / 2));
681 if ((clidr / 2) > 1) {
682 /* FIXME: not supported; an architecturally visible L2 cache is
683 * present in Cortex-A8 and later cores such as Cortex-A7 and A15 */
684 LOG_ERROR("L2 cache present: not supported");
685 }
686 /* retrieve selected cache
687 * MRC p15, 2,<Rd>, c0, c0, 0; Read CSSELR */
688 retval = dpm->instr_read_data_r0(dpm,
689 ARMV4_5_MRC(15, 2, 0, 0, 0, 0),
690 &cache_selected);
691 if (retval != ERROR_OK)
692 goto done;
693
694 retval = armv7a->arm.mrc(target, 15,
695 2, 0, /* op1, op2 */
696 0, 0, /* CRn, CRm */
697 &cache_selected);
698 if (retval != ERROR_OK)
699 goto done;
700 /* select the instruction cache
701 * MCR p15, 2,<Rd>, c0, c0, 0; Write CSSELR
702 * [0]: 1 selects the instruction cache, 0 selects the data cache */
703 retval = dpm->instr_write_data_r0(dpm,
704 ARMV4_5_MCR(15, 2, 0, 0, 0, 0),
705 1);
706 if (retval != ERROR_OK)
707 goto done;
708
709 /* read CCSIDR
710 * MRC P15,1,<RT>,C0, C0,0 ;on cortex A9 read CCSIDR
711 * [2:0] line size 001 eight word per line
712 * [27:13] NumSet 0x7f 16KB, 0xff 32Kbytes, 0x1ff 64Kbytes */
713 retval = dpm->instr_read_data_r0(dpm,
714 ARMV4_5_MRC(15, 1, 0, 0, 0, 0),
715 &cache_i_reg);
716 if (retval != ERROR_OK)
717 goto done;
718
719 /* select the data cache */
720 retval = dpm->instr_write_data_r0(dpm,
721 ARMV4_5_MCR(15, 2, 0, 0, 0, 0),
722 0);
723 if (retval != ERROR_OK)
724 goto done;
725
726 retval = dpm->instr_read_data_r0(dpm,
727 ARMV4_5_MRC(15, 1, 0, 0, 0, 0),
728 &cache_d_reg);
729 if (retval != ERROR_OK)
730 goto done;
731
732 /* restore the originally selected cache */
733 retval = dpm->instr_write_data_r0(dpm,
734 ARMV4_5_MCR(15, 2, 0, 0, 0, 0),
735 cache_selected);
736
737 if (retval != ERROR_OK)
738 goto done;
739 /* dpm->finish() is called once, at the done: label below */
740
741 /* decode the CCSIDR values into the cache geometry fields */
742 cache->d_u_size.linelen = 16 << (cache_d_reg & 0x7);
743 cache->d_u_size.cachesize = (((cache_d_reg >> 13) & 0x7fff)+1)/8;
744 cache->d_u_size.nsets = (cache_d_reg >> 13) & 0x7fff;
745 cache->d_u_size.associativity = ((cache_d_reg >> 3) & 0x3ff) + 1;
746 /* compute info for set way operation on cache */
747 cache->d_u_size.index_shift = (cache_d_reg & 0x7) + 4;
748 cache->d_u_size.index = (cache_d_reg >> 13) & 0x7fff;
749 cache->d_u_size.way = ((cache_d_reg >> 3) & 0x3ff);
750 cache->d_u_size.way_shift = cache->d_u_size.way + 1;
751 {
752 int i = 0;
753 while (((cache->d_u_size.way_shift >> i) & 1) != 1)
754 i++;
755 cache->d_u_size.way_shift = 32-i;
756 }
757 #if 0
758 LOG_INFO("data cache index %d << %d, way %d << %d",
759 cache->d_u_size.index, cache->d_u_size.index_shift,
760 cache->d_u_size.way,
761 cache->d_u_size.way_shift);
762
763 LOG_INFO("data cache %d bytes %d KBytes asso %d ways",
764 cache->d_u_size.linelen,
765 cache->d_u_size.cachesize,
766 cache->d_u_size.associativity);
767 #endif
768 cache->i_size.linelen = 16 << (cache_i_reg & 0x7);
769 cache->i_size.associativity = ((cache_i_reg >> 3) & 0x3ff) + 1;
770 cache->i_size.nsets = (cache_i_reg >> 13) & 0x7fff;
771 cache->i_size.cachesize = (((cache_i_reg >> 13) & 0x7fff)+1)/8;
772 /* compute info for set way operation on cache */
773 cache->i_size.index_shift = (cache_i_reg & 0x7) + 4;
774 cache->i_size.index = (cache_i_reg >> 13) & 0x7fff;
775 cache->i_size.way = ((cache_i_reg >> 3) & 0x3ff);
776 cache->i_size.way_shift = cache->i_size.way + 1;
777 {
778 int i = 0;
779 while (((cache->i_size.way_shift >> i) & 1) != 1)
780 i++;
781 cache->i_size.way_shift = 32-i;
782 }
783 #if 0
784 LOG_INFO("instruction cache index %d << %d, way %d << %d",
785 cache->i_size.index, cache->i_size.index_shift,
786 cache->i_size.way, cache->i_size.way_shift);
787
788 LOG_INFO("instruction cache %d bytes %d KBytes asso %d ways",
789 cache->i_size.linelen,
790 cache->i_size.cachesize,
791 cache->i_size.associativity);
792 #endif
793 /* if there is no L2 cache, initialize the L1 data cache flush function */
794 if (armv7a->armv7a_mmu.armv7a_cache.flush_all_data_cache == NULL) {
795 armv7a->armv7a_mmu.armv7a_cache.display_cache_info =
796 armv7a_handle_inner_cache_info_command;
797 armv7a->armv7a_mmu.armv7a_cache.flush_all_data_cache =
798 armv7a_flush_all_data;
799 }
800 armv7a->armv7a_mmu.armv7a_cache.ctype = 0;
801
802 done:
803 dpm->finish(dpm);
804 armv7a_read_mpidr(target);
805 return retval;
806
807 }
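
/* Illustrative standalone sketch (not part of this file): a worked CCSIDR
 * decode. Note that the "cachesize" formula used above, (NumSets + 1) / 8,
 * only yields KBytes when associativity times line length equals 128 bytes
 * (as on a 4-way, 32-byte-line Cortex-A9); the geometry-independent size is
 * line length * ways * sets, as computed below. */

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t ccsidr = 0x701fe019;	/* e.g. a 32 KB, 4-way Cortex-A9 data cache */

	uint32_t linelen = 16 << (ccsidr & 0x7);		/* bytes per line */
	uint32_t ways = ((ccsidr >> 3) & 0x3ff) + 1;
	uint32_t sets = ((ccsidr >> 13) & 0x7fff) + 1;

	printf("linelen %" PRIu32 ", ways %" PRIu32 ", sets %" PRIu32
		", size %" PRIu32 " KBytes\n",
		linelen, ways, sets, linelen * ways * sets / 1024);
	return 0;
}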
808
809 int armv7a_init_arch_info(struct target *target, struct armv7a_common *armv7a)
810 {
811 struct arm *arm = &armv7a->arm;
812 arm->arch_info = armv7a;
813 target->arch_info = &armv7a->arm;
814 /* target is used by all ARMv4/ARMv5-compatible functions */
815 armv7a->arm.target = target;
816 armv7a->arm.common_magic = ARM_COMMON_MAGIC;
817 armv7a->common_magic = ARMV7_COMMON_MAGIC;
818 armv7a->armv7a_mmu.armv7a_cache.l2_cache = NULL;
819 armv7a->armv7a_mmu.armv7a_cache.ctype = -1;
820 armv7a->armv7a_mmu.armv7a_cache.flush_all_data_cache = NULL;
821 armv7a->armv7a_mmu.armv7a_cache.display_cache_info = NULL;
822 armv7a->armv7a_mmu.armv7a_cache.auto_cache_enabled = 1;
823 return ERROR_OK;
824 }
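
/* Illustrative sketch (not part of this file): how a target driver embeds
 * and initializes this common state. The real user is cortex_a.c; the
 * wrapper structure and function names below are made-up placeholders. */

#include "armv7a.h"

/* Hypothetical per-target wrapper around the common ARMv7-A state. */
struct example_cortex_common {
	struct armv7a_common armv7a_common;
	/* ... debug-interface specific fields would follow ... */
};

static int example_target_init(struct target *target,
	struct example_cortex_common *cortex)
{
	/* armv7a_init_arch_info() links target <-> arm <-> armv7a and resets
	 * the cache/MMU bookkeeping; armv7a_identify_cache() is expected to
	 * run later, once the core has been examined and halted. */
	return armv7a_init_arch_info(target, &cortex->armv7a_common);
}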
825
826 int armv7a_arch_state(struct target *target)
827 {
828 static const char *state[] = {
829 "disabled", "enabled"
830 };
831
832 struct armv7a_common *armv7a = target_to_armv7a(target);
833 struct arm *arm = &armv7a->arm;
834
835 if (armv7a->common_magic != ARMV7_COMMON_MAGIC) {
836 LOG_ERROR("BUG: called for a non-ARMv7A target");
837 return ERROR_COMMAND_SYNTAX_ERROR;
838 }
839
840 arm_arch_state(target);
841
842 if (armv7a->is_armv7r) {
843 LOG_USER("D-Cache: %s, I-Cache: %s",
844 state[armv7a->armv7a_mmu.armv7a_cache.d_u_cache_enabled],
845 state[armv7a->armv7a_mmu.armv7a_cache.i_cache_enabled]);
846 } else {
847 LOG_USER("MMU: %s, D-Cache: %s, I-Cache: %s",
848 state[armv7a->armv7a_mmu.mmu_enabled],
849 state[armv7a->armv7a_mmu.armv7a_cache.d_u_cache_enabled],
850 state[armv7a->armv7a_mmu.armv7a_cache.i_cache_enabled]);
851 }
852
853 if (arm->core_mode == ARM_MODE_ABT)
854 armv7a_show_fault_registers(target);
855 if (target->debug_reason == DBG_REASON_WATCHPOINT)
856 LOG_USER("Watchpoint triggered at PC %#08x",
857 (unsigned) armv7a->dpm.wp_pc);
858
859 return ERROR_OK;
860 }
861
862 static const struct command_registration l2_cache_commands[] = {
863 {
864 .name = "l2x",
865 .handler = handle_cache_l2x,
866 .mode = COMMAND_EXEC,
867 .help = "configure l2x cache "
868 "(controller base address and number of ways)",
869 .usage = "[base_addr] [number_of_way]",
870 },
871 COMMAND_REGISTRATION_DONE
872
873 };
874
875 const struct command_registration l2x_cache_command_handlers[] = {
876 {
877 .name = "cache_config",
878 .mode = COMMAND_EXEC,
879 .help = "cache configuration for a target",
880 .usage = "",
881 .chain = l2_cache_commands,
882 },
883 COMMAND_REGISTRATION_DONE
884 };
885
886 const struct command_registration armv7a_command_handlers[] = {
887 {
888 .chain = dap_command_handlers,
889 },
890 {
891 .chain = l2x_cache_command_handlers,
892 },
893 {
894 .chain = arm7a_cache_command_handlers,
895 },
896 COMMAND_REGISTRATION_DONE
897 };