armv7a, cortex-a: add L1, L2 cache support, VA-to-PA translation support
[openocd.git] / src / target / armv7a.c
1 /***************************************************************************
2 * Copyright (C) 2009 by David Brownell *
3 * *
4 * Copyright (C) ST-Ericsson SA 2011 michel.jaouen@stericsson.com *
5 * *
6 * This program is free software; you can redistribute it and/or modify *
7 * it under the terms of the GNU General Public License as published by *
8 * the Free Software Foundation; either version 2 of the License, or *
9 * (at your option) any later version. *
10 * *
11 * This program is distributed in the hope that it will be useful, *
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
14 * GNU General Public License for more details. *
15 * *
16 * You should have received a copy of the GNU General Public License *
17 * along with this program; if not, write to the *
18 * Free Software Foundation, Inc., *
19 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
20 ***************************************************************************/
21 #ifdef HAVE_CONFIG_H
22 #include "config.h"
23 #endif
24
25 #include <helper/replacements.h>
26
27 #include "armv7a.h"
28 #include "arm_disassembler.h"
29
30 #include "register.h"
31 #include <helper/binarybuffer.h>
32 #include <helper/command.h>
33
34 #include <stdlib.h>
35 #include <string.h>
36 #include <unistd.h>
37
38 #include "arm_opcodes.h"
39 #include "target.h"
40 #include "target_type.h"
41
42 static void armv7a_show_fault_registers(struct target *target)
43 {
44 uint32_t dfsr, ifsr, dfar, ifar;
45 struct armv7a_common *armv7a = target_to_armv7a(target);
46 struct arm_dpm *dpm = armv7a->armv4_5_common.dpm;
47 int retval;
48
49 retval = dpm->prepare(dpm);
50 if (retval != ERROR_OK)
51 return;
52
53 /* ARMV4_5_MRC(cpnum, op1, r0, CRn, CRm, op2) */
54
55 /* c5/c0 - {data, instruction} fault status registers */
56 retval = dpm->instr_read_data_r0(dpm,
57 ARMV4_5_MRC(15, 0, 0, 5, 0, 0),
58 &dfsr);
59 if (retval != ERROR_OK)
60 goto done;
61
62 retval = dpm->instr_read_data_r0(dpm,
63 ARMV4_5_MRC(15, 0, 0, 5, 0, 1),
64 &ifsr);
65 if (retval != ERROR_OK)
66 goto done;
67
68 /* c6/c0 - {data, instruction} fault address registers */
69 retval = dpm->instr_read_data_r0(dpm,
70 ARMV4_5_MRC(15, 0, 0, 6, 0, 0),
71 &dfar);
72 if (retval != ERROR_OK)
73 goto done;
74
75 retval = dpm->instr_read_data_r0(dpm,
76 ARMV4_5_MRC(15, 0, 0, 6, 0, 2),
77 &ifar);
78 if (retval != ERROR_OK)
79 goto done;
80
81 LOG_USER("Data fault registers DFSR: %8.8" PRIx32
82 ", DFAR: %8.8" PRIx32, dfsr, dfar);
83 LOG_USER("Instruction fault registers IFSR: %8.8" PRIx32
84 ", IFAR: %8.8" PRIx32, ifsr, ifar);
85
86 done:
87 /* (void) */ dpm->finish(dpm);
88 }
89
90 int armv7a_read_ttbcr(struct target *target)
91 {
92 struct armv7a_common *armv7a = target_to_armv7a(target);
93 struct arm_dpm *dpm = armv7a->armv4_5_common.dpm;
94 uint32_t ttbcr;
95 int retval = dpm->prepare(dpm);
96 if (retval!=ERROR_OK) goto done;
97 /* MRC p15,0,<Rt>,c2,c0,2 ; Read CP15 Translation Table Base Control Register*/
98 retval = dpm->instr_read_data_r0(dpm,
99 ARMV4_5_MRC(15, 0, 0, 2, 0, 2),
100 &ttbcr);
101 if (retval!=ERROR_OK) goto done;
102 armv7a->armv7a_mmu.ttbr1_used = ((ttbcr & 0x7)!=0)? 1: 0;
103 armv7a->armv7a_mmu.ttbr0_mask = 7 << (32 -((ttbcr & 0x7)));
104 #if 0
105 LOG_INFO("ttb1 %s ,ttb0_mask %x",
106 armv7a->armv7a_mmu.ttbr1_used ? "used":"not used",
107 armv7a->armv7a_mmu.ttbr0_mask);
108 #endif
109 if (armv7a->armv7a_mmu.ttbr1_used == 1)
110 {
111 LOG_INFO("SVC access above %x",
112 (0xffffffff & armv7a->armv7a_mmu.ttbr0_mask));
113 armv7a->armv7a_mmu.os_border = 0xffffffff & armv7a->armv7a_mmu.ttbr0_mask;
114 }
115 else
116 {
117 /* fix me , default is hard coded LINUX border */
118 armv7a->armv7a_mmu.os_border = 0xc0000000;
119 }
120 done:
121 dpm->finish(dpm);
122 return retval;
123 }
124
125
126 /* method adapted to cortex A : reused arm v4 v5 method*/
127 int armv7a_mmu_translate_va(struct target *target, uint32_t va, uint32_t *val)
128 {
129 uint32_t first_lvl_descriptor = 0x0;
130 uint32_t second_lvl_descriptor = 0x0;
131 int retval;
132 uint32_t cb;
133 struct armv7a_common *armv7a = target_to_armv7a(target);
134 struct arm_dpm *dpm = armv7a->armv4_5_common.dpm;
135 uint32_t ttb = 0; /* default ttb0 */
136 if (armv7a->armv7a_mmu.ttbr1_used == -1) armv7a_read_ttbcr(target);
137 if ((armv7a->armv7a_mmu.ttbr1_used) &&
138 (va > (0xffffffff & armv7a->armv7a_mmu.ttbr0_mask)))
139 {
140 /* select ttb 1 */
141 ttb = 1;
142 }
143 retval = dpm->prepare(dpm);
144 if (retval != ERROR_OK)
145 goto done;
146
147 /* MRC p15,0,<Rt>,c2,c0,ttb */
148 retval = dpm->instr_read_data_r0(dpm,
149 ARMV4_5_MRC(15, 0, 0, 2, 0, ttb),
150 &ttb);
151 retval = armv7a->armv7a_mmu.read_physical_memory(target,
152 (ttb & 0xffffc000) | ((va & 0xfff00000) >> 18),
153 4, 1, (uint8_t*)&first_lvl_descriptor);
154 if (retval != ERROR_OK)
155 return retval;
156 first_lvl_descriptor = target_buffer_get_u32(target, (uint8_t*)
157 &first_lvl_descriptor);
158 /* reuse armv4_5 piece of code, specific armv7a changes may come later */
159 LOG_DEBUG("1st lvl desc: %8.8" PRIx32 "", first_lvl_descriptor);
160
161 if ((first_lvl_descriptor & 0x3) == 0)
162 {
163 LOG_ERROR("Address translation failure");
164 return ERROR_TARGET_TRANSLATION_FAULT;
165 }
166
167
168 if ((first_lvl_descriptor & 0x3) == 2)
169 {
170 /* section descriptor */
171 cb = (first_lvl_descriptor & 0xc) >> 2;
172 *val = (first_lvl_descriptor & 0xfff00000) | (va & 0x000fffff);
173 return ERROR_OK;
174 }
175
176 if ((first_lvl_descriptor & 0x3) == 1)
177 {
178 /* coarse page table */
179 retval = armv7a->armv7a_mmu.read_physical_memory(target,
180 (first_lvl_descriptor & 0xfffffc00) | ((va & 0x000ff000) >> 10),
181 4, 1, (uint8_t*)&second_lvl_descriptor);
182 if (retval != ERROR_OK)
183 return retval;
184 }
185 else if ((first_lvl_descriptor & 0x3) == 3)
186 {
187 /* fine page table */
188 retval = armv7a->armv7a_mmu.read_physical_memory(target,
189 (first_lvl_descriptor & 0xfffff000) | ((va & 0x000ffc00) >> 8),
190 4, 1, (uint8_t*)&second_lvl_descriptor);
191 if (retval != ERROR_OK)
192 return retval;
193 }
194
195 second_lvl_descriptor = target_buffer_get_u32(target, (uint8_t*)
196 &second_lvl_descriptor);
197
198 LOG_DEBUG("2nd lvl desc: %8.8" PRIx32 "", second_lvl_descriptor);
199
200 if ((second_lvl_descriptor & 0x3) == 0)
201 {
202 LOG_ERROR("Address translation failure");
203 return ERROR_TARGET_TRANSLATION_FAULT;
204 }
205
206 /* cacheable/bufferable is always specified in bits 3-2 */
207 cb = (second_lvl_descriptor & 0xc) >> 2;
208
209 if ((second_lvl_descriptor & 0x3) == 1)
210 {
211 /* large page descriptor */
212 *val = (second_lvl_descriptor & 0xffff0000) | (va & 0x0000ffff);
213 return ERROR_OK;
214 }
215
216 if ((second_lvl_descriptor & 0x3) == 2)
217 {
218 /* small page descriptor */
219 *val = (second_lvl_descriptor & 0xfffff000) | (va & 0x00000fff);
220 return ERROR_OK;
221 }
222
223 if ((second_lvl_descriptor & 0x3) == 3)
224 {
225 *val = (second_lvl_descriptor & 0xfffffc00) | (va & 0x000003ff);
226 return ERROR_OK;
227 }
228
229 /* should not happen */
230 LOG_ERROR("Address translation failure");
231 return ERROR_TARGET_TRANSLATION_FAULT;
232
233 done:
234 return retval;
235 }
236
237
238 /* V7 method VA TO PA */
239 int armv7a_mmu_translate_va_pa(struct target *target, uint32_t va,
240 uint32_t *val, int meminfo)
241 {
242 int retval = ERROR_FAIL;
243 struct armv7a_common *armv7a = target_to_armv7a(target);
244 struct arm_dpm *dpm = armv7a->armv4_5_common.dpm;
245 uint32_t virt = va & ~0xfff;
246 uint32_t NOS,NS,SH,INNER,OUTER;
247 *val = 0xdeadbeef;
248 retval = dpm->prepare(dpm);
249 if (retval != ERROR_OK)
250 goto done;
251 /* mmu must be enable in order to get a correct translation */
252 /* use VA to PA CP15 register for conversion */
253 retval = dpm->instr_write_data_r0(dpm,
254 ARMV4_5_MCR(15, 0, 0, 7, 8, 0),
255 virt);
256 if (retval!=ERROR_OK) goto done;
257 retval = dpm->instr_read_data_r0(dpm,
258 ARMV4_5_MRC(15, 0, 0, 7, 4, 0),
259 val);
260 /* decode memory attribute */
261 NOS = (*val >> 10) & 1; /* Not Outer shareable */
262 NS = (*val >> 9) & 1; /* Non secure */
263 SH = (*val >> 7 )& 1; /* shareable */
264 INNER = (*val >> 4) & 0x7;
265 OUTER = (*val >> 2) & 0x3;
266
267 if (retval!=ERROR_OK) goto done;
268 *val = (*val & ~0xfff) + (va & 0xfff);
269 if (*val == va)
270 LOG_WARNING("virt = phys : MMU disable !!");
271 if (meminfo)
272 {
273 LOG_INFO("%x : %x %s outer shareable %s secured",
274 va, *val,
275 NOS == 1 ? "not" : " ",
276 NS == 1 ? "not" :"");
277 switch (OUTER) {
278 case 0 : LOG_INFO("outer: Non-Cacheable");
279 break;
280 case 1 : LOG_INFO("outer: Write-Back, Write-Allocate");
281 break;
282 case 2 : LOG_INFO("outer: Write-Through, No Write-Allocate");
283 break;
284 case 3 : LOG_INFO("outer: Write-Back, no Write-Allocate");
285 break;
286 }
287 switch (INNER) {
288 case 0 : LOG_INFO("inner: Non-Cacheable");
289 break;
290 case 1 : LOG_INFO("inner: Strongly-ordered");
291 break;
292 case 3 : LOG_INFO("inner: Device");
293 break;
294 case 5 : LOG_INFO("inner: Write-Back, Write-Allocate");
295 break;
296 case 6 : LOG_INFO("inner: Write-Through");
297 break;
298 case 7 : LOG_INFO("inner: Write-Back, no Write-Allocate");
299
300 default: LOG_INFO("inner: %x ???",INNER);
301 }
302 }
303
304 done:
305 dpm->finish(dpm);
306
307 return retval;
308 }
309
310 static int armv7a_handle_inner_cache_info_command(struct command_context *cmd_ctx,
311 struct armv7a_cache_common *armv7a_cache)
312 {
313 if (armv7a_cache->ctype == -1)
314 {
315 command_print(cmd_ctx, "cache not yet identified");
316 return ERROR_OK;
317 }
318
319 command_print(cmd_ctx,
320 "D-Cache: linelen %i, associativity %i, nsets %i, cachesize %d KBytes",
321 armv7a_cache->d_u_size.linelen,
322 armv7a_cache->d_u_size.associativity,
323 armv7a_cache->d_u_size.nsets,
324 armv7a_cache->d_u_size.cachesize);
325
326 command_print(cmd_ctx,
327 "I-Cache: linelen %i, associativity %i, nsets %i, cachesize %d KBytes",
328 armv7a_cache->i_size.linelen,
329 armv7a_cache->i_size.associativity,
330 armv7a_cache->i_size.nsets,
331 armv7a_cache->i_size.cachesize);
332
333 return ERROR_OK;
334 }
335
336 static int _armv7a_flush_all_data(struct target *target)
337 {
338 struct armv7a_common *armv7a = target_to_armv7a(target);
339 struct arm_dpm *dpm = armv7a->armv4_5_common.dpm;
340 struct armv7a_cachesize *d_u_size =
341 &(armv7a->armv7a_mmu.armv7a_cache.d_u_size);
342 int32_t c_way, c_index = d_u_size->index;
343 int retval;
344 /* check that cache data is on at target halt */
345 if (!armv7a->armv7a_mmu.armv7a_cache.d_u_cache_enabled)
346 {
347 LOG_INFO("flushed not performed :cache not on at target halt");
348 return ERROR_OK;
349 }
350 retval = dpm->prepare(dpm);
351 if (retval != ERROR_OK) goto done;
352 do {
353 c_way = d_u_size->way;
354 do {
355 uint32_t value = (c_index << d_u_size->index_shift)
356 | (c_way << d_u_size->way_shift);
357 /* DCCISW */
358 //LOG_INFO ("%d %d %x",c_way,c_index,value);
359 retval = dpm->instr_write_data_r0(dpm,
360 ARMV4_5_MCR(15, 0, 0, 7, 14, 2),
361 value);
362 if (retval!= ERROR_OK) goto done;
363 c_way -= 1;
364 } while (c_way >=0);
365 c_index -= 1;
366 } while (c_index >=0);
367 return retval;
368 done:
369 LOG_ERROR("flushed failed");
370 dpm->finish(dpm);
371 return retval;
372 }
373
374 static int armv7a_flush_all_data( struct target * target)
375 {
376 int retval = ERROR_FAIL;
377 /* check that armv7a_cache is correctly identify */
378 struct armv7a_common *armv7a = target_to_armv7a(target);
379 if (armv7a->armv7a_mmu.armv7a_cache.ctype == -1)
380 {
381 LOG_ERROR("trying to flush un-identified cache");
382 return retval;
383 }
384
385 if (target->smp)
386 {
387 /* look if all the other target have been flushed in order to flush level
388 * 2 */
389 struct target_list *head;
390 struct target *curr;
391 head = target->head;
392 while(head != (struct target_list*)NULL)
393 {
394 curr = head->target;
395 if ((curr->state == TARGET_HALTED))
396 { LOG_INFO("Wait flushing data l1 on core %d",curr->coreid);
397 retval = _armv7a_flush_all_data(curr);
398 }
399 head = head->next;
400 }
401 }
402 else retval = _armv7a_flush_all_data(target);
403 return retval;
404 }
405
406
407 /* L2 is not specific to armv7a a specific file is needed */
408 static int armv7a_l2x_flush_all_data(struct target * target)
409 {
410
411 #define L2X0_CLEAN_INV_WAY 0x7FC
412 int retval = ERROR_FAIL;
413 struct armv7a_common *armv7a = target_to_armv7a(target);
414 struct armv7a_l2x_cache *l2x_cache = (struct armv7a_l2x_cache*)
415 (armv7a->armv7a_mmu.armv7a_cache.l2_cache);
416 uint32_t base = l2x_cache->base;
417 uint32_t l2_way = l2x_cache->way;
418 uint32_t l2_way_val = (1<<l2_way) -1;
419 retval = armv7a_flush_all_data(target);
420 if (retval!=ERROR_OK) return retval;
421 retval = target->type->write_phys_memory(target,
422 (uint32_t)(base+(uint32_t)L2X0_CLEAN_INV_WAY),
423 (uint32_t)4,
424 (uint32_t)1,
425 (uint8_t*)&l2_way_val);
426 return retval;
427 }
428
429 static int armv7a_handle_l2x_cache_info_command(struct command_context *cmd_ctx,
430 struct armv7a_cache_common *armv7a_cache)
431 {
432
433 struct armv7a_l2x_cache *l2x_cache = (struct armv7a_l2x_cache*)
434 (armv7a_cache->l2_cache);
435
436 if (armv7a_cache->ctype == -1)
437 {
438 command_print(cmd_ctx, "cache not yet identified");
439 return ERROR_OK;
440 }
441
442 command_print(cmd_ctx,
443 "L1 D-Cache: linelen %i, associativity %i, nsets %i, cachesize %d KBytes",
444 armv7a_cache->d_u_size.linelen,
445 armv7a_cache->d_u_size.associativity,
446 armv7a_cache->d_u_size.nsets,
447 armv7a_cache->d_u_size.cachesize);
448
449 command_print(cmd_ctx,
450 "L1 I-Cache: linelen %i, associativity %i, nsets %i, cachesize %d KBytes",
451 armv7a_cache->i_size.linelen,
452 armv7a_cache->i_size.associativity,
453 armv7a_cache->i_size.nsets,
454 armv7a_cache->i_size.cachesize);
455 command_print(cmd_ctx, "L2 unified cache Base Address 0x%x, %d ways",
456 l2x_cache->base, l2x_cache->way);
457
458
459 return ERROR_OK;
460 }
461
462
463 int armv7a_l2x_cache_init(struct target *target, uint32_t base, uint32_t way)
464 {
465 struct armv7a_l2x_cache *l2x_cache;
466 struct target_list *head = target->head;
467 struct target *curr;
468
469 struct armv7a_common *armv7a = target_to_armv7a(target);
470 if (armv7a == NULL)
471 LOG_ERROR("not an armv7a target");
472 l2x_cache = calloc(1, sizeof(struct armv7a_l2x_cache));
473 l2x_cache->base = base;
474 l2x_cache->way = way;
475 /*LOG_INFO("cache l2 initialized base %x way %d",
476 l2x_cache->base,l2x_cache->way);*/
477 if (armv7a->armv7a_mmu.armv7a_cache.l2_cache)
478 {
479 LOG_INFO("cache l2 already initialized\n");
480 }
481 armv7a->armv7a_mmu.armv7a_cache.l2_cache = (void*) l2x_cache;
482 /* initialize l1 / l2x cache function */
483 armv7a->armv7a_mmu.armv7a_cache.flush_all_data_cache
484 = armv7a_l2x_flush_all_data;
485 armv7a->armv7a_mmu.armv7a_cache.display_cache_info =
486 armv7a_handle_l2x_cache_info_command;
487 /* initialize all target in this cluster (smp target)*/
488 /* l2 cache must be configured after smp declaration */
489 while(head != (struct target_list*)NULL)
490 {
491 curr = head->target;
492 if (curr != target)
493 {
494 armv7a = target_to_armv7a(curr);
495 if (armv7a->armv7a_mmu.armv7a_cache.l2_cache)
496 {
497 LOG_ERROR("smp target : cache l2 already initialized\n");
498 }
499 armv7a->armv7a_mmu.armv7a_cache.l2_cache = (void*) l2x_cache;
500 armv7a->armv7a_mmu.armv7a_cache.flush_all_data_cache =
501 armv7a_l2x_flush_all_data;
502 armv7a->armv7a_mmu.armv7a_cache.display_cache_info =
503 armv7a_handle_l2x_cache_info_command;
504 }
505 head = head -> next;
506 }
507 return JIM_OK;
508 }
509
510 COMMAND_HANDLER(handle_cache_l2x)
511 {
512 struct target *target = get_current_target(CMD_CTX);
513 uint32_t base, way;
514 switch (CMD_ARGC) {
515 case 0:
516 return ERROR_COMMAND_SYNTAX_ERROR;
517 break;
518 case 2:
519 //command_print(CMD_CTX, "%s %s", CMD_ARGV[0], CMD_ARGV[1]);
520
521
522 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], base);
523 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], way);
524
525 /* AP address is in bits 31:24 of DP_SELECT */
526 armv7a_l2x_cache_init(target, base, way);
527 break;
528 default:
529 return ERROR_COMMAND_SYNTAX_ERROR;
530 }
531 return ERROR_OK;
532 }
533
534
535 int armv7a_handle_cache_info_command(struct command_context *cmd_ctx,
536 struct armv7a_cache_common *armv7a_cache)
537 {
538 if (armv7a_cache->ctype == -1)
539 {
540 command_print(cmd_ctx, "cache not yet identified");
541 return ERROR_OK;
542 }
543
544 if (armv7a_cache->display_cache_info)
545 armv7a_cache->display_cache_info(cmd_ctx, armv7a_cache);
546 return ERROR_OK;
547 }
548
549
550 /* retrieve core id cluster id */
551 int arnv7a_read_mpidr(struct target *target)
552 {
553 int retval = ERROR_FAIL;
554 struct armv7a_common *armv7a = target_to_armv7a(target);
555 struct arm_dpm *dpm = armv7a->armv4_5_common.dpm;
556 uint32_t mpidr;
557 retval = dpm->prepare(dpm);
558 if (retval!=ERROR_OK) goto done;
559 /* MRC p15,0,<Rd>,c0,c0,5; read Multiprocessor ID register*/
560
561 retval = dpm->instr_read_data_r0(dpm,
562 ARMV4_5_MRC(15, 0, 0, 0, 0, 5),
563 &mpidr);
564 if (retval!=ERROR_OK) goto done;
565 if (mpidr & 1<<31)
566 {
567 armv7a->multi_processor_system = (mpidr >> 30) & 1;
568 armv7a->cluster_id = (mpidr >> 8) & 0xf;
569 armv7a->cpu_id = mpidr & 0x3;
570 LOG_INFO("%s cluster %x core %x %s", target->cmd_name,
571 armv7a->cluster_id,
572 armv7a->cpu_id,
573 armv7a->multi_processor_system == 0 ? "multi core": "mono core");
574
575 }
576 else
577 LOG_ERROR("mpdir not in multiprocessor format");
578
579 done:
580 dpm->finish(dpm);
581 return retval;
582
583
584 }
585
586
587 int armv7a_identify_cache(struct target *target)
588 {
589 /* read cache descriptor */
590 int retval = ERROR_FAIL;
591 struct armv7a_common *armv7a = target_to_armv7a(target);
592 struct arm_dpm *dpm = armv7a->armv4_5_common.dpm;
593 uint32_t cache_selected,clidr;
594 uint32_t cache_i_reg, cache_d_reg;
595 struct armv7a_cache_common *cache = &(armv7a->armv7a_mmu.armv7a_cache);
596 armv7a_read_ttbcr(target);
597 retval = dpm->prepare(dpm);
598
599 if (retval!=ERROR_OK) goto done;
600 /* retrieve CLIDR */
601 /* mrc p15, 1, r0, c0, c0, 1 @ read clidr */
602 retval = dpm->instr_read_data_r0(dpm,
603 ARMV4_5_MRC(15, 1, 0, 0, 0, 1),
604 &clidr);
605 if (retval!=ERROR_OK) goto done;
606 clidr = (clidr & 0x7000000) >> 23;
607 LOG_INFO("number of cache level %d",clidr /2 );
608 if ((clidr /2) > 1)
609 {
610 // FIXME not supported present in cortex A8 and later
611 // in cortex A7, A15
612 LOG_ERROR("cache l2 present :not supported");
613 }
614 /* retrieve selected cache */
615 /* MRC p15, 2,<Rd>, c0, c0, 0; Read CSSELR */
616 retval = dpm->instr_read_data_r0(dpm,
617 ARMV4_5_MRC(15, 2, 0, 0, 0, 0),
618 &cache_selected);
619 if (retval!=ERROR_OK) goto done;
620
621 retval = armv7a->armv4_5_common.mrc(target, 15,
622 2, 0, /* op1, op2 */
623 0, 0, /* CRn, CRm */
624 &cache_selected);
625 /* select instruction cache*/
626 /* MCR p15, 2,<Rd>, c0, c0, 0; Write CSSELR */
627 /* [0] : 1 instruction cache selection , 0 data cache selection */
628 retval = dpm->instr_write_data_r0(dpm,
629 ARMV4_5_MRC(15, 2, 0, 0, 0, 0),
630 1);
631 if (retval!=ERROR_OK) goto done;
632
633 /* read CCSIDR*/
634 /* MRC P15,1,<RT>,C0, C0,0 ;on cortex A9 read CCSIDR */
635 /* [2:0] line size 001 eight word per line */
636 /* [27:13] NumSet 0x7f 16KB, 0xff 32Kbytes, 0x1ff 64Kbytes */
637 retval = dpm->instr_read_data_r0(dpm,
638 ARMV4_5_MRC(15, 1, 0, 0, 0, 0),
639 &cache_i_reg);
640 if (retval!=ERROR_OK) goto done;
641
642 /* select data cache*/
643 retval = dpm->instr_write_data_r0(dpm,
644 ARMV4_5_MRC(15, 2, 0, 0, 0, 0),
645 0);
646 if (retval!=ERROR_OK) goto done;
647
648 retval = dpm->instr_read_data_r0(dpm,
649 ARMV4_5_MRC(15, 1, 0, 0, 0, 0),
650 &cache_d_reg);
651 if (retval!=ERROR_OK) goto done;
652
653 /* restore selected cache */
654 dpm->instr_write_data_r0(dpm,
655 ARMV4_5_MRC(15, 2, 0, 0, 0, 0),
656 cache_selected);
657
658 if (retval != ERROR_OK) goto done;
659 dpm->finish(dpm);
660
661 // put fake type
662 cache->d_u_size.linelen = 16 << (cache_d_reg & 0x7);
663 cache->d_u_size.cachesize = (((cache_d_reg >> 13) & 0x7fff)+1)/8;
664 cache->d_u_size.nsets = (cache_d_reg >> 13) & 0x7fff;
665 cache->d_u_size.associativity = ((cache_d_reg >> 3) & 0x3ff) +1;
666 /* compute info for set way operation on cache */
667 cache->d_u_size.index_shift = (cache_d_reg & 0x7) + 4;
668 cache->d_u_size.index = (cache_d_reg >> 13) & 0x7fff;
669 cache->d_u_size.way = ((cache_d_reg >> 3) & 0x3ff);
670 cache->d_u_size.way_shift = cache->d_u_size.way+1;
671 {
672 int i=0;
673 while(((cache->d_u_size.way_shift >> i) & 1)!=1) i++;
674 cache->d_u_size.way_shift = 32-i;
675 }
676 /*LOG_INFO("data cache index %d << %d, way %d << %d",
677 cache->d_u_size.index, cache->d_u_size.index_shift,
678 cache->d_u_size.way, cache->d_u_size.way_shift);
679
680 LOG_INFO("data cache %d bytes %d KBytes asso %d ways",
681 cache->d_u_size.linelen,
682 cache->d_u_size.cachesize,
683 cache->d_u_size.associativity
684 );*/
685 cache->i_size.linelen = 16 << (cache_i_reg & 0x7);
686 cache->i_size.associativity = ((cache_i_reg >> 3) & 0x3ff) +1;
687 cache->i_size.nsets = (cache_i_reg >> 13) & 0x7fff;
688 cache->i_size.cachesize = (((cache_i_reg >> 13) & 0x7fff)+1)/8;
689 /* compute info for set way operation on cache */
690 cache->i_size.index_shift = (cache_i_reg & 0x7) + 4;
691 cache->i_size.index = (cache_i_reg >> 13) & 0x7fff;
692 cache->i_size.way = ((cache_i_reg >> 3) & 0x3ff);
693 cache->i_size.way_shift = cache->i_size.way+1;
694 {
695 int i=0;
696 while(((cache->i_size.way_shift >> i) & 1)!=1) i++;
697 cache->i_size.way_shift = 32-i;
698 }
699 /*LOG_INFO("instruction cache index %d << %d, way %d << %d",
700 cache->i_size.index, cache->i_size.index_shift,
701 cache->i_size.way, cache->i_size.way_shift);
702
703 LOG_INFO("instruction cache %d bytes %d KBytes asso %d ways",
704 cache->i_size.linelen,
705 cache->i_size.cachesize,
706 cache->i_size.associativity
707 );*/
708 /* if no l2 cache initialize l1 data cache flush function function */
709 if (armv7a->armv7a_mmu.armv7a_cache.flush_all_data_cache == NULL)
710 {
711 armv7a->armv7a_mmu.armv7a_cache.display_cache_info =
712 armv7a_handle_inner_cache_info_command;
713 armv7a->armv7a_mmu.armv7a_cache.flush_all_data_cache =
714 armv7a_flush_all_data;
715 }
716 armv7a->armv7a_mmu.armv7a_cache.ctype = 0;
717
718 done:
719 dpm->finish(dpm);
720 arnv7a_read_mpidr(target);
721 return retval;
722
723 }
724
725
726
727 int armv7a_init_arch_info(struct target *target, struct armv7a_common *armv7a)
728 {
729 struct armv7a_common *again;
730 struct arm *armv4_5 = &armv7a->armv4_5_common;
731 armv4_5->arch_info = armv7a;
732 target->arch_info = &armv7a->armv4_5_common;
733 /* target is useful in all function arm v4 5 compatible */
734 armv7a->armv4_5_common.target = target;
735 armv7a->armv4_5_common.common_magic = ARM_COMMON_MAGIC;
736 armv7a->common_magic = ARMV7_COMMON_MAGIC;
737 armv7a->armv7a_mmu.armv7a_cache.l2_cache = NULL;
738 armv7a->armv7a_mmu.armv7a_cache.ctype = -1;
739 armv7a->armv7a_mmu.armv7a_cache.flush_all_data_cache = NULL;
740 armv7a->armv7a_mmu.armv7a_cache.display_cache_info = NULL;
741 again =target_to_armv7a(target);
742 return ERROR_OK;
743 }
744
745 int armv7a_arch_state(struct target *target)
746 {
747 static const char *state[] =
748 {
749 "disabled", "enabled"
750 };
751
752 struct armv7a_common *armv7a = target_to_armv7a(target);
753 struct arm *armv4_5 = &armv7a->armv4_5_common;
754
755 if (armv7a->common_magic != ARMV7_COMMON_MAGIC)
756 {
757 LOG_ERROR("BUG: called for a non-ARMv7A target");
758 return ERROR_INVALID_ARGUMENTS;
759 }
760
761 arm_arch_state(target);
762
763 LOG_USER("MMU: %s, D-Cache: %s, I-Cache: %s",
764 state[armv7a->armv7a_mmu.mmu_enabled],
765 state[armv7a->armv7a_mmu.armv7a_cache.d_u_cache_enabled],
766 state[armv7a->armv7a_mmu.armv7a_cache.i_cache_enabled]);
767
768 if (armv4_5->core_mode == ARM_MODE_ABT)
769 armv7a_show_fault_registers(target);
770 if (target->debug_reason == DBG_REASON_WATCHPOINT)
771 LOG_USER("Watchpoint triggered at PC %#08x",
772 (unsigned) armv7a->dpm.wp_pc);
773
774 return ERROR_OK;
775 }
776
777 static const struct command_registration l2_cache_commands[] = {
778 {
779 .name = "l2x",
780 .handler = handle_cache_l2x,
781 .mode = COMMAND_EXEC,
782 .help = "configure l2x cache "
783 "",
784 .usage = "[base_addr] [number_of_way]",
785 },
786 COMMAND_REGISTRATION_DONE
787
788 };
789
790 const struct command_registration l2x_cache_command_handlers[] = {
791 {
792 .name = "cache_config",
793 .mode = COMMAND_EXEC,
794 .help = "cache configuation for a target",
795 .chain = l2_cache_commands,
796 },
797 COMMAND_REGISTRATION_DONE
798 };
799
800
/* Top-level ARMv7-A command registration: chains the shared DAP commands
 * and the cache_config group defined in this file. */
const struct command_registration armv7a_command_handlers[] = {
	{
		.chain = dap_command_handlers,
	},
	{
		.chain = l2x_cache_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
810

Linking to existing account procedure

If you already have an account and want to add another login method, you MUST first sign in with your existing account, then change the URL to https://review.openocd.org/login/?link to reach this page again — this time it will work for linking. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)