armv7a: make local functions static
[openocd.git] / src / target / armv7a.c
/***************************************************************************
 *   Copyright (C) 2009 by David Brownell                                  *
 *                                                                         *
 *   Copyright (C) ST-Ericsson SA 2011 michel.jaouen@stericsson.com        *
 *                                                                         *
 *   This program is free software; you can redistribute it and/or modify  *
 *   it under the terms of the GNU General Public License as published by  *
 *   the Free Software Foundation; either version 2 of the License, or     *
 *   (at your option) any later version.                                   *
 *                                                                         *
 *   This program is distributed in the hope that it will be useful,       *
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of        *
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the         *
 *   GNU General Public License for more details.                          *
 *                                                                         *
 *   You should have received a copy of the GNU General Public License     *
 *   along with this program; if not, write to the                         *
 *   Free Software Foundation, Inc.,                                       *
 *   59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.              *
 ***************************************************************************/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <helper/replacements.h>

#include "armv7a.h"
#include "arm_disassembler.h"

#include "register.h"
#include <helper/binarybuffer.h>
#include <helper/command.h>

#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "arm_opcodes.h"
#include "target.h"
#include "target_type.h"

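/* Dump the data/prefetch abort status and address registers (DFSR, IFSR,
 * DFAR, IFAR) through the debug port monitor (DPM), so the user can see
 * why the core is sitting in Abort mode. */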
static void armv7a_show_fault_registers(struct target *target)
{
    uint32_t dfsr, ifsr, dfar, ifar;
    struct armv7a_common *armv7a = target_to_armv7a(target);
    struct arm_dpm *dpm = armv7a->armv4_5_common.dpm;
    int retval;

    retval = dpm->prepare(dpm);
    if (retval != ERROR_OK)
        return;

    /* ARMV4_5_MRC(cpnum, op1, r0, CRn, CRm, op2) */

    /* c5/c0 - {data, instruction} fault status registers */
    retval = dpm->instr_read_data_r0(dpm,
            ARMV4_5_MRC(15, 0, 0, 5, 0, 0),
            &dfsr);
    if (retval != ERROR_OK)
        goto done;

    retval = dpm->instr_read_data_r0(dpm,
            ARMV4_5_MRC(15, 0, 0, 5, 0, 1),
            &ifsr);
    if (retval != ERROR_OK)
        goto done;

    /* c6/c0 - {data, instruction} fault address registers */
    retval = dpm->instr_read_data_r0(dpm,
            ARMV4_5_MRC(15, 0, 0, 6, 0, 0),
            &dfar);
    if (retval != ERROR_OK)
        goto done;

    retval = dpm->instr_read_data_r0(dpm,
            ARMV4_5_MRC(15, 0, 0, 6, 0, 2),
            &ifar);
    if (retval != ERROR_OK)
        goto done;

    LOG_USER("Data fault registers        DFSR: %8.8" PRIx32
        ", DFAR: %8.8" PRIx32, dfsr, dfar);
    LOG_USER("Instruction fault registers IFSR: %8.8" PRIx32
        ", IFAR: %8.8" PRIx32, ifsr, ifar);

done:
    /* (void) */ dpm->finish(dpm);
}

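/* Read TTBCR and cache which translation table base register is in use:
 * a non-zero TTBCR.N means TTBR1 covers the upper part of the address
 * space, and ttbr0_mask/os_border record where that split happens. */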
static int armv7a_read_ttbcr(struct target *target)
{
    struct armv7a_common *armv7a = target_to_armv7a(target);
    struct arm_dpm *dpm = armv7a->armv4_5_common.dpm;
    uint32_t ttbcr;
    int retval = dpm->prepare(dpm);
    if (retval != ERROR_OK)
        goto done;
    /* MRC p15, 0, <Rt>, c2, c0, 2 ; read CP15 Translation Table Base Control Register */
    retval = dpm->instr_read_data_r0(dpm,
            ARMV4_5_MRC(15, 0, 0, 2, 0, 2),
            &ttbcr);
    if (retval != ERROR_OK)
        goto done;
    armv7a->armv7a_mmu.ttbr1_used = ((ttbcr & 0x7) != 0) ? 1 : 0;
    armv7a->armv7a_mmu.ttbr0_mask = 7 << (32 - ((ttbcr & 0x7)));
#if 0
    LOG_INFO("ttb1 %s, ttb0_mask %x",
        armv7a->armv7a_mmu.ttbr1_used ? "used" : "not used",
        armv7a->armv7a_mmu.ttbr0_mask);
#endif
    if (armv7a->armv7a_mmu.ttbr1_used == 1)
    {
        LOG_INFO("SVC access above %x",
            (0xffffffff & armv7a->armv7a_mmu.ttbr0_mask));
        armv7a->armv7a_mmu.os_border = 0xffffffff & armv7a->armv7a_mmu.ttbr0_mask;
    }
    else
    {
        /* FIXME: default is a hard-coded Linux kernel/user space border */
        armv7a->armv7a_mmu.os_border = 0xc0000000;
    }
done:
    dpm->finish(dpm);
    return retval;
}

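/* First-level descriptor bits [1:0] select how the walk below proceeds:
 * 0b10 is a section (1 MB mapping resolved directly), 0b01 a coarse page
 * table and 0b11 a fine page table, both of which need a second-level
 * fetch before the page offset from the VA is merged back in. */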
/* Method adapted to Cortex-A: reuses the ARMv4/v5 table walk */
int armv7a_mmu_translate_va(struct target *target, uint32_t va, uint32_t *val)
{
    uint32_t first_lvl_descriptor = 0x0;
    uint32_t second_lvl_descriptor = 0x0;
    int retval;
    struct armv7a_common *armv7a = target_to_armv7a(target);
    struct arm_dpm *dpm = armv7a->armv4_5_common.dpm;
    uint32_t ttb = 0;   /* default ttb0 */
    if (armv7a->armv7a_mmu.ttbr1_used == -1)
        armv7a_read_ttbcr(target);
    if ((armv7a->armv7a_mmu.ttbr1_used) &&
        (va > (0xffffffff & armv7a->armv7a_mmu.ttbr0_mask)))
    {
        /* select ttb 1 */
        ttb = 1;
    }
    retval = dpm->prepare(dpm);
    if (retval != ERROR_OK)
        goto done;

    /* MRC p15, 0, <Rt>, c2, c0, ttb */
    retval = dpm->instr_read_data_r0(dpm,
            ARMV4_5_MRC(15, 0, 0, 2, 0, ttb),
            &ttb);
    if (retval != ERROR_OK)
        goto done;
    retval = armv7a->armv7a_mmu.read_physical_memory(target,
            (ttb & 0xffffc000) | ((va & 0xfff00000) >> 18),
            4, 1, (uint8_t *)&first_lvl_descriptor);
    if (retval != ERROR_OK)
        return retval;
    first_lvl_descriptor = target_buffer_get_u32(target, (uint8_t *)
            &first_lvl_descriptor);
    /* reuse armv4_5 piece of code, specific armv7a changes may come later */
    LOG_DEBUG("1st lvl desc: %8.8" PRIx32 "", first_lvl_descriptor);

    if ((first_lvl_descriptor & 0x3) == 0)
    {
        LOG_ERROR("Address translation failure");
        return ERROR_TARGET_TRANSLATION_FAULT;
    }

    if ((first_lvl_descriptor & 0x3) == 2)
    {
        /* section descriptor */
        *val = (first_lvl_descriptor & 0xfff00000) | (va & 0x000fffff);
        return ERROR_OK;
    }

    if ((first_lvl_descriptor & 0x3) == 1)
    {
        /* coarse page table */
        retval = armv7a->armv7a_mmu.read_physical_memory(target,
                (first_lvl_descriptor & 0xfffffc00) | ((va & 0x000ff000) >> 10),
                4, 1, (uint8_t *)&second_lvl_descriptor);
        if (retval != ERROR_OK)
            return retval;
    }
    else if ((first_lvl_descriptor & 0x3) == 3)
    {
        /* fine page table */
        retval = armv7a->armv7a_mmu.read_physical_memory(target,
                (first_lvl_descriptor & 0xfffff000) | ((va & 0x000ffc00) >> 8),
                4, 1, (uint8_t *)&second_lvl_descriptor);
        if (retval != ERROR_OK)
            return retval;
    }

    second_lvl_descriptor = target_buffer_get_u32(target, (uint8_t *)
            &second_lvl_descriptor);

    LOG_DEBUG("2nd lvl desc: %8.8" PRIx32 "", second_lvl_descriptor);

    if ((second_lvl_descriptor & 0x3) == 0)
    {
        LOG_ERROR("Address translation failure");
        return ERROR_TARGET_TRANSLATION_FAULT;
    }

    if ((second_lvl_descriptor & 0x3) == 1)
    {
        /* large page descriptor */
        *val = (second_lvl_descriptor & 0xffff0000) | (va & 0x0000ffff);
        return ERROR_OK;
    }

    if ((second_lvl_descriptor & 0x3) == 2)
    {
        /* small page descriptor */
        *val = (second_lvl_descriptor & 0xfffff000) | (va & 0x00000fff);
        return ERROR_OK;
    }

    if ((second_lvl_descriptor & 0x3) == 3)
    {
        *val = (second_lvl_descriptor & 0xfffffc00) | (va & 0x000003ff);
        return ERROR_OK;
    }

    /* should not happen */
    LOG_ERROR("Address translation failure");
    return ERROR_TARGET_TRANSLATION_FAULT;

done:
    return retval;
}

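/* Here the translation is done by the core itself: the VA is written to the
 * CP15 VA-to-PA translation operation (c7, c8, 0, i.e. ATS1CPR) and the
 * result is read back from the Physical Address Register (c7, c4, 0), so
 * the MMU must be enabled for the answer to be meaningful. */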
/* V7 method VA TO PA */
int armv7a_mmu_translate_va_pa(struct target *target, uint32_t va,
    uint32_t *val, int meminfo)
{
    int retval = ERROR_FAIL;
    struct armv7a_common *armv7a = target_to_armv7a(target);
    struct arm_dpm *dpm = armv7a->armv4_5_common.dpm;
    uint32_t virt = va & ~0xfff;
    uint32_t NOS, NS, INNER, OUTER;
    *val = 0xdeadbeef;
    retval = dpm->prepare(dpm);
    if (retval != ERROR_OK)
        goto done;
    /* the MMU must be enabled in order to get a correct translation */
    /* use the VA to PA CP15 register for conversion */
    retval = dpm->instr_write_data_r0(dpm,
            ARMV4_5_MCR(15, 0, 0, 7, 8, 0),
            virt);
    if (retval != ERROR_OK)
        goto done;
    retval = dpm->instr_read_data_r0(dpm,
            ARMV4_5_MRC(15, 0, 0, 7, 4, 0),
            val);
    /* decode memory attributes */
    NOS = (*val >> 10) & 1;     /* Not Outer Shareable */
    NS = (*val >> 9) & 1;       /* Non-Secure */
    INNER = (*val >> 4) & 0x7;
    OUTER = (*val >> 2) & 0x3;

    if (retval != ERROR_OK)
        goto done;
    *val = (*val & ~0xfff) + (va & 0xfff);
    if (*val == va)
        LOG_WARNING("virt = phys: MMU disabled");
    if (meminfo)
    {
        LOG_INFO("%x : %x %s outer shareable %s secured",
            va, *val,
            NOS == 1 ? "not" : " ",
            NS == 1 ? "not" : "");
        switch (OUTER) {
            case 0: LOG_INFO("outer: Non-Cacheable");
                break;
            case 1: LOG_INFO("outer: Write-Back, Write-Allocate");
                break;
            case 2: LOG_INFO("outer: Write-Through, No Write-Allocate");
                break;
            case 3: LOG_INFO("outer: Write-Back, no Write-Allocate");
                break;
        }
        switch (INNER) {
            case 0: LOG_INFO("inner: Non-Cacheable");
                break;
            case 1: LOG_INFO("inner: Strongly-ordered");
                break;
            case 3: LOG_INFO("inner: Device");
                break;
            case 5: LOG_INFO("inner: Write-Back, Write-Allocate");
                break;
            case 6: LOG_INFO("inner: Write-Through");
                break;
            case 7: LOG_INFO("inner: Write-Back, no Write-Allocate");
                break;
            default: LOG_INFO("inner: %x ???", INNER);
        }
    }

done:
    dpm->finish(dpm);

    return retval;
}

static int armv7a_handle_inner_cache_info_command(struct command_context *cmd_ctx,
    struct armv7a_cache_common *armv7a_cache)
{
    if (armv7a_cache->ctype == -1)
    {
        command_print(cmd_ctx, "cache not yet identified");
        return ERROR_OK;
    }

    command_print(cmd_ctx,
        "D-Cache: linelen %i, associativity %i, nsets %i, cachesize %d KBytes",
        armv7a_cache->d_u_size.linelen,
        armv7a_cache->d_u_size.associativity,
        armv7a_cache->d_u_size.nsets,
        armv7a_cache->d_u_size.cachesize);

    command_print(cmd_ctx,
        "I-Cache: linelen %i, associativity %i, nsets %i, cachesize %d KBytes",
        armv7a_cache->i_size.linelen,
        armv7a_cache->i_size.associativity,
        armv7a_cache->i_size.nsets,
        armv7a_cache->i_size.cachesize);

    return ERROR_OK;
}

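/* Clean and invalidate the whole L1 data cache by set/way: for every
 * set/way combination derived from the geometry recorded by
 * armv7a_identify_cache(), issue a DCCISW (c7, c14, 2) operation. */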
static int _armv7a_flush_all_data(struct target *target)
{
    struct armv7a_common *armv7a = target_to_armv7a(target);
    struct arm_dpm *dpm = armv7a->armv4_5_common.dpm;
    struct armv7a_cachesize *d_u_size =
        &(armv7a->armv7a_mmu.armv7a_cache.d_u_size);
    int32_t c_way, c_index = d_u_size->index;
    int retval;
    /* check that the data cache was enabled when the target halted */
    if (!armv7a->armv7a_mmu.armv7a_cache.d_u_cache_enabled)
    {
        LOG_INFO("flush not performed: cache not enabled at target halt");
        return ERROR_OK;
    }
    retval = dpm->prepare(dpm);
    if (retval != ERROR_OK)
        goto done;
    do {
        c_way = d_u_size->way;
        do {
            uint32_t value = (c_index << d_u_size->index_shift)
                | (c_way << d_u_size->way_shift);
            /* DCCISW */
            //LOG_INFO("%d %d %x", c_way, c_index, value);
            retval = dpm->instr_write_data_r0(dpm,
                    ARMV4_5_MCR(15, 0, 0, 7, 14, 2),
                    value);
            if (retval != ERROR_OK)
                goto done;
            c_way -= 1;
        } while (c_way >= 0);
        c_index -= 1;
    } while (c_index >= 0);
    dpm->finish(dpm);
    return retval;
done:
    LOG_ERROR("flush failed");
    dpm->finish(dpm);
    return retval;
}

static int armv7a_flush_all_data(struct target *target)
{
    int retval = ERROR_FAIL;
    /* check that armv7a_cache has been correctly identified */
    struct armv7a_common *armv7a = target_to_armv7a(target);
    if (armv7a->armv7a_mmu.armv7a_cache.ctype == -1)
    {
        LOG_ERROR("trying to flush un-identified cache");
        return retval;
    }

    if (target->smp)
    {
        /* check whether all the other targets have been flushed,
         * so that level 2 can be flushed as well */
        struct target_list *head;
        struct target *curr;
        head = target->head;
        while (head != (struct target_list *)NULL)
        {
            curr = head->target;
            if (curr->state == TARGET_HALTED)
            {
                LOG_INFO("Wait flushing data l1 on core %d", curr->coreid);
                retval = _armv7a_flush_all_data(curr);
            }
            head = head->next;
        }
    }
    else
        retval = _armv7a_flush_all_data(target);
    return retval;
}


/* L2 cache handling is not specific to ARMv7-A; it should eventually move to a dedicated file */
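/* After flushing L1 by set/way, write the way mask to the outer cache
 * controller's clean-and-invalidate-by-way register (offset 0x7FC, as on a
 * PL310/L2C-310), using the base address supplied by the cache_config
 * command. */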
static int armv7a_l2x_flush_all_data(struct target *target)
{

#define L2X0_CLEAN_INV_WAY      0x7FC
    int retval = ERROR_FAIL;
    struct armv7a_common *armv7a = target_to_armv7a(target);
    struct armv7a_l2x_cache *l2x_cache = (struct armv7a_l2x_cache *)
        (armv7a->armv7a_mmu.armv7a_cache.l2_cache);
    uint32_t base = l2x_cache->base;
    uint32_t l2_way = l2x_cache->way;
    uint32_t l2_way_val = (1 << l2_way) - 1;
    retval = armv7a_flush_all_data(target);
    if (retval != ERROR_OK)
        return retval;
    retval = target->type->write_phys_memory(target,
            (uint32_t)(base + (uint32_t)L2X0_CLEAN_INV_WAY),
            (uint32_t)4,
            (uint32_t)1,
            (uint8_t *)&l2_way_val);
    return retval;
}

static int armv7a_handle_l2x_cache_info_command(struct command_context *cmd_ctx,
    struct armv7a_cache_common *armv7a_cache)
{

    struct armv7a_l2x_cache *l2x_cache = (struct armv7a_l2x_cache *)
        (armv7a_cache->l2_cache);

    if (armv7a_cache->ctype == -1)
    {
        command_print(cmd_ctx, "cache not yet identified");
        return ERROR_OK;
    }

    command_print(cmd_ctx,
        "L1 D-Cache: linelen %i, associativity %i, nsets %i, cachesize %d KBytes",
        armv7a_cache->d_u_size.linelen,
        armv7a_cache->d_u_size.associativity,
        armv7a_cache->d_u_size.nsets,
        armv7a_cache->d_u_size.cachesize);

    command_print(cmd_ctx,
        "L1 I-Cache: linelen %i, associativity %i, nsets %i, cachesize %d KBytes",
        armv7a_cache->i_size.linelen,
        armv7a_cache->i_size.associativity,
        armv7a_cache->i_size.nsets,
        armv7a_cache->i_size.cachesize);
    command_print(cmd_ctx, "L2 unified cache Base Address 0x%x, %d ways",
        l2x_cache->base, l2x_cache->way);


    return ERROR_OK;
}


static int armv7a_l2x_cache_init(struct target *target, uint32_t base, uint32_t way)
{
    struct armv7a_l2x_cache *l2x_cache;
    struct target_list *head = target->head;
    struct target *curr;

    struct armv7a_common *armv7a = target_to_armv7a(target);
    if (armv7a == NULL)
        LOG_ERROR("not an armv7a target");
    l2x_cache = calloc(1, sizeof(struct armv7a_l2x_cache));
    l2x_cache->base = base;
    l2x_cache->way = way;
    /*LOG_INFO("cache l2 initialized base %x way %d",
        l2x_cache->base, l2x_cache->way);*/
    if (armv7a->armv7a_mmu.armv7a_cache.l2_cache)
    {
        LOG_INFO("cache l2 already initialized");
    }
    armv7a->armv7a_mmu.armv7a_cache.l2_cache = (void *) l2x_cache;
    /* initialize l1 / l2x cache functions */
    armv7a->armv7a_mmu.armv7a_cache.flush_all_data_cache
        = armv7a_l2x_flush_all_data;
    armv7a->armv7a_mmu.armv7a_cache.display_cache_info =
        armv7a_handle_l2x_cache_info_command;
    /* initialize all targets in this cluster (SMP targets) */
    /* the L2 cache must be configured after the smp declaration */
    while (head != (struct target_list *)NULL)
    {
        curr = head->target;
        if (curr != target)
        {
            armv7a = target_to_armv7a(curr);
            if (armv7a->armv7a_mmu.armv7a_cache.l2_cache)
            {
                LOG_ERROR("smp target: cache l2 already initialized");
            }
            armv7a->armv7a_mmu.armv7a_cache.l2_cache = (void *) l2x_cache;
            armv7a->armv7a_mmu.armv7a_cache.flush_all_data_cache =
                armv7a_l2x_flush_all_data;
            armv7a->armv7a_mmu.armv7a_cache.display_cache_info =
                armv7a_handle_l2x_cache_info_command;
        }
        head = head->next;
    }
    return JIM_OK;
}

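/* Example configuration script usage (the base address and way count below
 * are board-specific placeholders, and the exact command prefix depends on
 * how armv7a_command_handlers is chained by the target driver):
 *   cache_config l2x 0x48242000 8
 */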
COMMAND_HANDLER(handle_cache_l2x)
{
    struct target *target = get_current_target(CMD_CTX);
    uint32_t base, way;
    switch (CMD_ARGC) {
        case 0:
            return ERROR_COMMAND_SYNTAX_ERROR;
            break;
        case 2:
            //command_print(CMD_CTX, "%s %s", CMD_ARGV[0], CMD_ARGV[1]);

            COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], base);
            COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], way);

            /* AP address is in bits 31:24 of DP_SELECT */
            armv7a_l2x_cache_init(target, base, way);
            break;
        default:
            return ERROR_COMMAND_SYNTAX_ERROR;
    }
    return ERROR_OK;
}


int armv7a_handle_cache_info_command(struct command_context *cmd_ctx,
    struct armv7a_cache_common *armv7a_cache)
{
    if (armv7a_cache->ctype == -1)
    {
        command_print(cmd_ctx, "cache not yet identified");
        return ERROR_OK;
    }

    if (armv7a_cache->display_cache_info)
        armv7a_cache->display_cache_info(cmd_ctx, armv7a_cache);
    return ERROR_OK;
}

/* retrieve the core ID and cluster ID from MPIDR */
static int armv7a_read_mpidr(struct target *target)
{
    int retval = ERROR_FAIL;
    struct armv7a_common *armv7a = target_to_armv7a(target);
    struct arm_dpm *dpm = armv7a->armv4_5_common.dpm;
    uint32_t mpidr;
    retval = dpm->prepare(dpm);
    if (retval != ERROR_OK)
        goto done;
    /* MRC p15, 0, <Rd>, c0, c0, 5; read Multiprocessor ID register */

    retval = dpm->instr_read_data_r0(dpm,
            ARMV4_5_MRC(15, 0, 0, 0, 0, 5),
            &mpidr);
    if (retval != ERROR_OK)
        goto done;
    if (mpidr & 1 << 31)
    {
        armv7a->multi_processor_system = (mpidr >> 30) & 1;
        armv7a->cluster_id = (mpidr >> 8) & 0xf;
        armv7a->cpu_id = mpidr & 0x3;
        LOG_INFO("%s cluster %x core %x %s", target->cmd_name,
            armv7a->cluster_id,
            armv7a->cpu_id,
            armv7a->multi_processor_system == 0 ? "multi core" : "mono core");
    }
    else
        LOG_ERROR("mpidr not in multiprocessor format");

done:
    dpm->finish(dpm);
    return retval;
}

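/* Identify the L1 caches through CLIDR/CSSELR/CCSIDR and record the
 * set/way geometry later used by the flush-by-set/way helpers; TTBCR and
 * MPIDR are read as a side effect. */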
int armv7a_identify_cache(struct target *target)
{
    /* read cache descriptor */
    int retval = ERROR_FAIL;
    struct armv7a_common *armv7a = target_to_armv7a(target);
    struct arm_dpm *dpm = armv7a->armv4_5_common.dpm;
    uint32_t cache_selected, clidr;
    uint32_t cache_i_reg, cache_d_reg;
    struct armv7a_cache_common *cache = &(armv7a->armv7a_mmu.armv7a_cache);
    armv7a_read_ttbcr(target);
    retval = dpm->prepare(dpm);

    if (retval != ERROR_OK)
        goto done;
    /* retrieve CLIDR */
    /* mrc p15, 1, r0, c0, c0, 1   @ read clidr */
    retval = dpm->instr_read_data_r0(dpm,
            ARMV4_5_MRC(15, 1, 0, 0, 0, 1),
            &clidr);
    if (retval != ERROR_OK)
        goto done;
    clidr = (clidr & 0x7000000) >> 23;
    LOG_INFO("number of cache levels %d", clidr / 2);
    if ((clidr / 2) > 1)
    {
        /* FIXME: not supported; an L2 cache is present on Cortex-A8
         * and later (e.g. Cortex-A7, A15) */
        LOG_ERROR("cache l2 present: not supported");
    }
    /* retrieve selected cache */
    /* MRC p15, 2, <Rd>, c0, c0, 0; read CSSELR */
    retval = dpm->instr_read_data_r0(dpm,
            ARMV4_5_MRC(15, 2, 0, 0, 0, 0),
            &cache_selected);
    if (retval != ERROR_OK)
        goto done;

    retval = armv7a->armv4_5_common.mrc(target, 15,
            2, 0,   /* op1, op2 */
            0, 0,   /* CRn, CRm */
            &cache_selected);
    /* select instruction cache */
    /* MCR p15, 2, <Rd>, c0, c0, 0; write CSSELR */
    /* [0] : 1 instruction cache selection, 0 data cache selection */
    retval = dpm->instr_write_data_r0(dpm,
            ARMV4_5_MRC(15, 2, 0, 0, 0, 0),
            1);
    if (retval != ERROR_OK)
        goto done;

    /* read CCSIDR */
    /* MRC p15, 1, <Rt>, c0, c0, 0; on Cortex-A9 read CCSIDR */
    /* [2:0]  line size: 001 means eight words per line */
    /* [27:13] NumSets: 0x7f 16 KB, 0xff 32 KB, 0x1ff 64 KB */
    retval = dpm->instr_read_data_r0(dpm,
            ARMV4_5_MRC(15, 1, 0, 0, 0, 0),
            &cache_i_reg);
    if (retval != ERROR_OK)
        goto done;

    /* select data cache */
    retval = dpm->instr_write_data_r0(dpm,
            ARMV4_5_MRC(15, 2, 0, 0, 0, 0),
            0);
    if (retval != ERROR_OK)
        goto done;

    retval = dpm->instr_read_data_r0(dpm,
            ARMV4_5_MRC(15, 1, 0, 0, 0, 0),
            &cache_d_reg);
    if (retval != ERROR_OK)
        goto done;

    /* restore selected cache */
    dpm->instr_write_data_r0(dpm,
            ARMV4_5_MRC(15, 2, 0, 0, 0, 0),
            cache_selected);

    if (retval != ERROR_OK)
        goto done;
    dpm->finish(dpm);

    /* put fake type */
    cache->d_u_size.linelen = 16 << (cache_d_reg & 0x7);
    cache->d_u_size.cachesize = (((cache_d_reg >> 13) & 0x7fff) + 1) / 8;
    cache->d_u_size.nsets = (cache_d_reg >> 13) & 0x7fff;
    cache->d_u_size.associativity = ((cache_d_reg >> 3) & 0x3ff) + 1;
    /* compute info for set/way operation on cache */
    cache->d_u_size.index_shift = (cache_d_reg & 0x7) + 4;
    cache->d_u_size.index = (cache_d_reg >> 13) & 0x7fff;
    cache->d_u_size.way = ((cache_d_reg >> 3) & 0x3ff);
    cache->d_u_size.way_shift = cache->d_u_size.way + 1;
    {
        int i = 0;
        while (((cache->d_u_size.way_shift >> i) & 1) != 1)
            i++;
        cache->d_u_size.way_shift = 32 - i;
    }
    /*LOG_INFO("data cache index %d << %d, way %d << %d",
        cache->d_u_size.index, cache->d_u_size.index_shift,
        cache->d_u_size.way, cache->d_u_size.way_shift);

    LOG_INFO("data cache %d bytes %d KBytes asso %d ways",
        cache->d_u_size.linelen,
        cache->d_u_size.cachesize,
        cache->d_u_size.associativity
        );*/
    cache->i_size.linelen = 16 << (cache_i_reg & 0x7);
    cache->i_size.associativity = ((cache_i_reg >> 3) & 0x3ff) + 1;
    cache->i_size.nsets = (cache_i_reg >> 13) & 0x7fff;
    cache->i_size.cachesize = (((cache_i_reg >> 13) & 0x7fff) + 1) / 8;
    /* compute info for set/way operation on cache */
    cache->i_size.index_shift = (cache_i_reg & 0x7) + 4;
    cache->i_size.index = (cache_i_reg >> 13) & 0x7fff;
    cache->i_size.way = ((cache_i_reg >> 3) & 0x3ff);
    cache->i_size.way_shift = cache->i_size.way + 1;
    {
        int i = 0;
        while (((cache->i_size.way_shift >> i) & 1) != 1)
            i++;
        cache->i_size.way_shift = 32 - i;
    }
    /*LOG_INFO("instruction cache index %d << %d, way %d << %d",
        cache->i_size.index, cache->i_size.index_shift,
        cache->i_size.way, cache->i_size.way_shift);

    LOG_INFO("instruction cache %d bytes %d KBytes asso %d ways",
        cache->i_size.linelen,
        cache->i_size.cachesize,
        cache->i_size.associativity
        );*/
    /* if there is no L2 cache, initialize the L1 data cache flush function */
    if (armv7a->armv7a_mmu.armv7a_cache.flush_all_data_cache == NULL)
    {
        armv7a->armv7a_mmu.armv7a_cache.display_cache_info =
            armv7a_handle_inner_cache_info_command;
        armv7a->armv7a_mmu.armv7a_cache.flush_all_data_cache =
            armv7a_flush_all_data;
    }
    armv7a->armv7a_mmu.armv7a_cache.ctype = 0;

done:
    dpm->finish(dpm);
    armv7a_read_mpidr(target);
    return retval;
}


int armv7a_init_arch_info(struct target *target, struct armv7a_common *armv7a)
{
    struct arm *armv4_5 = &armv7a->armv4_5_common;
    armv4_5->arch_info = armv7a;
    target->arch_info = &armv7a->armv4_5_common;
    /* target is useful in all ARMv4/5-compatible functions */
    armv7a->armv4_5_common.target = target;
    armv7a->armv4_5_common.common_magic = ARM_COMMON_MAGIC;
    armv7a->common_magic = ARMV7_COMMON_MAGIC;
    armv7a->armv7a_mmu.armv7a_cache.l2_cache = NULL;
    armv7a->armv7a_mmu.armv7a_cache.ctype = -1;
    armv7a->armv7a_mmu.armv7a_cache.flush_all_data_cache = NULL;
    armv7a->armv7a_mmu.armv7a_cache.display_cache_info = NULL;
    return ERROR_OK;
}

int armv7a_arch_state(struct target *target)
{
    static const char *state[] =
    {
        "disabled", "enabled"
    };

    struct armv7a_common *armv7a = target_to_armv7a(target);
    struct arm *armv4_5 = &armv7a->armv4_5_common;

    if (armv7a->common_magic != ARMV7_COMMON_MAGIC)
    {
        LOG_ERROR("BUG: called for a non-ARMv7A target");
        return ERROR_INVALID_ARGUMENTS;
    }

    arm_arch_state(target);

    LOG_USER("MMU: %s, D-Cache: %s, I-Cache: %s",
        state[armv7a->armv7a_mmu.mmu_enabled],
        state[armv7a->armv7a_mmu.armv7a_cache.d_u_cache_enabled],
        state[armv7a->armv7a_mmu.armv7a_cache.i_cache_enabled]);

    if (armv4_5->core_mode == ARM_MODE_ABT)
        armv7a_show_fault_registers(target);
    if (target->debug_reason == DBG_REASON_WATCHPOINT)
        LOG_USER("Watchpoint triggered at PC %#08x",
            (unsigned) armv7a->dpm.wp_pc);

    return ERROR_OK;
}

static const struct command_registration l2_cache_commands[] = {
    {
        .name = "l2x",
        .handler = handle_cache_l2x,
        .mode = COMMAND_EXEC,
        .help = "configure l2x cache",
        .usage = "[base_addr] [number_of_way]",
    },
    COMMAND_REGISTRATION_DONE
};

const struct command_registration l2x_cache_command_handlers[] = {
    {
        .name = "cache_config",
        .mode = COMMAND_EXEC,
        .help = "cache configuration for a target",
        .chain = l2_cache_commands,
    },
    COMMAND_REGISTRATION_DONE
};


const struct command_registration armv7a_command_handlers[] = {
    {
        .chain = dap_command_handlers,
    },
    {
        .chain = l2x_cache_command_handlers,
    },
    COMMAND_REGISTRATION_DONE
};
