1 /***************************************************************************
2 * Copyright (C) 2009 by David Brownell *
3 * *
4 * Copyright (C) ST-Ericsson SA 2011 michel.jaouen@stericsson.com *
5 * *
6 * This program is free software; you can redistribute it and/or modify *
7 * it under the terms of the GNU General Public License as published by *
8 * the Free Software Foundation; either version 2 of the License, or *
9 * (at your option) any later version. *
10 * *
11 * This program is distributed in the hope that it will be useful, *
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
14 * GNU General Public License for more details. *
15 * *
16 * You should have received a copy of the GNU General Public License *
17 * along with this program; if not, write to the *
18 * Free Software Foundation, Inc., *
19 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
20 ***************************************************************************/
21
22 #ifdef HAVE_CONFIG_H
23 #include "config.h"
24 #endif
25
26 #include <helper/replacements.h>
27
28 #include "armv7a.h"
29 #include "arm_disassembler.h"
30
31 #include "register.h"
32 #include <helper/binarybuffer.h>
33 #include <helper/command.h>
34
35 #include <stdlib.h>
36 #include <string.h>
37 #include <unistd.h>
38
39 #include "arm_opcodes.h"
40 #include "target.h"
41 #include "target_type.h"
42
43 static void armv7a_show_fault_registers(struct target *target)
44 {
45 uint32_t dfsr, ifsr, dfar, ifar;
46 struct armv7a_common *armv7a = target_to_armv7a(target);
47 struct arm_dpm *dpm = armv7a->arm.dpm;
48 int retval;
49
50 retval = dpm->prepare(dpm);
51 if (retval != ERROR_OK)
52 return;
53
54 /* ARMV4_5_MRC(cpnum, op1, r0, CRn, CRm, op2) */
55
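/* Note: the reads below go through the debug comms channel (instr_read_data_r0)
 * and fetch the ARMv7-A fault syndrome/address registers:
 *   DFSR = c5, c0, 0   IFSR = c5, c0, 1
 *   DFAR = c6, c0, 0   IFAR = c6, c0, 2 */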
56 /* c5/c0 - {data, instruction} fault status registers */
57 retval = dpm->instr_read_data_r0(dpm,
58 ARMV4_5_MRC(15, 0, 0, 5, 0, 0),
59 &dfsr);
60 if (retval != ERROR_OK)
61 goto done;
62
63 retval = dpm->instr_read_data_r0(dpm,
64 ARMV4_5_MRC(15, 0, 0, 5, 0, 1),
65 &ifsr);
66 if (retval != ERROR_OK)
67 goto done;
68
69 /* c6/c0 - {data, instruction} fault address registers */
70 retval = dpm->instr_read_data_r0(dpm,
71 ARMV4_5_MRC(15, 0, 0, 6, 0, 0),
72 &dfar);
73 if (retval != ERROR_OK)
74 goto done;
75
76 retval = dpm->instr_read_data_r0(dpm,
77 ARMV4_5_MRC(15, 0, 0, 6, 0, 2),
78 &ifar);
79 if (retval != ERROR_OK)
80 goto done;
81
82 LOG_USER("Data fault registers DFSR: %8.8" PRIx32
83 ", DFAR: %8.8" PRIx32, dfsr, dfar);
84 LOG_USER("Instruction fault registers IFSR: %8.8" PRIx32
85 ", IFAR: %8.8" PRIx32, ifsr, ifar);
86
87 done:
88 /* (void) */ dpm->finish(dpm);
89 }
90
91 static int armv7a_read_ttbcr(struct target *target)
92 {
93 struct armv7a_common *armv7a = target_to_armv7a(target);
94 struct arm_dpm *dpm = armv7a->arm.dpm;
95 uint32_t ttbcr;
96 int retval = dpm->prepare(dpm);
97 if (retval != ERROR_OK)
98 goto done;
99 /* MRC p15,0,<Rt>,c2,c0,2 ; Read CP15 Translation Table Base Control Register*/
100 retval = dpm->instr_read_data_r0(dpm,
101 ARMV4_5_MRC(15, 0, 0, 2, 0, 2),
102 &ttbcr);
103 if (retval != ERROR_OK)
104 goto done;
105 armv7a->armv7a_mmu.ttbr1_used = ((ttbcr & 0x7) != 0) ? 1 : 0;
106 armv7a->armv7a_mmu.ttbr0_mask = 7 << (32 - ((ttbcr & 0x7)));
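/* Note: TTBCR.N (bits [2:0]) splits the VA space between TTBR0 and TTBR1.
 * Architecturally the split point is at 2^(32 - N); e.g. with N = 1, TTBR0
 * translates VAs below 0x80000000 and TTBR1 everything above. N = 0 means
 * TTBR0 is used for the whole address space (ttbr1_used stays 0). */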
107 #if 0
108 LOG_INFO("ttb1 %s ,ttb0_mask %x",
109 armv7a->armv7a_mmu.ttbr1_used ? "used" : "not used",
110 armv7a->armv7a_mmu.ttbr0_mask);
111 #endif
112 if (armv7a->armv7a_mmu.ttbr1_used == 1) {
113 LOG_INFO("SVC access above %x",
114 (0xffffffff & armv7a->armv7a_mmu.ttbr0_mask));
115 armv7a->armv7a_mmu.os_border = 0xffffffff & armv7a->armv7a_mmu.ttbr0_mask;
116 } else {
117 /* FIXME: default to the hard-coded Linux kernel/user border */
118 armv7a->armv7a_mmu.os_border = 0xc0000000;
119 }
120 done:
121 dpm->finish(dpm);
122 return retval;
123 }
124
125
126 /* method adapted to Cortex-A: reuses the ARMv4/v5 translation walk */
127 int armv7a_mmu_translate_va(struct target *target, uint32_t va, uint32_t *val)
128 {
129 uint32_t first_lvl_descriptor = 0x0;
130 uint32_t second_lvl_descriptor = 0x0;
131 int retval;
132 struct armv7a_common *armv7a = target_to_armv7a(target);
133 struct arm_dpm *dpm = armv7a->arm.dpm;
134 uint32_t ttb = 0; /* default ttb0 */
135 if (armv7a->armv7a_mmu.ttbr1_used == -1)
136 armv7a_read_ttbcr(target);
137 if ((armv7a->armv7a_mmu.ttbr1_used) &&
138 (va > (0xffffffff & armv7a->armv7a_mmu.ttbr0_mask))) {
139 /* select ttb 1 */
140 ttb = 1;
141 }
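/* Short-descriptor table walk, reused from the ARMv4/v5 code:
 *  - read TTBR0 (c2, c0, 0) or TTBR1 (c2, c0, 1); bits [31:14] are taken
 *    here as the 16 KB-aligned table base,
 *  - the first-level index is VA[31:20], 4 bytes per entry, hence the
 *    ((va & 0xfff00000) >> 18) offset below,
 *  - descriptor bits [1:0]: 00 fault, 01 coarse page table, 10 section,
 *    11 fine page table (ARMv4/v5 encoding). */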
142 retval = dpm->prepare(dpm);
143 if (retval != ERROR_OK)
144 goto done;
145
146 /* MRC p15,0,<Rt>,c2,c0,ttb */
147 retval = dpm->instr_read_data_r0(dpm,
148 ARMV4_5_MRC(15, 0, 0, 2, 0, ttb),
149 &ttb);
150 if (retval != ERROR_OK)
151 return retval;
152 retval = armv7a->armv7a_mmu.read_physical_memory(target,
153 (ttb & 0xffffc000) | ((va & 0xfff00000) >> 18),
154 4, 1, (uint8_t *)&first_lvl_descriptor);
155 if (retval != ERROR_OK)
156 return retval;
157 first_lvl_descriptor = target_buffer_get_u32(target, (uint8_t *)
158 &first_lvl_descriptor);
159 /* reuse the armv4_5 code; ARMv7-A-specific changes may come later */
160 LOG_DEBUG("1st lvl desc: %8.8" PRIx32 "", first_lvl_descriptor);
161
162 if ((first_lvl_descriptor & 0x3) == 0) {
163 LOG_ERROR("Address translation failure");
164 return ERROR_TARGET_TRANSLATION_FAULT;
165 }
166
167
168 if ((first_lvl_descriptor & 0x3) == 2) {
169 /* section descriptor */
170 *val = (first_lvl_descriptor & 0xfff00000) | (va & 0x000fffff);
171 return ERROR_OK;
172 }
173
174 if ((first_lvl_descriptor & 0x3) == 1) {
175 /* coarse page table */
176 retval = armv7a->armv7a_mmu.read_physical_memory(target,
177 (first_lvl_descriptor & 0xfffffc00) | ((va & 0x000ff000) >> 10),
178 4, 1, (uint8_t *)&second_lvl_descriptor);
179 if (retval != ERROR_OK)
180 return retval;
181 } else if ((first_lvl_descriptor & 0x3) == 3) {
182 /* fine page table */
183 retval = armv7a->armv7a_mmu.read_physical_memory(target,
184 (first_lvl_descriptor & 0xfffff000) | ((va & 0x000ffc00) >> 8),
185 4, 1, (uint8_t *)&second_lvl_descriptor);
186 if (retval != ERROR_OK)
187 return retval;
188 }
189
190 second_lvl_descriptor = target_buffer_get_u32(target, (uint8_t *)
191 &second_lvl_descriptor);
192
193 LOG_DEBUG("2nd lvl desc: %8.8" PRIx32 "", second_lvl_descriptor);
194
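/* Second-level (ARMv4/v5) descriptor types, bits [1:0]:
 * 00 fault, 01 large page (64 KB), 10 small page (4 KB), 11 tiny page (1 KB).
 * The physical address is the descriptor base OR-ed with the matching
 * low-order VA bits, as done below. */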
195 if ((second_lvl_descriptor & 0x3) == 0) {
196 LOG_ERROR("Address translation failure");
197 return ERROR_TARGET_TRANSLATION_FAULT;
198 }
199
200 if ((second_lvl_descriptor & 0x3) == 1) {
201 /* large page descriptor */
202 *val = (second_lvl_descriptor & 0xffff0000) | (va & 0x0000ffff);
203 return ERROR_OK;
204 }
205
206 if ((second_lvl_descriptor & 0x3) == 2) {
207 /* small page descriptor */
208 *val = (second_lvl_descriptor & 0xfffff000) | (va & 0x00000fff);
209 return ERROR_OK;
210 }
211
212 if ((second_lvl_descriptor & 0x3) == 3) {
213 *val = (second_lvl_descriptor & 0xfffffc00) | (va & 0x000003ff);
214 return ERROR_OK;
215 }
216
217 /* should not happen */
218 LOG_ERROR("Address translation failure");
219 return ERROR_TARGET_TRANSLATION_FAULT;
220
221 done:
222 return retval;
223 }
224
225 /* V7 method VA TO PA */
226 int armv7a_mmu_translate_va_pa(struct target *target, uint32_t va,
227 uint32_t *val, int meminfo)
228 {
229 int retval = ERROR_FAIL;
230 struct armv7a_common *armv7a = target_to_armv7a(target);
231 struct arm_dpm *dpm = armv7a->arm.dpm;
232 uint32_t virt = va & ~0xfff;
233 uint32_t NOS, NS, INNER, OUTER;
234 *val = 0xdeadbeef;
235 retval = dpm->prepare(dpm);
236 if (retval != ERROR_OK)
237 goto done;
238 /* the MMU must be enabled in order to get a correct translation;
239 * use the CP15 VA-to-PA translation operations for the conversion */
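/* This uses the CP15 address-translation operations:
 *   MCR p15, 0, <Rt>, c7, c8, 0  (ATS1CPR: stage-1 privileged-read translate)
 *   MRC p15, 0, <Rt>, c7, c4, 0  (PAR: Physical Address Register)
 * In the short-descriptor PAR format, bits [31:12] hold the PA and
 * NOS[10], NS[9], Inner[6:4], Outer[3:2] hold the memory attributes
 * decoded below. */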
240 retval = dpm->instr_write_data_r0(dpm,
241 ARMV4_5_MCR(15, 0, 0, 7, 8, 0),
242 virt);
243 if (retval != ERROR_OK)
244 goto done;
245 retval = dpm->instr_read_data_r0(dpm,
246 ARMV4_5_MRC(15, 0, 0, 7, 4, 0),
247 val);
248 /* decode memory attribute */
249 NOS = (*val >> 10) & 1; /* Not Outer shareable */
250 NS = (*val >> 9) & 1; /* Non secure */
251 INNER = (*val >> 4) & 0x7;
252 OUTER = (*val >> 2) & 0x3;
253
254 if (retval != ERROR_OK)
255 goto done;
256 *val = (*val & ~0xfff) + (va & 0xfff);
257 if (*val == va)
258 LOG_WARNING("virt = phys: the MMU is disabled");
259 if (meminfo) {
260 LOG_INFO("%x : %x %s outer shareable %s secured",
261 va, *val,
262 NOS == 1 ? "not" : " ",
263 NS == 1 ? "not" : "");
264 switch (OUTER) {
265 case 0:
266 LOG_INFO("outer: Non-Cacheable");
267 break;
268 case 1:
269 LOG_INFO("outer: Write-Back, Write-Allocate");
270 break;
271 case 2:
272 LOG_INFO("outer: Write-Through, No Write-Allocate");
273 break;
274 case 3:
275 LOG_INFO("outer: Write-Back, no Write-Allocate");
276 break;
277 }
278 switch (INNER) {
279 case 0:
280 LOG_INFO("inner: Non-Cacheable");
281 break;
282 case 1:
283 LOG_INFO("inner: Strongly-ordered");
284 break;
285 case 3:
286 LOG_INFO("inner: Device");
287 break;
288 case 5:
289 LOG_INFO("inner: Write-Back, Write-Allocate");
290 break;
291 case 6:
292 LOG_INFO("inner: Write-Through");
293 break;
294 case 7:
295 LOG_INFO("inner: Write-Back, no Write-Allocate");
296 break;
297 default:
298 LOG_INFO("inner: %x ???", INNER);
299 }
300 }
301
302 done:
303 dpm->finish(dpm);
304
305 return retval;
306 }
307
308 static int armv7a_handle_inner_cache_info_command(struct command_context *cmd_ctx,
309 struct armv7a_cache_common *armv7a_cache)
310 {
311 if (armv7a_cache->ctype == -1) {
312 command_print(cmd_ctx, "cache not yet identified");
313 return ERROR_OK;
314 }
315
316 command_print(cmd_ctx,
317 "D-Cache: linelen %i, associativity %i, nsets %i, cachesize %d KBytes",
318 armv7a_cache->d_u_size.linelen,
319 armv7a_cache->d_u_size.associativity,
320 armv7a_cache->d_u_size.nsets,
321 armv7a_cache->d_u_size.cachesize);
322
323 command_print(cmd_ctx,
324 "I-Cache: linelen %i, associativity %i, nsets %i, cachesize %d KBytes",
325 armv7a_cache->i_size.linelen,
326 armv7a_cache->i_size.associativity,
327 armv7a_cache->i_size.nsets,
328 armv7a_cache->i_size.cachesize);
329
330 return ERROR_OK;
331 }
332
333 static int _armv7a_flush_all_data(struct target *target)
334 {
335 struct armv7a_common *armv7a = target_to_armv7a(target);
336 struct arm_dpm *dpm = armv7a->arm.dpm;
337 struct armv7a_cachesize *d_u_size =
338 &(armv7a->armv7a_mmu.armv7a_cache.d_u_size);
339 int32_t c_way, c_index = d_u_size->index;
340 int retval;
341 /* check that the data cache was enabled at target halt */
342 if (!armv7a->armv7a_mmu.armv7a_cache.d_u_cache_enabled) {
343 LOG_INFO("flush not performed: cache was not enabled at target halt");
344 return ERROR_OK;
345 }
346 retval = dpm->prepare(dpm);
347 if (retval != ERROR_OK)
348 goto done;
349 do {
350 c_way = d_u_size->way;
351 do {
352 uint32_t value = (c_index << d_u_size->index_shift)
353 | (c_way << d_u_size->way_shift);
354 /* DCCISW */
355 /* LOG_INFO ("%d %d %x",c_way,c_index,value); */
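/* DCCISW operand layout: the Set index sits at bit index_shift (log2 of
 * the line length), the Way index at bit way_shift (top bits), and the
 * cache level field [3:1] is left at 0 here (L1). Illustrative example
 * for a 32 KB, 4-way, 64-byte-line cache: index_shift = 6, way_shift = 30,
 * so set 5 of way 2 encodes as (5 << 6) | (2 << 30). */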
356 retval = dpm->instr_write_data_r0(dpm,
357 ARMV4_5_MCR(15, 0, 0, 7, 14, 2),
358 value);
359 if (retval != ERROR_OK)
360 goto done;
361 c_way -= 1;
362 } while (c_way >= 0);
363 c_index -= 1;
364 } while (c_index >= 0);
365 return retval;
366 done:
367 LOG_ERROR("flush failed");
368 dpm->finish(dpm);
369 return retval;
370 }
371
372 static int armv7a_flush_all_data(struct target *target)
373 {
374 int retval = ERROR_FAIL;
375 /* check that armv7a_cache has been correctly identified */
376 struct armv7a_common *armv7a = target_to_armv7a(target);
377 if (armv7a->armv7a_mmu.armv7a_cache.ctype == -1) {
378 LOG_ERROR("trying to flush an unidentified cache");
379 return retval;
380 }
381
382 if (target->smp) {
383 /* check whether the other targets have already been flushed, so that
384 * level 2 can be flushed as well */
385 struct target_list *head;
386 struct target *curr;
387 head = target->head;
388 while (head != (struct target_list *)NULL) {
389 curr = head->target;
390 if (curr->state == TARGET_HALTED) {
391 LOG_INFO("flushing L1 data cache on core %d", curr->coreid);
392 retval = _armv7a_flush_all_data(curr);
393 }
394 head = head->next;
395 }
396 } else
397 retval = _armv7a_flush_all_data(target);
398 return retval;
399 }
400
401 /* L2 is not specific to armv7a; a separate file is needed */
402 static int armv7a_l2x_flush_all_data(struct target *target)
403 {
404
405 #define L2X0_CLEAN_INV_WAY 0x7FC
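/* Assumes an external PL310/L2C-310-style controller: register offset 0x7FC
 * is "Clean and Invalidate by Way"; writing a bitmask of ways (here
 * (1 << ways) - 1, i.e. all ways) cleans and invalidates the whole L2. */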
406 int retval = ERROR_FAIL;
407 struct armv7a_common *armv7a = target_to_armv7a(target);
408 struct armv7a_l2x_cache *l2x_cache = (struct armv7a_l2x_cache *)
409 (armv7a->armv7a_mmu.armv7a_cache.l2_cache);
410 uint32_t base = l2x_cache->base;
411 uint32_t l2_way = l2x_cache->way;
412 uint32_t l2_way_val = (1 << l2_way) - 1;
413 retval = armv7a_flush_all_data(target);
414 if (retval != ERROR_OK)
415 return retval;
416 retval = target->type->write_phys_memory(target,
417 (uint32_t)(base+(uint32_t)L2X0_CLEAN_INV_WAY),
418 (uint32_t)4,
419 (uint32_t)1,
420 (uint8_t *)&l2_way_val);
421 return retval;
422 }
423
424 static int armv7a_handle_l2x_cache_info_command(struct command_context *cmd_ctx,
425 struct armv7a_cache_common *armv7a_cache)
426 {
427
428 struct armv7a_l2x_cache *l2x_cache = (struct armv7a_l2x_cache *)
429 (armv7a_cache->l2_cache);
430
431 if (armv7a_cache->ctype == -1) {
432 command_print(cmd_ctx, "cache not yet identified");
433 return ERROR_OK;
434 }
435
436 command_print(cmd_ctx,
437 "L1 D-Cache: linelen %i, associativity %i, nsets %i, cachesize %d KBytes",
438 armv7a_cache->d_u_size.linelen,
439 armv7a_cache->d_u_size.associativity,
440 armv7a_cache->d_u_size.nsets,
441 armv7a_cache->d_u_size.cachesize);
442
443 command_print(cmd_ctx,
444 "L1 I-Cache: linelen %i, associativity %i, nsets %i, cachesize %d KBytes",
445 armv7a_cache->i_size.linelen,
446 armv7a_cache->i_size.associativity,
447 armv7a_cache->i_size.nsets,
448 armv7a_cache->i_size.cachesize);
449 command_print(cmd_ctx, "L2 unified cache Base Address 0x%x, %d ways",
450 l2x_cache->base, l2x_cache->way);
451
452
453 return ERROR_OK;
454 }
455
456
457 static int armv7a_l2x_cache_init(struct target *target, uint32_t base, uint32_t way)
458 {
459 struct armv7a_l2x_cache *l2x_cache;
460 struct target_list *head = target->head;
461 struct target *curr;
462
463 struct armv7a_common *armv7a = target_to_armv7a(target);
464 l2x_cache = calloc(1, sizeof(struct armv7a_l2x_cache));
465 l2x_cache->base = base;
466 l2x_cache->way = way;
467 /*LOG_INFO("cache l2 initialized base %x way %d",
468 l2x_cache->base,l2x_cache->way);*/
469 if (armv7a->armv7a_mmu.armv7a_cache.l2_cache)
470 LOG_INFO("cache l2 already initialized\n");
471 armv7a->armv7a_mmu.armv7a_cache.l2_cache = (void *) l2x_cache;
472 /* initialize l1 / l2x cache function */
473 armv7a->armv7a_mmu.armv7a_cache.flush_all_data_cache
474 = armv7a_l2x_flush_all_data;
475 armv7a->armv7a_mmu.armv7a_cache.display_cache_info =
476 armv7a_handle_l2x_cache_info_command;
477 /* initialize all targets in this cluster (SMP targets);
478 * the l2 cache must be configured after the smp declaration */
479 while (head != (struct target_list *)NULL) {
480 curr = head->target;
481 if (curr != target) {
482 armv7a = target_to_armv7a(curr);
483 if (armv7a->armv7a_mmu.armv7a_cache.l2_cache)
484 LOG_ERROR("smp target : cache l2 already initialized\n");
485 armv7a->armv7a_mmu.armv7a_cache.l2_cache = (void *) l2x_cache;
486 armv7a->armv7a_mmu.armv7a_cache.flush_all_data_cache =
487 armv7a_l2x_flush_all_data;
488 armv7a->armv7a_mmu.armv7a_cache.display_cache_info =
489 armv7a_handle_l2x_cache_info_command;
490 }
491 head = head->next;
492 }
493 return JIM_OK;
494 }
495
496 COMMAND_HANDLER(handle_cache_l2x)
497 {
498 struct target *target = get_current_target(CMD_CTX);
499 uint32_t base, way;
500 switch (CMD_ARGC) {
501 case 0:
502 return ERROR_COMMAND_SYNTAX_ERROR;
503 break;
504 case 2:
505 /* command_print(CMD_CTX, "%s %s", CMD_ARGV[0], CMD_ARGV[1]); */
506 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], base);
507 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], way);
508
509 /* configure the external L2 cache controller: base address and number of ways */
510 armv7a_l2x_cache_init(target, base, way);
511 break;
512 default:
513 return ERROR_COMMAND_SYNTAX_ERROR;
514 }
515 return ERROR_OK;
516 }
517
518 int armv7a_handle_cache_info_command(struct command_context *cmd_ctx,
519 struct armv7a_cache_common *armv7a_cache)
520 {
521 if (armv7a_cache->ctype == -1) {
522 command_print(cmd_ctx, "cache not yet identified");
523 return ERROR_OK;
524 }
525
526 if (armv7a_cache->display_cache_info)
527 armv7a_cache->display_cache_info(cmd_ctx, armv7a_cache);
528 return ERROR_OK;
529 }
530
531 /* retrieve the core id and cluster id from MPIDR */
532 static int armv7a_read_mpidr(struct target *target)
533 {
534 int retval = ERROR_FAIL;
535 struct armv7a_common *armv7a = target_to_armv7a(target);
536 struct arm_dpm *dpm = armv7a->arm.dpm;
537 uint32_t mpidr;
538 retval = dpm->prepare(dpm);
539 if (retval != ERROR_OK)
540 goto done;
541 /* MRC p15,0,<Rd>,c0,c0,5; read Multiprocessor ID register*/
542
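/* MPIDR fields (when bit [31] indicates the multiprocessor format):
 * bit [30] U = uniprocessor, Aff1 [15:8] = cluster, Aff0 [7:0] = CPU
 * number within the cluster; only the low bits are kept below. */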
543 retval = dpm->instr_read_data_r0(dpm,
544 ARMV4_5_MRC(15, 0, 0, 0, 0, 5),
545 &mpidr);
546 if (retval != ERROR_OK)
547 goto done;
548 if (mpidr & 1<<31) {
549 armv7a->multi_processor_system = (mpidr >> 30) & 1;
550 armv7a->cluster_id = (mpidr >> 8) & 0xf;
551 armv7a->cpu_id = mpidr & 0x3;
552 LOG_INFO("%s cluster %x core %x %s", target->cmd_name,
553 armv7a->cluster_id,
554 armv7a->cpu_id,
555 armv7a->multi_processor_system == 0 ? "multi core" : "mono core");
556
557 } else
558 LOG_ERROR("mpidr not in multiprocessor format");
559
560 done:
561 dpm->finish(dpm);
562 return retval;
563
564
565 }
566
567 int armv7a_identify_cache(struct target *target)
568 {
569 /* read cache descriptor */
570 int retval = ERROR_FAIL;
571 struct armv7a_common *armv7a = target_to_armv7a(target);
572 struct arm_dpm *dpm = armv7a->arm.dpm;
573 uint32_t cache_selected, clidr;
574 uint32_t cache_i_reg, cache_d_reg;
575 struct armv7a_cache_common *cache = &(armv7a->armv7a_mmu.armv7a_cache);
576 armv7a_read_ttbcr(target);
577 retval = dpm->prepare(dpm);
578
579 if (retval != ERROR_OK)
580 goto done;
581 /* retrieve CLIDR
582 * mrc p15, 1, r0, c0, c0, 1 @ read clidr */
583 retval = dpm->instr_read_data_r0(dpm,
584 ARMV4_5_MRC(15, 1, 0, 0, 0, 1),
585 &clidr);
586 if (retval != ERROR_OK)
587 goto done;
588 clidr = (clidr & 0x7000000) >> 23;
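/* CLIDR bits [26:24] hold LoC (Level of Coherency); the shift by 23 keeps
 * 2 * LoC, hence the divide by two in the checks below. */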
589 LOG_INFO("number of cache levels %d", clidr / 2);
590 if ((clidr / 2) > 1) {
591 /* FIXME: an architecturally visible L2 cache (present in Cortex-A8
592 * and later, e.g. Cortex-A7, A15) is not supported */
593 LOG_ERROR("L2 cache present: not supported");
594 }
595 /* retrieve selected cache
596 * MRC p15, 2,<Rd>, c0, c0, 0; Read CSSELR */
597 retval = dpm->instr_read_data_r0(dpm,
598 ARMV4_5_MRC(15, 2, 0, 0, 0, 0),
599 &cache_selected);
600 if (retval != ERROR_OK)
601 goto done;
602
603 retval = armv7a->arm.mrc(target, 15,
604 2, 0, /* op1, op2 */
605 0, 0, /* CRn, CRm */
606 &cache_selected);
607 if (retval != ERROR_OK)
608 goto done;
609 /* select instruction cache
610 * MCR p15, 2,<Rd>, c0, c0, 0; Write CSSELR
611 * [0] : 1 instruction cache selection , 0 data cache selection */
612 retval = dpm->instr_write_data_r0(dpm,
613 ARMV4_5_MRC(15, 2, 0, 0, 0, 0),
614 1);
615 if (retval != ERROR_OK)
616 goto done;
617
618 /* read CCSIDR
619 * MRC P15,1,<RT>,C0, C0,0 ;on cortex A9 read CCSIDR
620 * [2:0] line size 001 eight word per line
621 * [27:13] NumSet 0x7f 16KB, 0xff 32Kbytes, 0x1ff 64Kbytes */
622 retval = dpm->instr_read_data_r0(dpm,
623 ARMV4_5_MRC(15, 1, 0, 0, 0, 0),
624 &cache_i_reg);
625 if (retval != ERROR_OK)
626 goto done;
627
628 /* select data cache*/
629 retval = dpm->instr_write_data_r0(dpm,
630 ARMV4_5_MRC(15, 2, 0, 0, 0, 0),
631 0);
632 if (retval != ERROR_OK)
633 goto done;
634
635 retval = dpm->instr_read_data_r0(dpm,
636 ARMV4_5_MRC(15, 1, 0, 0, 0, 0),
637 &cache_d_reg);
638 if (retval != ERROR_OK)
639 goto done;
640
641 /* restore selected cache */
642 dpm->instr_write_data_r0(dpm,
643 ARMV4_5_MRC(15, 2, 0, 0, 0, 0),
644 cache_selected);
645
646 if (retval != ERROR_OK)
647 goto done;
648 dpm->finish(dpm);
649
650 /* decode CCSIDR (the cachesize value below is only approximate) */
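/* CCSIDR field layout (ARMv7-A):
 *   [2:0]   LineSize          -> line length = 16 << LineSize bytes
 *   [12:3]  Associativity - 1
 *   [27:13] NumSets - 1
 * index_shift = log2(line length) gives the Set field position used by
 * the set/way cache maintenance operations. */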
651 cache->d_u_size.linelen = 16 << (cache_d_reg & 0x7);
652 cache->d_u_size.cachesize = (((cache_d_reg >> 13) & 0x7fff)+1)/8;
653 cache->d_u_size.nsets = (cache_d_reg >> 13) & 0x7fff;
654 cache->d_u_size.associativity = ((cache_d_reg >> 3) & 0x3ff) + 1;
655 /* compute info for set way operation on cache */
656 cache->d_u_size.index_shift = (cache_d_reg & 0x7) + 4;
657 cache->d_u_size.index = (cache_d_reg >> 13) & 0x7fff;
658 cache->d_u_size.way = ((cache_d_reg >> 3) & 0x3ff);
659 cache->d_u_size.way_shift = cache->d_u_size.way + 1;
660 {
661 int i = 0;
662 while (((cache->d_u_size.way_shift >> i) & 1) != 1)
663 i++;
664 cache->d_u_size.way_shift = 32-i;
665 }
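/* The loop above finds log2(way + 1), i.e. log2 of the way count
 * (assumed here to be a power of two), so way_shift = 32 - log2(ways)
 * is the bit position of the Way field in the set/way operand,
 * e.g. bits [31:30] for a 4-way cache. */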
666 #if 0
667 LOG_INFO("data cache index %d << %d, way %d << %d",
668 cache->d_u_size.index, cache->d_u_size.index_shift,
669 cache->d_u_size.way,
670 cache->d_u_size.way_shift);
671
672 LOG_INFO("data cache %d bytes %d KBytes asso %d ways",
673 cache->d_u_size.linelen,
674 cache->d_u_size.cachesize,
675 cache->d_u_size.associativity);
676 #endif
677 cache->i_size.linelen = 16 << (cache_i_reg & 0x7);
678 cache->i_size.associativity = ((cache_i_reg >> 3) & 0x3ff) + 1;
679 cache->i_size.nsets = (cache_i_reg >> 13) & 0x7fff;
680 cache->i_size.cachesize = (((cache_i_reg >> 13) & 0x7fff)+1)/8;
681 /* compute info for set way operation on cache */
682 cache->i_size.index_shift = (cache_i_reg & 0x7) + 4;
683 cache->i_size.index = (cache_i_reg >> 13) & 0x7fff;
684 cache->i_size.way = ((cache_i_reg >> 3) & 0x3ff);
685 cache->i_size.way_shift = cache->i_size.way + 1;
686 {
687 int i = 0;
688 while (((cache->i_size.way_shift >> i) & 1) != 1)
689 i++;
690 cache->i_size.way_shift = 32-i;
691 }
692 #if 0
693 LOG_INFO("instruction cache index %d << %d, way %d << %d",
694 cache->i_size.index, cache->i_size.index_shift,
695 cache->i_size.way, cache->i_size.way_shift);
696
697 LOG_INFO("instruction cache %d bytes %d KBytes asso %d ways",
698 cache->i_size.linelen,
699 cache->i_size.cachesize,
700 cache->i_size.associativity);
701 #endif
702 /* if there is no L2 cache, initialize the L1 data cache flush function */
703 if (armv7a->armv7a_mmu.armv7a_cache.flush_all_data_cache == NULL) {
704 armv7a->armv7a_mmu.armv7a_cache.display_cache_info =
705 armv7a_handle_inner_cache_info_command;
706 armv7a->armv7a_mmu.armv7a_cache.flush_all_data_cache =
707 armv7a_flush_all_data;
708 }
709 armv7a->armv7a_mmu.armv7a_cache.ctype = 0;
710
711 done:
712 dpm->finish(dpm);
713 armv7a_read_mpidr(target);
714 return retval;
715
716 }
717
718 int armv7a_init_arch_info(struct target *target, struct armv7a_common *armv7a)
719 {
720 struct arm *arm = &armv7a->arm;
721 arm->arch_info = armv7a;
722 target->arch_info = &armv7a->arm;
723 /* target is used by all ARMv4/5-compatible functions */
724 armv7a->arm.target = target;
725 armv7a->arm.common_magic = ARM_COMMON_MAGIC;
726 armv7a->common_magic = ARMV7_COMMON_MAGIC;
727 armv7a->armv7a_mmu.armv7a_cache.l2_cache = NULL;
728 armv7a->armv7a_mmu.armv7a_cache.ctype = -1;
729 armv7a->armv7a_mmu.armv7a_cache.flush_all_data_cache = NULL;
730 armv7a->armv7a_mmu.armv7a_cache.display_cache_info = NULL;
731 return ERROR_OK;
732 }
733
734 int armv7a_arch_state(struct target *target)
735 {
736 static const char *state[] = {
737 "disabled", "enabled"
738 };
739
740 struct armv7a_common *armv7a = target_to_armv7a(target);
741 struct arm *arm = &armv7a->arm;
742
743 if (armv7a->common_magic != ARMV7_COMMON_MAGIC) {
744 LOG_ERROR("BUG: called for a non-ARMv7A target");
745 return ERROR_COMMAND_SYNTAX_ERROR;
746 }
747
748 arm_arch_state(target);
749
750 LOG_USER("MMU: %s, D-Cache: %s, I-Cache: %s",
751 state[armv7a->armv7a_mmu.mmu_enabled],
752 state[armv7a->armv7a_mmu.armv7a_cache.d_u_cache_enabled],
753 state[armv7a->armv7a_mmu.armv7a_cache.i_cache_enabled]);
754
755 if (arm->core_mode == ARM_MODE_ABT)
756 armv7a_show_fault_registers(target);
757 if (target->debug_reason == DBG_REASON_WATCHPOINT)
758 LOG_USER("Watchpoint triggered at PC %#08x",
759 (unsigned) armv7a->dpm.wp_pc);
760
761 return ERROR_OK;
762 }
763
764 static const struct command_registration l2_cache_commands[] = {
765 {
766 .name = "l2x",
767 .handler = handle_cache_l2x,
768 .mode = COMMAND_EXEC,
769 .help = "configure l2x cache "
770 "",
771 .usage = "[base_addr] [number_of_way]",
772 },
773 COMMAND_REGISTRATION_DONE
774
775 };
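/* Illustrative use (the exact command prefix depends on the target type that
 * registers these handlers, and the values here are made up):
 *   <target_name> cache_config l2x 0x1e00a000 8
 * i.e. the L2 controller base address followed by the number of ways. */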
776
777 const struct command_registration l2x_cache_command_handlers[] = {
778 {
779 .name = "cache_config",
780 .mode = COMMAND_EXEC,
781 .help = "cache configuration for a target",
782 .usage = "",
783 .chain = l2_cache_commands,
784 },
785 COMMAND_REGISTRATION_DONE
786 };
787
788
789 const struct command_registration armv7a_command_handlers[] = {
790 {
791 .chain = dap_command_handlers,
792 },
793 {
794 .chain = l2x_cache_command_handlers,
795 },
796 COMMAND_REGISTRATION_DONE
797 };
