#define XT_INS_RFWU(X) (XT_ISBE(X) ? 0x005300 << 8 : 0x003500)
#define XT_INS_RFWO_RFWU_MASK(X) (XT_ISBE(X) ? 0xFFFFFF << 8 : 0xFFFFFF)
+/* Read Protection TLB Entry Info */
+/* PPTLB a_T, a_S: probe the protection TLB for the address in register S and
+ * deposit the matching entry info (including access rights) into register T.
+ */
+#define XT_INS_PPTLB(X, S, T) _XT_INS_FORMAT_RRR(X, 0x500000, ((S) << 4) | (T), 0xD)
+
+/* Shift and mask to extract the 4-bit access-rights field from a PPTLB
+ * result word (see enum xtensa_mpu_access_type for the field's values).
+ */
+#define XT_TLB1_ACC_SHIFT 8
+#define XT_TLB1_ACC_MSK 0xF
+
#define XT_WATCHPOINTS_NUM_MAX 2
/* Special register number macro for DDR, PS, WB, A3, A4 registers.
XTENSA_MEM_REGS_NUM
};
+/**
+ * Types of access rights for MPU option
+ * The first block is kernel RWX ARs; the second block is user rwx ARs.
+ *
+ * Encodes the 4-bit access-rights field extracted from a PPTLB result via
+ * XT_TLB1_ACC_SHIFT/XT_TLB1_ACC_MSK.  Values 0x0 and 0x1 are not defined
+ * here (presumably reserved/no-access encodings — TODO confirm against the
+ * Xtensa ISA MPU access-rights table); the enumeration therefore starts at
+ * 0x2 and subsequent members take consecutive values up to 0xF.
+ */
+enum xtensa_mpu_access_type {
+	XTENSA_ACC_00X_000 = 0x2,
+	XTENSA_ACC_000_00X,
+	XTENSA_ACC_R00_000,
+	XTENSA_ACC_R0X_000,
+	XTENSA_ACC_RW0_000,
+	XTENSA_ACC_RWX_000,
+	XTENSA_ACC_0W0_0W0,
+	XTENSA_ACC_RW0_RWX,
+	XTENSA_ACC_RW0_R00,
+	XTENSA_ACC_RWX_R0X,
+	XTENSA_ACC_R00_R00,
+	XTENSA_ACC_R0X_R0X,
+	XTENSA_ACC_RW0_RW0,
+	XTENSA_ACC_RWX_RWX
+};
+
/* Register definition as union for list allocation */
union xtensa_reg_val_u {
xtensa_reg_val_t val;
}
}
+/* NOTE: Assumes A3 has already been saved and marked dirty; A3 will be clobbered */
+/**
+ * Determine whether the memory region [start, end] has execute permission.
+ *
+ * On cores with the MPU option, issues a PPTLB probe for both the start and
+ * the end address and checks that the returned access-rights field grants
+ * execute permission (in either kernel or user mode, per the
+ * XTENSA_ACC_* encodings that contain an X).  Returns false as soon as
+ * either endpoint lacks execute rights.
+ *
+ * Returns true when the MPU option is not enabled — with no access-rights
+ * information available, the region is assumed executable.
+ *
+ * NOTE(review): queue/status errors are only logged; on error the DDR
+ * readback in at_buf may be stale or undefined, yet it is still parsed —
+ * confirm this best-effort behavior is intended.
+ */
+static inline bool xtensa_region_ar_exec(struct target *target, target_addr_t start, target_addr_t end)
+{
+	struct xtensa *xtensa = target_to_xtensa(target);
+	if (xtensa->core_config->mpu.enabled) {
+		/* For cores with the MPU option, issue PPTLB on start and end addresses.
+		 * Parse access rights field, and confirm both have execute permissions.
+		 */
+		for (int i = 0; i <= 1; i++) {
+			uint32_t at, acc;
+			uint8_t at_buf[4];
+			bool exec_acc;
+			target_addr_t addr = i ? end : start;
+			/* Route the probe address through DDR into A3, probe the
+			 * protection TLB, then read the entry info back out via DDR.
+			 */
+			xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, addr);
+			xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
+			xtensa_queue_exec_ins(xtensa, XT_INS_PPTLB(xtensa, XT_REG_A3, XT_REG_A3));
+			xtensa_queue_exec_ins(xtensa, XT_INS_WSR(xtensa, XT_SR_DDR, XT_REG_A3));
+			xtensa_queue_dbg_reg_read(xtensa, XDMREG_DDR, at_buf);
+			int res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
+			if (res != ERROR_OK)
+				LOG_TARGET_ERROR(target, "Error queuing PPTLB: %d", res);
+			res = xtensa_core_status_check(target);
+			if (res != ERROR_OK)
+				LOG_TARGET_ERROR(target, "Error issuing PPTLB: %d", res);
+			/* Extract the 4-bit access-rights field and test it against
+			 * every encoding that carries execute permission.
+			 */
+			at = buf_get_u32(at_buf, 0, 32);
+			acc = (at >> XT_TLB1_ACC_SHIFT) & XT_TLB1_ACC_MSK;
+			exec_acc = ((acc == XTENSA_ACC_00X_000) || (acc == XTENSA_ACC_R0X_000) ||
+				(acc == XTENSA_ACC_RWX_000) || (acc == XTENSA_ACC_RWX_R0X) ||
+				(acc == XTENSA_ACC_R0X_R0X) || (acc == XTENSA_ACC_RWX_RWX));
+			LOG_TARGET_DEBUG(target, "PPTLB(" TARGET_ADDR_FMT ") -> 0x%08" PRIx32 " exec_acc %d",
+				addr, at, exec_acc);
+			if (!exec_acc)
+				return false;
+		}
+	}
+	return true;
+}
+
static int xtensa_queue_pwr_reg_write(struct xtensa *xtensa, unsigned int reg, uint32_t data)
{
struct xtensa_debug_module *dm = &xtensa->dbg_mod;
}
} else {
/* Invalidate ICACHE, writeback DCACHE if present */
- uint32_t issue_ihi = xtensa_is_icacheable(xtensa, address);
- uint32_t issue_dhwb = xtensa_is_dcacheable(xtensa, address);
- if (issue_ihi || issue_dhwb) {
+ bool issue_ihi = xtensa_is_icacheable(xtensa, address) &&
+ xtensa_region_ar_exec(target, addrstart_al, addrend_al);
+ bool issue_dhwbi = xtensa_is_dcacheable(xtensa, address);
+ LOG_TARGET_DEBUG(target, "Cache OPs: IHI %d, DHWBI %d", issue_ihi, issue_dhwbi);
+ if (issue_ihi || issue_dhwbi) {
uint32_t ilinesize = issue_ihi ? xtensa->core_config->icache.line_size : UINT32_MAX;
- uint32_t dlinesize = issue_dhwb ? xtensa->core_config->dcache.line_size : UINT32_MAX;
+ uint32_t dlinesize = issue_dhwbi ? xtensa->core_config->dcache.line_size : UINT32_MAX;
uint32_t linesize = MIN(ilinesize, dlinesize);
uint32_t off = 0;
adr = addrstart_al;
}
if (issue_ihi)
xtensa_queue_exec_ins(xtensa, XT_INS_IHI(xtensa, XT_REG_A3, off));
- if (issue_dhwb)
+ if (issue_dhwbi)
xtensa_queue_exec_ins(xtensa, XT_INS_DHWBI(xtensa, XT_REG_A3, off));
off += linesize;
if (off > 1020) {
/* Execute cache WB/INV instructions */
res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
- xtensa_core_status_check(target);
+ if (res != ERROR_OK)
+ LOG_TARGET_ERROR(target,
+ "Error queuing cache writeback/invaldate instruction(s): %d",
+ res);
+ res = xtensa_core_status_check(target);
if (res != ERROR_OK)
LOG_TARGET_ERROR(target,
"Error issuing cache writeback/invaldate instruction(s): %d",
static int xtensa_update_instruction(struct target *target, target_addr_t address, uint32_t size, const uint8_t *buffer)
{
struct xtensa *xtensa = target_to_xtensa(target);
- unsigned int issue_ihi = xtensa_is_icacheable(xtensa, address);
+ unsigned int issue_ihi = xtensa_is_icacheable(xtensa, address) &&
+ xtensa_region_ar_exec(target, address, address + size);
unsigned int issue_dhwbi = xtensa_is_dcacheable(xtensa, address);
uint32_t icache_line_size = issue_ihi ? xtensa->core_config->icache.line_size : UINT32_MAX;
uint32_t dcache_line_size = issue_dhwbi ? xtensa->core_config->dcache.line_size : UINT32_MAX;
/* Write start address to A3 and invalidate */
xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, address);
xtensa_queue_exec_ins(xtensa, XT_INS_RSR(xtensa, XT_SR_DDR, XT_REG_A3));
- LOG_TARGET_DEBUG(target, "DHWBI, IHI for address "TARGET_ADDR_FMT, address);
+ LOG_TARGET_DEBUG(target, "IHI %d, DHWBI %d for address " TARGET_ADDR_FMT,
+ issue_ihi, issue_dhwbi, address);
if (issue_dhwbi) {
xtensa_queue_exec_ins(xtensa, XT_INS_DHWBI(xtensa, XT_REG_A3, 0));
if (!same_dc_line) {