This is an automated email from the ASF dual-hosted git repository.
xiaoxiang pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/nuttx.git
The following commit(s) were added to refs/heads/master by this push:
new f9cab5b9dc arch/arm64: add ARM64_DCACHE_DISABLE and
ARM64_ICACHE_DISABLE config
f9cab5b9dc is described below
commit f9cab5b9dcbd585123b5e505e2c055f02c74e3aa
Author: zhangyuan21 <[email protected]>
AuthorDate: Tue Mar 14 13:28:44 2023 +0800
arch/arm64: add ARM64_DCACHE_DISABLE and ARM64_ICACHE_DISABLE config
Enable the dcache and icache at __start when CONFIG_ARM64_DCACHE_DISABLE
and CONFIG_ARM64_ICACHE_DISABLE are not enabled.
Signed-off-by: zhangyuan21 <[email protected]>
---
arch/arm64/Kconfig | 8 ++++
arch/arm64/src/common/arm64_boot.c | 84 +++++++++++++++++++++-----------------
arch/arm64/src/common/arm64_head.S | 2 +-
arch/arm64/src/common/arm64_mmu.c | 6 ++-
arch/arm64/src/common/arm64_mpu.c | 20 +++++----
5 files changed, 73 insertions(+), 47 deletions(-)
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 6bcab33616..873b773ebc 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -219,6 +219,14 @@ config ARM_GIC_EOIMODE
endif
+config ARM64_DCACHE_DISABLE
+ bool "Disable DCACHE at __start"
+ default n
+
+config ARM64_ICACHE_DISABLE
+ bool "Disable ICACHE at __start"
+ default n
+
if ARCH_CHIP_A64
source "arch/arm64/src/a64/Kconfig"
endif
diff --git a/arch/arm64/src/common/arm64_boot.c
b/arch/arm64/src/common/arm64_boot.c
index 72b2569895..42df595307 100644
--- a/arch/arm64/src/common/arm64_boot.c
+++ b/arch/arm64/src/common/arm64_boot.c
@@ -55,35 +55,41 @@ void arm64_boot_el3_init(void)
{
uint64_t reg;
+#ifndef CONFIG_ARM64_ICACHE_DISABLE
+ reg = read_sysreg(sctlr_el3);
+ reg |= SCTLR_I_BIT;
+ write_sysreg(reg, sctlr_el3);
+#endif
+
/* Setup vector table */
write_sysreg((uint64_t)_vector_table, vbar_el3);
ARM64_ISB();
- reg = 0U; /* Mostly RES0 */
- reg &= ~(CPTR_TTA_BIT | /* Do not trap sysreg accesses */
- CPTR_TFP_BIT | /* Do not trap SVE, SIMD and FP */
- CPTR_TCPAC_BIT); /* Do not trap CPTR_EL2 CPACR_EL1 accesses */
+ reg = 0U; /* Mostly RES0 */
+ reg &= ~(CPTR_TTA_BIT | /* Do not trap sysreg accesses */
+ CPTR_TFP_BIT | /* Do not trap SVE, SIMD and FP */
+ CPTR_TCPAC_BIT); /* Do not trap CPTR_EL2 CPACR_EL1 accesses */
/* CPTR_EL3, Architectural Feature Trap Register (EL3) */
write_sysreg(reg, cptr_el3);
- reg = 0U; /* Reset */
- reg |= SCR_NS_BIT; /* EL2 / EL3 non-secure */
- reg |= (SCR_RES1 | /* RES1 */
- SCR_RW_BIT | /* EL2 execution state is AArch64 */
- SCR_ST_BIT | /* Do not trap EL1 accesses to timer */
- SCR_HCE_BIT | /* Do not trap HVC */
- SCR_SMD_BIT); /* Do not trap SMC */
+ reg = 0U; /* Reset */
+ reg |= SCR_NS_BIT; /* EL2 / EL3 non-secure */
+ reg |= (SCR_RES1 | /* RES1 */
+ SCR_RW_BIT | /* EL2 execution state is AArch64 */
+ SCR_ST_BIT | /* Do not trap EL1 accesses to timer */
+ SCR_HCE_BIT | /* Do not trap HVC */
+ SCR_SMD_BIT); /* Do not trap SMC */
write_sysreg(reg, scr_el3);
- reg = read_sysreg(ICC_SRE_EL3);
- reg |= (ICC_SRE_ELX_DFB_BIT | /* Disable FIQ bypass */
- ICC_SRE_ELX_DIB_BIT | /* Disable IRQ bypass */
- ICC_SRE_ELX_SRE_BIT | /* System register interface is used */
- ICC_SRE_EL3_EN_BIT); /* Enables lower Exception level access to
- * ICC_SRE_EL1 */
+ reg = read_sysreg(ICC_SRE_EL3);
+ reg |= (ICC_SRE_ELX_DFB_BIT | /* Disable FIQ bypass */
+ ICC_SRE_ELX_DIB_BIT | /* Disable IRQ bypass */
+ ICC_SRE_ELX_SRE_BIT | /* System register interface is used */
+ ICC_SRE_EL3_EN_BIT); /* Enables lower Exception level access to
+ * ICC_SRE_EL1 */
write_sysreg(reg, ICC_SRE_EL3);
ARM64_ISB();
@@ -97,8 +103,8 @@ void arm64_boot_el3_get_next_el(uint64_t switch_addr)
/* Mask the DAIF */
- spsr = SPSR_DAIF_MASK;
- spsr |= SPSR_MODE_EL2T;
+ spsr = SPSR_DAIF_MASK;
+ spsr |= SPSR_MODE_EL2T;
write_sysreg(spsr, spsr_el3);
}
@@ -108,26 +114,28 @@ void arm64_boot_el2_init(void)
{
uint64_t reg;
- reg = read_sysreg(sctlr_el2);
- reg |= (SCTLR_EL2_RES1 | /* RES1 */
- SCTLR_I_BIT | /* Enable i-cache */
- SCTLR_SA_BIT); /* Enable SP alignment check */
+ reg = 0U; /* RES0 */
+ reg = (SCTLR_EL2_RES1 | /* RES1 */
+#ifndef CONFIG_ARM64_ICACHE_DISABLE
+ SCTLR_I_BIT | /* Enable i-cache */
+#endif
+ SCTLR_SA_BIT); /* Enable SP alignment check */
write_sysreg(reg, sctlr_el2);
- reg = read_sysreg(hcr_el2);
- reg |= HCR_RW_BIT; /* EL1 Execution state is AArch64 */
+ reg = read_sysreg(hcr_el2);
+ reg |= HCR_RW_BIT; /* EL1 Execution state is AArch64 */
write_sysreg(reg, hcr_el2);
- reg = 0U; /* RES0 */
- reg |= CPTR_EL2_RES1; /* RES1 */
- reg &= ~(CPTR_TFP_BIT | /* Do not trap SVE, SIMD and FP */
- CPTR_TCPAC_BIT); /* Do not trap CPACR_EL1 accesses */
+ reg = 0U; /* RES0 */
+ reg |= CPTR_EL2_RES1; /* RES1 */
+ reg &= ~(CPTR_TFP_BIT | /* Do not trap SVE, SIMD and FP */
+ CPTR_TCPAC_BIT); /* Do not trap CPACR_EL1 accesses */
write_sysreg(reg, cptr_el2);
/* Enable EL1 access to timers */
- reg = read_sysreg(cnthctl_el2);
- reg |= (CNTHCTL_EL2_EL1PCEN_EN | CNTHCTL_EL2_EL1PCTEN_EN);
+ reg = read_sysreg(cnthctl_el2);
+ reg |= (CNTHCTL_EL2_EL1PCEN_EN | CNTHCTL_EL2_EL1PCTEN_EN);
write_sysreg(reg, cnthctl_el2);
zero_sysreg(cntvoff_el2); /* Set 64-bit virtual timer offset to 0 */
@@ -159,17 +167,19 @@ void arm64_boot_el1_init(void)
write_sysreg((uint64_t)_vector_table, vbar_el1);
ARM64_ISB();
- reg = 0U; /* RES0 */
- reg |= CPACR_EL1_FPEN_NOTRAP; /* Do not trap NEON/SIMD/FP initially */
+ reg = 0U; /* RES0 */
+ reg |= CPACR_EL1_FPEN_NOTRAP; /* Do not trap NEON/SIMD/FP initially */
/* TODO: CONFIG_FLOAT_*_FORBIDDEN */
write_sysreg(reg, cpacr_el1);
- reg = read_sysreg(sctlr_el1);
- reg |= (SCTLR_EL1_RES1 | /* RES1 */
- SCTLR_I_BIT | /* Enable i-cache */
- SCTLR_SA_BIT); /* Enable SP alignment check */
+ reg = 0U; /* RES0 */
+ reg = (SCTLR_EL1_RES1 | /* RES1 */
+#ifndef CONFIG_ARM64_ICACHE_DISABLE
+ SCTLR_I_BIT | /* Enable i-cache */
+#endif
+ SCTLR_SA_BIT); /* Enable SP alignment check */
write_sysreg(reg, sctlr_el1);
write_sysreg((~(uint64_t)0), cntv_cval_el0);
diff --git a/arch/arm64/src/common/arm64_head.S
b/arch/arm64/src/common/arm64_head.S
index 886c1202ea..a4aa0b73d5 100644
--- a/arch/arm64/src/common/arm64_head.S
+++ b/arch/arm64/src/common/arm64_head.S
@@ -292,7 +292,7 @@ __reset_prep_c:
#ifdef CONFIG_ARCH_HAVE_EL3
/* Reinitialize SCTLR from scratch in EL3 */
- ldr w0, =(SCTLR_EL3_RES1 | SCTLR_I_BIT | SCTLR_SA_BIT)
+ ldr w0, =(SCTLR_EL3_RES1 | SCTLR_SA_BIT)
msr sctlr_el3, x0
#endif
diff --git a/arch/arm64/src/common/arm64_mmu.c
b/arch/arm64/src/common/arm64_mmu.c
index 19c40a6cd8..8121dcc5e1 100644
--- a/arch/arm64/src/common/arm64_mmu.c
+++ b/arch/arm64/src/common/arm64_mmu.c
@@ -541,7 +541,11 @@ static void enable_mmu_el1(unsigned int flags)
/* Enable the MMU and data cache */
value = read_sysreg(sctlr_el1);
- write_sysreg((value | SCTLR_M_BIT | SCTLR_C_BIT), sctlr_el1);
+ write_sysreg((value | SCTLR_M_BIT
+#ifndef CONFIG_ARM64_DCACHE_DISABLE
+ | SCTLR_C_BIT
+#endif
+ ), sctlr_el1);
/* Ensure the MMU enable takes effect immediately */
diff --git a/arch/arm64/src/common/arm64_mpu.c
b/arch/arm64/src/common/arm64_mpu.c
index 4227516b62..7f28dec3db 100644
--- a/arch/arm64/src/common/arm64_mpu.c
+++ b/arch/arm64/src/common/arm64_mpu.c
@@ -82,8 +82,8 @@ static inline uint8_t get_num_regions(void)
{
uint64_t type;
- type = read_sysreg(mpuir_el1);
- type = type & MPU_IR_REGION_MSK;
+ type = read_sysreg(mpuir_el1);
+ type = type & MPU_IR_REGION_MSK;
return (uint8_t)type;
}
@@ -98,8 +98,12 @@ void arm64_core_mpu_enable(void)
{
uint64_t val;
- val = read_sysreg(sctlr_el1);
- val |= (SCTLR_M_BIT | SCTLR_C_BIT);
+ val = read_sysreg(sctlr_el1);
+ val |= (SCTLR_M_BIT
+#ifndef CONFIG_ARM64_DCACHE_DISABLE
+ | SCTLR_C_BIT
+#endif
+ );
write_sysreg(val, sctlr_el1);
ARM64_DSB();
ARM64_ISB();
@@ -117,8 +121,8 @@ void arm64_core_mpu_disable(void)
ARM64_DMB();
- val = read_sysreg(sctlr_el1);
- val &= ~(SCTLR_M_BIT | SCTLR_C_BIT);
+ val = read_sysreg(sctlr_el1);
+ val &= ~(SCTLR_M_BIT | SCTLR_C_BIT);
write_sysreg(val, sctlr_el1);
ARM64_DSB();
ARM64_ISB();
@@ -161,8 +165,8 @@ static inline void mpu_set_region(uint32_t rnr, uint64_t
rbar,
static void region_init(const uint32_t index,
const struct arm64_mpu_region *region_conf)
{
- uint64_t rbar = region_conf->base & MPU_RBAR_BASE_MSK;
- uint64_t rlar = (region_conf->limit - 1) & MPU_RLAR_LIMIT_MSK;
+ uint64_t rbar = region_conf->base & MPU_RBAR_BASE_MSK;
+ uint64_t rlar = (region_conf->limit - 1) & MPU_RLAR_LIMIT_MSK;
rbar |= region_conf->attr.rbar &
(MPU_RBAR_XN_MSK | MPU_RBAR_AP_MSK | MPU_RBAR_SH_MSK);