Merge "feat(sme): enable SME functionality" into integration
diff --git a/Makefile b/Makefile
index 2a1d4d8..4905291 100644
--- a/Makefile
+++ b/Makefile
@@ -777,6 +777,52 @@
endif
endif
+# SME/SVE only supported on AArch64
+ifeq (${ARCH},aarch32)
+ ifeq (${ENABLE_SME_FOR_NS},1)
+ $(error "ENABLE_SME_FOR_NS cannot be used with ARCH=aarch32")
+ endif
+ ifeq (${ENABLE_SVE_FOR_NS},1)
+ # Warning instead of error due to CI dependency on this
+ $(warning "ENABLE_SVE_FOR_NS cannot be used with ARCH=aarch32")
+ $(warning "Forced ENABLE_SVE_FOR_NS=0")
+ override ENABLE_SVE_FOR_NS := 0
+ endif
+endif
+
+# Ensure ENABLE_RME is not used with SME
+ifeq (${ENABLE_RME},1)
+ ifeq (${ENABLE_SME_FOR_NS},1)
+ $(error "ENABLE_SME_FOR_NS cannot be used with ENABLE_RME")
+ endif
+endif
+
+# Secure SME/SVE requires the non-secure component as well
+ifeq (${ENABLE_SME_FOR_SWD},1)
+ ifeq (${ENABLE_SME_FOR_NS},0)
+ $(error "ENABLE_SME_FOR_SWD requires ENABLE_SME_FOR_NS")
+ endif
+endif
+ifeq (${ENABLE_SVE_FOR_SWD},1)
+ ifeq (${ENABLE_SVE_FOR_NS},0)
+ $(error "ENABLE_SVE_FOR_SWD requires ENABLE_SVE_FOR_NS")
+ endif
+endif
+
+# SVE and SME cannot be used with CTX_INCLUDE_FPREGS since the secure manager
+# does its own context management, including the FPU registers.
+ifeq (${CTX_INCLUDE_FPREGS},1)
+ ifeq (${ENABLE_SME_FOR_NS},1)
+ $(error "ENABLE_SME_FOR_NS cannot be used with CTX_INCLUDE_FPREGS")
+ endif
+ ifeq (${ENABLE_SVE_FOR_NS},1)
+ # Warning instead of error due to CI dependency on this
+ $(warning "ENABLE_SVE_FOR_NS cannot be used with CTX_INCLUDE_FPREGS")
+ $(warning "Forced ENABLE_SVE_FOR_NS=0")
+ override ENABLE_SVE_FOR_NS := 0
+ endif
+endif
+
################################################################################
# Process platform overrideable behaviour
################################################################################
@@ -941,6 +987,8 @@
ENABLE_PSCI_STAT \
ENABLE_RME \
ENABLE_RUNTIME_INSTRUMENTATION \
+ ENABLE_SME_FOR_NS \
+ ENABLE_SME_FOR_SWD \
ENABLE_SPE_FOR_LOWER_ELS \
ENABLE_SVE_FOR_NS \
ENABLE_SVE_FOR_SWD \
@@ -1048,6 +1096,8 @@
ENABLE_PSCI_STAT \
ENABLE_RME \
ENABLE_RUNTIME_INSTRUMENTATION \
+ ENABLE_SME_FOR_NS \
+ ENABLE_SME_FOR_SWD \
ENABLE_SPE_FOR_LOWER_ELS \
ENABLE_SVE_FOR_NS \
ENABLE_SVE_FOR_SWD \
diff --git a/bl31/bl31.mk b/bl31/bl31.mk
index 9baa0c2..e751824 100644
--- a/bl31/bl31.mk
+++ b/bl31/bl31.mk
@@ -87,9 +87,14 @@
BL31_SOURCES += ${MPMM_SOURCES}
endif
+ifeq (${ENABLE_SME_FOR_NS},1)
+BL31_SOURCES += lib/extensions/sme/sme.c
+BL31_SOURCES += lib/extensions/sve/sve.c
+else
ifeq (${ENABLE_SVE_FOR_NS},1)
BL31_SOURCES += lib/extensions/sve/sve.c
endif
+endif
ifeq (${ENABLE_MPAM_FOR_LOWER_ELS},1)
BL31_SOURCES += lib/extensions/mpam/mpam.c
diff --git a/docs/getting_started/build-options.rst b/docs/getting_started/build-options.rst
index 04e3c0b..e7ffd94 100644
--- a/docs/getting_started/build-options.rst
+++ b/docs/getting_started/build-options.rst
@@ -299,6 +299,21 @@
instrumented. Enabling this option enables the ``ENABLE_PMF`` build option
as well. Default is 0.
+- ``ENABLE_SME_FOR_NS``: Boolean option to enable the Scalable Matrix Extension
+  (SME), SVE, and FPU/SIMD for the non-secure world only. These features share
+  registers, so they are enabled together. Using this option without
+  ``ENABLE_SME_FOR_SWD=1`` causes SME, SVE, and FPU/SIMD instructions in the
+  secure world to trap to EL3. SME is an optional architectural feature for
+  AArch64 and TF-A support is experimental. At this time, this build option
+  cannot be combined with ``SPD=spmd`` or ``ENABLE_RME``; attempting to build
+  with either combination will fail. Default is 0.
+
+- ``ENABLE_SME_FOR_SWD``: Boolean option to enable the Scalable Matrix
+  Extension for secure world use, along with SVE and FPU/SIMD.
+  ``ENABLE_SME_FOR_NS`` must also be set to use this option. If this option is
+  enabled, the secure world MUST handle context switching for the SME, SVE,
+  and FPU/SIMD registers to ensure that no data is leaked to the non-secure
+  world. This is experimental. Default is 0.
+
- ``ENABLE_SPE_FOR_LOWER_ELS`` : Boolean option to enable Statistical Profiling
extensions. This is an optional architectural feature for AArch64.
The default is 1 but is automatically disabled when the target architecture
@@ -313,8 +328,8 @@
which are aliased by the SIMD and FP registers. The build option is not
compatible with the ``CTX_INCLUDE_FPREGS`` build option, and will raise an
assert on platforms where SVE is implemented and ``ENABLE_SVE_FOR_NS`` set to
- 1. The default is 1 but is automatically disabled when the target
- architecture is AArch32.
+  1. The default is 1 but is automatically disabled when ``ENABLE_SME_FOR_NS``
+  is set to 1, since SME encompasses SVE.
- ``ENABLE_SVE_FOR_SWD``: Boolean option to enable SVE for the Secure world.
SVE is an optional architectural feature for AArch64. Note that this option
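
The ``ENABLE_SME_FOR_NS`` entry above states that, without ``ENABLE_SME_FOR_SWD=1``,
SME, SVE and FPU/SIMD accesses from the secure world trap to EL3. A minimal sketch of
the EL3 controls behind that statement follows; the helper name is hypothetical and it
simply mirrors the sme_disable() routine added later in this patch, assuming the
context accessors from context.h and the CPTR_EL3/SCR_EL3 bit names added in arch.h.

#include <arch.h>
#include <arch_helpers.h>
#include <context.h>

/*
 * Hypothetical illustration (not part of the patch): program a secure world
 * context so that SME, SVE and FP/SIMD use traps to EL3 and TPIDR2_EL0
 * accesses are blocked, as described in the build option documentation.
 */
static void secure_ctx_trap_sme(cpu_context_t *ctx)
{
	el3_state_t *state = get_el3state_ctx(ctx);
	u_register_t cptr = read_ctx_reg(state, CTX_CPTR_EL3);
	u_register_t scr = read_ctx_reg(state, CTX_SCR_EL3);

	cptr &= ~(ESM_BIT | CPTR_EZ_BIT);	/* trap SME and SVE */
	cptr |= TFP_BIT;			/* trap FP/SIMD */
	scr &= ~SCR_ENTP2_BIT;			/* block TPIDR2_EL0 access */

	write_ctx_reg(state, CTX_CPTR_EL3, cptr);
	write_ctx_reg(state, CTX_SCR_EL3, scr);
}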
diff --git a/include/arch/aarch64/arch.h b/include/arch/aarch64/arch.h
index 5408acf..0fb4e74 100644
--- a/include/arch/aarch64/arch.h
+++ b/include/arch/aarch64/arch.h
@@ -218,8 +218,8 @@
#define ID_AA64DFR0_MTPMU_SUPPORTED ULL(1)
/* ID_AA64ISAR0_EL1 definitions */
-#define ID_AA64ISAR0_RNDR_SHIFT U(60)
-#define ID_AA64ISAR0_RNDR_MASK ULL(0xf)
+#define ID_AA64ISAR0_RNDR_SHIFT U(60)
+#define ID_AA64ISAR0_RNDR_MASK ULL(0xf)
/* ID_AA64ISAR1_EL1 definitions */
#define ID_AA64ISAR1_EL1 S3_0_C0_C6_1
@@ -286,10 +286,10 @@
#define ID_AA64MMFR1_EL1_VHE_SHIFT U(8)
#define ID_AA64MMFR1_EL1_VHE_MASK ULL(0xf)
-#define ID_AA64MMFR1_EL1_HCX_SHIFT U(40)
-#define ID_AA64MMFR1_EL1_HCX_MASK ULL(0xf)
-#define ID_AA64MMFR1_EL1_HCX_SUPPORTED ULL(0x1)
-#define ID_AA64MMFR1_EL1_HCX_NOT_SUPPORTED ULL(0x0)
+#define ID_AA64MMFR1_EL1_HCX_SHIFT U(40)
+#define ID_AA64MMFR1_EL1_HCX_MASK ULL(0xf)
+#define ID_AA64MMFR1_EL1_HCX_SUPPORTED ULL(0x1)
+#define ID_AA64MMFR1_EL1_HCX_NOT_SUPPORTED ULL(0x0)
/* ID_AA64MMFR2_EL1 definitions */
#define ID_AA64MMFR2_EL1 S3_0_C0_C7_2
@@ -329,6 +329,9 @@
#define ID_AA64PFR1_MPAM_FRAC_SHIFT ULL(16)
#define ID_AA64PFR1_MPAM_FRAC_MASK ULL(0xf)
+#define ID_AA64PFR1_EL1_SME_SHIFT U(24)
+#define ID_AA64PFR1_EL1_SME_MASK ULL(0xf)
+
/* ID_PFR1_EL1 definitions */
#define ID_PFR1_VIRTEXT_SHIFT U(12)
#define ID_PFR1_VIRTEXT_MASK U(0xf)
@@ -388,6 +391,7 @@
#define SCTLR_ITFSB_BIT (ULL(1) << 37)
#define SCTLR_TCF0_SHIFT U(38)
#define SCTLR_TCF0_MASK ULL(3)
+#define SCTLR_ENTP2_BIT (ULL(1) << 60)
/* Tag Check Faults in EL0 have no effect on the PE */
#define SCTLR_TCF0_NO_EFFECT U(0)
@@ -442,7 +446,9 @@
#define SCR_GPF_BIT (UL(1) << 48)
#define SCR_TWEDEL_SHIFT U(30)
#define SCR_TWEDEL_MASK ULL(0xf)
-#define SCR_HXEn_BIT (UL(1) << 38)
+#define SCR_HXEn_BIT (UL(1) << 38)
+#define SCR_ENTP2_SHIFT U(41)
+#define SCR_ENTP2_BIT (UL(1) << SCR_ENTP2_SHIFT)
#define SCR_AMVOFFEN_BIT (UL(1) << 35)
#define SCR_TWEDEn_BIT (UL(1) << 29)
#define SCR_ECVEN_BIT (UL(1) << 28)
@@ -465,7 +471,7 @@
#define SCR_FIQ_BIT (UL(1) << 2)
#define SCR_IRQ_BIT (UL(1) << 1)
#define SCR_NS_BIT (UL(1) << 0)
-#define SCR_VALID_BIT_MASK U(0x2f8f)
+#define SCR_VALID_BIT_MASK U(0x24000002F8F)
#define SCR_RESET_VAL SCR_RES1_BITS
/* MDCR_EL3 definitions */
@@ -574,23 +580,28 @@
#define TAM_SHIFT U(30)
#define TAM_BIT (U(1) << TAM_SHIFT)
#define TTA_BIT (U(1) << 20)
+#define ESM_BIT (U(1) << 12)
#define TFP_BIT (U(1) << 10)
#define CPTR_EZ_BIT (U(1) << 8)
-#define CPTR_EL3_RESET_VAL (TCPAC_BIT | TAM_BIT | TTA_BIT | TFP_BIT & ~(CPTR_EZ_BIT))
+#define CPTR_EL3_RESET_VAL ((TCPAC_BIT | TAM_BIT | TTA_BIT | TFP_BIT) & \
+ ~(CPTR_EZ_BIT | ESM_BIT))
/* CPTR_EL2 definitions */
#define CPTR_EL2_RES1 ((U(1) << 13) | (U(1) << 12) | (U(0x3ff)))
#define CPTR_EL2_TCPAC_BIT (U(1) << 31)
#define CPTR_EL2_TAM_SHIFT U(30)
#define CPTR_EL2_TAM_BIT (U(1) << CPTR_EL2_TAM_SHIFT)
+#define CPTR_EL2_SMEN_MASK ULL(0x3)
+#define CPTR_EL2_SMEN_SHIFT U(24)
#define CPTR_EL2_TTA_BIT (U(1) << 20)
+#define CPTR_EL2_TSM_BIT (U(1) << 12)
#define CPTR_EL2_TFP_BIT (U(1) << 10)
#define CPTR_EL2_TZ_BIT (U(1) << 8)
#define CPTR_EL2_RESET_VAL CPTR_EL2_RES1
/* VTCR_EL2 definitions */
-#define VTCR_RESET_VAL U(0x0)
-#define VTCR_EL2_MSA (U(1) << 31)
+#define VTCR_RESET_VAL U(0x0)
+#define VTCR_EL2_MSA (U(1) << 31)
/* CPSR/SPSR definitions */
#define DAIF_FIQ_BIT (U(1) << 0)
@@ -918,6 +929,20 @@
#define ZCR_EL2_LEN_MASK U(0xf)
/*******************************************************************************
+ * Definitions for system register interface to SME as needed in EL3
+ ******************************************************************************/
+#define ID_AA64SMFR0_EL1 S3_0_C0_C4_5
+#define SMCR_EL3 S3_6_C1_C2_6
+
+/* ID_AA64SMFR0_EL1 definitions */
+#define ID_AA64SMFR0_EL1_FA64_BIT (UL(1) << 63)
+
+/* SMCR_ELx definitions */
+#define SMCR_ELX_LEN_SHIFT U(0)
+#define SMCR_ELX_LEN_MASK U(0x1ff)
+#define SMCR_ELX_FA64_BIT (U(1) << 31)
+
+/*******************************************************************************
* Definitions of MAIR encodings for device and normal memory
******************************************************************************/
/*
@@ -1199,12 +1224,12 @@
/*******************************************************************************
* FEAT_HCX - Extended Hypervisor Configuration Register
******************************************************************************/
-#define HCRX_EL2 S3_4_C1_C2_2
-#define HCRX_EL2_FGTnXS_BIT (UL(1) << 4)
-#define HCRX_EL2_FnXS_BIT (UL(1) << 3)
-#define HCRX_EL2_EnASR_BIT (UL(1) << 2)
-#define HCRX_EL2_EnALS_BIT (UL(1) << 1)
-#define HCRX_EL2_EnAS0_BIT (UL(1) << 0)
+#define HCRX_EL2 S3_4_C1_C2_2
+#define HCRX_EL2_FGTnXS_BIT (UL(1) << 4)
+#define HCRX_EL2_FnXS_BIT (UL(1) << 3)
+#define HCRX_EL2_EnASR_BIT (UL(1) << 2)
+#define HCRX_EL2_EnALS_BIT (UL(1) << 1)
+#define HCRX_EL2_EnAS0_BIT (UL(1) << 0)
/*******************************************************************************
* Definitions for DynamicIQ Shared Unit registers
diff --git a/include/arch/aarch64/arch_helpers.h b/include/arch/aarch64/arch_helpers.h
index 37fa047..733bb23 100644
--- a/include/arch/aarch64/arch_helpers.h
+++ b/include/arch/aarch64/arch_helpers.h
@@ -509,6 +509,9 @@
DEFINE_RENAME_SYSREG_WRITE_FUNC(zcr_el3, ZCR_EL3)
DEFINE_RENAME_SYSREG_WRITE_FUNC(zcr_el2, ZCR_EL2)
+DEFINE_RENAME_SYSREG_READ_FUNC(id_aa64smfr0_el1, ID_AA64SMFR0_EL1)
+DEFINE_RENAME_SYSREG_RW_FUNCS(smcr_el3, SMCR_EL3)
+
DEFINE_RENAME_SYSREG_READ_FUNC(erridr_el1, ERRIDR_EL1)
DEFINE_RENAME_SYSREG_WRITE_FUNC(errselr_el1, ERRSELR_EL1)
diff --git a/include/arch/aarch64/el3_common_macros.S b/include/arch/aarch64/el3_common_macros.S
index 8e8d334..f29def7 100644
--- a/include/arch/aarch64/el3_common_macros.S
+++ b/include/arch/aarch64/el3_common_macros.S
@@ -222,6 +222,9 @@
*
* CPTR_EL3.EZ: Set to zero so that all SVE functionality is trapped
* to EL3 by default.
+ *
+ * CPTR_EL3.ESM: Set to zero so that all SME functionality is trapped
+ * to EL3 by default.
*/
mov_imm x0, (CPTR_EL3_RESET_VAL & ~(TCPAC_BIT | TTA_BIT | TFP_BIT))
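
As a quick cross-check of the comment above, the sketch below (not part of the patch;
plain host C using the bit positions from the arch.h hunk earlier, with CPTR_EL3.TCPAC
assumed at bit 31 since its shift is not visible here) evaluates the value that the
mov_imm loads: only TAM remains set, while EZ and ESM stay zero, so SVE and SME
accesses keep trapping to EL3 until sve_enable()/sme_enable() set them for a context.

#include <assert.h>
#include <stdio.h>

/* CPTR_EL3 bit positions from arch.h (TCPAC at bit 31 is an assumption). */
#define TCPAC_BIT   (1u << 31)
#define TAM_BIT     (1u << 30)
#define TTA_BIT     (1u << 20)
#define ESM_BIT     (1u << 12)
#define TFP_BIT     (1u << 10)
#define CPTR_EZ_BIT (1u << 8)

#define CPTR_EL3_RESET_VAL ((TCPAC_BIT | TAM_BIT | TTA_BIT | TFP_BIT) & \
			    ~(CPTR_EZ_BIT | ESM_BIT))

int main(void)
{
	unsigned int cptr = CPTR_EL3_RESET_VAL & ~(TCPAC_BIT | TTA_BIT | TFP_BIT);

	assert(cptr == TAM_BIT);			/* only TAM set */
	assert((cptr & (CPTR_EZ_BIT | ESM_BIT)) == 0u);	/* SVE and SME trap */
	printf("CPTR_EL3 early init value: 0x%x\n", cptr);
	return 0;
}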
diff --git a/include/lib/extensions/sme.h b/include/lib/extensions/sme.h
new file mode 100644
index 0000000..893f9f2
--- /dev/null
+++ b/include/lib/extensions/sme.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2021, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef SME_H
+#define SME_H
+
+#include <stdbool.h>
+
+#include <context.h>
+
+/*
+ * Maximum value of the LEN field in SMCR_ELx. This is different from the
+ * maximum supported value, which is platform dependent. In the first version
+ * of SME the LEN field is limited to 4 bits, but it will be expanded in future
+ * iterations. To support different versions, the code that discovers the
+ * supported vector lengths writes the maximum value into SMCR_ELx and then
+ * reads it back to see how many bits are implemented.
+ */
+#define SME_SMCR_LEN_MAX U(0x1FF)
+
+void sme_enable(cpu_context_t *context);
+void sme_disable(cpu_context_t *context);
+
+#endif /* SME_H */
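
The comment above describes a discovery pattern that is not itself part of this patch;
a minimal sketch of it, with a hypothetical helper name and assuming the smcr_el3
accessors added to arch_helpers.h and that CPTR_EL3.ESM is already set so the SMCR_EL3
accesses do not trap, would look like this:

#include <arch.h>
#include <arch_helpers.h>
#include <lib/extensions/sme.h>

/*
 * Hypothetical helper (not part of this patch): write the architectural
 * maximum LEN value, then read SMCR_EL3 back to see how many LEN bits the
 * implementation supports. CPTR_EL3.ESM must already be set by the caller.
 */
static unsigned int sme_probe_smcr_len(void)
{
	write_smcr_el3(read_smcr_el3() | SME_SMCR_LEN_MAX);

	return (unsigned int)(read_smcr_el3() & SMCR_ELX_LEN_MASK);
}

sme_enable() in this patch takes the simpler approach of programming the maximum LEN
value directly, leaving lower ELs to restrict it via SMCR_EL2 and SMCR_EL1.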
diff --git a/include/lib/extensions/sve.h b/include/lib/extensions/sve.h
index c85e08c..4b66cdb 100644
--- a/include/lib/extensions/sve.h
+++ b/include/lib/extensions/sve.h
@@ -10,5 +10,6 @@
#include <context.h>
void sve_enable(cpu_context_t *context);
+void sve_disable(cpu_context_t *context);
#endif /* SVE_H */
diff --git a/lib/el3_runtime/aarch64/context_mgmt.c b/lib/el3_runtime/aarch64/context_mgmt.c
index 0ec7e7e..c69dc95 100644
--- a/lib/el3_runtime/aarch64/context_mgmt.c
+++ b/lib/el3_runtime/aarch64/context_mgmt.c
@@ -20,6 +20,7 @@
#include <lib/el3_runtime/pubsub_events.h>
#include <lib/extensions/amu.h>
#include <lib/extensions/mpam.h>
+#include <lib/extensions/sme.h>
#include <lib/extensions/spe.h>
#include <lib/extensions/sve.h>
#include <lib/extensions/sys_reg_trace.h>
@@ -28,7 +29,7 @@
#include <lib/extensions/twed.h>
#include <lib/utils.h>
-static void enable_extensions_secure(cpu_context_t *ctx);
+static void manage_extensions_secure(cpu_context_t *ctx);
/*******************************************************************************
* Context management library initialisation routine. This library is used by
@@ -219,7 +220,7 @@
/* Save the initialized value of CPTR_EL3 register */
write_ctx_reg(get_el3state_ctx(ctx), CTX_CPTR_EL3, read_cptr_el3());
if (security_state == SECURE) {
- enable_extensions_secure(ctx);
+ manage_extensions_secure(ctx);
}
/*
@@ -365,7 +366,7 @@
* When EL2 is implemented but unused `el2_unused` is non-zero, otherwise
* it is zero.
******************************************************************************/
-static void enable_extensions_nonsecure(bool el2_unused, cpu_context_t *ctx)
+static void manage_extensions_nonsecure(bool el2_unused, cpu_context_t *ctx)
{
#if IMAGE_BL31
#if ENABLE_SPE_FOR_LOWER_ELS
@@ -376,7 +377,11 @@
amu_enable(el2_unused, ctx);
#endif
-#if ENABLE_SVE_FOR_NS
+#if ENABLE_SME_FOR_NS
+ /* Enable SME, SVE, and FPU/SIMD for non-secure world. */
+ sme_enable(ctx);
+#elif ENABLE_SVE_FOR_NS
+ /* Enable SVE and FPU/SIMD for non-secure world. */
sve_enable(ctx);
#endif
@@ -395,20 +400,45 @@
#if ENABLE_TRF_FOR_NS
trf_enable();
#endif /* ENABLE_TRF_FOR_NS */
-
#endif
}
/*******************************************************************************
* Enable architecture extensions on first entry to Secure world.
******************************************************************************/
-static void enable_extensions_secure(cpu_context_t *ctx)
+static void manage_extensions_secure(cpu_context_t *ctx)
{
#if IMAGE_BL31
-#if ENABLE_SVE_FOR_SWD
+ #if ENABLE_SME_FOR_NS
+ #if ENABLE_SME_FOR_SWD
+ /*
+ * Enable SME, SVE and FPU/SIMD in the secure context; the secure manager
+ * must ensure that the SME, SVE and FPU/SIMD context is properly managed.
+ */
+ sme_enable(ctx);
+ #else /* ENABLE_SME_FOR_SWD */
+ /*
+ * Disable SME, SVE and FPU/SIMD in the secure context so the non-secure
+ * world can safely use the associated registers.
+ */
+ sme_disable(ctx);
+ #endif /* ENABLE_SME_FOR_SWD */
+ #elif ENABLE_SVE_FOR_NS
+ #if ENABLE_SVE_FOR_SWD
+ /*
+ * Enable SVE and FPU in the secure context; the secure manager must ensure
+ * that the SVE and FPU register contexts are properly managed.
+ */
sve_enable(ctx);
-#endif
-#endif
+ #else /* ENABLE_SVE_FOR_SWD */
+ /*
+ * Disable SVE and FPU in the secure context so the non-secure world can
+ * safely use them.
+ */
+ sve_disable(ctx);
+ #endif /* ENABLE_SVE_FOR_SWD */
+ #endif /* ENABLE_SVE_FOR_NS */
+#endif /* IMAGE_BL31 */
}
/*******************************************************************************
@@ -654,7 +684,7 @@
write_cnthp_ctl_el2(CNTHP_CTL_RESET_VAL &
~(CNTHP_CTL_ENABLE_BIT));
}
- enable_extensions_nonsecure(el2_unused, ctx);
+ manage_extensions_nonsecure(el2_unused, ctx);
}
cm_el1_sysregs_context_restore(security_state);
diff --git a/lib/extensions/sme/sme.c b/lib/extensions/sme/sme.c
new file mode 100644
index 0000000..1c2b984
--- /dev/null
+++ b/lib/extensions/sme/sme.c
@@ -0,0 +1,103 @@
+/*
+ * Copyright (c) 2021, Arm Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <stdbool.h>
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <common/debug.h>
+#include <lib/el3_runtime/context_mgmt.h>
+#include <lib/extensions/sme.h>
+#include <lib/extensions/sve.h>
+
+static bool feat_sme_supported(void)
+{
+ uint64_t features;
+
+ features = read_id_aa64pfr1_el1() >> ID_AA64PFR1_EL1_SME_SHIFT;
+ return (features & ID_AA64PFR1_EL1_SME_MASK) != 0U;
+}
+
+static bool feat_sme_fa64_supported(void)
+{
+ uint64_t features;
+
+ features = read_id_aa64smfr0_el1();
+ return (features & ID_AA64SMFR0_EL1_FA64_BIT) != 0U;
+}
+
+void sme_enable(cpu_context_t *context)
+{
+ u_register_t reg;
+ u_register_t cptr_el3;
+ el3_state_t *state;
+
+ /* Make sure SME is implemented in hardware before continuing. */
+ if (!feat_sme_supported()) {
+ return;
+ }
+
+ /* Get the context state. */
+ state = get_el3state_ctx(context);
+
+ /* Enable SME in CPTR_EL3. */
+ reg = read_ctx_reg(state, CTX_CPTR_EL3);
+ reg |= ESM_BIT;
+ write_ctx_reg(state, CTX_CPTR_EL3, reg);
+
+ /* Set the ENTP2 bit in SCR_EL3 to enable access to TPIDR2_EL0. */
+ reg = read_ctx_reg(state, CTX_SCR_EL3);
+ reg |= SCR_ENTP2_BIT;
+ write_ctx_reg(state, CTX_SCR_EL3, reg);
+
+ /* Set CPTR_EL3.ESM bit so we can write SMCR_EL3 without trapping. */
+ cptr_el3 = read_cptr_el3();
+ write_cptr_el3(cptr_el3 | ESM_BIT);
+
+ /*
+ * Set the max LEN value and FA64 bit. This register is set up globally
+ * to be the least restrictive, then lower ELs can restrict as needed
+ * using SMCR_EL2 and SMCR_EL1.
+ */
+ reg = SMCR_ELX_LEN_MASK;
+ if (feat_sme_fa64_supported()) {
+ VERBOSE("[SME] FA64 enabled\n");
+ reg |= SMCR_ELX_FA64_BIT;
+ }
+ write_smcr_el3(reg);
+
+ /* Reset CPTR_EL3 value. */
+ write_cptr_el3(cptr_el3);
+
+ /* Enable SVE/FPU in addition to SME. */
+ sve_enable(context);
+}
+
+void sme_disable(cpu_context_t *context)
+{
+ u_register_t reg;
+ el3_state_t *state;
+
+ /* Make sure SME is implemented in hardware before continuing. */
+ if (!feat_sme_supported()) {
+ return;
+ }
+
+ /* Get the context state. */
+ state = get_el3state_ctx(context);
+
+ /* Disable SME, SVE, and FPU since they all share registers. */
+ reg = read_ctx_reg(state, CTX_CPTR_EL3);
+ reg &= ~ESM_BIT; /* Trap SME */
+ reg &= ~CPTR_EZ_BIT; /* Trap SVE */
+ reg |= TFP_BIT; /* Trap FPU/SIMD */
+ write_ctx_reg(state, CTX_CPTR_EL3, reg);
+
+ /* Disable access to TPIDR2_EL0. */
+ reg = read_ctx_reg(state, CTX_SCR_EL3);
+ reg &= ~SCR_ENTP2_BIT;
+ write_ctx_reg(state, CTX_SCR_EL3, reg);
+}
diff --git a/lib/extensions/sve/sve.c b/lib/extensions/sve/sve.c
index 2702c30..aa8904b 100644
--- a/lib/extensions/sve/sve.c
+++ b/lib/extensions/sve/sve.c
@@ -43,3 +43,23 @@
write_ctx_reg(get_el3state_ctx(context), CTX_ZCR_EL3,
(ZCR_EL3_LEN_MASK & CONVERT_SVE_LENGTH(512)));
}
+
+void sve_disable(cpu_context_t *context)
+{
+ u_register_t reg;
+ el3_state_t *state;
+
+ /* Make sure SVE is implemented in hardware before continuing. */
+ if (!sve_supported()) {
+ return;
+ }
+
+ /* Get the context state. */
+ state = get_el3state_ctx(context);
+
+ /* Disable SVE and FPU since they share registers. */
+ reg = read_ctx_reg(state, CTX_CPTR_EL3);
+ reg &= ~CPTR_EZ_BIT; /* Trap SVE */
+ reg |= TFP_BIT; /* Trap FPU/SIMD */
+ write_ctx_reg(state, CTX_CPTR_EL3, reg);
+}
diff --git a/make_helpers/defaults.mk b/make_helpers/defaults.mk
index 45f5fa8..e88148f 100644
--- a/make_helpers/defaults.mk
+++ b/make_helpers/defaults.mk
@@ -134,7 +134,7 @@
ENABLE_PAUTH := 0
# Flag to enable access to the HCRX_EL2 register by setting SCR_EL3.HXEn.
-ENABLE_FEAT_HCX := 0
+ENABLE_FEAT_HCX := 0
# By default BL31 encryption disabled
ENCRYPT_BL31 := 0
@@ -222,13 +222,13 @@
SAVE_KEYS := 0
# Software Delegated Exception support
-SDEI_SUPPORT := 0
+SDEI_SUPPORT := 0
# True Random Number firmware Interface
-TRNG_SUPPORT := 0
+TRNG_SUPPORT := 0
# SMCCC PCI support
-SMC_PCI_SUPPORT := 0
+SMC_PCI_SUPPORT := 0
# Whether code and read-only data should be put on separate memory pages. The
# platform Makefile is free to override this value.
@@ -303,7 +303,7 @@
# SPE is only supported on AArch64 so disable it on AArch32.
ifeq (${ARCH},aarch32)
- override ENABLE_SPE_FOR_LOWER_ELS := 0
+ override ENABLE_SPE_FOR_LOWER_ELS := 0
endif
# Include Memory Tagging Extension registers in cpu context. This must be set
@@ -316,15 +316,18 @@
ENABLE_AMU_FCONF := 0
AMU_RESTRICT_COUNTERS := 0
-# By default, enable Scalable Vector Extension if implemented only for Non-secure
-# lower ELs
-# Note SVE is only supported on AArch64 - therefore do not enable in AArch32
-ifneq (${ARCH},aarch32)
- ENABLE_SVE_FOR_NS := 1
- ENABLE_SVE_FOR_SWD := 0
-else
- override ENABLE_SVE_FOR_NS := 0
- override ENABLE_SVE_FOR_SWD := 0
+# Enable SVE for non-secure world by default
+ENABLE_SVE_FOR_NS := 1
+ENABLE_SVE_FOR_SWD := 0
+
+# SME defaults to disabled
+ENABLE_SME_FOR_NS := 0
+ENABLE_SME_FOR_SWD := 0
+
+# If SME is enabled then force SVE off
+ifeq (${ENABLE_SME_FOR_NS},1)
+ override ENABLE_SVE_FOR_NS := 0
+ override ENABLE_SVE_FOR_SWD := 0
endif
SANITIZE_UB := off
@@ -348,7 +351,7 @@
SUPPORT_STACK_MEMTAG := no
# Select workaround for AT speculative behaviour.
-ERRATA_SPECULATIVE_AT := 0
+ERRATA_SPECULATIVE_AT := 0
# Trap RAS error record access from lower EL
RAS_TRAP_LOWER_EL_ERR_ACCESS := 0
@@ -379,9 +382,9 @@
# Note FEAT_TRBE is only supported on AArch64 - therefore do not enable in
# AArch32.
ifneq (${ARCH},aarch32)
- ENABLE_TRBE_FOR_NS := 0
+ ENABLE_TRBE_FOR_NS := 0
else
- override ENABLE_TRBE_FOR_NS := 0
+ override ENABLE_TRBE_FOR_NS := 0
endif
# By default, disable access of trace system registers from NS lower
diff --git a/services/std_svc/spmd/spmd.mk b/services/std_svc/spmd/spmd.mk
index 73f7c85..8efbdc8 100644
--- a/services/std_svc/spmd/spmd.mk
+++ b/services/std_svc/spmd/spmd.mk
@@ -1,11 +1,15 @@
#
-# Copyright (c) 2020, ARM Limited and Contributors. All rights reserved.
+# Copyright (c) 2021, ARM Limited and Contributors. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#
ifneq (${ARCH},aarch64)
- $(error "Error: SPMD is only supported on aarch64.")
+ $(error "Error: SPMD is only supported on aarch64.")
+endif
+
+ifeq (${ENABLE_SME_FOR_NS},1)
+ $(error "Error: SPMD is not compatible with ENABLE_SME_FOR_NS")
endif
SPMD_SOURCES += $(addprefix services/std_svc/spmd/, \