KVM: arm64: Enable SVE for protected guests
Now that the hypervisor has complete control over the guest's SVE
state, allow protected guests to use SVE: advertise it in the protected
VM's view of ID_AA64PFR0_EL1 and ID_AA64ZFR0_EL1, and route SVE traps
to the FP/SIMD handler instead of treating SVE as a restricted feature.
Signed-off-by: Fuad Tabba <tabba@google.com>
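---
For context, the sketch below (plain userspace C, not kernel code) models
the ID-register masking pattern this patch extends: the guest-visible
value starts from the host's sanitised register, is filtered through the
PVM allow mask, and the SVE field is cleared when the vCPU was not
configured with SVE. The helper name and the simple AND-based filtering
are illustrative stand-ins for get_pvm_id_aa64pfr0() and
get_restricted_features_unsigned(); only the field position
(ID_AA64PFR0_EL1.SVE, bits [35:32]) is architectural.

/*
 * Standalone illustration of guest-visible ID register masking.
 * guest_visible_id_aa64pfr0() is a hypothetical stand-in, not the
 * hypervisor implementation.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SVE_SHIFT	32			/* ID_AA64PFR0_EL1.SVE, bits [35:32] */
#define SVE_MASK	(0xfULL << SVE_SHIFT)

static uint64_t guest_visible_id_aa64pfr0(uint64_t host_val,
					  uint64_t allow_mask,
					  bool vcpu_has_sve)
{
	/* Keep only the fields the allow mask exposes to protected VMs. */
	uint64_t val = host_val & allow_mask;

	/* Hide SVE entirely if this vCPU was not configured with it. */
	if (!vcpu_has_sve)
		val &= ~SVE_MASK;

	return val;
}

int main(void)
{
	uint64_t host = 0x1ULL << SVE_SHIFT;	/* host advertises SVE (IMP) */
	uint64_t allow = SVE_MASK;		/* allow mask now includes SVE */

	printf("with SVE:    %#llx\n",
	       (unsigned long long)guest_visible_id_aa64pfr0(host, allow, true));
	printf("without SVE: %#llx\n",
	       (unsigned long long)guest_visible_id_aa64pfr0(host, allow, false));
	return 0;
}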
diff --git a/arch/arm64/include/asm/kvm_pkvm.h b/arch/arm64/include/asm/kvm_pkvm.h
index a61711f..5e90ca8 100644
--- a/arch/arm64/include/asm/kvm_pkvm.h
+++ b/arch/arm64/include/asm/kvm_pkvm.h
@@ -51,6 +51,7 @@ bool pkvm_is_hyp_created(struct kvm *kvm);
* - AArch64 guests only (no support for AArch32 guests):
* AArch32 adds complexity in trap handling, emulation, condition codes,
* etc...
+ * - SVE
* - RAS (v1)
* Supported by KVM
*/
@@ -65,6 +66,7 @@ bool pkvm_is_hyp_created(struct kvm *kvm);
SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, EL1, IMP) | \
SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, EL2, IMP) | \
SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, EL3, IMP) | \
+ SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, SVE, IMP) | \
SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, RAS, IMP) \
)
@@ -145,11 +147,19 @@ bool pkvm_is_hyp_created(struct kvm *kvm);
#define PVM_ID_AA64MMFR3_ALLOW (0ULL)
/*
- * No support for Scalable Vectors for protected VMs:
- * Requires additional support from KVM, e.g., context-switching and
- * trapping at EL2
+ * No restrictions on the Scalable Vector Extension (SVE).
*/
-#define PVM_ID_AA64ZFR0_ALLOW (0ULL)
+#define PVM_ID_AA64ZFR0_ALLOW (\
+ ARM64_FEATURE_MASK(ID_AA64ZFR0_EL1_SVEver) | \
+ ARM64_FEATURE_MASK(ID_AA64ZFR0_EL1_AES) | \
+ ARM64_FEATURE_MASK(ID_AA64ZFR0_EL1_BitPerm) | \
+ ARM64_FEATURE_MASK(ID_AA64ZFR0_EL1_BF16) | \
+ ARM64_FEATURE_MASK(ID_AA64ZFR0_EL1_SHA3) | \
+ ARM64_FEATURE_MASK(ID_AA64ZFR0_EL1_SM4) | \
+ ARM64_FEATURE_MASK(ID_AA64ZFR0_EL1_I8MM) | \
+ ARM64_FEATURE_MASK(ID_AA64ZFR0_EL1_F32MM) | \
+ ARM64_FEATURE_MASK(ID_AA64ZFR0_EL1_F64MM) \
+ )
/*
* No support for debug, including breakpoints, and watchpoints for protected
diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c
index ea9d171..69c5aa68 100644
--- a/arch/arm64/kvm/hyp/nvhe/switch.c
+++ b/arch/arm64/kvm/hyp/nvhe/switch.c
@@ -224,7 +224,7 @@ static const exit_handler_fn pvm_exit_handlers[] = {
[0 ... ESR_ELx_EC_MAX] = NULL,
[ESR_ELx_EC_HVC64] = kvm_handle_pvm_hvc64,
[ESR_ELx_EC_SYS64] = kvm_handle_pvm_sys64,
- [ESR_ELx_EC_SVE] = kvm_handle_pvm_restricted,
+ [ESR_ELx_EC_SVE] = kvm_hyp_handle_fpsimd,
[ESR_ELx_EC_SME] = kvm_handle_pvm_restricted,
[ESR_ELx_EC_FP_ASIMD] = kvm_hyp_handle_fpsimd,
[ESR_ELx_EC_IABT_LOW] = kvm_hyp_handle_iabt_low,
diff --git a/arch/arm64/kvm/hyp/nvhe/sys_regs.c b/arch/arm64/kvm/hyp/nvhe/sys_regs.c
index 7c66e8b..a870ef5 100644
--- a/arch/arm64/kvm/hyp/nvhe/sys_regs.c
+++ b/arch/arm64/kvm/hyp/nvhe/sys_regs.c
@@ -85,8 +85,13 @@ static u64 get_restricted_features_unsigned(u64 sys_reg_val,
static u64 get_pvm_id_aa64pfr0(const struct kvm_vcpu *vcpu)
{
- return get_restricted_features_unsigned(id_aa64pfr0_el1_sys_val,
- PVM_ID_AA64PFR0_ALLOW);
+ u64 value = get_restricted_features_unsigned(id_aa64pfr0_el1_sys_val,
+ PVM_ID_AA64PFR0_ALLOW);
+
+ if (!vcpu_has_sve(vcpu))
+ value &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_SVE);
+
+ return value;
}
static u64 get_pvm_id_aa64pfr1(const struct kvm_vcpu *vcpu)
@@ -103,7 +108,11 @@ static u64 get_pvm_id_aa64pfr1(const struct kvm_vcpu *vcpu)
static u64 get_pvm_id_aa64zfr0(const struct kvm_vcpu *vcpu)
{
- return id_aa64zfr0_el1_sys_val & PVM_ID_AA64ZFR0_ALLOW;
+ if (vcpu_has_sve(vcpu))
+ return get_restricted_features_unsigned(id_aa64zfr0_el1_sys_val,
+ PVM_ID_AA64ZFR0_ALLOW);
+
+ return 0;
}
static u64 get_pvm_id_aa64dfr0(const struct kvm_vcpu *vcpu)
@@ -436,8 +445,6 @@ static const struct sys_reg_desc pvm_sys_reg_descs[] = {
ID_UNALLOCATED(7,6),
ID_UNALLOCATED(7,7),
- /* Scalable Vector Registers are restricted. */
-
RAZ_WI(SYS_ERRIDR_EL1),
RAZ_WI(SYS_ERRSELR_EL1),
RAZ_WI(SYS_ERXFR_EL1),
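
A similarly simplified (non-kernel) model of the nVHE exit-handler table
touched in switch.c: the table is indexed by the exception class taken
from ESR_EL2, so routing ESR_ELx_EC_SVE to the same entry as
ESR_ELx_EC_FP_ASIMD is what lets SVE traps reuse the lazy FP/SIMD restore
path. The vcpu type and dispatch function here are hypothetical; only the
EC values mirror the architecture.

/*
 * Illustrative dispatch-by-exception-class table, modelled on
 * pvm_exit_handlers[]. Names other than the EC constants are made up.
 */
#include <stdio.h>

#define EC_FP_ASIMD	0x07	/* ESR_ELx_EC_FP_ASIMD */
#define EC_SVE		0x19	/* ESR_ELx_EC_SVE */
#define EC_MAX		0x3f	/* ESR_ELx_EC_MAX */

struct fake_vcpu { int id; };
typedef int (*exit_handler_fn)(struct fake_vcpu *vcpu);

static int handle_fpsimd(struct fake_vcpu *vcpu)
{
	/* In KVM this would restore FP/SIMD/SVE state and re-enter the guest. */
	printf("vcpu %d: lazy FP/SIMD/SVE restore\n", vcpu->id);
	return 1;
}

static const exit_handler_fn handlers[EC_MAX + 1] = {
	[EC_FP_ASIMD]	= handle_fpsimd,
	[EC_SVE]	= handle_fpsimd,	/* same handler, as in the patch */
};

static int dispatch_exit(struct fake_vcpu *vcpu, unsigned int ec)
{
	exit_handler_fn fn = handlers[ec];

	return fn ? fn(vcpu) : 0;	/* 0: trap not handled by this table */
}

int main(void)
{
	struct fake_vcpu vcpu = { .id = 0 };

	dispatch_exit(&vcpu, EC_SVE);
	return 0;
}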