KVM: arm64: Activate FGT trapping for pVMs

Enable fine-grained traps (FGT) for protected VMs to control guest
access to features that are restricted for these VMs.

On guest entry, the host's FGT and HCRX_EL2 values are saved and the
pVM-specific set/clear bits are applied on top; the saved values are
restored when switching back to the host.

Signed-off-by: Fuad Tabba <tabba@google.com>
diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c
index 9780e2de..a3e9ccc 100644
--- a/arch/arm64/kvm/hyp/nvhe/switch.c
+++ b/arch/arm64/kvm/hyp/nvhe/switch.c
@@ -36,14 +36,90 @@
 
 extern void kvm_nvhe_prepare_backtrace(unsigned long fp, unsigned long pc);
 
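+/* Apply the pVM-specific clear/set bits for a fine-grained trap register. */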
+#define update_pvm_fgt_traps(hctxt, vcpu, kvm, reg)	\
+	update_fgt_traps_cs(hctxt, vcpu, kvm, reg, PVM_ ## reg ## _CLR, PVM_ ## reg ## _SET)
+
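+/* Activate the HCRX_EL2 traps for a protected VM. */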
+static void __activate_pvm_traps_hcrx(struct kvm_vcpu *vcpu)
+{
+	struct kvm_cpu_context *hctxt = host_data_ptr(host_ctxt);
+	u64 clear = 0;
+	u64 set = 0;
+
+	if (!cpus_have_final_cap(ARM64_HAS_HCX))
+		return;
+
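+	/* Save the host's HCRX_EL2 so it can be restored on exit. */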
+	ctxt_sys_reg(hctxt, HCRX_EL2) = read_sysreg_s(SYS_HCRX_EL2);
+	if (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu))
+		compute_clr_set(vcpu, HCRX_EL2, clear, set);
+
+	set |= PVM_HCRX_EL2_SET;
+	clear |= PVM_HCRX_EL2_CLR;
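+	/* Only rewrite HCRX_EL2 if there is anything to change. */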
+	if (clear || set) {
+		u64 val = __HCRX_EL2_nMASK;
+
+		val |= set;
+		val &= ~clear;
+		write_sysreg_s(val, SYS_HCRX_EL2);
+	}
+}
+
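+/*
+ * Activate the fine-grained trap registers for a protected VM, saving the
+ * host's values so that they can be restored on exit.
+ */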
+static void __activate_pvm_traps_hfgxtr(struct kvm_vcpu *vcpu)
+{
+	struct kvm_cpu_context *hctxt = host_data_ptr(host_ctxt);
+	struct kvm *kvm = kern_hyp_va(vcpu->kvm);
+
+	if (!cpus_have_final_cap(ARM64_HAS_FGT))
+		return;
+
+	update_pvm_fgt_traps(hctxt, vcpu, kvm, HFGRTR_EL2);
+
+	/* Trap guest writes to TCR_EL1 to prevent it from enabling HA or HD. */
+	if (cpus_have_final_cap(ARM64_WORKAROUND_AMPERE_AC03_CPU_38)) {
+		update_fgt_traps_cs(hctxt, vcpu, kvm, HFGWTR_EL2, PVM_HFGWTR_EL2_CLR,
+				    PVM_HFGWTR_EL2_SET | HFGxTR_EL2_TCR_EL1_MASK);
+	} else {
+		update_pvm_fgt_traps(hctxt, vcpu, kvm, HFGWTR_EL2);
+	}
+
+	update_pvm_fgt_traps(hctxt, vcpu, kvm, HFGITR_EL2);
+	update_pvm_fgt_traps(hctxt, vcpu, kvm, HDFGRTR_EL2);
+	update_pvm_fgt_traps(hctxt, vcpu, kvm, HDFGWTR_EL2);
+
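+	/* HAFGRTR_EL2 is only present when the CPU implements the AMU. */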
+	if (cpu_has_amu())
+		update_pvm_fgt_traps(hctxt, vcpu, kvm, HAFGRTR_EL2);
+}
+
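+/* Restore the host's fine-grained trap register values. */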
+static void __deactivate_pvm_traps_hfgxtr(struct kvm_vcpu *vcpu)
+{
+	struct kvm_cpu_context *hctxt = host_data_ptr(host_ctxt);
+
+	if (!cpus_have_final_cap(ARM64_HAS_FGT))
+		return;
+
+	write_sysreg_s(ctxt_sys_reg(hctxt, HFGRTR_EL2), SYS_HFGRTR_EL2);
+	write_sysreg_s(ctxt_sys_reg(hctxt, HFGWTR_EL2), SYS_HFGWTR_EL2);
+	write_sysreg_s(ctxt_sys_reg(hctxt, HFGITR_EL2), SYS_HFGITR_EL2);
+	write_sysreg_s(ctxt_sys_reg(hctxt, HDFGRTR_EL2), SYS_HDFGRTR_EL2);
+	write_sysreg_s(ctxt_sys_reg(hctxt, HDFGWTR_EL2), SYS_HDFGWTR_EL2);
+
+	if (cpu_has_amu())
+		write_sysreg_s(ctxt_sys_reg(hctxt, HAFGRTR_EL2), SYS_HAFGRTR_EL2);
+}
+
 static void __activate_traps(struct kvm_vcpu *vcpu)
 {
 	u64 val;
 
 	___activate_traps(vcpu, vcpu->arch.hcr_el2);
 	__activate_traps_common(vcpu);
-	__activate_traps_hcrx(vcpu);
-	__activate_traps_hfgxtr(vcpu);
+
+	if (unlikely(vcpu_is_protected(vcpu))) {
+		__activate_pvm_traps_hcrx(vcpu);
+		__activate_pvm_traps_hfgxtr(vcpu);
+	} else {
+		__activate_traps_hcrx(vcpu);
+		__activate_traps_hfgxtr(vcpu);
+	}
 
 	val = vcpu->arch.cptr_el2;
 	val |= CPTR_EL2_TAM;	/* Same bit irrespective of E2H */
@@ -106,7 +182,11 @@
 	}
 
 	__deactivate_traps_common(vcpu);
-	__deactivate_traps_hfgxtr(vcpu);
+
+	if (unlikely(vcpu_is_protected(vcpu)))
+		__deactivate_pvm_traps_hfgxtr(vcpu);
+	else
+		__deactivate_traps_hfgxtr(vcpu);
 
 	write_sysreg(this_cpu_ptr(&kvm_init_params)->hcr_el2, hcr_el2);