arm64: kvm: Migrate hyp interface to SMCCC

Rather than passing arbitrary function pointers to run at hyp, define
an equivalent set of SMCCC functions. Since the SMCCC function IDs are
strongly tied to the underlying function prototypes, it is not expected
for the host to ever call an invalid ID, but a warning is generated if
this does ever occur.
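
As an illustrative sketch (not part of the diff), a host-side call such
as kvm_call_hyp_nvhe(__kvm_flush_vm_context) now expands to roughly:

	struct arm_smccc_res res;

	/* Issue an HVC with the SMCCC ID assigned to the hyp function. */
	arm_smccc_1_1_hvc(KVM_HOST_SMCCC_FUNC(__kvm_flush_vm_context), &res);

	/* Hyp reports unknown IDs as SMCCC_RET_NOT_SUPPORTED. */
	WARN_ON(res.a0 != SMCCC_RET_SUCCESS);

Any return value from the hyp function is delivered in res.a1.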

Signed-off-by: Andrew Scull <ascull@google.com>
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index abd08fe..86d63fc 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -38,10 +38,37 @@
 
 #define __SMCCC_WORKAROUND_1_SMC_SZ 36
 
+#define KVM_HOST_SMCCC_ID(id)						\
+	ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL,				\
+			   ARM_SMCCC_SMC_64,				\
+			   ARM_SMCCC_OWNER_STANDARD_HYP,		\
+			   (id))
+
+#define KVM_HOST_SMCCC_FUNC(name) KVM_HOST_SMCCC_ID(__KVM_HOST_SMCCC_FUNC_##name)
+
+#define __KVM_HOST_SMCCC_FUNC___kvm_hyp_init			0
+#define __KVM_HOST_SMCCC_FUNC___kvm_flush_vm_context		1
+#define __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid_ipa		2
+#define __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid		3
+#define __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_local_vmid	4
+#define __KVM_HOST_SMCCC_FUNC___kvm_timer_set_cntvoff		5
+#define __KVM_HOST_SMCCC_FUNC___kvm_vcpu_run			6
+#define __KVM_HOST_SMCCC_FUNC___kvm_set_ssbd_callback_required	7
+#define __KVM_HOST_SMCCC_FUNC___kvm_enable_ssbs			8
+#define __KVM_HOST_SMCCC_FUNC___vgic_v3_get_ich_vtr_el2		9
+#define __KVM_HOST_SMCCC_FUNC___vgic_v3_read_vmcr		10
+#define __KVM_HOST_SMCCC_FUNC___vgic_v3_write_vmcr		11
+#define __KVM_HOST_SMCCC_FUNC___vgic_v3_init_lrs		12
+#define __KVM_HOST_SMCCC_FUNC___kvm_get_mdcr_el2		13
+#define __KVM_HOST_SMCCC_FUNC___vgic_v3_save_aprs		14
+#define __KVM_HOST_SMCCC_FUNC___vgic_v3_restore_aprs		15
+
 #ifndef __ASSEMBLY__
 
 #include <linux/mm.h>
 
+#include <kvm/arm_vgic.h>
+
 /*
  * Translate name of a symbol defined in nVHE hyp to the name seen
  * by kernel proper. All nVHE symbols are prefixed by the build system
@@ -85,21 +112,17 @@
 extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
 extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
 extern void __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu);
-
 extern void __kvm_timer_set_cntvoff(u64 cntvoff);
-
 extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
-
 extern void __kvm_set_ssbd_callback_required(void);
 extern void __kvm_enable_ssbs(void);
-
 extern u64 __vgic_v3_get_ich_vtr_el2(void);
 extern u64 __vgic_v3_read_vmcr(void);
 extern void __vgic_v3_write_vmcr(u32 vmcr);
 extern void __vgic_v3_init_lrs(void);
-
 extern u32 __kvm_get_mdcr_el2(void);
-
+extern void __vgic_v3_save_aprs(struct vgic_v3_cpu_if *cpu_if);
+extern void __vgic_v3_restore_aprs(struct vgic_v3_cpu_if *cpu_if);
 extern char __smccc_workaround_1_smc[__SMCCC_WORKAROUND_1_SMC_SZ];
 
 /*
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index fd4e660..345760d 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -11,6 +11,7 @@
 #ifndef __ARM64_KVM_HOST_H__
 #define __ARM64_KVM_HOST_H__
 
+#include <linux/arm-smccc.h>
 #include <linux/bitmap.h>
 #include <linux/types.h>
 #include <linux/jump_label.h>
@@ -446,18 +447,15 @@
 void kvm_arm_halt_guest(struct kvm *kvm);
 void kvm_arm_resume_guest(struct kvm *kvm);
 
-u64 __kvm_call_hyp(void *hypfn, ...);
-
 #define kvm_call_hyp_nvhe(f, ...)					\
-	do {								\
-		DECLARE_KVM_NVHE_SYM(f);				\
-		__kvm_call_hyp(kvm_ksym_ref_nvhe(f), ##__VA_ARGS__);	\
-	} while(0)
-
-#define kvm_call_hyp_nvhe_ret(f, ...)					\
 	({								\
-		DECLARE_KVM_NVHE_SYM(f);				\
-		__kvm_call_hyp(kvm_ksym_ref_nvhe(f), ##__VA_ARGS__);	\
+		struct arm_smccc_res res;				\
+									\
+		arm_smccc_1_1_hvc(KVM_HOST_SMCCC_FUNC(f),		\
+				  ##__VA_ARGS__, &res);			\
+		WARN_ON(res.a0 != SMCCC_RET_SUCCESS);			\
+									\
+		res.a1;							\
 	})
 
 /*
@@ -483,7 +481,7 @@
 			ret = f(__VA_ARGS__);				\
 			isb();						\
 		} else {						\
-			ret = kvm_call_hyp_nvhe_ret(f, ##__VA_ARGS__);	\
+			ret = kvm_call_hyp_nvhe(f, ##__VA_ARGS__);	\
 		}							\
 									\
 		ret;							\
diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h
index 7876567..68e8077 100644
--- a/arch/arm64/include/asm/kvm_hyp.h
+++ b/arch/arm64/include/asm/kvm_hyp.h
@@ -62,8 +62,6 @@
 void __vgic_v3_restore_state(struct vgic_v3_cpu_if *cpu_if);
 void __vgic_v3_activate_traps(struct vgic_v3_cpu_if *cpu_if);
 void __vgic_v3_deactivate_traps(struct vgic_v3_cpu_if *cpu_if);
-void __vgic_v3_save_aprs(struct vgic_v3_cpu_if *cpu_if);
-void __vgic_v3_restore_aprs(struct vgic_v3_cpu_if *cpu_if);
 int __vgic_v3_perform_cpuif_access(struct kvm_vcpu *vcpu);
 
 #ifdef __KVM_NVHE_HYPERVISOR__
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 2fe5d63..244a793 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -1281,6 +1281,9 @@
 
 DECLARE_KVM_NVHE_SYM(__kvm_hyp_start);
 DECLARE_PER_CPU(struct kvm_nvhe_hyp_params, kvm_nvhe_sym(kvm_nvhe_hyp_params));
+u64 __kvm_call_hyp_init(phys_addr_t pgd_ptr,
+			unsigned long tpidr_el2,
+			void *start_hyp);
 
 static void cpu_init_hyp_mode(void)
 {
@@ -1326,7 +1329,7 @@
 	 * cpus_have_const_cap() wrapper.
 	 */
 	BUG_ON(!system_capabilities_finalized());
-	__kvm_call_hyp((void *)pgd_ptr, tpidr_el2, start_hyp);
+	__kvm_call_hyp_init(pgd_ptr, tpidr_el2, start_hyp);
 
 	/* Copy the arm64_ssbd_callback_required information to hyp. */
 	if (this_cpu_read(arm64_ssbd_callback_required))
diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S
index 3c79a11..08f1cf5 100644
--- a/arch/arm64/kvm/hyp.S
+++ b/arch/arm64/kvm/hyp.S
@@ -11,24 +11,11 @@
 #include <asm/cpufeature.h>
 
 /*
- * u64 __kvm_call_hyp(void *hypfn, ...);
- *
- * This is not really a variadic function in the classic C-way and care must
- * be taken when calling this to ensure parameters are passed in registers
- * only, since the stack will change between the caller and the callee.
- *
- * Call the function with the first argument containing a pointer to the
- * function you wish to call in Hyp mode, and subsequent arguments will be
- * passed as x0, x1, and x2 (a maximum of 3 arguments in addition to the
- * function pointer can be passed).  The function being called must be mapped
- * in Hyp mode (see init_hyp_mode in arch/arm/kvm/arm.c).  Return values are
- * passed in x0.
- *
- * A function pointer with a value less than 0xfff has a special meaning,
- * and is used to implement hyp stubs in the same way as in
- * arch/arm64/kernel/hyp_stub.S.
+ * u64 __kvm_call_hyp_init(phys_addr_t pgd_ptr,
+ *			   unsigned long tpidr_el2,
+ *			   void *start_hyp);
  */
-SYM_FUNC_START(__kvm_call_hyp)
+SYM_FUNC_START(__kvm_call_hyp_init)
 	hvc	#0
 	ret
-SYM_FUNC_END(__kvm_call_hyp)
+SYM_FUNC_END(__kvm_call_hyp_init)
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
index 7218f02..fa9bbd0 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
@@ -5,14 +5,102 @@
  */
 
 #include <asm/kvm_asm.h>
-#include <asm/kvm_emulate.h>
 #include <asm/kvm_hyp.h>
 
+#include <kvm/arm_hypercalls.h>
+
 typedef unsigned long (*hypcall_fn_t)(unsigned long, unsigned long, unsigned long);
 
 DEFINE_PER_CPU(struct kvm_nvhe_hyp_params, kvm_nvhe_hyp_params);
 DEFINE_PER_CPU(struct kvm_vcpu, kvm_host_vcpu);
 
+static void handle_host_hcall(struct kvm_vcpu *host_vcpu)
+{
+	unsigned long ret = 0;
+
+	switch (smccc_get_function(host_vcpu)) {
+	case KVM_HOST_SMCCC_FUNC(__kvm_flush_vm_context):
+		__kvm_flush_vm_context();
+		break;
+	case KVM_HOST_SMCCC_FUNC(__kvm_tlb_flush_vmid_ipa): {
+			struct kvm *kvm =
+				(struct kvm *)smccc_get_arg1(host_vcpu);
+			phys_addr_t ipa = smccc_get_arg2(host_vcpu);
+
+			__kvm_tlb_flush_vmid_ipa(kvm, ipa);
+			break;
+		}
+	case KVM_HOST_SMCCC_FUNC(__kvm_tlb_flush_vmid): {
+			struct kvm *kvm =
+				(struct kvm *)smccc_get_arg1(host_vcpu);
+
+			__kvm_tlb_flush_vmid(kvm);
+			break;
+		}
+	case KVM_HOST_SMCCC_FUNC(__kvm_tlb_flush_local_vmid): {
+			struct kvm_vcpu *vcpu =
+				(struct kvm_vcpu *)smccc_get_arg1(host_vcpu);
+
+			__kvm_tlb_flush_local_vmid(vcpu);
+			break;
+		}
+	case KVM_HOST_SMCCC_FUNC(__kvm_timer_set_cntvoff): {
+			u64 cntvoff = smccc_get_arg1(host_vcpu);
+
+			__kvm_timer_set_cntvoff(cntvoff);
+			break;
+		}
+	case KVM_HOST_SMCCC_FUNC(__kvm_vcpu_run): {
+			struct kvm_vcpu *vcpu =
+				(struct kvm_vcpu *)smccc_get_arg1(host_vcpu);
+
+			ret = __kvm_vcpu_run(vcpu);
+			break;
+		}
+	case KVM_HOST_SMCCC_FUNC(__kvm_enable_ssbs):
+		__kvm_enable_ssbs();
+		break;
+	case KVM_HOST_SMCCC_FUNC(__vgic_v3_get_ich_vtr_el2):
+		ret = __vgic_v3_get_ich_vtr_el2();
+		break;
+	case KVM_HOST_SMCCC_FUNC(__vgic_v3_read_vmcr):
+		ret = __vgic_v3_read_vmcr();
+		break;
+	case KVM_HOST_SMCCC_FUNC(__vgic_v3_write_vmcr): {
+			u32 vmcr = smccc_get_arg1(host_vcpu);
+
+			__vgic_v3_write_vmcr(vmcr);
+			break;
+		}
+	case KVM_HOST_SMCCC_FUNC(__vgic_v3_init_lrs):
+		__vgic_v3_init_lrs();
+		break;
+	case KVM_HOST_SMCCC_FUNC(__kvm_get_mdcr_el2):
+		ret = __kvm_get_mdcr_el2();
+		break;
+	case KVM_HOST_SMCCC_FUNC(__vgic_v3_save_aprs): {
+			struct vgic_v3_cpu_if *cpu_if =
+				(struct vgic_v3_cpu_if *)smccc_get_arg1(host_vcpu);
+
+			__vgic_v3_save_aprs(cpu_if);
+			break;
+		}
+	case KVM_HOST_SMCCC_FUNC(__vgic_v3_restore_aprs): {
+			struct vgic_v3_cpu_if *cpu_if =
+				(struct vgic_v3_cpu_if *)smccc_get_arg1(host_vcpu);
+
+			__vgic_v3_restore_aprs(cpu_if);
+			break;
+		}
+	default:
+		/* Invalid host HVC. */
+		smccc_set_retval(host_vcpu, SMCCC_RET_NOT_SUPPORTED, 0, 0, 0);
+		return;
+	}
+
+	smccc_set_retval(host_vcpu, SMCCC_RET_SUCCESS, ret, 0, 0);
+}
+
 void __noreturn kvm_hyp_main(struct kvm_nvhe_hyp_params *params)
 {
 	/* Set tpidr_el2 for use by HYP */
@@ -50,19 +138,7 @@
 
 		/* TODO: handle exit codes properly */
 
-		/*
-		 * __kvm_call_hyp takes a pointer in the host address space and
-		 * up to three arguments.
-		 */
-		if (exit_code == ARM_EXCEPTION_TRAP) {
-			hypcall_fn_t func = (hypcall_fn_t)
-				kern_hyp_va(vcpu_get_reg(host_vcpu, 0));
-			unsigned long ret;
-
-			ret = func(vcpu_get_reg(host_vcpu, 1),
-				   vcpu_get_reg(host_vcpu, 2),
-				   vcpu_get_reg(host_vcpu, 3));
-			vcpu_set_reg(host_vcpu, 0, ret);
-		}
+		if (exit_code == ARM_EXCEPTION_TRAP)
+			handle_host_hcall(host_vcpu);
 	}
 }
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index a8d8fdc..14e89aa 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -6,6 +6,7 @@
 #define __KVM_ARM_VGIC_H
 
 #include <linux/kernel.h>
+#include <linux/kref.h>
 #include <linux/kvm.h>
 #include <linux/irqreturn.h>
 #include <linux/spinlock.h>