arm64: kvm: Migrate hyp-init to SMCCC

This makes all KVM HVCs consistent in their use of SMCCC by removing the
custom mechanism previously used by hyp-init.

Some care is taken to only clobber the registers x0-3 before the context
is saved as only those may be clobbered according to SMCCC. This is
achieved in __do_hyp_init by storing tpidr_el2 before checking the
function ID since tpidr_el2 won't be used until the correct function ID
is passed. The page table setup scratch registers are then remapped from
x{4,5,6} to x{0,1,2}.

Signed-off-by: Andrew Scull <ascull@google.com>
diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile
index 152d884..c652aee 100644
--- a/arch/arm64/kvm/Makefile
+++ b/arch/arm64/kvm/Makefile
@@ -13,7 +13,7 @@
 kvm-y := $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o $(KVM)/eventfd.o \
 	 $(KVM)/vfio.o $(KVM)/irqchip.o \
 	 arm.o mmu.o mmio.o psci.o perf.o hypercalls.o pvtime.o \
-	 inject_fault.o regmap.o va_layout.o hyp.o handle_exit.o \
+	 inject_fault.o regmap.o va_layout.o handle_exit.o \
 	 guest.o debug.o reset.o sys_regs.o sys_regs_generic_v8.o \
 	 vgic-sys-reg-v3.o fpsimd.o pmu.o \
 	 aarch32.o arch_timer.o \
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 244a793..f123b0ef 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -1281,9 +1281,6 @@
 
 DECLARE_KVM_NVHE_SYM(__kvm_hyp_start);
 DECLARE_PER_CPU(struct kvm_nvhe_hyp_params, kvm_nvhe_sym(kvm_nvhe_hyp_params));
-u64 __kvm_call_hyp_init(phys_addr_t pgd_ptr,
-			unsigned long tpidr_el2,
-			void *start_hyp);
 
 static void cpu_init_hyp_mode(void)
 {
@@ -1295,6 +1292,7 @@
 	unsigned long percpu_base_address;
 	unsigned long params_offset;
 	unsigned long tpidr_el2;
+	struct arm_smccc_res res;
 
 	/* Switch from the HYP stub to our own HYP init vector */
 	__hyp_set_vectors(kvm_get_idmap_vector());
@@ -1329,7 +1327,9 @@
 	 * cpus_have_const_cap() wrapper.
 	 */
 	BUG_ON(!system_capabilities_finalized());
-	__kvm_call_hyp_init(pgd_ptr, tpidr_el2, start_hyp);
+	arm_smccc_1_1_hvc(KVM_HOST_SMCCC_FUNC(__kvm_hyp_init),
+			  pgd_ptr, tpidr_el2, start_hyp, &res);
+	WARN_ON(res.a0 != SMCCC_RET_SUCCESS);
 
 	/* Copy the arm64_ssbd_callback_required information to hyp. */
 	if (this_cpu_read(arm64_ssbd_callback_required))
diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S
deleted file mode 100644
index 08f1cf5..0000000
--- a/arch/arm64/kvm/hyp.S
+++ /dev/null
@@ -1,21 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2012,2013 - ARM Ltd
- * Author: Marc Zyngier <marc.zyngier@arm.com>
- */
-
-#include <linux/linkage.h>
-
-#include <asm/alternative.h>
-#include <asm/assembler.h>
-#include <asm/cpufeature.h>
-
-/*
- * u64 __kvm_call_hyp_init(phys_addr_t pgd_ptr,
- *			   void *start_hyp,
- *			   struct kvm_nvhe_hyp_params *params);
- */
-SYM_FUNC_START(__kvm_call_hyp_init)
-	hvc	#0
-	ret
-SYM_FUNC_END(__kvm_call_hyp_init)
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-init.S b/arch/arm64/kvm/hyp/nvhe/hyp-init.S
index c7642ea..86e1c6b 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp-init.S
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-init.S
@@ -4,10 +4,12 @@
  * Author: Marc Zyngier <marc.zyngier@arm.com>
  */
 
+#include <linux/arm-smccc.h>
 #include <linux/linkage.h>
 
 #include <asm/assembler.h>
 #include <asm/kvm_arm.h>
+#include <asm/kvm_asm.h>
 #include <asm/kvm_mmu.h>
 #include <asm/pgtable-hwdef.h>
 #include <asm/sysreg.h>
@@ -43,29 +45,36 @@
 	b	.
 
 	/*
-	 * x0: HYP pgd
-	 * x1: per-CPU offset
-	 * x2: __kvm_hyp_start HYP address
+	 * x0: SMCCC function ID
+	 * x1: HYP pgd
+	 * x2: per-CPU offset
+	 * x3: __kvm_hyp_start HYP address
 	 */
 __do_hyp_init:
 	/* Check for a stub HVC call */
 	cmp	x0, #HVC_STUB_HCALL_NR
 	b.lo	__kvm_handle_stub_hvc
 
-	/* Set tpidr_el2 for use by HYP */
-	msr	tpidr_el2, x1
+	/* Set tpidr_el2 for use by HYP to free a register */
+	msr	tpidr_el2, x2
 
-	phys_to_ttbr x4, x0
+	mov	x2, #KVM_HOST_SMCCC_FUNC(__kvm_hyp_init)
+	cmp	x0, x2
+	b.eq	1f
+	mov	x0, #SMCCC_RET_NOT_SUPPORTED
+	eret
+
+1:	phys_to_ttbr x0, x1
 alternative_if ARM64_HAS_CNP
-	orr	x4, x4, #TTBR_CNP_BIT
+	orr	x0, x0, #TTBR_CNP_BIT
 alternative_else_nop_endif
-	msr	ttbr0_el2, x4
+	msr	ttbr0_el2, x0
 
-	mrs	x4, tcr_el1
-	mov_q	x5, TCR_EL2_MASK
-	and	x4, x4, x5
-	mov	x5, #TCR_EL2_RES1
-	orr	x4, x4, x5
+	mrs	x0, tcr_el1
+	mov_q	x1, TCR_EL2_MASK
+	and	x0, x0, x1
+	mov	x1, #TCR_EL2_RES1
+	orr	x0, x0, x1
 
 	/*
 	 * The ID map may be configured to use an extended virtual address
@@ -81,18 +90,18 @@
 	 *
 	 * So use the same T0SZ value we use for the ID map.
 	 */
-	ldr_l	x5, idmap_t0sz
-	bfi	x4, x5, TCR_T0SZ_OFFSET, TCR_TxSZ_WIDTH
+	ldr_l	x1, idmap_t0sz
+	bfi	x0, x1, TCR_T0SZ_OFFSET, TCR_TxSZ_WIDTH
 
 	/*
 	 * Set the PS bits in TCR_EL2.
 	 */
-	tcr_compute_pa_size x4, #TCR_EL2_PS_SHIFT, x5, x6
+	tcr_compute_pa_size x0, #TCR_EL2_PS_SHIFT, x1, x2
 
-	msr	tcr_el2, x4
+	msr	tcr_el2, x0
 
-	mrs	x4, mair_el1
-	msr	mair_el2, x4
+	mrs	x0, mair_el1
+	msr	mair_el2, x0
 	isb
 
 	/* Invalidate the stale TLBs from Bootloader */
@@ -104,13 +113,13 @@
 	 * as well as the EE bit on BE. Drop the A flag since the compiler
 	 * is allowed to generate unaligned accesses.
 	 */
-	mov_q	x4, (SCTLR_EL2_RES1 | (SCTLR_ELx_FLAGS & ~SCTLR_ELx_A))
-CPU_BE(	orr	x4, x4, #SCTLR_ELx_EE)
-	msr	sctlr_el2, x4
+	mov_q	x0, (SCTLR_EL2_RES1 | (SCTLR_ELx_FLAGS & ~SCTLR_ELx_A))
+CPU_BE(	orr	x0, x0, #SCTLR_ELx_EE)
+	msr	sctlr_el2, x0
 	isb
 
 	/* Leave the idmap posthaste and head over to __kvm_hyp_start */
-	br	x2
+	br	x3
 SYM_CODE_END(__kvm_hyp_init)
 
 SYM_CODE_START(__kvm_handle_stub_hvc)
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
index fa9bbd0..d15597f 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
@@ -107,18 +107,19 @@
 	struct kvm_vcpu *host_vcpu;
 	struct kvm_host_data *hyp_data;
 	struct kvm_cpu_context *hyp_ctxt;
-	int i;
 
 	host_vcpu = this_cpu_ptr(&kvm_host_vcpu);
 	hyp_data = this_cpu_ptr(&kvm_host_data);
 	hyp_ctxt = &hyp_data->host_ctxt;
 
-	/* Wipe the caller saved registers. */
-	for (i = 0; i < 18; ++i)
-		vcpu_gp_regs(host_vcpu)->regs.regs[i] = 0;
-
 	__sysreg_save_state_nvhe(&host_vcpu->arch.ctxt);
 
+	/*
+	 * The first time entering the host is seen by the host as the return
+	 * of the initialization HVC so mark it as successful.
+	 */
+	smccc_set_retval(host_vcpu, SMCCC_RET_SUCCESS, 0, 0, 0);
+
 	while (true) {
 		u64 exit_code;
 
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-start.S b/arch/arm64/kvm/hyp/nvhe/hyp-start.S
index c1e5cd7..351b005 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp-start.S
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-start.S
@@ -18,11 +18,29 @@
 	ldr	x1, [x0, #HYP_PARAMS_VECTOR]
 	msr	vbar_el2, x1
 
-	/* Save some of the host's state so it can be restored */
-	adr_this_cpu		x0, kvm_host_vcpu, x1
-	add			x0, x0, #VCPU_CONTEXT
-	save_callee_saved_regs	x0
-	save_sp_el0		x0, x1
+	/*
+	 * Save the host's general purpose registers so they can be restored
+	 * when entering back into the host. SMCCC only permits x0-3 to be
+	 * modified and they will be set to return values before entering back
+	 * into the host so have not had their original values saved.
+	 */
+	adr_this_cpu x0, kvm_host_vcpu, x1
+	add	x0, x0, #VCPU_CONTEXT
+
+	/* Store the host regs x4-x17 */
+	stp	x4, x5,   [x0, #CPU_XREG_OFFSET(4)]
+	stp	x6, x7,   [x0, #CPU_XREG_OFFSET(6)]
+	stp	x8, x9,   [x0, #CPU_XREG_OFFSET(8)]
+	stp	x10, x11, [x0, #CPU_XREG_OFFSET(10)]
+	stp	x12, x13, [x0, #CPU_XREG_OFFSET(12)]
+	stp	x14, x15, [x0, #CPU_XREG_OFFSET(14)]
+	stp	x16, x17, [x0, #CPU_XREG_OFFSET(16)]
+
+	/* Store the host regs x18-x29, lr */
+	save_callee_saved_regs x0
+
+	/* Store the host's sp_el0 */
+	save_sp_el0 x0, x1
 
 	b	kvm_hyp_main
 SYM_CODE_END(__kvm_hyp_start)