HACK: Bodge a PVM firmware loader at EL1

For the purposes of testing, cook up a PVM firmware loader at EL1 which
simply copies the image into the user mapping of the VMM. Obviously this
needs to be done by EL2 as part of PVM initialisation, but until that's
implemented this is useful for development.

Signed-off-by: Will Deacon <will@kernel.org>
Signed-off-by: Will Deacon <willdeacon@google.com>
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 5cea500..8723c81 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -789,6 +789,7 @@
 
 int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type);
 
+int kvm_arm_vcpu_pkvm_init(struct kvm_vcpu *vcpu);
 int kvm_arm_vm_ioctl_pkvm(struct kvm *kvm, struct kvm_enable_cap *cap);
 #define kvm_vm_is_protected(kvm) ((kvm)->arch.pkvm.enabled)
 
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 14f648f..19c634e 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -619,6 +619,10 @@
 		return ret;
 
 	ret = kvm_arm_pmu_v3_enable(vcpu);
+	if (ret)
+		return ret;
+
+	ret = kvm_arm_vcpu_pkvm_init(vcpu);
 
 	return ret;
 }
diff --git a/arch/arm64/kvm/pkvm.c b/arch/arm64/kvm/pkvm.c
index c6fac22..6cbc55d 100644
--- a/arch/arm64/kvm/pkvm.c
+++ b/arch/arm64/kvm/pkvm.c
@@ -6,6 +6,7 @@
  * Author: Will Deacon <will@kernel.org>
  */
 
+#include <linux/io.h>
 #include <linux/kvm_host.h>
 #include <linux/mm.h>
 #include <linux/of_fdt.h>
@@ -51,8 +52,65 @@
 RESERVEDMEM_OF_DECLARE(pkvm_firmware, "linux,pkvm-guest-firmware-memory",
 		       pkvm_firmware_rmem_init);
 
+/*
+ * Establish the pKVM boot-protocol register state for a vCPU.
+ *
+ * No-op for non-protected VMs. For a protected VM with a firmware
+ * memslot, the boot vCPU (vcpu_id 0) gets its initial register state
+ * pointed at the bootloader image; all other vCPUs must be created
+ * powered off so that firmware brings them up itself.
+ *
+ * Returns 0 on success, -EPERM if a secondary vCPU was not requested
+ * in the powered-off state.
+ */
+int kvm_arm_vcpu_pkvm_init(struct kvm_vcpu *vcpu)
+{
+	struct kvm *kvm = vcpu->kvm;
+
+	if (!kvm_vm_is_protected(kvm))
+		return 0;
+
+	if (!vcpu->vcpu_id) {
+		int i;
+		struct kvm_memory_slot *slot = kvm->arch.pkvm.firmware_slot;
+		struct user_pt_regs *regs = vcpu_gp_regs(vcpu);
+
+		/* No firmware slot: nothing to set up for the boot vCPU. */
+		if (!slot)
+			return 0;
+
+		/* X0 - X14 provided by VMM (preserved) */
+
+		/* X15: Boot protocol version */
+		regs->regs[15] = 0;
+
+		/* X16 - X30 reserved (zeroed) */
+		for (i = 16; i <= 30; ++i)
+			regs->regs[i] = 0;
+
+		/* PC: IPA base of bootloader memslot */
+		regs->pc = slot->base_gfn << PAGE_SHIFT;
+
+		/* SP: IPA end of bootloader memslot */
+		regs->sp = (slot->base_gfn + slot->npages) << PAGE_SHIFT;
+	} else if (!test_bit(KVM_ARM_VCPU_POWER_OFF, vcpu->arch.features)) {
+		/* Secondary vCPUs must start powered off under firmware control. */
+		return -EPERM;
+	}
+
+	return 0;
+}
+
+/*
+ * Temporary EL1 firmware loader: zero the firmware memslot's user
+ * mapping in the VMM and copy the reserved pKVM firmware image into it.
+ *
+ * HACK: this must eventually be done by EL2 during PVM initialisation;
+ * the deliberately hostile name discourages new callers.
+ *
+ * Returns 0 on success, -E2BIG if the image does not fit in the slot,
+ * -EFAULT on a user-copy or remap failure.
+ */
+static int __do_not_call_this_function(struct kvm_memory_slot *slot)
+{
+	int uncopied;
+	size_t sz = pkvm_firmware_mem->size;
+	void *src, __user *dst = (__force void __user *)slot->userspace_addr;
+
+	/*
+	 * Refuse an image larger than the memslot, otherwise the copy
+	 * below would overrun the user mapping.
+	 */
+	if (sz > slot->npages * PAGE_SIZE)
+		return -E2BIG;
+
+	if (clear_user(dst, slot->npages * PAGE_SIZE))
+		return -EFAULT;
+
+	src = memremap(pkvm_firmware_mem->base, sz, MEMREMAP_WB);
+	if (!src)
+		return -EFAULT;
+
+	//((u32 *)src)[0] = 0xaa0f03e0; // MOV	X0, X15
+	//((u32 *)src)[1] = 0xd61f0200; // BR	X16
+	uncopied = copy_to_user(dst, src, sz);
+	memunmap(src);
+	return uncopied ? -EFAULT : 0;
+}
+
 static int pkvm_init_el2_context(struct kvm *kvm)
 {
+#if 0
 	/*
 	 * TODO:
 	 * Eventually, this will involve a call to EL2 to:
@@ -66,6 +124,9 @@
 	 */
 	kvm_pr_unimpl("Stage-2 protection is not yet implemented; ignoring\n");
 	return 0;
+#else
+	return __do_not_call_this_function(kvm->arch.pkvm.firmware_slot);
+#endif
 }
 
 static int pkvm_init_firmware_slot(struct kvm *kvm, u64 slotid)