ANDROID: KVM: arm64: Prevent HVC calls outside of the core kernel text

Modules can easily wreak havoc in the hypervisor by calling into
it randomly, making it very hard to understand what is going on.

Given that limiting hypercalls to the core kernel is actually
pretty easy (a simple comparison with _text and _etext), let's
implement that.

This is made extra complicated by KASLR and the disjoint VA
spaces (you can't just refer to _text from the hypervisor, as
this results in a relative reference...). Instead, patch the
absolute addresses of _text and _etext into the hypervisor text
at boot time using ALTERNATIVE_CB callbacks.

Bug: 210011561
Signed-off-by: Marc Zyngier <maz@kernel.org>
Change-Id: I2f21871d7fe0fb22fd3660dbc1317ec8968d5b61
Signed-off-by: Sebastian Ene <sebastianene@google.com>
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 7a8dc3f..b3b03f1 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -118,6 +118,10 @@
 
 void kvm_update_va_mask(struct alt_instr *alt,
 			__le32 *origptr, __le32 *updptr, int nr_inst);
+void kvm_get__text(struct alt_instr *alt,
+		   __le32 *origptr, __le32 *updptr, int nr_inst);
+void kvm_get__etext(struct alt_instr *alt,
+		    __le32 *origptr, __le32 *updptr, int nr_inst);
 void kvm_compute_layout(void);
 void kvm_apply_hyp_relocations(void);
 
diff --git a/arch/arm64/kernel/image-vars.h b/arch/arm64/kernel/image-vars.h
index 022f8fe..8f674fb 100644
--- a/arch/arm64/kernel/image-vars.h
+++ b/arch/arm64/kernel/image-vars.h
@@ -65,6 +65,8 @@
 KVM_NVHE_ALIAS(kvm_patch_vector_branch);
 KVM_NVHE_ALIAS(kvm_update_va_mask);
 KVM_NVHE_ALIAS(kvm_get_kimage_voffset);
+KVM_NVHE_ALIAS(kvm_get__text);
+KVM_NVHE_ALIAS(kvm_get__etext);
 KVM_NVHE_ALIAS(kvm_compute_final_ctr_el0);
 KVM_NVHE_ALIAS(spectre_bhb_patch_loop_iter);
 KVM_NVHE_ALIAS(spectre_bhb_patch_loop_mitigation_enable);
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
index 4934592..14f9da9 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
@@ -1089,12 +1089,45 @@
 	HANDLE_FUNC(__pkvm_iommu_finalize),
 };
 
+static inline u64 kernel__text_addr(void)
+{
+	u64 val;
+
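+	/*
+	 * Placeholder movz/movk sequence; patched at boot by the
+	 * kvm_get__text() callback with the absolute address of _text.
+	 */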
+	asm volatile(ALTERNATIVE_CB("movz	%0, #0\n"
+				    "movk	%0, #0, lsl #16\n"
+				    "movk	%0, #0, lsl #32\n"
+				    "movk	%0, #0, lsl #48\n",
+				    kvm_get__text)
+		     : "=r" (val));
+
+	return val;
+}
+
+static inline u64 kernel__etext_addr(void)
+{
+	u64 val;
+
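+	/*
+	 * As above, but patched by kvm_get__etext() with the absolute
+	 * address of _etext.
+	 */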
+	asm volatile(ALTERNATIVE_CB("movz	%0, #0\n"
+				    "movk	%0, #0, lsl #16\n"
+				    "movk	%0, #0, lsl #32\n"
+				    "movk	%0, #0, lsl #48\n",
+				    kvm_get__etext)
+		     : "=r" (val));
+
+	return val;
+}
+
 static void handle_host_hcall(struct kvm_cpu_context *host_ctxt)
 {
 	DECLARE_REG(unsigned long, id, host_ctxt, 0);
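+	/* ELR_EL2 points at the instruction following the HVC, hence the -4 */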
+	u64 elr = read_sysreg_el2(SYS_ELR) - 4;
 	unsigned long hcall_min = 0;
 	hcall_t hfn;
 
+	/* Check the provenance of the hypercall */
+	if (unlikely(elr < kernel__text_addr() || elr >= kernel__etext_addr()))
+		goto inval;
+
 	/*
 	 * If pKVM has been initialised then reject any calls to the
 	 * early "privileged" hypercalls. Note that we cannot reject
diff --git a/arch/arm64/kvm/va_layout.c b/arch/arm64/kvm/va_layout.c
index acdb7b3..f0b678d 100644
--- a/arch/arm64/kvm/va_layout.c
+++ b/arch/arm64/kvm/va_layout.c
@@ -295,3 +295,15 @@
 	generate_mov_q(read_sanitised_ftr_reg(SYS_CTR_EL0),
 		       origptr, updptr, nr_inst);
 }
+
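+/*
+ * ALTERNATIVE_CB callbacks: patch the movz/movk placeholders in the
+ * hypervisor's kernel__text_addr()/kernel__etext_addr() helpers with
+ * the runtime (KASLR-adjusted) addresses of _text and _etext.
+ */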
+void kvm_get__text(struct alt_instr *alt,
+		   __le32 *origptr, __le32 *updptr, int nr_inst)
+{
+	generate_mov_q((u64)_text, origptr, updptr, nr_inst);
+}
+
+void kvm_get__etext(struct alt_instr *alt,
+		    __le32 *origptr, __le32 *updptr, int nr_inst)
+{
+	generate_mov_q((u64)_etext, origptr, updptr, nr_inst);
+}