arm64: alternatives: omit VHE code if VHE is disabled

When CONFIG_ARM64_VHE is disabled, the kernel never runs in VHE mode,
so the ARM64_HAS_VIRT_HOST_EXTN capability is never set and the
per-CPU offset always lives in TPIDR_EL1. Emitting the alternatives
patched sequence in that configuration only carries the TPIDR_EL2
based replacement as dead code.

Give the this_cpu_offset assembler macro a !CONFIG_ARM64_VHE variant
that reads TPIDR_EL1 directly, and pass CONFIG_ARM64_VHE as the config
argument to the ALTERNATIVE() in __kern_my_cpu_offset() so the VHE
sequence is omitted from the build as well.

Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
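---
As an aside, not part of the commit log: a stand-alone C sketch of the
build-time gating this patch applies. All names below are hypothetical
stand-ins rather than kernel code; when the config symbol is undefined,
only the TPIDR_EL1 sequence exists, otherwise both sequences are kept and
the choice is deferred to a boot-time decision, simulated here by a
runtime flag.

	#include <stdio.h>

	/* #define CONFIG_ARM64_VHE 1 */	/* uncomment to mimic CONFIG_ARM64_VHE=y */

	static int cpu_runs_at_el2;		/* stands in for ARM64_HAS_VIRT_HOST_EXTN */

	static const char *this_cpu_offset_insn(void)
	{
	#ifndef CONFIG_ARM64_VHE
		return "mrs x0, tpidr_el1";	/* single sequence, nothing to patch */
	#else
		return cpu_runs_at_el2 ? "mrs x0, tpidr_el2"	/* patched-in alternative */
				       : "mrs x0, tpidr_el1";	/* default sequence */
	#endif
	}

	int main(void)
	{
		printf("%s\n", this_cpu_offset_insn());
		return 0;
	}
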
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index 6ac38f7..a9857fe 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -234,6 +234,10 @@ lr	.req	x30		// link register
 	.macro	this_cpu_offset, dst
 	mrs	\dst, tpidr_el2
 	.endm
+#elif !defined(CONFIG_ARM64_VHE)
+	.macro	this_cpu_offset, dst
+	mrs	\dst, tpidr_el1
+	.endm
 #else
 	.macro	this_cpu_offset, dst
 alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h
index 8f16616..0bdb028 100644
--- a/arch/arm64/include/asm/percpu.h
+++ b/arch/arm64/include/asm/percpu.h
@@ -36,9 +36,10 @@ static inline unsigned long __kern_my_cpu_offset(void)
 	 * We want to allow caching the value, so avoid using volatile and
 	 * instead use a fake stack read to hazard against barrier().
 	 */
-	asm(ALTERNATIVE("mrs %0, tpidr_el1",
-			"mrs %0, tpidr_el2",
-			ARM64_HAS_VIRT_HOST_EXTN)
+	asm(ALTERNATIVE("mrs %0, tpidr_el1",
+			"mrs %0, tpidr_el2",
+			ARM64_HAS_VIRT_HOST_EXTN,
+			CONFIG_ARM64_VHE)
 		: "=r" (off) :
 		"Q" (*(const unsigned long *)current_stack_pointer));