KVM: arm64: Introduce DEFINE_PER_CPU_NVHE_SHARED
DEFINE_PER_CPU_NVHE_SHARED(...) can be used to define per-cpu
variables shared between the protected nVHE hypervisor and the host.
In non-protected mode, the host can directly access nVHE memory,
so only use this for data that also needs to be shared in protected
mode (pKVM).
The regular per-cpu macros and helpers can be reused with per-cpu
variables defined as shared. Only the definition needs to be marked
shared (_NVHE_SHARED). Marking a variable shared doesn't create a new
section; rather, it carves out a page-aligned chunk of .data..percpu
and groups the shared data there. The chunk needs to be page aligned
since the host stage 2 protection has per-page granularity.
Signed-off-by: Kalesh Singh <kaleshsingh@google.com>
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index ef1cf41..29b8ad4 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -105,6 +105,10 @@ enum __kvm_host_smccc_func {
DECLARE_KVM_VHE_PER_CPU(type, sym); \
DECLARE_KVM_NVHE_PER_CPU(type, sym)
+/* Shared per-cpu data between protected-nVHE and host */
+#define DEFINE_PER_CPU_NVHE_SHARED(type, name) \
+ DEFINE_PER_CPU_SECTION(type, name, "..nvhe..shared")
+
/*
* Compute pointer to a symbol defined in nVHE percpu region.
* Returns NULL if percpu memory has not been allocated yet.
@@ -204,6 +208,8 @@ DECLARE_KVM_HYP_SYM(__kvm_hyp_vector);
extern unsigned long kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[];
DECLARE_KVM_NVHE_SYM(__per_cpu_start);
DECLARE_KVM_NVHE_SYM(__per_cpu_end);
+DECLARE_KVM_NVHE_SYM(__per_cpu_nvhe_shared_start);
+DECLARE_KVM_NVHE_SYM(__per_cpu_nvhe_shared_end);
DECLARE_KVM_HYP_SYM(__bp_harden_hyp_vecs);
#define __bp_harden_hyp_vecs CHOOSE_HYP_SYM(__bp_harden_hyp_vecs)
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index ec52e85..90a3616 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -36,6 +36,8 @@
. = ALIGN(PAGE_SIZE); \
HYP_SECTION_NAME(.data..percpu) : { \
*(HYP_SECTION_NAME(.data..percpu)) \
+ . = ALIGN(PAGE_SIZE); \
+ *(HYP_SECTION_NAME(.data..percpu..nvhe..shared)) \
}
#define HYPERVISOR_RELOC_SECTION \
diff --git a/arch/arm64/kvm/hyp/nvhe/setup.c b/arch/arm64/kvm/hyp/nvhe/setup.c
index a3d8d41..8cc7c83 100644
--- a/arch/arm64/kvm/hyp/nvhe/setup.c
+++ b/arch/arm64/kvm/hyp/nvhe/setup.c
@@ -27,6 +27,12 @@ phys_addr_t pvmfw_size;
#define hyp_percpu_size ((unsigned long)__per_cpu_end - \
(unsigned long)__per_cpu_start)
+#define hyp_percpu_shared_size ((unsigned long)__per_cpu_nvhe_shared_end - \
+ (unsigned long)__per_cpu_nvhe_shared_start)
+#define hyp_percpu_shared_base(cpu) \
+ ((unsigned long)kern_hyp_va(per_cpu_base[cpu]) \
+ + (unsigned long)__per_cpu_nvhe_shared_start \
+ - (unsigned long)__per_cpu_start)
static void *vmemmap_base;
static void *shadow_table_base;
@@ -119,12 +125,26 @@ static int recreate_hyp_mappings(phys_addr_t phys, unsigned long size,
return ret;
for (i = 0; i < hyp_nr_cpus; i++) {
+ /* Map the private nVHE per-cpu data */
start = (void *)kern_hyp_va(per_cpu_base[i]);
- end = start + PAGE_ALIGN(hyp_percpu_size);
+ end = start + PAGE_ALIGN(hyp_percpu_size - hyp_percpu_shared_size);
ret = pkvm_create_mappings(start, end, PAGE_HYP);
if (ret)
return ret;
+ /*
+ * Map the shared per-cpu data and transfer ownership to the
+ * hypervisor
+ */
+ start = (void *)hyp_percpu_shared_base(i);
+ end = start + PAGE_ALIGN(hyp_percpu_shared_size);
+ prot = pkvm_mkstate(PAGE_HYP, PKVM_PAGE_SHARED_OWNED);
+ ret = pkvm_create_mappings(start, end, prot);
+ if (ret)
+ return ret;
+
+
+
end = (void *)per_cpu_ptr(&kvm_init_params, i)->stack_hyp_va;
start = end - PAGE_SIZE;
ret = pkvm_create_mappings(start, end, PAGE_HYP);
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 398f5e2..4ab27ee 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -1020,6 +1020,16 @@
#define PERCPU_DECRYPTED_SECTION
#endif
+/* Percpu data shared from protected nVHE to host */
+#ifdef CONFIG_KVM
+#define PERCPU_NVHE_SHARED_SECTION \
+ . = ALIGN(PAGE_SIZE); \
+ __per_cpu_nvhe_shared_start = .; \
+ *(.data..percpu..nvhe..shared) \
+ __per_cpu_nvhe_shared_end = .;
+#else
+#define PERCPU_NVHE_SHARED_SECTION
+#endif
/*
* Default discarded sections.
@@ -1094,6 +1104,7 @@
*(.data..percpu) \
*(.data..percpu..shared_aligned) \
PERCPU_DECRYPTED_SECTION \
+ PERCPU_NVHE_SHARED_SECTION \
__per_cpu_end = .;
/**