KVM: arm64: kcov: Add kcov support in EL2 object
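
Add KCOV coverage support to the pKVM EL2 object. Five new host
hypercalls let the kernel manage EL2 tracing buffers:

 - __pkvm_kcov_init_buffer allocates a slot and a private EL2 VA range
   for a buffer of the given size (in 64-bit words),
 - __pkvm_kcov_buffer_add_page shares and maps one host page at a time
   into that range,
 - __pkvm_kcov_teardown_buffer unmaps and unshares those pages again,
 - __pkvm_kcov_enable / __pkvm_kcov_disable bind and unbind a buffer to
   the current user thread, identified through CONTEXTIDR_EL1 (hence
   the select of PID_IN_CONTEXTIDR).

On each entry from the host, the hypervisor looks up a buffer bound to
the current PID and makes it the per-CPU active buffer; the compiler
instrumentation (__sanitizer_cov_trace_pc) then appends return
addresses to it, rewritten with hyp_kimage_voffset/hyp_kimage_vaddr so
they match the static kernel image layout.

A rough userspace sketch, assuming the KCOV_INIT_HYP_TRACE and
KCOV_ENABLE_HYP ioctls from the companion kcov patch behave like
KCOV_INIT_TRACE and KCOV_ENABLE:

  int fd = open("/sys/kernel/debug/kcov", O_RDWR);
  /* size in 64-bit words, a multiple of PTRS_PER_PTE */
  ioctl(fd, KCOV_INIT_HYP_TRACE, COVER_SIZE);
  uint64_t *cover = mmap(NULL, COVER_SIZE * sizeof(uint64_t),
                         PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
  ioctl(fd, KCOV_ENABLE_HYP, 0);
  /* ... run KVM ioctls; cover[0] counts PCs stored from cover[1] ... */
  ioctl(fd, KCOV_DISABLE, 0);
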
Change-Id: Ib1556c1194a0bae23438746bd3175ac780fae727
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index 43c3bc0..eec9e01 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -79,6 +79,13 @@
__KVM_HOST_SMCCC_FUNC___pkvm_init_vm,
__KVM_HOST_SMCCC_FUNC___pkvm_init_vcpu,
__KVM_HOST_SMCCC_FUNC___pkvm_teardown_vm,
+
+ /* Hypercalls available if CONFIG_NVHE_KCOV is set */
+ __KVM_HOST_SMCCC_FUNC___pkvm_kcov_init_buffer,
+ __KVM_HOST_SMCCC_FUNC___pkvm_kcov_buffer_add_page,
+ __KVM_HOST_SMCCC_FUNC___pkvm_kcov_teardown_buffer,
+ __KVM_HOST_SMCCC_FUNC___pkvm_kcov_enable,
+ __KVM_HOST_SMCCC_FUNC___pkvm_kcov_disable,
};
#define DECLARE_KVM_VHE_SYM(sym) extern char sym[]
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 9787503..d40eeb0 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -424,6 +424,19 @@
extern u64 kvm_nvhe_sym(hyp_cpu_logical_map)[NR_CPUS];
#define hyp_cpu_logical_map CHOOSE_NVHE_SYM(hyp_cpu_logical_map)
+#ifdef CONFIG_NVHE_KCOV
+extern u64 kvm_nvhe_sym(hyp_kimage_voffset);
+#define hyp_kimage_voffset CHOOSE_NVHE_SYM(hyp_kimage_voffset)
+
+extern u64 kvm_nvhe_sym(hyp_kimage_vaddr);
+#define hyp_kimage_vaddr CHOOSE_NVHE_SYM(hyp_kimage_vaddr)
+
+/* Fills in the two variables above at EL1 setup time */
+void init_hyp_kcov_layout(void);
+#else
+static inline void init_hyp_kcov_layout(void) {}
+#endif
+
struct vcpu_reset_state {
unsigned long pc;
unsigned long r0;
diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig
index f531da6b..9ecbd1d 100644
--- a/arch/arm64/kvm/Kconfig
+++ b/arch/arm64/kvm/Kconfig
@@ -71,4 +71,23 @@
If unsure, or not using protected nVHE (pKVM), say N.
+config NVHE_KCOV
+ bool "Enable KCOV on the EL2 object"
+ depends on NVHE_EL2_DEBUG && KCOV
+ select PID_IN_CONTEXTIDR
+ select ARCH_HAS_HYPERVISOR_KCOV
+ default y
+ help
+ Provide KCOV support at EL2. This enables the KCOV_ENABLE_HYP
+ option for buffers initialized with KCOV_INIT_HYP_TRACE.
+
+config NVHE_KCOV_NUM_BUFFERS
+ int "Number of KCOV buffers in the EL2 object"
+ depends on NVHE_KCOV
+ default 10
+ help
+ The maximum number of buffers that can be simultaneously initialized
+ at EL2. These are KCOV buffers initialized with KCOV_INIT_HYP_TRACE
+ instead of KCOV_INIT_TRACE.
+
endif # VIRTUALIZATION
diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile
index c0c050e5..099080a 100644
--- a/arch/arm64/kvm/Makefile
+++ b/arch/arm64/kvm/Makefile
@@ -23,6 +23,7 @@
vgic/vgic-its.o vgic/vgic-debug.o
kvm-$(CONFIG_HW_PERF_EVENTS) += pmu-emul.o pmu.o
+kvm-$(CONFIG_NVHE_KCOV) += nvhe_kcov.o
always-y := hyp_constants.h hyp-constants.s
diff --git a/arch/arm64/kvm/hyp/include/nvhe/kcov.h b/arch/arm64/kvm/hyp/include/nvhe/kcov.h
new file mode 100644
index 0000000..9bc2717
--- /dev/null
+++ b/arch/arm64/kvm/hyp/include/nvhe/kcov.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2021 Google LLC
+ * Author: Thibaut Pérami <thibautp@google.com>
+ */
+
+#ifndef __ARM64_KVM_NVHE_KCOV_H__
+#define __ARM64_KVM_NVHE_KCOV_H__
+
+#include <linux/errno.h>
+
+#include <nvhe/pkvm.h>
+
+#ifdef CONFIG_NVHE_KCOV
+
+/* Initialize a kcov buffer and return its index. Negative values are errors. */
+u64 __pkvm_kcov_init_buffer(uint size);
+
+/* Map the next page of a kcov buffer */
+u64 __pkvm_kcov_buffer_add_page(u64 idx, u64 pfn);
+
+/* Tear down a kcov buffer by index */
+int __pkvm_kcov_teardown_buffer(u64 idx);
+
+/* Enable tracing for the current user thread on the buffer at index idx */
+int __pkvm_kcov_enable(u64 idx);
+
+/* Disable tracing for the current user thread. Tracing enabled by another
+ * thread cannot be disabled */
+int __pkvm_kcov_disable(void);
+
+/* Call on each EL2 entry from the host that requires tracing, i.e. not on
+ * asynchronous exceptions */
+void pkvm_kcov_enter_from_host(void);
+
+/* Call on each EL2 exit back to the host */
+void pkvm_kcov_exit_to_host(void);
+
+#else
+
+static inline u64 __pkvm_kcov_init_buffer(uint size)
+{
+ return -ENOSYS;
+}
+static inline u64 __pkvm_kcov_buffer_add_page(u64 idx, u64 pfn)
+{
+ return -ENOSYS;
+}
+static inline int __pkvm_kcov_teardown_buffer(u64 idx)
+{
+ return -ENOSYS;
+}
+static inline int __pkvm_kcov_enable(u64 idx)
+{
+ return -ENOSYS;
+}
+static inline int __pkvm_kcov_disable(void)
+{
+ return -ENOSYS;
+}
+
+static inline void pkvm_kcov_enter_from_host(void) {}
+static inline void pkvm_kcov_exit_to_host(void) {}
+
+#endif /* CONFIG_NVHE_KCOV */
+
+#endif /* __ARM64_KVM_NVHE_KCOV_H__ */
diff --git a/arch/arm64/kvm/hyp/include/nvhe/memory.h b/arch/arm64/kvm/hyp/include/nvhe/memory.h
index ab205c4..8aaefab 100644
--- a/arch/arm64/kvm/hyp/include/nvhe/memory.h
+++ b/arch/arm64/kvm/hyp/include/nvhe/memory.h
@@ -29,6 +29,7 @@
#define hyp_phys_to_pfn(phys) ((phys) >> PAGE_SHIFT)
#define hyp_pfn_to_phys(pfn) ((phys_addr_t)((pfn) << PAGE_SHIFT))
+#define hyp_pfn_to_virt(pfn) (__hyp_va(hyp_pfn_to_phys(pfn)))
#define hyp_phys_to_page(phys) (&hyp_vmemmap[hyp_phys_to_pfn(phys)])
#define hyp_virt_to_page(virt) hyp_phys_to_page(__hyp_pa(virt))
#define hyp_virt_to_pfn(virt) hyp_phys_to_pfn(__hyp_pa(virt))
diff --git a/arch/arm64/kvm/hyp/nvhe/Makefile b/arch/arm64/kvm/hyp/nvhe/Makefile
index 530347c..1cbdd49 100644
--- a/arch/arm64/kvm/hyp/nvhe/Makefile
+++ b/arch/arm64/kvm/hyp/nvhe/Makefile
@@ -23,6 +23,7 @@
hyp-obj-y := timer-sr.o sysreg-sr.o debug-sr.o switch.o tlb.o hyp-init.o host.o \
hyp-main.o hyp-smp.o psci-relay.o early_alloc.o page_alloc.o \
cache.o setup.o mm.o mem_protect.o sys_regs.o pkvm.o stacktrace.o
+hyp-obj-$(CONFIG_NVHE_KCOV) += kcov.o
hyp-obj-y += ../vgic-v3-sr.o ../aarch32.o ../vgic-v2-cpuif-proxy.o ../entry.o \
../fpsimd.o ../hyp-entry.o ../exception.o ../pgtable.o
hyp-obj-$(CONFIG_DEBUG_LIST) += list_debug.o
@@ -105,7 +106,10 @@
KASAN_SANITIZE := n
KCSAN_SANITIZE := n
UBSAN_SANITIZE := n
-KCOV_INSTRUMENT := n
+KCOV_INSTRUMENT := $(if $(CONFIG_NVHE_KCOV),y,n)
+
+# Don't instrument the kcov implementation itself
+KCOV_INSTRUMENT_kcov.nvhe.o := n
# Skip objtool checking for this directory because nVHE code is compiled with
# non-standard build rules.
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
index 728e01d..fd9153a 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
@@ -17,6 +17,7 @@
#include <nvhe/mm.h>
#include <nvhe/pkvm.h>
#include <nvhe/trap_handler.h>
+#include <nvhe/kcov.h>
DEFINE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params);
@@ -294,6 +295,40 @@
cpu_reg(host_ctxt, 1) = __pkvm_teardown_vm(handle);
}
+static void handle___pkvm_kcov_init_buffer(struct kvm_cpu_context *host_ctxt)
+{
+ DECLARE_REG(uint, size, host_ctxt, 1);
+
+ cpu_reg(host_ctxt, 1) = __pkvm_kcov_init_buffer(size);
+}
+
+static void handle___pkvm_kcov_buffer_add_page(struct kvm_cpu_context *host_ctxt)
+{
+ DECLARE_REG(u64, index, host_ctxt, 1);
+ DECLARE_REG(u64, pfn, host_ctxt, 2);
+
+ cpu_reg(host_ctxt, 1) = __pkvm_kcov_buffer_add_page(index, pfn);
+}
+
+static void handle___pkvm_kcov_teardown_buffer(struct kvm_cpu_context *host_ctxt)
+{
+ DECLARE_REG(u64, index, host_ctxt, 1);
+
+ cpu_reg(host_ctxt, 1) = __pkvm_kcov_teardown_buffer(index);
+}
+
+static void handle___pkvm_kcov_enable(struct kvm_cpu_context *host_ctxt)
+{
+ DECLARE_REG(u64, index, host_ctxt, 1);
+
+ cpu_reg(host_ctxt, 1) = __pkvm_kcov_enable(index);
+}
+
+static void handle___pkvm_kcov_disable(struct kvm_cpu_context *host_ctxt)
+{
+ cpu_reg(host_ctxt, 1) = __pkvm_kcov_disable();
+}
+
typedef void (*hcall_t)(struct kvm_cpu_context *);
#define HANDLE_FUNC(x) [__KVM_HOST_SMCCC_FUNC_##x] = (hcall_t)handle_##x
@@ -326,6 +361,12 @@
HANDLE_FUNC(__pkvm_init_vm),
HANDLE_FUNC(__pkvm_init_vcpu),
HANDLE_FUNC(__pkvm_teardown_vm),
+
+ HANDLE_FUNC(__pkvm_kcov_init_buffer),
+ HANDLE_FUNC(__pkvm_kcov_buffer_add_page),
+ HANDLE_FUNC(__pkvm_kcov_teardown_buffer),
+ HANDLE_FUNC(__pkvm_kcov_enable),
+ HANDLE_FUNC(__pkvm_kcov_disable),
};
static void handle_host_hcall(struct kvm_cpu_context *host_ctxt)
@@ -384,6 +425,8 @@
{
u64 esr = read_sysreg_el2(SYS_ESR);
+ pkvm_kcov_enter_from_host();
+
switch (ESR_ELx_EC(esr)) {
case ESR_ELx_EC_HVC64:
handle_host_hcall(host_ctxt);
@@ -403,4 +446,5 @@
default:
BUG();
}
+ pkvm_kcov_exit_to_host();
}
diff --git a/arch/arm64/kvm/hyp/nvhe/kcov.c b/arch/arm64/kvm/hyp/nvhe/kcov.c
new file mode 100644
index 0000000..a2753f5
--- /dev/null
+++ b/arch/arm64/kvm/hyp/nvhe/kcov.c
@@ -0,0 +1,315 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2021 Google LLC
+ * Author: Sebastian Ene <sebastianene@google.com>
+ * Author: Thibaut Perami <thibautp@google.com>
+ */
+
+#include <linux/kvm_host.h>
+#include <linux/mm.h>
+
+#include <kvm/arm_hypercalls.h>
+#include <kvm/arm_psci.h>
+
+#include <asm/kvm_emulate.h>
+
+#include <nvhe/kcov.h>
+#include <nvhe/mem_protect.h>
+#include <nvhe/memory.h>
+#include <nvhe/mm.h>
+#include <nvhe/pkvm.h>
+#include <nvhe/trap_handler.h>
+
+u64 __ro_after_init hyp_kimage_voffset;
+u64 __ro_after_init hyp_kimage_vaddr;
+
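+/* Serializes allocation, mapping, enabling and teardown of kcov_buffers */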
+static DEFINE_HYP_SPINLOCK(kcov_buffer_lock);
+
+static inline u32 get_active_pid(void)
+{
+ /* The EL1 scheduler is responsible for putting the user thread PID in
+ * this register; this is what PID_IN_CONTEXTIDR provides */
+ return read_sysreg(contextidr_el1);
+}
+
+struct kcov_buffer {
+ /* Size of the buffer (number of 8-byte words).
+ * It must be a multiple of PTRS_PER_PTE */
+ unsigned int size;
+ /* How much of size is currently mapped, should be <= size. */
+ unsigned int mapped;
+ /* The location of the buffer. If NULL, this slot is free in the
+ * kcov_buffers array */
+ u64 *area;
+ /* If the buffer is inactive this contains 0.
+ *
+ * If the buffer is active, this contains the user-thread PID. In that
+ * case area must not be NULL, and mapped must equal size
+ */
+ u32 current_pid;
+};
+
+/* The list of mapped kcov buffers. A NULL area address means the slot is
+ * unallocated. Any modification of an active buffer (current_pid set) must be
+ * done by code called from the same user thread, so that it cannot
+ * interfere/race with tracing on other CPUs
+ */
+static struct kcov_buffer kcov_buffers[CONFIG_NVHE_KCOV_NUM_BUFFERS];
+
+/* Per-CPU pointers into the kcov_buffers array.
+ * NULL means the current thread is not actively tracing
+ */
+static DEFINE_PER_CPU(struct kcov_buffer *, kcov_active_buffer);
+
+void pkvm_kcov_enter_from_host(void)
+{
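+ /* At EL2, TPIDR_EL2 holds this CPU's per-CPU offset */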
+ u64 cpu_offset = read_sysreg(tpidr_el2);
+
+ u32 pid = get_active_pid();
+ if (!pid)
+ return;
+
+ /* This linear walk on every host hypercall is inefficient.
+ * TODO: performance improvements */
+ for (int i = 0; i < CONFIG_NVHE_KCOV_NUM_BUFFERS; i++) {
+ if (kcov_buffers[i].current_pid == pid) {
+ if (WARN_ON(kcov_buffers[i].area == NULL))
+ continue;
+ *SHIFT_PERCPU_PTR(&kcov_active_buffer, cpu_offset) =
+ &kcov_buffers[i];
+ return;
+ }
+ }
+}
+
+void pkvm_kcov_exit_to_host(void)
+{
+ u64 cpu_offset = read_sysreg(tpidr_el2);
+ *SHIFT_PERCPU_PTR(&kcov_active_buffer, cpu_offset) = NULL;
+}
+
+/*
+ * Convert an active hypervisor instruction pointer to the corresponding
+ * static kernel image address, usable to index the kernel image file
+ */
+static inline u64 canonicalize_ip(u64 ip)
+{
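+ /* hyp VA -> PA, then PA -> kernel image VA via the kernel's voffset */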
+ ip = __hyp_pa(ip);
+ ip += hyp_kimage_voffset;
+
+#ifdef CONFIG_RANDOMIZE_BASE
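+ /* Undo the KASLR displacement so the address is image-relative */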
+ ip -= (hyp_kimage_vaddr - KIMAGE_VADDR);
+#endif
+ return ip;
+}
+
+/*
+ * Entry point from instrumented hypervisor code.
+ * This is called once per basic block/edge.
+ * The equivalent EL1 function lives in kernel/kcov.c
+ */
+void __sanitizer_cov_trace_pc(void)
+{
+ u64 cpu_offset = read_sysreg(tpidr_el2);
+ u64 pos;
+ struct kcov_buffer *buf =
+ *SHIFT_PERCPU_PTR(&kcov_active_buffer, cpu_offset);
+
+ if (!buf)
+ return;
+ if (WARN_ON(!buf->area))
+ return;
+
+ /* There is no concurrent access possible, because only one physical CPU
+ * can have the current thread active at a time and the hypervisor code
+ * is not interruptible */
+
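+ /* area[0] holds the number of recorded PCs, as in the EL1 kcov format */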
+ pos = buf->area[0] + 1;
+ if (unlikely(pos >= buf->size))
+ return;
+ buf->area[0] = pos;
+ buf->area[pos] = canonicalize_ip(_RET_IP_);
+}
+
+static int find_free_buffer_slot(void)
+{
+ for (int i = 0; i < CONFIG_NVHE_KCOV_NUM_BUFFERS; i++) {
+ if (kcov_buffers[i].area == NULL) {
+ BUG_ON(kcov_buffers[i].current_pid != 0);
+ return i;
+ }
+ }
+ return -ENOBUFS;
+}
+
+u64 __pkvm_kcov_init_buffer(uint size)
+{
+ unsigned long vaddr;
+ int index;
+ int ret;
+
+ if (!IS_ALIGNED(size, PTRS_PER_PTE))
+ return -EINVAL;
+
+ if (!size)
+ return -EINVAL;
+
+ hyp_spin_lock(&kcov_buffer_lock);
+ ret = find_free_buffer_slot();
+ if (ret < 0)
+ goto unlock;
+ index = ret;
+
+ ret = pkvm_alloc_private_va_range(size * sizeof(u64), &vaddr);
+ if (ret)
+ goto unlock;
+
+ kcov_buffers[index].area = (u64 *)vaddr;
+ kcov_buffers[index].size = size;
+ kcov_buffers[index].mapped = 0;
+ ret = index;
+unlock:
+ hyp_spin_unlock(&kcov_buffer_lock);
+ return ret;
+}
+
+u64 __pkvm_kcov_buffer_add_page(u64 index, u64 pfn)
+{
+ int ret = -EINVAL;
+ void *page_lm = hyp_pfn_to_virt(pfn);
+ u64 *vaddr;
+
+ if (index >= CONFIG_NVHE_KCOV_NUM_BUFFERS)
+ return -EINVAL;
+
+ hyp_spin_lock(&kcov_buffer_lock);
+ if (!kcov_buffers[index].area)
+ goto unlock;
+
+ if (kcov_buffers[index].mapped >= kcov_buffers[index].size)
+ goto unlock;
+
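+ /* Pages can only be added while the buffer is not bound to a thread */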
+ BUG_ON(kcov_buffers[index].current_pid);
+
+ ret = __pkvm_host_share_hyp(pfn);
+ if (ret)
+ goto unlock;
+
+ ret = hyp_pin_shared_mem(page_lm, page_lm + PAGE_SIZE);
+ if (ret)
+ goto unshare;
+
+ vaddr = kcov_buffers[index].area + kcov_buffers[index].mapped;
+
+ hyp_spin_lock(&pkvm_pgd_lock);
+ ret = kvm_pgtable_hyp_map(&pkvm_pgtable, (u64)vaddr, PAGE_SIZE, hyp_pfn_to_phys(pfn), PAGE_HYP);
+ hyp_spin_unlock(&pkvm_pgd_lock);
+ if (ret)
+ goto unpin;
+
+ /* Success: one mapped page holds PTRS_PER_PTE 64-bit entries */
+ kcov_buffers[index].mapped += PTRS_PER_PTE;
+ goto unlock;
+
+unpin:
+ hyp_unpin_shared_mem(page_lm, page_lm + PAGE_SIZE);
+
+unshare:
+ WARN_ON(__pkvm_host_unshare_hyp(pfn));
+
+unlock:
+ hyp_spin_unlock(&kcov_buffer_lock);
+ return ret;
+}
+
+int __pkvm_kcov_teardown_buffer(u64 index)
+{
+ u64 *vaddr;
+ kvm_pte_t pte;
+ u32 level;
+ u64 pfn;
+ void *page_lm;
+
+ if (index >= CONFIG_NVHE_KCOV_NUM_BUFFERS)
+ return -EINVAL;
+
+ hyp_spin_lock(&kcov_buffer_lock);
+
+ if (kcov_buffers[index].current_pid) {
+ hyp_spin_unlock(&kcov_buffer_lock);
+ return -EBUSY;
+ }
+
+ if (kcov_buffers[index].mapped == 0) {
+ kcov_buffers[index].area = NULL;
+ hyp_spin_unlock(&kcov_buffer_lock);
+ return 0;
+ }
+
+ kcov_buffers[index].mapped -= PTRS_PER_PTE;
+
+ vaddr = kcov_buffers[index].area + kcov_buffers[index].mapped;
+
+ hyp_spin_lock(&pkvm_pgd_lock);
+ BUG_ON(kvm_pgtable_get_leaf(&pkvm_pgtable, (u64)vaddr, &pte, &level));
+ BUG_ON(level != KVM_PGTABLE_MAX_LEVELS - 1);
+ BUG_ON(kvm_pgtable_hyp_unmap(&pkvm_pgtable, (u64)vaddr, PAGE_SIZE) != PAGE_SIZE);
+ hyp_spin_unlock(&pkvm_pgd_lock);
+
+ pfn = kvm_pte_to_pfn(pte);
+ page_lm = hyp_pfn_to_virt(pfn);
+ hyp_unpin_shared_mem(page_lm, page_lm + PAGE_SIZE);
+ WARN_ON(__pkvm_host_unshare_hyp(pfn));
+
+ hyp_spin_unlock(&kcov_buffer_lock);
+
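+ /* Only one page was torn down; the host must call again until 0 is
+ * returned and the slot is freed */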
+ return -EAGAIN;
+}
+
+int __pkvm_kcov_enable(u64 index)
+{
+ u32 pid = get_active_pid();
+ int ret;
+
+ /* The kernel should not call kcov_enable for the idle process */
+ if (WARN_ON(!pid))
+ return -EINVAL;
+
+ if (index >= CONFIG_NVHE_KCOV_NUM_BUFFERS)
+ return -EINVAL;
+
+ hyp_spin_lock(&kcov_buffer_lock);
+ if (kcov_buffers[index].current_pid) {
+ ret = -EBUSY;
+ goto unlock_kcov;
+ }
+ if (kcov_buffers[index].area == NULL) {
+ ret = -EINVAL;
+ goto unlock_kcov;
+ }
+ if (kcov_buffers[index].mapped != kcov_buffers[index].size) {
+ ret = -ENOMEM;
+ goto unlock_kcov;
+ }
+
+ kcov_buffers[index].current_pid = pid;
+ ret = 0;
+
+unlock_kcov:
+ hyp_spin_unlock(&kcov_buffer_lock);
+ return ret;
+}
+
+int __pkvm_kcov_disable(void)
+{
+ u64 cpu_offset = read_sysreg(tpidr_el2);
+ struct kcov_buffer *buf =
+ *SHIFT_PERCPU_PTR(&kcov_active_buffer, cpu_offset);
+
+ if (!buf)
+ return -EINVAL;
+
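+ /* The per-CPU active pointer is only set on entry from the host when
+ * CONTEXTIDR_EL1 matches the owning PID */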
+ BUG_ON(buf->current_pid != get_active_pid());
+
+ *SHIFT_PERCPU_PTR(&kcov_active_buffer, cpu_offset) = NULL;
+ hyp_spin_lock(&kcov_buffer_lock);
+ buf->current_pid = 0;
+ hyp_spin_unlock(&kcov_buffer_lock);
+ return 0;
+}
diff --git a/arch/arm64/kvm/nvhe_kcov.c b/arch/arm64/kvm/nvhe_kcov.c
new file mode 100644
index 0000000..fda9b52
--- /dev/null
+++ b/arch/arm64/kvm/nvhe_kcov.c
@@ -0,0 +1,91 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2021 Google LLC
+ * Author: Thibaut Pérami <thibautp@google.com>
+ */
+
+#include <linux/mman.h>
+#include <linux/kvm_host.h>
+#include <linux/kcov.h>
+#include <asm/kvm_arm.h>
+#include <asm/kvm_mmu.h>
+#include <asm/kvm_pgtable.h>
+#include <asm/kvm_asm.h>
+
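+/* Called during the EL1 VA layout setup (see va_layout.c). The writes land in
+ * the EL2 copies of these variables through the CHOOSE_NVHE_SYM aliases */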
+void init_hyp_kcov_layout(void)
+{
+ hyp_kimage_voffset = kimage_voffset;
+ hyp_kimage_vaddr = kimage_vaddr;
+}
+
+int kvm_kcov_hyp_init_tracing_buffer(void *mem, uint size)
+{
+ int nr_page;
+ int ret;
+ u64 idx;
+
+ /* The current implementation of hyp kcov does not work for normal
+ * (unprotected) nVHE code. This is because:
+ * - in unprotected mode, the EL2 vectors are disabled when no VM is
+ * running,
+ * - the buffer sharing below assumes EL2 manages its own page tables;
+ * a different code path would need to be implemented for the
+ * unprotected case.
+ */
+ if (!is_protected_kvm_enabled())
+ return -ENOSYS;
+
+ if (WARN_ON(!is_vmalloc_addr(mem)))
+ return -EINVAL;
+
+ if (!IS_ALIGNED((u64)mem, PAGE_SIZE))
+ return -EINVAL;
+
+ if (!IS_ALIGNED(size, PTRS_PER_PTE))
+ return -EINVAL;
+
+ ret = kvm_call_hyp_nvhe(__pkvm_kcov_init_buffer, size);
+ if (ret < 0)
+ return ret;
+
+ idx = ret;
+
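+ /* Each page backs PTRS_PER_PTE 64-bit words, hence the alignment check
+ * above */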
+ nr_page = size / PTRS_PER_PTE;
+
+ for (int i = 0; i < nr_page; ++i) {
+ u64 pfn = vmalloc_to_pfn(mem + i * PAGE_SIZE);
+
+ ret = kvm_call_hyp_nvhe(__pkvm_kcov_buffer_add_page, idx, pfn);
+ if (ret) {
+ WARN_ON(kvm_kcov_hyp_teardown_tracing_buffer(idx));
+ return ret;
+ }
+ }
+ return idx;
+}
+
+int kvm_kcov_hyp_teardown_tracing_buffer(int buffer_index)
+{
+ int ret;
+
+ if (WARN_ON(!is_protected_kvm_enabled()))
+ return -ENOSYS;
+
+ do {
+ ret = kvm_call_hyp_nvhe(__pkvm_kcov_teardown_buffer, buffer_index);
+ } while (ret == -EAGAIN);
+ return ret;
+}
+
+int kvm_kcov_hyp_enable_tracing(int buffer_index)
+{
+ if (WARN_ON(!is_protected_kvm_enabled()))
+ return -ENOSYS;
+
+ return kvm_call_hyp_nvhe(__pkvm_kcov_enable, buffer_index);
+}
+
+int kvm_kcov_hyp_disable_tracing(void)
+{
+ if (WARN_ON(!is_protected_kvm_enabled()))
+ return -ENOSYS;
+
+ return kvm_call_hyp_nvhe(__pkvm_kcov_disable);
+}
diff --git a/arch/arm64/kvm/va_layout.c b/arch/arm64/kvm/va_layout.c
index 91b22a0..7c033c0 100644
--- a/arch/arm64/kvm/va_layout.c
+++ b/arch/arm64/kvm/va_layout.c
@@ -78,6 +78,7 @@
}
tag_val >>= tag_lsb;
+ init_hyp_kcov_layout();
init_hyp_physvirt_offset();
}