KVM: arm64: Add KCOV coverage collection support for pKVM (nVHE hyp) code
Change-Id: Ie374d439dc2200489e0abd3a9b4d55df00577411
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index 59c55ae..b2459a3 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -76,6 +76,9 @@
__KVM_HOST_SMCCC_FUNC___vgic_v3_save_aprs,
__KVM_HOST_SMCCC_FUNC___vgic_v3_restore_aprs,
__KVM_HOST_SMCCC_FUNC___pkvm_vcpu_init_traps,
+#ifdef CONFIG_KCOV
+ __KVM_HOST_SMCCC_FUNC___kvm_kcov_set_area,
+#endif
};
#define DECLARE_KVM_VHE_SYM(sym) extern char sym[]
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 9f70e82..bb556d0 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -604,17 +604,27 @@
void kvm_arm_halt_guest(struct kvm *kvm);
void kvm_arm_resume_guest(struct kvm *kvm);
+#ifdef CONFIG_KCOV
+/*
+ * Bracket a pKVM hypercall for KCOV coverage collection: kcov_start_kvm()
+ * returns a state token that must be handed back to kcov_stop_kvm() once the
+ * hypercall returns (see kvm_call_hyp_nvhe()). Implemented in
+ * arch/arm64/kvm/kcov.c; the stubs keep callers unconditional when
+ * CONFIG_KCOV is off.
+ */
+int kcov_start_kvm(void);
+void kcov_stop_kvm(int ret);
+#else
+static inline int kcov_start_kvm(void) { return 0; }
+static inline void kcov_stop_kvm(int ret) {}
+#endif
+
#define vcpu_has_run_once(vcpu) !!rcu_access_pointer((vcpu)->pid)
#ifndef __KVM_NVHE_HYPERVISOR__
-#define kvm_call_hyp_nvhe(f, ...) \
+#define kvm_call_hyp_nvhe(f, ...) \
({ \
+ int kcov = kcov_start_kvm(); \
struct arm_smccc_res res; \
\
arm_smccc_1_1_hvc(KVM_HOST_SMCCC_FUNC(f), \
##__VA_ARGS__, &res); \
WARN_ON(res.a0 != SMCCC_RET_SUCCESS); \
\
+ kcov_stop_kvm(kcov); \
res.a1; \
})
diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h
index 5afd14a..9bd0848 100644
--- a/arch/arm64/include/asm/kvm_hyp.h
+++ b/arch/arm64/include/asm/kvm_hyp.h
@@ -115,6 +115,8 @@
void __noreturn __host_enter(struct kvm_cpu_context *host_ctxt);
#endif
+int kvm_kcov_set_area(kvm_pfn_t *pfns, size_t size);
+
extern u64 kvm_nvhe_sym(id_aa64pfr0_el1_sys_val);
extern u64 kvm_nvhe_sym(id_aa64pfr1_el1_sys_val);
extern u64 kvm_nvhe_sym(id_aa64isar0_el1_sys_val);
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 81839e9..8e87f68 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -110,7 +110,9 @@
#else
+#include <linux/kvm_host.h>
#include <linux/pgtable.h>
+
#include <asm/pgalloc.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
@@ -137,6 +139,36 @@
#define kern_hyp_va(v) ((typeof(v))(__kern_hyp_va((unsigned long)(v))))
+/*
+ * Translate a hyp VA of a kernel-image object back into its kernel image VA:
+ * convert to PA with __hyp_pa() and add kimage_voffset, which is patched in
+ * at runtime by the kvm_get_kimage_voffset alternative callback (the movz/movk
+ * sequence below is the patch target).
+ */
+static __always_inline unsigned long __hyp_kimg_va(unsigned long v)
+{
+	unsigned long kimage_voffset;
+
+	asm volatile(ALTERNATIVE_CB("movz %0, #0\n"
+				    "movk %0, #0, lsl #16\n"
+				    "movk %0, #0, lsl #32\n"
+				    "movk %0, #0, lsl #48\n",
+				    kvm_get_kimage_voffset)
+		     : "=r" (kimage_voffset));
+
+	return (unsigned long)__hyp_pa(v) + kimage_voffset;
+}
+
+/* Type-preserving wrapper around __hyp_kimg_va(). */
+#define hyp_kimg_va(v) 	((typeof(v))(__hyp_kimg_va((unsigned long)(v))))
+
+/*
+ * Return the kernel's runtime KASLR offset from hyp context. The value is
+ * materialized by the kvm_get_kaslr_offset alternative callback (see
+ * arch/arm64/kvm/va_layout.c), which rewrites the movz/movk placeholder
+ * sequence with the real offset at boot.
+ */
+static __always_inline unsigned long __kaslr_offset(void)
+{
+	unsigned long val;
+
+	asm volatile(ALTERNATIVE_CB("movz %0, #0\n"
+				    "movk %0, #0, lsl #16\n"
+				    "movk %0, #0, lsl #32\n"
+				    "movk %0, #0, lsl #48\n",
+				    kvm_get_kaslr_offset)
+		     : "=r" (val));
+
+	return val;
+}
+
/*
* We currently support using a VM-specified IPA size. For backward
* compatibility, the default IPA size is fixed to 40bits.
diff --git a/arch/arm64/kernel/image-vars.h b/arch/arm64/kernel/image-vars.h
index c96a9a0..d134735 100644
--- a/arch/arm64/kernel/image-vars.h
+++ b/arch/arm64/kernel/image-vars.h
@@ -65,6 +65,7 @@
KVM_NVHE_ALIAS(kvm_patch_vector_branch);
KVM_NVHE_ALIAS(kvm_update_va_mask);
KVM_NVHE_ALIAS(kvm_get_kimage_voffset);
+KVM_NVHE_ALIAS(kvm_get_kaslr_offset);
KVM_NVHE_ALIAS(kvm_compute_final_ctr_el0);
/* Global kernel state accessed by nVHE hyp code. */
diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile
index 989bb5d..11bb618 100644
--- a/arch/arm64/kvm/Makefile
+++ b/arch/arm64/kvm/Makefile
@@ -25,3 +25,4 @@
vgic/vgic-its.o vgic/vgic-debug.o
kvm-$(CONFIG_HW_PERF_EVENTS) += pmu-emul.o
+kvm-$(CONFIG_KCOV) += kcov.o
diff --git a/arch/arm64/kvm/hyp/include/nvhe/memory.h b/arch/arm64/kvm/hyp/include/nvhe/memory.h
index 592b7ed..0d26dd2 100644
--- a/arch/arm64/kvm/hyp/include/nvhe/memory.h
+++ b/arch/arm64/kvm/hyp/include/nvhe/memory.h
@@ -29,6 +29,7 @@
#define hyp_phys_to_pfn(phys) ((phys) >> PAGE_SHIFT)
#define hyp_pfn_to_phys(pfn) ((phys_addr_t)((pfn) << PAGE_SHIFT))
+#define hyp_pfn_to_virt(pfn) hyp_phys_to_virt(hyp_pfn_to_phys(pfn))
#define hyp_phys_to_page(phys) (&hyp_vmemmap[hyp_phys_to_pfn(phys)])
#define hyp_virt_to_page(virt) hyp_phys_to_page(__hyp_pa(virt))
#define hyp_virt_to_pfn(virt) hyp_phys_to_pfn(__hyp_pa(virt))
diff --git a/arch/arm64/kvm/hyp/nvhe/Makefile b/arch/arm64/kvm/hyp/nvhe/Makefile
index c3c1197..f178969 100644
--- a/arch/arm64/kvm/hyp/nvhe/Makefile
+++ b/arch/arm64/kvm/hyp/nvhe/Makefile
@@ -4,7 +4,7 @@
#
asflags-y := -D__KVM_NVHE_HYPERVISOR__ -D__DISABLE_EXPORTS
-ccflags-y := -D__KVM_NVHE_HYPERVISOR__ -D__DISABLE_EXPORTS
+ccflags-y := -D__KVM_NVHE_HYPERVISOR__ -D__DISABLE_EXPORTS $(CFLAGS_KCOV)
hostprogs := gen-hyprel
HOST_EXTRACFLAGS += -I$(objtree)/include
@@ -18,6 +18,7 @@
obj-y += ../vgic-v3-sr.o ../aarch32.o ../vgic-v2-cpuif-proxy.o ../entry.o \
../fpsimd.o ../hyp-entry.o ../exception.o ../pgtable.o
obj-y += $(lib-objs)
+obj-$(CONFIG_KCOV) += kcov.o
##
## Build rules for compiling nVHE hyp code
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
index 5e2197d..f1e4a22 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
@@ -175,6 +175,16 @@
__pkvm_vcpu_init_traps(kern_hyp_va(vcpu));
}
+#ifdef CONFIG_KCOV
+static void handle___kvm_kcov_set_area(struct kvm_cpu_context *host_ctxt)
+{
+ DECLARE_REG(kvm_pfn_t *, pfns, host_ctxt, 1);
+ DECLARE_REG(size_t, size, host_ctxt, 2);
+
+ cpu_reg(host_ctxt, 1) = kvm_kcov_set_area(pfns, size);
+}
+#endif
+
typedef void (*hcall_t)(struct kvm_cpu_context *);
#define HANDLE_FUNC(x) [__KVM_HOST_SMCCC_FUNC_##x] = (hcall_t)handle_##x
@@ -204,6 +214,9 @@
HANDLE_FUNC(__vgic_v3_save_aprs),
HANDLE_FUNC(__vgic_v3_restore_aprs),
HANDLE_FUNC(__pkvm_vcpu_init_traps),
+#ifdef CONFIG_KCOV
+ HANDLE_FUNC(__kvm_kcov_set_area),
+#endif
};
static void handle_host_hcall(struct kvm_cpu_context *host_ctxt)
diff --git a/arch/arm64/kvm/hyp/nvhe/kcov.c b/arch/arm64/kvm/hyp/nvhe/kcov.c
new file mode 100644
index 0000000..46ce519
--- /dev/null
+++ b/arch/arm64/kvm/hyp/nvhe/kcov.c
@@ -0,0 +1,66 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2021 - Google LLC
+ * Author: David Brazdil <dbrazdil@google.com>
+ */
+
+#include <asm/kvm_asm.h>
+#include <asm/kvm_hyp.h>
+#include <asm/kvm_mmu.h>
+
+#include <nvhe/memory.h>
+
+struct kvm_kcov {
+ kvm_pfn_t *pfns;
+ size_t size;
+};
+
+static DEFINE_PER_CPU(struct kvm_kcov, kvm_kcov);
+
+/*
+ * Hypercall handler body: register (or clear) the per-CPU KCOV area used by
+ * __sanitizer_cov_trace_pc() at EL2.
+ * @pfns: hyp VA (host passes kern_hyp_va()) of an array of PFNs backing the
+ *        area, already shared with hyp by the host; NULL disables tracing
+ *        on this CPU.
+ * @size: number of unsigned long slots in the area.
+ * Always returns 0.
+ */
+int kvm_kcov_set_area(kvm_pfn_t *pfns, size_t size)
+{
+	*this_cpu_ptr(&kvm_kcov) = (struct kvm_kcov){
+		.pfns = pfns,
+		.size = size,
+	};
+
+	return 0;
+}
+
+/*
+ * Convert a hyp return address into the corresponding kernel image address
+ * with the KASLR offset removed, so recorded PCs presumably line up with what
+ * the kernel-side kcov reports (and with vmlinux symbols) — TODO confirm this
+ * matches canonicalize_ip() in kernel/kcov.c.
+ */
+static __always_inline notrace unsigned long canonicalize_ip(unsigned long ip)
+{
+	ip = hyp_kimg_va(ip);
+#ifdef CONFIG_RANDOMIZE_BASE
+	ip -= __kaslr_offset();
+#endif
+	return ip;
+}
+
+/*
+ * Resolve slot @idx of this CPU's KCOV area to a hyp pointer, walking the
+ * PFN table because the (vmalloc'ed) area is not physically contiguous.
+ * Returns NULL when no area is registered or @idx is out of bounds; slot 0
+ * is the PC counter (see __sanitizer_cov_trace_pc()).
+ */
+static __always_inline notrace unsigned long *area_ptr(size_t idx)
+{
+	struct kvm_kcov *kcov = this_cpu_ptr(&kvm_kcov);
+	size_t off = idx * sizeof(unsigned long);
+
+	if (!kcov->pfns || idx >= kcov->size)
+		return NULL;
+
+	/* off / PAGE_SIZE picks the backing page; off % PAGE_SIZE the slot. */
+	return hyp_pfn_to_virt(kcov->pfns[off / PAGE_SIZE]) + (off % PAGE_SIZE);
+}
+
+/*
+ * Workaround for ptrauth: use the plain return address rather than a
+ * potentially PAC-signed one — NOTE(review): confirm _RET_IP_ is unaffected
+ * by this #undef on all toolchains.
+ */
+#undef __builtin_return_address
+
+/*
+ * Compiler-inserted coverage hook (-fsanitize-coverage=trace-pc) for nVHE hyp
+ * code: append the canonicalized caller PC to this CPU's KCOV area, using the
+ * same layout as kernel kcov (slot 0 holds the count of recorded PCs).
+ * Silently drops PCs when no area is set or the area is full. Not atomic;
+ * the area is per-CPU and written only from this CPU.
+ */
+void notrace __sanitizer_cov_trace_pc(void)
+{
+	unsigned long pos, ip = canonicalize_ip(_RET_IP_);
+
+	if (!area_ptr(0))
+		return;
+
+	/* The first 64-bit word is the number of subsequent PCs. */
+	pos = READ_ONCE(*area_ptr(0)) + 1;
+	if (likely(area_ptr(pos))) {
+		*area_ptr(pos) = ip;
+		WRITE_ONCE(*area_ptr(0), pos);
+	}
+}
diff --git a/arch/arm64/kvm/kcov.c b/arch/arm64/kvm/kcov.c
new file mode 100644
index 0000000..dd44ab3
--- /dev/null
+++ b/arch/arm64/kvm/kcov.c
@@ -0,0 +1,129 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2021 - Google LLC
+ * Author: David Brazdil <dbrazdil@google.com>
+ */
+
+#include <linux/kcov.h>
+#include <linux/kvm_host.h>
+
+#include <asm/kvm_mmu.h>
+
+#define KVM_KCOV_DISABLED 0
+#define KVM_KCOV_ENABLED BIT(0)
+#define KVM_KCOV_PREEMPT BIT(1)
+
+static void kcov_stop_kvm_cb(void);
+
+/*
+ * Build a PFN table describing the pages that back the task's kcov area
+ * (assumed vmalloc'ed — see vmalloc_to_page — and page-aligned) and share
+ * both those pages and the table itself with hyp.
+ * @start: kernel VA of the kcov area.
+ * @size:  number of unsigned long slots in the area.
+ * Returns the kmalloc'ed PFN table; caller owns it and must release it via
+ * kcov_unmap_area().
+ * NOTE(review): allocation/share failures BUG() the host — consider
+ * propagating an error instead; the "XXX" shares are per-page and should
+ * probably be batched.
+ */
+static kvm_pfn_t *kcov_map_area(void *start, size_t size)
+{
+	kvm_pfn_t *pfns;
+	void *cur, *end = start + PAGE_ALIGN(size * sizeof(unsigned long));
+	size_t nr_pfns, idx;
+	int ret;
+
+	/*
+	 * The following code assumes area is page-aligned. Otherwise need
+	 * to account for the offset in all pages.
+	 */
+	WARN_ON(!PAGE_ALIGNED(start));
+
+	nr_pfns = (end - start) / PAGE_SIZE;
+	pfns = kmalloc(sizeof(kvm_pfn_t) * nr_pfns, GFP_KERNEL);
+	BUG_ON(!pfns);
+
+	for (cur = start; cur < end; cur += PAGE_SIZE) {
+		idx = (cur - start) / PAGE_SIZE;
+		pfns[idx] = __phys_to_pfn(page_to_phys(vmalloc_to_page(cur)));
+
+		ret = kvm_share_hyp(cur, cur + PAGE_SIZE - 1); /* XXX */
+		BUG_ON(ret);
+	}
+
+	ret = kvm_share_hyp(pfns, pfns + nr_pfns);
+	BUG_ON(ret);
+	return pfns;
+}
+
+/*
+ * Undo kcov_map_area(): unshare the area pages and the PFN table from hyp,
+ * then free the table. @start/@size must match the kcov_map_area() call.
+ */
+static void kcov_unmap_area(kvm_pfn_t *pfns, void *start, size_t size)
+{
+	void *cur, *end = start + PAGE_ALIGN(size * sizeof(unsigned long));
+	size_t nr_pfns = (end - start) / PAGE_SIZE;
+
+	for (cur = start; cur < end; cur += PAGE_SIZE)
+		kvm_unshare_hyp(cur, cur + PAGE_SIZE - 1); /* XXX */
+
+	kvm_unshare_hyp(pfns, pfns + nr_pfns);
+	kfree(pfns);
+}
+
+/*
+ * Begin a KCOV-traced hypercall section for the current task.
+ * Returns a token for kcov_stop_kvm(): KVM_KCOV_DISABLED when tracing is not
+ * active (not in task context, kcov off, already inside a hyp section, or the
+ * stop callback is owned by someone else), otherwise KVM_KCOV_ENABLED
+ * possibly OR'ed with KVM_KCOV_PREEMPT when we disabled preemption here.
+ * On the first call for a task this maps the kcov area into hyp and installs
+ * kcov_stop_kvm_cb() so kcov_stop() can tear it down.
+ */
+int kcov_start_kvm(void)
+{
+	struct task_struct *t = current;
+	kvm_pfn_t *pfns;
+	int err, ret = KVM_KCOV_ENABLED;
+
+	/* Step 1: Are we in a task? */
+	if (!in_task())
+		return KVM_KCOV_DISABLED;
+
+	/* Step 2: Is kcov enabled for this task? Are we inside a kcov hyp section already? */
+	switch (t->kcov_mode) {
+	case KCOV_MODE_TRACE_PC:
+		kcov_prepare_switch(t); /* modifies mode, fails step 4 */
+		break;
+	default:
+		return KVM_KCOV_DISABLED;
+	}
+
+	/* Step 3: Should we map in the area? */
+	if (!t->kcov_stop_cb) {
+		t->kcov_stop_cb = kcov_stop_kvm_cb;
+		t->kcov_stop_cb_arg = kcov_map_area(t->kcov_area, t->kcov_size);
+	} else if (t->kcov_stop_cb != kcov_stop_kvm_cb) {
+		/*
+		 * Another user owns the stop callback. Back out of the switch
+		 * started in step 2 before bailing; kcov_stop_kvm() will not
+		 * run for a DISABLED token, so returning without
+		 * kcov_finish_switch() would leave t->kcov_mode permanently
+		 * modified and kill coverage collection for this task.
+		 */
+		kcov_finish_switch(t);
+		return KVM_KCOV_DISABLED;
+	}
+	pfns = t->kcov_stop_cb_arg;
+
+	/* Step 4: Disable preemption to pin the area to this core. */
+	if (preemptible()) {
+		preempt_disable();
+		ret |= KVM_KCOV_PREEMPT;
+	}
+
+	/* Step 5: Tell hyp to use this area. */
+	err = kvm_call_hyp_nvhe(__kvm_kcov_set_area, kern_hyp_va(pfns), t->kcov_size);
+	BUG_ON(err);
+
+	return ret;
+}
+
+/*
+ * End a KCOV-traced hypercall section. @ret is the token returned by the
+ * matching kcov_start_kvm(); a DISABLED token makes this a no-op. Unwinds
+ * steps 5/4/2 of kcov_start_kvm() in reverse order. The hyp area mapping
+ * itself stays installed for reuse and is torn down by kcov_stop_kvm_cb()
+ * when the task disables kcov.
+ */
+void kcov_stop_kvm(int ret)
+{
+	struct task_struct *t = current;
+
+	if (!(ret & KVM_KCOV_ENABLED))
+		return;
+
+	/* Step 5B: Tell hyp to stop using this area. */
+	WARN_ON(kvm_call_hyp_nvhe(__kvm_kcov_set_area, NULL, 0));
+
+	/* Step 4B: Reenable preemption. */
+	if (ret & KVM_KCOV_PREEMPT)
+		preempt_enable();
+
+	/* Step 2B: Get out of the kcov hyp section. */
+	kcov_finish_switch(t);
+}
+
+/*
+ * kcov_stop() callback, installed by kcov_start_kvm(): unshares and frees the
+ * hyp mapping of the kcov area when the task disables kcov. Brackets the
+ * teardown with kcov_prepare_switch()/kcov_finish_switch() so no coverage is
+ * recorded while the area is being unshared, mirroring kcov_start_kvm().
+ */
+static void kcov_stop_kvm_cb(void)
+{
+	struct task_struct *t = current;
+
+	/* Warn if still in the kcov hyp section. */
+	WARN_ON(t->kcov_mode != KCOV_MODE_TRACE_PC);
+
+	kcov_prepare_switch(t);
+	kcov_unmap_area(t->kcov_stop_cb_arg, t->kcov_area, t->kcov_size);
+	kcov_finish_switch(t);
+}
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index d548141..95fec54 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -318,7 +318,15 @@
{
struct rb_node **node, *parent;
struct hyp_shared_pfn *this;
- int ret = 0;
+ int kcov, ret = 0;
+
+ /*
+ * If KCOV is enabled, then the hypercall inside the critical section
+ * could recursively call this function to share KCOV pages with hyp.
+ * Call the initializer now to avoid recursively taking the mutex.
+ */
+ if (IS_ENABLED(CONFIG_KCOV))
+ kcov = kcov_start_kvm();
mutex_lock(&hyp_shared_pfns_lock);
this = find_shared_pfn(pfn, &node, &parent);
@@ -341,6 +349,8 @@
unlock:
mutex_unlock(&hyp_shared_pfns_lock);
+ if (IS_ENABLED(CONFIG_KCOV))
+ kcov_stop_kvm(kcov);
return ret;
}
diff --git a/arch/arm64/kvm/va_layout.c b/arch/arm64/kvm/va_layout.c
index acdb7b3..77c8653 100644
--- a/arch/arm64/kvm/va_layout.c
+++ b/arch/arm64/kvm/va_layout.c
@@ -289,6 +289,12 @@
generate_mov_q(kimage_voffset, origptr, updptr, nr_inst);
}
+/*
+ * Alternative callback: patch the placeholder movz/movk sequence with a
+ * "mov" of the runtime KASLR offset, consumed by __kaslr_offset() in hyp.
+ */
+void kvm_get_kaslr_offset(struct alt_instr *alt,
+			  __le32 *origptr, __le32 *updptr, int nr_inst)
+{
+	generate_mov_q(kaslr_offset(), origptr, updptr, nr_inst);
+}
+
void kvm_compute_final_ctr_el0(struct alt_instr *alt,
__le32 *origptr, __le32 *updptr, int nr_inst)
{
diff --git a/include/linux/sched.h b/include/linux/sched.h
index c1a927d..e787d62 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1401,6 +1401,9 @@
/* Collect coverage from softirq context: */
unsigned int kcov_softirq;
+
+ void (*kcov_stop_cb)(void);
+ void *kcov_stop_cb_arg;
#endif
#ifdef CONFIG_MEMCG
diff --git a/kernel/kcov.c b/kernel/kcov.c
index 80bfe71..ed1d841 100644
--- a/kernel/kcov.c
+++ b/kernel/kcov.c
@@ -332,6 +332,7 @@
t->kcov_size = size;
t->kcov_area = area;
t->kcov_sequence = sequence;
+ t->kcov_stop_cb = NULL;
/* See comment in check_kcov_mode(). */
barrier();
WRITE_ONCE(t->kcov_mode, mode);
@@ -339,11 +340,14 @@
static void kcov_stop(struct task_struct *t)
{
+	/* Let the registered user (e.g. pKVM) unshare the area before reset. */
+	if (t->kcov_stop_cb)
+		t->kcov_stop_cb();
WRITE_ONCE(t->kcov_mode, KCOV_MODE_DISABLED);
barrier();
t->kcov = NULL;
t->kcov_size = 0;
t->kcov_area = NULL;
+	t->kcov_stop_cb = NULL;
}
static void kcov_task_reset(struct task_struct *t)