ANDROID: KVM: arm64: Symbolize pKVM modules EL2 stack trace
The EL2 text section of pKVM modules is mapped in the hypervisor
private range. This means a hyp address cannot be converted to a
kernel one using the hypervisor address offset.
To resolve such addresses, a list keeps track of the pKVM module VA
ranges. When a panic happens, the ELR is compared against those
ranges and, on a match, converted back to a kernel address.
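The conversion itself is a simple linear remap from the module hyp VA
range into its kernel-side mapping. A sketch of the lookup, using the
fields introduced below (mod->token is the module base VA in the hyp
private range, mod->sections its kernel-side mapping):

	if (addr >= mod->token && addr < mod->token + len)
		return (unsigned long)mod->sections.start +
		       (addr - mod->token);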
Bug: 357781595
Bug: 270197971
Change-Id: Iae3336f344296de66a508142806e124d2fc25e54
Signed-off-by: Vincent Donnefort <vdonnefort@google.com>
diff --git a/arch/arm64/include/asm/kvm_pkvm_module.h b/arch/arm64/include/asm/kvm_pkvm_module.h
index cc9411b..48e6f17 100644
--- a/arch/arm64/include/asm/kvm_pkvm_module.h
+++ b/arch/arm64/include/asm/kvm_pkvm_module.h
@@ -173,6 +173,8 @@ struct pkvm_module_ops {
int __pkvm_load_el2_module(struct module *this, unsigned long *token);
int __pkvm_register_el2_call(unsigned long hfn_hyp_va);
+
+unsigned long pkvm_el2_mod_kern_va(unsigned long addr);
#else
static inline int __pkvm_load_el2_module(struct module *this,
unsigned long *token)
@@ -184,6 +186,11 @@ static inline int __pkvm_register_el2_call(unsigned long hfn_hyp_va)
{
return -ENOSYS;
}
+
+static inline unsigned long pkvm_el2_mod_kern_va(unsigned long addr)
+{
+ return 0;
+}
#endif /* CONFIG_MODULES */
int pkvm_load_early_modules(void);
diff --git a/arch/arm64/include/asm/module.h b/arch/arm64/include/asm/module.h
index 6caa2b4..28ca598 100644
--- a/arch/arm64/include/asm/module.h
+++ b/arch/arm64/include/asm/module.h
@@ -29,6 +29,8 @@ struct pkvm_el2_module {
unsigned int nr_hyp_events;
unsigned int nr_hyp_printk_fmts;
kvm_nvhe_reloc_t *relocs;
+	struct list_head node;		/* entry in the pkvm_modules list */
+	unsigned long token;		/* module base VA in the hyp private range */
unsigned int nr_relocs;
int (*init)(const struct pkvm_module_ops *ops);
};
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index 4a19179..b490c398 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -444,15 +444,15 @@ void handle_exit_early(struct kvm_vcpu *vcpu, int exception_index)
kvm_handle_guest_serror(vcpu, kvm_vcpu_get_esr(vcpu));
}
-static void print_nvhe_hyp_panic(const char *name, u64 panic_addr)
+static void print_nvhe_hyp_panic(const char *name, u64 panic_addr, u64 kaslr_off)
{
kvm_err("nVHE hyp %s at: [<%016llx>] %pB!\n", name, panic_addr,
- (void *)(panic_addr + kaslr_offset()));
+ (void *)(panic_addr + kaslr_off));
}
-static void kvm_nvhe_report_cfi_failure(u64 panic_addr)
+static void kvm_nvhe_report_cfi_failure(u64 panic_addr, u64 kaslr_off)
{
- print_nvhe_hyp_panic("CFI failure", panic_addr);
+ print_nvhe_hyp_panic("CFI failure", panic_addr, kaslr_off);
if (IS_ENABLED(CONFIG_CFI_PERMISSIVE))
kvm_err(" (CONFIG_CFI_PERMISSIVE ignored for hyp failures)\n");
@@ -461,11 +461,19 @@ static void kvm_nvhe_report_cfi_failure(u64 panic_addr)
void __noreturn __cold nvhe_hyp_panic_handler(u64 esr, u64 spsr,
u64 elr_virt, u64 elr_phys,
u64 par, uintptr_t vcpu,
- u64 far, u64 hpfar) {
+ u64 far, u64 hpfar)
+{
u64 elr_in_kimg = __phys_to_kimg(elr_phys);
- u64 hyp_offset = elr_in_kimg - kaslr_offset() - elr_virt;
+ u64 kaslr_off = kaslr_offset();
+ u64 hyp_offset = elr_in_kimg - kaslr_off - elr_virt;
u64 mode = spsr & PSR_MODE_MASK;
u64 panic_addr = elr_virt + hyp_offset;
+ u64 mod_addr = pkvm_el2_mod_kern_va(elr_virt);
+
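+	/*
+	 * If the ELR points inside a pKVM module, pkvm_el2_mod_kern_va()
+	 * already returned a kernel VA and no KASLR offset must be added
+	 * for %pB symbolization.
+	 */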
+ if (mod_addr) {
+ panic_addr = mod_addr;
+ kaslr_off = 0;
+ }
if (mode != PSR_MODE_EL2t && mode != PSR_MODE_EL2h) {
kvm_err("Invalid host exception to nVHE hyp!\n");
@@ -486,11 +494,11 @@ void __noreturn __cold nvhe_hyp_panic_handler(u64 esr, u64 spsr,
if (file)
kvm_err("nVHE hyp BUG at: %s:%u!\n", file, line);
else
- print_nvhe_hyp_panic("BUG", panic_addr);
+ print_nvhe_hyp_panic("BUG", panic_addr, kaslr_off);
} else if (IS_ENABLED(CONFIG_CFI_CLANG) && esr_is_cfi_brk(esr)) {
- kvm_nvhe_report_cfi_failure(panic_addr);
+ kvm_nvhe_report_cfi_failure(panic_addr, kaslr_off);
} else {
- print_nvhe_hyp_panic("panic", panic_addr);
+ print_nvhe_hyp_panic("panic", panic_addr, kaslr_off);
}
/* Dump the nVHE hypervisor backtrace */
diff --git a/arch/arm64/kvm/pkvm.c b/arch/arm64/kvm/pkvm.c
index 384994b..b07bafe 100644
--- a/arch/arm64/kvm/pkvm.c
+++ b/arch/arm64/kvm/pkvm.c
@@ -696,6 +696,36 @@ int __init pkvm_load_early_modules(void)
return 0;
}
+#ifdef CONFIG_PROTECTED_NVHE_STACKTRACE
+static LIST_HEAD(pkvm_modules);
+
+static void pkvm_el2_mod_add(struct pkvm_el2_module *mod)
+{
+ INIT_LIST_HEAD(&mod->node);
+ list_add(&mod->node, &pkvm_modules);
+}
+
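+/*
+ * Convert an EL2 address belonging to a loaded pKVM module into its
+ * kernel VA, or return 0 if the address isn't covered by any module.
+ */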
+unsigned long pkvm_el2_mod_kern_va(unsigned long addr)
+{
+ struct pkvm_el2_module *mod;
+
+ list_for_each_entry(mod, &pkvm_modules, node) {
+ size_t len = (unsigned long)mod->sections.end -
+ (unsigned long)mod->sections.start;
+
+		if (addr >= mod->token && addr < mod->token + len)
+			return (unsigned long)mod->sections.start +
+			       (addr - mod->token);
+ }
+
+ return 0;
+}
+#else
+static void pkvm_el2_mod_add(struct pkvm_el2_module *mod) { }
+unsigned long pkvm_el2_mod_kern_va(unsigned long addr) { return 0; }
+#endif
+
struct pkvm_mod_sec_mapping {
struct pkvm_module_section *sec;
enum kvm_pgtable_prot prot;
@@ -834,6 +864,9 @@ int __pkvm_load_el2_module(struct module *this, unsigned long *token)
if (token)
*token = (unsigned long)hyp_va;
+	/* Record the module VA range for EL2 panic symbolization */
+	mod->token = (unsigned long)hyp_va;
+	mod->sections.start = start;
+	mod->sections.end = end;
+
endrel = (void *)mod->relocs + mod->nr_relocs * sizeof(*endrel);
kvm_apply_hyp_module_relocations(mod, mod->relocs, endrel);
@@ -873,6 +906,8 @@ int __pkvm_load_el2_module(struct module *this, unsigned long *token)
return ret;
}
+ pkvm_el2_mod_add(mod);
+
return 0;
}
EXPORT_SYMBOL(__pkvm_load_el2_module);
diff --git a/arch/arm64/kvm/stacktrace.c b/arch/arm64/kvm/stacktrace.c
index 3ace5b7..03bbf5f 100644
--- a/arch/arm64/kvm/stacktrace.c
+++ b/arch/arm64/kvm/stacktrace.c
@@ -20,6 +20,7 @@
#include <linux/kvm_host.h>
#include <asm/stacktrace/nvhe.h>
+#include <asm/kvm_pkvm_module.h>
static struct stack_info stackinfo_get_overflow(void)
{
@@ -147,10 +148,18 @@ static bool kvm_nvhe_dump_backtrace_entry(void *arg, unsigned long where)
{
unsigned long va_mask = GENMASK_ULL(vabits_actual - 1, 0);
unsigned long hyp_offset = (unsigned long)arg;
+ unsigned long mod_addr = pkvm_el2_mod_kern_va(where & va_mask);
+ unsigned long where_kaslr;
- /* Mask tags and convert to kern addr */
- where = (where & va_mask) + hyp_offset;
- kvm_err(" [<%016lx>] %pB\n", where, (void *)(where + kaslr_offset()));
+ if (mod_addr) {
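+		/* Module addresses are already kernel VAs; skip KASLR offset */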
+		where = mod_addr;
+		where_kaslr = where;
+ } else {
+ /* Mask tags and convert to kern addr */
+ where = (where & va_mask) + hyp_offset;
+ where_kaslr = where + kaslr_offset();
+ }
+
+	kvm_err(" [<%016lx>] %pB\n", where, (void *)where_kaslr);
return true;
}