KVM: arm64: Use scoped_guards in pKVM

Error handling code is often a source of bugs because it doesn't get
much love and testing. scoped_guards reduce error-handling complexity
by greatly simplifying locking patterns. This is pretty much always a
good thing, particularly in security-sensitive code where bugs are
costly.

Convert pKVM to use scoped_guards where possible.

No functional changes intended.

Signed-off-by: Quentin Perret <qperret@google.com>
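As an illustration of the pattern (a hand-written sketch, not code from
this patch; `example_lock`, first_step() and second_step() are made up
for illustration), a typical goto-based unlock sequence:

	int example(void)
	{
		int ret;

		hyp_spin_lock(&example_lock);
		ret = first_step();
		if (ret)
			goto unlock;	/* easy to miss on new error paths */
		ret = second_step();
	unlock:
		hyp_spin_unlock(&example_lock);
		return ret;
	}

becomes, with the hyp_spinlock guard defined below:

	int example(void)
	{
		int ret;

		guard(hyp_spinlock)(&example_lock);

		ret = first_step();
		if (ret)
			return ret;	/* the guard drops the lock here */

		return second_step();
	}

guard() and scoped_guard() come from <linux/cleanup.h> and release the
lock automatically when the guard variable goes out of scope, on every
return path.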
diff --git a/arch/arm64/kvm/hyp/include/nvhe/spinlock.h b/arch/arm64/kvm/hyp/include/nvhe/spinlock.h
index 7c7ea8c..9ccf5b0 100644
--- a/arch/arm64/kvm/hyp/include/nvhe/spinlock.h
+++ b/arch/arm64/kvm/hyp/include/nvhe/spinlock.h
@@ -122,4 +122,6 @@ static inline void hyp_assert_lock_held(hyp_spinlock_t *lock)
 static inline void hyp_assert_lock_held(hyp_spinlock_t *lock) { }
 #endif
 
+DEFINE_GUARD(hyp_spinlock, hyp_spinlock_t *, hyp_spin_lock(_T), hyp_spin_unlock(_T))
+
 #endif /* __ARM64_KVM_NVHE_SPINLOCK_H__ */
diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
index 3f4dfa7..5290d1e 100644
--- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c
+++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
@@ -28,37 +28,21 @@ static struct hyp_pool host_s2_pool;
 static DEFINE_PER_CPU(struct pkvm_hyp_vm *, __current_vm);
 #define current_vm (*this_cpu_ptr(&__current_vm))
 
-static void guest_lock_component(struct pkvm_hyp_vm *vm)
+static void __guest_lock(struct pkvm_hyp_vm *vm)
 {
 	hyp_spin_lock(&vm->lock);
 	current_vm = vm;
 }
 
-static void guest_unlock_component(struct pkvm_hyp_vm *vm)
+static void __guest_unlock(struct pkvm_hyp_vm *vm)
 {
 	current_vm = NULL;
 	hyp_spin_unlock(&vm->lock);
 }
 
-static void host_lock_component(void)
-{
-	hyp_spin_lock(&host_mmu.lock);
-}
-
-static void host_unlock_component(void)
-{
-	hyp_spin_unlock(&host_mmu.lock);
-}
-
-static void hyp_lock_component(void)
-{
-	hyp_spin_lock(&pkvm_pgd_lock);
-}
-
-static void hyp_unlock_component(void)
-{
-	hyp_spin_unlock(&pkvm_pgd_lock);
-}
+DEFINE_GUARD(guest, struct pkvm_hyp_vm *, __guest_lock(_T), __guest_unlock(_T))
+DEFINE_LOCK_GUARD_0(host, hyp_spin_lock(&host_mmu.lock), hyp_spin_unlock(&host_mmu.lock))
+DEFINE_LOCK_GUARD_0(hyp, hyp_spin_lock(&pkvm_pgd_lock), hyp_spin_unlock(&pkvm_pgd_lock))
 
 static void *host_s2_zalloc_pages_exact(size_t size)
 {
@@ -254,10 +238,9 @@ int kvm_guest_prepare_stage2(struct pkvm_hyp_vm *vm, void *pgd)
 		.icache_inval_pou	= invalidate_icache_guest_page,
 	};
 
-	guest_lock_component(vm);
-	ret = __kvm_pgtable_stage2_init(mmu->pgt, mmu, &vm->mm_ops, 0,
-					guest_stage2_force_pte_cb);
-	guest_unlock_component(vm);
+	scoped_guard(guest, vm)
+		ret = __kvm_pgtable_stage2_init(mmu->pgt, mmu, &vm->mm_ops, 0,
+						guest_stage2_force_pte_cb);
 
 	if (ret)
 		return ret;
@@ -272,10 +255,10 @@ void reclaim_guest_pages(struct pkvm_hyp_vm *vm, struct kvm_hyp_memcache *mc)
 	void *addr;
 
 	/* Dump all pgtable pages in the hyp_pool */
-	guest_lock_component(vm);
-	kvm_pgtable_stage2_destroy(&vm->pgt);
-	vm->kvm.arch.mmu.pgd_phys = 0ULL;
-	guest_unlock_component(vm);
+	scoped_guard(guest, vm) {
+		kvm_pgtable_stage2_destroy(&vm->pgt);
+		vm->kvm.arch.mmu.pgd_phys = 0ULL;
+	}
 
 	/* Drain the hyp_pool into the memcache */
 	addr = hyp_alloc_pages(&vm->pool, 0);
@@ -551,16 +534,12 @@ static int host_stage2_idmap(u64 addr)
 
 	prot = is_memory ? PKVM_HOST_MEM_PROT : PKVM_HOST_MMIO_PROT;
 
-	host_lock_component();
+	guard(host)();
 	ret = host_stage2_adjust_range(addr, &range);
 	if (ret)
-		goto unlock;
+		return ret;
 
-	ret = host_stage2_idmap_locked(range.start, range.end - range.start, prot);
-unlock:
-	host_unlock_component();
-
-	return ret;
+	return host_stage2_idmap_locked(range.start, range.end - range.start, prot);
 }
 
 void handle_host_mem_abort(struct kvm_cpu_context *host_ctxt)
@@ -691,27 +670,23 @@ int __pkvm_host_share_hyp(u64 pfn)
 	u64 size = PAGE_SIZE;
 	int ret;
 
-	host_lock_component();
-	hyp_lock_component();
+	guard(host)();
+	guard(hyp)();
 
 	ret = __host_check_page_state_range(phys, size, PKVM_PAGE_OWNED);
 	if (ret)
-		goto unlock;
+		return ret;
 	if (IS_ENABLED(CONFIG_NVHE_EL2_DEBUG)) {
 		ret = __hyp_check_page_state_range((u64)virt, size, PKVM_NOPAGE);
 		if (ret)
-			goto unlock;
+			return ret;
 	}
 
 	prot = pkvm_mkstate(PAGE_HYP, PKVM_PAGE_SHARED_BORROWED);
 	WARN_ON(pkvm_create_mappings_locked(virt, virt + size, prot));
 	WARN_ON(__host_set_page_state_range(phys, size, PKVM_PAGE_SHARED_OWNED));
 
-unlock:
-	hyp_unlock_component();
-	host_unlock_component();
-
-	return ret;
+	return 0;
 }
 
 int __pkvm_host_unshare_hyp(u64 pfn)
@@ -721,28 +696,22 @@ int __pkvm_host_unshare_hyp(u64 pfn)
 	u64 size = PAGE_SIZE;
 	int ret;
 
-	host_lock_component();
-	hyp_lock_component();
+	guard(host)();
+	guard(hyp)();
 
 	ret = __host_check_page_state_range(phys, size, PKVM_PAGE_SHARED_OWNED);
 	if (ret)
-		goto unlock;
+		return ret;
 	ret = __hyp_check_page_state_range(virt, size, PKVM_PAGE_SHARED_BORROWED);
 	if (ret)
-		goto unlock;
-	if (hyp_page_count((void *)virt)) {
-		ret = -EBUSY;
-		goto unlock;
-	}
+		return ret;
+	if (hyp_page_count((void *)virt))
+		return -EBUSY;
 
 	WARN_ON(kvm_pgtable_hyp_unmap(&pkvm_pgtable, virt, size) != size);
 	WARN_ON(__host_set_page_state_range(phys, size, PKVM_PAGE_OWNED));
 
-unlock:
-	hyp_unlock_component();
-	host_unlock_component();
-
-	return ret;
+	return 0;
 }
 
 int __pkvm_host_donate_hyp(u64 pfn, u64 nr_pages)
@@ -753,27 +722,23 @@ int __pkvm_host_donate_hyp(u64 pfn, u64 nr_pages)
 	enum kvm_pgtable_prot prot;
 	int ret;
 
-	host_lock_component();
-	hyp_lock_component();
+	guard(host)();
+	guard(hyp)();
 
 	ret = __host_check_page_state_range(phys, size, PKVM_PAGE_OWNED);
 	if (ret)
-		goto unlock;
+		return ret;
 	if (IS_ENABLED(CONFIG_NVHE_EL2_DEBUG)) {
 		ret = __hyp_check_page_state_range((u64)virt, size, PKVM_NOPAGE);
 		if (ret)
-			goto unlock;
+			return ret;
 	}
 
 	prot = pkvm_mkstate(PAGE_HYP, PKVM_PAGE_OWNED);
 	WARN_ON(pkvm_create_mappings_locked(virt, virt + size, prot));
 	WARN_ON(host_stage2_set_owner_locked(phys, size, PKVM_ID_HYP));
 
-unlock:
-	hyp_unlock_component();
-	host_unlock_component();
-
-	return ret;
+	return 0;
 }
 
 int __pkvm_hyp_donate_host(u64 pfn, u64 nr_pages)
@@ -783,26 +748,22 @@ int __pkvm_hyp_donate_host(u64 pfn, u64 nr_pages)
 	u64 virt = (u64)__hyp_va(phys);
 	int ret;
 
-	host_lock_component();
-	hyp_lock_component();
+	guard(host)();
+	guard(hyp)();
 
 	ret = __hyp_check_page_state_range(virt, size, PKVM_PAGE_OWNED);
 	if (ret)
-		goto unlock;
+		return ret;
 	if (IS_ENABLED(CONFIG_NVHE_EL2_DEBUG)) {
 		ret = __host_check_page_state_range(phys, size, PKVM_NOPAGE);
 		if (ret)
-			goto unlock;
+			return ret;
 	}
 
 	WARN_ON(kvm_pgtable_hyp_unmap(&pkvm_pgtable, virt, size) != size);
 	WARN_ON(host_stage2_set_owner_locked(phys, size, PKVM_ID_HOST));
 
-unlock:
-	hyp_unlock_component();
-	host_unlock_component();
-
-	return ret;
+	return 0;
 }
 
 int hyp_pin_shared_mem(void *from, void *to)
@@ -812,27 +773,21 @@ int hyp_pin_shared_mem(void *from, void *to)
 	u64 size = end - start;
 	int ret;
 
-	host_lock_component();
-	hyp_lock_component();
+	guard(host)();
+	guard(hyp)();
 
-	ret = __host_check_page_state_range(__hyp_pa(start), size,
-					    PKVM_PAGE_SHARED_OWNED);
+	ret = __host_check_page_state_range(__hyp_pa(start), size, PKVM_PAGE_SHARED_OWNED);
 	if (ret)
-		goto unlock;
+		return ret;
 
-	ret = __hyp_check_page_state_range(start, size,
-					   PKVM_PAGE_SHARED_BORROWED);
+	ret = __hyp_check_page_state_range(start, size, PKVM_PAGE_SHARED_BORROWED);
 	if (ret)
-		goto unlock;
+		return ret;
 
 	for (cur = start; cur < end; cur += PAGE_SIZE)
 		hyp_page_ref_inc(hyp_virt_to_page(cur));
 
-unlock:
-	hyp_unlock_component();
-	host_unlock_component();
-
-	return ret;
+	return 0;
 }
 
 void hyp_unpin_shared_mem(void *from, void *to)
@@ -840,14 +795,11 @@ void hyp_unpin_shared_mem(void *from, void *to)
 	u64 cur, start = ALIGN_DOWN((u64)from, PAGE_SIZE);
 	u64 end = PAGE_ALIGN((u64)to);
 
-	host_lock_component();
-	hyp_lock_component();
+	guard(host)();
+	guard(hyp)();
 
 	for (cur = start; cur < end; cur += PAGE_SIZE)
 		hyp_page_ref_dec(hyp_virt_to_page(cur));
-
-	hyp_unlock_component();
-	host_unlock_component();
 }
 
 int __pkvm_host_share_ffa(u64 pfn, u64 nr_pages)
@@ -856,11 +808,10 @@ int __pkvm_host_share_ffa(u64 pfn, u64 nr_pages)
 	u64 size = PAGE_SIZE * nr_pages;
 	int ret;
 
-	host_lock_component();
+	guard(host)();
 	ret = __host_check_page_state_range(phys, size, PKVM_PAGE_OWNED);
 	if (!ret)
 		ret = __host_set_page_state_range(phys, size, PKVM_PAGE_SHARED_OWNED);
-	host_unlock_component();
 
 	return ret;
 }
@@ -871,11 +822,10 @@ int __pkvm_host_unshare_ffa(u64 pfn, u64 nr_pages)
 	u64 size = PAGE_SIZE * nr_pages;
 	int ret;
 
-	host_lock_component();
+	guard(host)();
 	ret = __host_check_page_state_range(phys, size, PKVM_PAGE_SHARED_OWNED);
 	if (!ret)
 		ret = __host_set_page_state_range(phys, size, PKVM_PAGE_OWNED);
-	host_unlock_component();
 
 	return ret;
 }
@@ -896,12 +846,12 @@ int __pkvm_host_share_guest(u64 pfn, u64 gfn, struct pkvm_hyp_vcpu *vcpu,
 	if (ret)
 		return ret;
 
-	host_lock_component();
-	guest_lock_component(vm);
+	guard(host)();
+	guard(guest)(vm);
 
 	ret = __guest_check_page_state_range(vcpu, ipa, PAGE_SIZE, PKVM_NOPAGE);
 	if (ret)
-		goto unlock;
+		return ret;
 
 	page = hyp_phys_to_page(phys);
 	switch (page->host_state) {
@@ -915,8 +865,7 @@ int __pkvm_host_share_guest(u64 pfn, u64 gfn, struct pkvm_hyp_vcpu *vcpu,
 		WARN_ON(1);
 		fallthrough;
 	default:
-		ret = -EPERM;
-		goto unlock;
+		return -EPERM;
 	}
 
 	WARN_ON(kvm_pgtable_stage2_map(&vm->pgt, ipa, PAGE_SIZE, phys,
@@ -924,11 +873,7 @@ int __pkvm_host_share_guest(u64 pfn, u64 gfn, struct pkvm_hyp_vcpu *vcpu,
 				       &vcpu->vcpu.arch.pkvm_memcache, 0));
 	page->host_share_guest_count++;
 
-unlock:
-	guest_unlock_component(vm);
-	host_unlock_component();
-
-	return ret;
+	return 0;
 }
 
 static int __check_host_shared_guest(struct pkvm_hyp_vm *vm, u64 *__phys, u64 ipa)
@@ -975,27 +920,23 @@ int __pkvm_host_unshare_guest(u64 gfn, struct pkvm_hyp_vm *vm)
 	u64 phys;
 	int ret;
 
-	host_lock_component();
-	guest_lock_component(vm);
+	guard(host)();
+	guard(guest)(vm);
 
 	ret = __check_host_shared_guest(vm, &phys, ipa);
 	if (ret)
-		goto unlock;
+		return ret;
 
 	ret = kvm_pgtable_stage2_unmap(&vm->pgt, ipa, PAGE_SIZE);
 	if (ret)
-		goto unlock;
+		return ret;
 
 	page = hyp_phys_to_page(phys);
 	page->host_share_guest_count--;
 	if (!page->host_share_guest_count)
 		WARN_ON(__host_set_page_state_range(phys, PAGE_SIZE, PKVM_PAGE_OWNED));
 
-unlock:
-	guest_unlock_component(vm);
-	host_unlock_component();
-
-	return ret;
+	return 0;
 }
 
 int __pkvm_host_relax_perms_guest(u64 gfn, struct pkvm_hyp_vcpu *vcpu, enum kvm_pgtable_prot prot)
@@ -1008,16 +949,13 @@ int __pkvm_host_relax_perms_guest(u64 gfn, struct pkvm_hyp_vcpu *vcpu, enum kvm_
 	if (prot & ~KVM_PGTABLE_PROT_RWX)
 		return -EINVAL;
 
-	host_lock_component();
-	guest_lock_component(vm);
+	guard(host)();
+	guard(guest)(vm);
 
 	ret = __check_host_shared_guest(vm, &phys, ipa);
 	if (!ret)
 		ret = kvm_pgtable_stage2_relax_perms(&vm->pgt, ipa, prot, 0);
 
-	guest_unlock_component(vm);
-	host_unlock_component();
-
 	return ret;
 }
 
@@ -1027,16 +965,13 @@ int __pkvm_host_wrprotect_guest(u64 gfn, struct pkvm_hyp_vm *vm)
 	u64 phys;
 	int ret;
 
-	host_lock_component();
-	guest_lock_component(vm);
+	guard(host)();
+	guard(guest)(vm);
 
 	ret = __check_host_shared_guest(vm, &phys, ipa);
 	if (!ret)
 		ret = kvm_pgtable_stage2_wrprotect(&vm->pgt, ipa, PAGE_SIZE);
 
-	guest_unlock_component(vm);
-	host_unlock_component();
-
 	return ret;
 }
 
@@ -1046,16 +981,13 @@ int __pkvm_host_test_clear_young_guest(u64 gfn, bool mkold, struct pkvm_hyp_vm *
 	u64 phys;
 	int ret;
 
-	host_lock_component();
-	guest_lock_component(vm);
+	guard(host)();
+	guard(guest)(vm);
 
 	ret = __check_host_shared_guest(vm, &phys, ipa);
 	if (!ret)
 		ret = kvm_pgtable_stage2_test_clear_young(&vm->pgt, ipa, PAGE_SIZE, mkold);
 
-	guest_unlock_component(vm);
-	host_unlock_component();
-
 	return ret;
 }
 
@@ -1066,16 +998,13 @@ int __pkvm_host_mkyoung_guest(u64 gfn, struct pkvm_hyp_vcpu *vcpu)
 	u64 phys;
 	int ret;
 
-	host_lock_component();
-	guest_lock_component(vm);
+	guard(host)();
+	guard(guest)(vm);
 
 	ret = __check_host_shared_guest(vm, &phys, ipa);
 	if (!ret)
 		kvm_pgtable_stage2_mkyoung(&vm->pgt, ipa, 0);
 
-	guest_unlock_component(vm);
-	host_unlock_component();
-
 	return ret;
 }
 
@@ -1094,13 +1023,11 @@ static void assert_page_state(void)
 	u64 size = PAGE_SIZE << selftest_page->order;
 	u64 phys = hyp_virt_to_phys(virt);
 
-	host_lock_component();
-	WARN_ON(__host_check_page_state_range(phys, size, selftest_state.host));
-	host_unlock_component();
+	scoped_guard(host)
+		WARN_ON(__host_check_page_state_range(phys, size, selftest_state.host));
 
-	hyp_lock_component();
-	WARN_ON(__hyp_check_page_state_range((u64)virt, size, selftest_state.hyp));
-	hyp_unlock_component();
+	scoped_guard(hyp)
+		WARN_ON(__hyp_check_page_state_range((u64)virt, size, selftest_state.hyp));
 }
 
 #define assert_transition_res(res, fn, ...)				\
diff --git a/arch/arm64/kvm/hyp/nvhe/page_alloc.c b/arch/arm64/kvm/hyp/nvhe/page_alloc.c
index a1eb27a..8c49e58 100644
--- a/arch/arm64/kvm/hyp/nvhe/page_alloc.c
+++ b/arch/arm64/kvm/hyp/nvhe/page_alloc.c
@@ -9,6 +9,8 @@
 
 u64 __hyp_vmemmap;
 
+DEFINE_GUARD(hyp_pool, struct hyp_pool *, hyp_spin_lock(&(_T)->lock), hyp_spin_unlock(&(_T)->lock))
+
 /*
  * Index the hyp_vmemmap to find a potential buddy page, but make no assumption
  * about its current state.
@@ -165,20 +167,14 @@ static void __hyp_put_page(struct hyp_pool *pool, struct hyp_page *p)
  */
 void hyp_put_page(struct hyp_pool *pool, void *addr)
 {
-	struct hyp_page *p = hyp_virt_to_page(addr);
-
-	hyp_spin_lock(&pool->lock);
-	__hyp_put_page(pool, p);
-	hyp_spin_unlock(&pool->lock);
+	guard(hyp_pool)(pool);
+	__hyp_put_page(pool, hyp_virt_to_page(addr));
 }
 
 void hyp_get_page(struct hyp_pool *pool, void *addr)
 {
-	struct hyp_page *p = hyp_virt_to_page(addr);
-
-	hyp_spin_lock(&pool->lock);
-	hyp_page_ref_inc(p);
-	hyp_spin_unlock(&pool->lock);
+	guard(hyp_pool)(pool);
+	hyp_page_ref_inc(hyp_virt_to_page(addr));
 }
 
 void hyp_split_page(struct hyp_page *p)
@@ -200,22 +196,19 @@ void *hyp_alloc_pages(struct hyp_pool *pool, u8 order)
 	struct hyp_page *p;
 	u8 i = order;
 
-	hyp_spin_lock(&pool->lock);
+	guard(hyp_pool)(pool);
 
 	/* Look for a high-enough-order page */
 	while (i <= pool->max_order && list_empty(&pool->free_area[i]))
 		i++;
-	if (i > pool->max_order) {
-		hyp_spin_unlock(&pool->lock);
+	if (i > pool->max_order)
 		return NULL;
-	}
 
 	/* Extract it from the tree at the right order */
 	p = node_to_page(pool->free_area[i].next);
 	p = __hyp_extract_page(pool, p, order);
 
 	hyp_set_page_refcounted(p);
-	hyp_spin_unlock(&pool->lock);
 
 	return hyp_page_to_virt(p);
 }
diff --git a/arch/arm64/kvm/hyp/nvhe/pkvm.c b/arch/arm64/kvm/hyp/nvhe/pkvm.c
index 2c618f2..027a473 100644
--- a/arch/arm64/kvm/hyp/nvhe/pkvm.c
+++ b/arch/arm64/kvm/hyp/nvhe/pkvm.c
@@ -316,26 +316,22 @@ struct pkvm_hyp_vcpu *pkvm_load_hyp_vcpu(pkvm_handle_t handle,
 	if (__this_cpu_read(loaded_hyp_vcpu))
 		return NULL;
 
-	hyp_spin_lock(&vm_table_lock);
-	hyp_vm = get_vm_by_handle(handle);
-	if (!hyp_vm || hyp_vm->nr_vcpus <= vcpu_idx)
-		goto unlock;
+	scoped_guard(hyp_spinlock, &vm_table_lock) {
+		hyp_vm = get_vm_by_handle(handle);
+		if (!hyp_vm || hyp_vm->nr_vcpus <= vcpu_idx)
+			return NULL;
 
-	hyp_vcpu = hyp_vm->vcpus[vcpu_idx];
+		hyp_vcpu = hyp_vm->vcpus[vcpu_idx];
 
-	/* Ensure vcpu isn't loaded on more than one cpu simultaneously. */
-	if (unlikely(hyp_vcpu->loaded_hyp_vcpu)) {
-		hyp_vcpu = NULL;
-		goto unlock;
+		/* Ensure vcpu isn't loaded on more than one cpu simultaneously. */
+		if (unlikely(hyp_vcpu->loaded_hyp_vcpu))
+			return NULL;
+
+		hyp_vcpu->loaded_hyp_vcpu = this_cpu_ptr(&loaded_hyp_vcpu);
+		hyp_page_ref_inc(hyp_virt_to_page(hyp_vm));
 	}
 
-	hyp_vcpu->loaded_hyp_vcpu = this_cpu_ptr(&loaded_hyp_vcpu);
-	hyp_page_ref_inc(hyp_virt_to_page(hyp_vm));
-unlock:
-	hyp_spin_unlock(&vm_table_lock);
-
-	if (hyp_vcpu)
-		__this_cpu_write(loaded_hyp_vcpu, hyp_vcpu);
+	__this_cpu_write(loaded_hyp_vcpu, hyp_vcpu);
 
 	return hyp_vcpu;
 }
@@ -343,11 +339,10 @@ void pkvm_put_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu)
 {
 	struct pkvm_hyp_vm *hyp_vm = pkvm_hyp_vcpu_to_hyp_vm(hyp_vcpu);
 
-	hyp_spin_lock(&vm_table_lock);
+	guard(hyp_spinlock)(&vm_table_lock);
 	hyp_vcpu->loaded_hyp_vcpu = NULL;
 	__this_cpu_write(loaded_hyp_vcpu, NULL);
 	hyp_page_ref_dec(hyp_virt_to_page(hyp_vm));
-	hyp_spin_unlock(&vm_table_lock);
 }
 
 struct pkvm_hyp_vcpu *pkvm_get_loaded_hyp_vcpu(void)
@@ -360,20 +355,18 @@ struct pkvm_hyp_vm *get_pkvm_hyp_vm(pkvm_handle_t handle)
 {
 	struct pkvm_hyp_vm *hyp_vm;
 
-	hyp_spin_lock(&vm_table_lock);
+	guard(hyp_spinlock)(&vm_table_lock);
 	hyp_vm = get_vm_by_handle(handle);
 	if (hyp_vm)
 		hyp_page_ref_inc(hyp_virt_to_page(hyp_vm));
-	hyp_spin_unlock(&vm_table_lock);
 
 	return hyp_vm;
 }
 
 void put_pkvm_hyp_vm(struct pkvm_hyp_vm *hyp_vm)
 {
-	hyp_spin_lock(&vm_table_lock);
+	guard(hyp_spinlock)(&vm_table_lock);
 	hyp_page_ref_dec(hyp_virt_to_page(hyp_vm));
-	hyp_spin_unlock(&vm_table_lock);
 }
 
 struct pkvm_hyp_vm *get_np_pkvm_hyp_vm(pkvm_handle_t handle)
@@ -778,27 +771,21 @@ int __pkvm_teardown_vm(pkvm_handle_t handle)
 	struct kvm *host_kvm;
 	unsigned int idx;
 	size_t vm_size;
-	int err;
 
-	hyp_spin_lock(&vm_table_lock);
-	hyp_vm = get_vm_by_handle(handle);
-	if (!hyp_vm) {
-		err = -ENOENT;
-		goto err_unlock;
+	scoped_guard(hyp_spinlock, &vm_table_lock) {
+		hyp_vm = get_vm_by_handle(handle);
+		if (!hyp_vm)
+			return -ENOENT;
+		if (WARN_ON(hyp_page_count(hyp_vm)))
+			return -EBUSY;
+
+		host_kvm = hyp_vm->host_kvm;
+
+		/* Ensure the VMID is clean before it can be reallocated */
+		__kvm_tlb_flush_vmid(&hyp_vm->kvm.arch.mmu);
+		remove_vm_table_entry(handle);
 	}
-
-	if (WARN_ON(hyp_page_count(hyp_vm))) {
-		err = -EBUSY;
-		goto err_unlock;
-	}
-
-	host_kvm = hyp_vm->host_kvm;
-
-	/* Ensure the VMID is clean before it can be reallocated */
-	__kvm_tlb_flush_vmid(&hyp_vm->kvm.arch.mmu);
-	remove_vm_table_entry(handle);
-	hyp_spin_unlock(&vm_table_lock);
 
 	/* Reclaim guest pages (including page-table pages) */
 	mc = &host_kvm->arch.pkvm.teardown_mc;
 	reclaim_guest_pages(hyp_vm, mc);
@@ -822,9 +809,6 @@ int __pkvm_teardown_vm(pkvm_handle_t handle)
 	vm_size = pkvm_get_hyp_vm_size(hyp_vm->kvm.created_vcpus);
 	teardown_donated_memory(mc, hyp_vm, vm_size);
 	hyp_unpin_shared_mem(host_kvm, host_kvm + 1);
-	return 0;
 
-err_unlock:
-	hyp_spin_unlock(&vm_table_lock);
-	return err;
+	return 0;
 }