ANDROID: KVM: arm64: Add protected_hyp_mem VM statistic
When using nVHE in protected mode, the host allocates memory for the
hypervisor to store its internal structures and the stage-2 page tables.
This footprint has proven useful to track, both for debugging and for
health monitoring. Account for those allocations, in bytes, in a newly
created VM statistic, "protected_hyp_mem".
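
The accounting happens wherever memory changes hands: hyp_alloc_account()
and hyp_free_account() cover hyp-side allocations, while memcache top-ups
are accounted by taking the delta of nr_pages across topup_hyp_memcache().
As a rough illustration of the latter (not part of this patch, the helper
name below is made up), the open-coded pattern used in hypercalls.c and
mmu.c could be expressed as:

  static int topup_hyp_memcache_account(struct kvm *kvm,
                                        struct kvm_hyp_memcache *mc,
                                        unsigned long min_pages)
  {
          unsigned long nr_pages = mc->nr_pages;
          int ret;

          ret = topup_hyp_memcache(mc, min_pages);
          if (ret)
                  return ret;

          /* Only the newly donated pages count towards the statistic. */
          atomic64_add((mc->nr_pages - nr_pages) << PAGE_SHIFT,
                       &kvm->stat.protected_hyp_mem);
          return 0;
  }

Being registered in kvm_vm_stats_desc[], the new counter is exposed
through the usual per-VM statistics interfaces (KVM_GET_STATS_FD and
debugfs).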
Bug: 278749606
Bug: 222044477
Change-Id: I18657d275f2ced67ceb6d0e4bd5ce41cf1d41dc8
Signed-off-by: Vincent Donnefort <vdonnefort@google.com>
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index c76c478..5c2b455 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -299,6 +299,10 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
kvm_unshare_hyp(kvm, kvm + 1);
kvm_arm_teardown_hypercalls(kvm);
+
+ if (atomic64_read(&kvm->stat.protected_hyp_mem))
+ kvm_err("%lluB of donations to the nVHE hyp are missing\n",
+ atomic64_read(&kvm->stat.protected_hyp_mem));
}
static bool kvm_has_full_ptr_auth(void)
@@ -539,10 +543,13 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
if (vcpu_has_run_once(vcpu) && unlikely(!irqchip_in_kernel(vcpu->kvm)))
static_branch_dec(&userspace_irqchip_in_use);
- if (is_protected_kvm_enabled())
+ if (is_protected_kvm_enabled()) {
+ atomic64_sub(vcpu->arch.stage2_mc.nr_pages << PAGE_SHIFT,
+ &vcpu->kvm->stat.protected_hyp_mem);
free_hyp_memcache(&vcpu->arch.stage2_mc);
- else
+ } else {
kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
+ }
kvm_timer_vcpu_terminate(vcpu);
kvm_pmu_vcpu_destroy(vcpu);
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
index 962f985..0b797ed 100644
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -30,7 +30,8 @@
#include "trace.h"
const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
- KVM_GENERIC_VM_STATS()
+ KVM_GENERIC_VM_STATS(),
+ STATS_DESC_ICOUNTER(VM, protected_hyp_mem),
};
const struct kvm_stats_header kvm_vm_stats_header = {
diff --git a/arch/arm64/kvm/hyp/nvhe/pkvm.c b/arch/arm64/kvm/hyp/nvhe/pkvm.c
index 424f4b3..ac148c9 100644
--- a/arch/arm64/kvm/hyp/nvhe/pkvm.c
+++ b/arch/arm64/kvm/hyp/nvhe/pkvm.c
@@ -575,10 +575,11 @@ static void unpin_host_sve_state(struct pkvm_hyp_vcpu *hyp_vcpu)
static void teardown_sve_state(struct pkvm_hyp_vcpu *hyp_vcpu)
{
+ struct pkvm_hyp_vm *hyp_vm = pkvm_hyp_vcpu_to_hyp_vm(hyp_vcpu);
void *sve_state = hyp_vcpu->vcpu.arch.sve_state;
if (sve_state)
- hyp_free(sve_state);
+ hyp_free_account(sve_state, hyp_vm->host_kvm);
}
static void unpin_host_vcpus(struct pkvm_hyp_vcpu *hyp_vcpus[],
@@ -651,7 +652,10 @@ static int pkvm_vcpu_init_sve(struct pkvm_hyp_vcpu *hyp_vcpu, struct kvm_vcpu *h
}
if (pkvm_hyp_vcpu_is_protected(hyp_vcpu)) {
- sve_state = hyp_alloc(sve_state_size);
+ struct pkvm_hyp_vm *hyp_vm = pkvm_hyp_vcpu_to_hyp_vm(hyp_vcpu);
+
+ sve_state = hyp_alloc_account(sve_state_size,
+ hyp_vm->host_kvm);
if (!sve_state) {
ret = hyp_alloc_errno();
goto err;
@@ -817,13 +821,14 @@ int __pkvm_init_vm(struct kvm *host_kvm, unsigned long pgd_hva)
goto err_unpin_kvm;
}
- hyp_vm = hyp_alloc(pkvm_get_hyp_vm_size(nr_vcpus));
+ hyp_vm = hyp_alloc_account(pkvm_get_hyp_vm_size(nr_vcpus),
+ host_kvm);
if (!hyp_vm) {
ret = hyp_alloc_errno();
goto err_unpin_kvm;
}
- last_ran = hyp_alloc(pkvm_get_last_ran_size());
+ last_ran = hyp_alloc_account(pkvm_get_last_ran_size(), host_kvm);
if (!last_ran) {
ret = hyp_alloc_errno();
goto err_free_vm;
@@ -856,9 +861,9 @@ int __pkvm_init_vm(struct kvm *host_kvm, unsigned long pgd_hva)
hyp_write_unlock(&vm_table_lock);
unmap_donated_memory(pgd, pgd_size);
err_free_last_ran:
- hyp_free(last_ran);
+ hyp_free_account(last_ran, host_kvm);
err_free_vm:
- hyp_free(hyp_vm);
+ hyp_free_account(hyp_vm, host_kvm);
err_unpin_kvm:
hyp_unpin_shared_mem(host_kvm, host_kvm + 1);
return ret;
@@ -880,10 +885,6 @@ int __pkvm_init_vcpu(pkvm_handle_t handle, struct kvm_vcpu *host_vcpu)
unsigned int idx;
int ret;
- hyp_vcpu = hyp_alloc(sizeof(*hyp_vcpu));
- if (!hyp_vcpu)
- return hyp_alloc_errno();
-
hyp_read_lock(&vm_table_lock);
hyp_vm = get_vm_by_handle(handle);
@@ -892,6 +893,12 @@ int __pkvm_init_vcpu(pkvm_handle_t handle, struct kvm_vcpu *host_vcpu)
goto unlock_vm;
}
+ hyp_vcpu = hyp_alloc_account(sizeof(*hyp_vcpu), hyp_vm->host_kvm);
+ if (!hyp_vcpu) {
+ ret = hyp_alloc_errno();
+ goto unlock_vm;
+ }
+
hyp_spin_lock(&hyp_vm->vcpus_lock);
idx = hyp_vm->nr_vcpus;
if (idx >= hyp_vm->kvm.created_vcpus) {
@@ -908,17 +915,17 @@ int __pkvm_init_vcpu(pkvm_handle_t handle, struct kvm_vcpu *host_vcpu)
unlock_vcpus:
hyp_spin_unlock(&hyp_vm->vcpus_lock);
+
+ if (ret)
+ hyp_free_account(hyp_vcpu, hyp_vm->host_kvm);
+
unlock_vm:
hyp_read_unlock(&vm_table_lock);
- if (ret) {
- hyp_free(hyp_vcpu);
- return ret;
- }
+ if (!ret)
+ hyp_vcpu->vcpu.arch.cptr_el2 = kvm_get_reset_cptr_el2(&hyp_vcpu->vcpu);
- hyp_vcpu->vcpu.arch.cptr_el2 = kvm_get_reset_cptr_el2(&hyp_vcpu->vcpu);
-
- return 0;
+ return ret;
}
int __pkvm_start_teardown_vm(pkvm_handle_t handle)
@@ -999,11 +1006,12 @@ int __pkvm_finalize_teardown_vm(pkvm_handle_t handle)
if (pkvm_hyp_vcpu_is_protected(hyp_vcpu))
teardown_sve_state(hyp_vcpu);
- hyp_free(hyp_vcpu);
+ hyp_free_account(hyp_vcpu, host_kvm);
}
- hyp_free((__force void *)hyp_vm->kvm.arch.mmu.last_vcpu_ran);
- hyp_free(hyp_vm);
+ hyp_free_account((__force void *)hyp_vm->kvm.arch.mmu.last_vcpu_ran,
+ host_kvm);
+ hyp_free_account(hyp_vm, host_kvm);
hyp_unpin_shared_mem(host_kvm, host_kvm + 1);
return 0;
diff --git a/arch/arm64/kvm/hypercalls.c b/arch/arm64/kvm/hypercalls.c
index 14e44aa..618c6ce 100644
--- a/arch/arm64/kvm/hypercalls.c
+++ b/arch/arm64/kvm/hypercalls.c
@@ -278,6 +278,7 @@ int kvm_smccc_call_handler(struct kvm_vcpu *vcpu)
struct kvm_smccc_features *smccc_feat = &vcpu->kvm->arch.smccc_feat;
u32 func_id = smccc_get_function(vcpu);
u64 val[4] = {SMCCC_RET_NOT_SUPPORTED};
+ int nr_pages;
u32 feature;
u8 action;
gpa_t gpa;
@@ -378,10 +379,18 @@ int kvm_smccc_call_handler(struct kvm_vcpu *vcpu)
kvm_ptp_get_time(vcpu, val);
break;
case ARM_SMCCC_VENDOR_HYP_KVM_MMIO_GUARD_MAP_FUNC_ID:
- if (kvm_vm_is_protected(vcpu->kvm) &&
- !topup_hyp_memcache(&vcpu->arch.stage2_mc,
- kvm_mmu_cache_min_pages(&vcpu->kvm->arch.mmu)))
+ if (!kvm_vm_is_protected(vcpu->kvm))
+ break;
+
+ nr_pages = vcpu->arch.stage2_mc.nr_pages;
+
+ if (!topup_hyp_memcache(&vcpu->arch.stage2_mc,
+ kvm_mmu_cache_min_pages(&vcpu->kvm->arch.mmu))) {
+ nr_pages = vcpu->arch.stage2_mc.nr_pages - nr_pages;
+ atomic64_add(nr_pages << PAGE_SHIFT,
+ &vcpu->kvm->stat.protected_hyp_mem);
val[0] = SMCCC_RET_SUCCESS;
+ }
break;
case ARM_SMCCC_VENDOR_HYP_KVM_MEM_RELINQUISH_FUNC_ID:
pkvm_host_reclaim_page(vcpu->kvm, smccc_get_arg1(vcpu));
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 11535bd..017cdbf 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -1660,13 +1660,17 @@ static int pkvm_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
struct page *page;
gfn_t gfn = fault_ipa >> PAGE_SHIFT;
unsigned long hva = gfn_to_hva_memslot_prot(memslot, gfn, NULL);
+ int ret, nr_pages;
u64 pfn;
- int ret;
+ nr_pages = hyp_memcache->nr_pages;
ret = topup_hyp_memcache(hyp_memcache, kvm_mmu_cache_min_pages(mmu));
if (ret)
return -ENOMEM;
+ nr_pages = hyp_memcache->nr_pages - nr_pages;
+ atomic64_add(nr_pages << PAGE_SHIFT, &kvm->stat.protected_hyp_mem);
+
ppage = kmalloc(sizeof(*ppage), GFP_KERNEL_ACCOUNT);
if (!ppage)
return -ENOMEM;
diff --git a/arch/arm64/kvm/pkvm.c b/arch/arm64/kvm/pkvm.c
index a4f7e7f..9574ae2 100644
--- a/arch/arm64/kvm/pkvm.c
+++ b/arch/arm64/kvm/pkvm.c
@@ -252,6 +252,8 @@ static void __pkvm_destroy_hyp_vm(struct kvm *host_kvm)
out_free:
host_kvm->arch.pkvm.handle = 0;
+ atomic64_sub(host_kvm->arch.pkvm.stage2_teardown_mc.nr_pages << PAGE_SHIFT,
+ &host_kvm->stat.protected_hyp_mem);
free_hyp_memcache(&host_kvm->arch.pkvm.stage2_teardown_mc);
}
@@ -293,6 +295,7 @@ static int __pkvm_create_hyp_vm(struct kvm *host_kvm)
pgd = alloc_pages_exact(pgd_sz, GFP_KERNEL_ACCOUNT);
if (!pgd)
return -ENOMEM;
+ atomic64_add(pgd_sz, &host_kvm->stat.protected_hyp_mem);
init_hyp_stage2_memcache(&host_kvm->arch.pkvm.stage2_teardown_mc);
@@ -322,6 +325,8 @@ static int __pkvm_create_hyp_vm(struct kvm *host_kvm)
return ret;
free_pgd:
free_pages_exact(pgd, pgd_sz);
+ atomic64_sub(pgd_sz, &host_kvm->stat.protected_hyp_mem);
+
return ret;
}