KVM: arm64: Fix sparse __percpu warning
Force the casts (using the __force attribute) to silence the sparse
address-space warnings when converting to and from the __percpu-annotated
pointer.
We don't have a proper way to dynamically allocate memory at EL2,
and hence no proper way to dynamically allocate percpu fields.
Instead, we rely on memory donated from the host and index it by
hyp_smp_processor_id().
Reported-by: Todd Kjos <tkjos@google.com>
Change-Id: Ib8b0565ba9550b3f6d8932b24f8f9f69de9168d5
Signed-off-by: Fuad Tabba <tabba@google.com>
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
index 163c95d..64eb84e 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
@@ -622,6 +622,7 @@ static void handle___pkvm_vcpu_load(struct kvm_cpu_context *host_ctxt)
DECLARE_REG(unsigned int, vcpu_idx, host_ctxt, 2);
DECLARE_REG(u64, hcr_el2, host_ctxt, 3);
struct pkvm_hyp_vcpu *hyp_vcpu;
+ int __percpu *last_vcpu_ran;
int *last_ran;
if (!is_protected_kvm_enabled())
@@ -636,7 +637,8 @@ static void handle___pkvm_vcpu_load(struct kvm_cpu_context *host_ctxt)
* vcpu from the same VM has previously run on the same physical CPU,
* nuke the relevant contexts.
*/
- last_ran = &hyp_vcpu->vcpu.arch.hw_mmu->last_vcpu_ran[hyp_smp_processor_id()];
+ last_vcpu_ran = hyp_vcpu->vcpu.arch.hw_mmu->last_vcpu_ran;
+ last_ran = (__force int *) &last_vcpu_ran[hyp_smp_processor_id()];
if (*last_ran != hyp_vcpu->vcpu.vcpu_id) {
__kvm_flush_cpu_context(hyp_vcpu->vcpu.arch.hw_mmu);
*last_ran = hyp_vcpu->vcpu.vcpu_id;
diff --git a/arch/arm64/kvm/hyp/nvhe/pkvm.c b/arch/arm64/kvm/hyp/nvhe/pkvm.c
index 905c05c..2eafde2 100644
--- a/arch/arm64/kvm/hyp/nvhe/pkvm.c
+++ b/arch/arm64/kvm/hyp/nvhe/pkvm.c
@@ -449,8 +449,8 @@ static void init_pkvm_hyp_vm(struct kvm *host_kvm, struct pkvm_hyp_vm *hyp_vm,
hyp_vm->kvm.created_vcpus = nr_vcpus;
hyp_vm->kvm.arch.vtcr = host_mmu.arch.vtcr;
hyp_vm->kvm.arch.pkvm.enabled = READ_ONCE(host_kvm->arch.pkvm.enabled);
- hyp_vm->kvm.arch.mmu.last_vcpu_ran = last_ran;
- memset(hyp_vm->kvm.arch.mmu.last_vcpu_ran, -1, pkvm_get_last_ran_size());
+ hyp_vm->kvm.arch.mmu.last_vcpu_ran = (int __percpu *)last_ran;
+ memset(last_ran, -1, pkvm_get_last_ran_size());
}
static int init_pkvm_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu,
@@ -646,7 +646,7 @@ int __pkvm_init_vm(struct kvm *host_kvm, unsigned long vm_hva,
unsigned long pgd_hva, unsigned long last_ran_hva)
{
struct pkvm_hyp_vm *hyp_vm = NULL;
- void *last_ran = NULL;
+ int *last_ran = NULL;
size_t vm_size, pgd_size, last_ran_size;
unsigned int nr_vcpus;
void *pgd = NULL;
@@ -774,6 +774,7 @@ teardown_donated_memory(struct kvm_hyp_memcache *mc, void *addr, size_t size)
int __pkvm_teardown_vm(pkvm_handle_t handle)
{
size_t vm_size, last_ran_size;
+ int __percpu *last_vcpu_ran;
struct kvm_hyp_memcache *mc;
struct pkvm_hyp_vm *hyp_vm;
struct kvm *host_kvm;
@@ -820,8 +821,9 @@ int __pkvm_teardown_vm(pkvm_handle_t handle)
teardown_donated_memory(mc, hyp_vcpu, sizeof(*hyp_vcpu));
}
+ last_vcpu_ran = hyp_vm->kvm.arch.mmu.last_vcpu_ran;
last_ran_size = pkvm_get_last_ran_size();
- teardown_donated_memory(mc, hyp_vm->kvm.arch.mmu.last_vcpu_ran,
+ teardown_donated_memory(mc, (__force void *)last_vcpu_ran,
last_ran_size);
vm_size = pkvm_get_hyp_vm_size(hyp_vm->kvm.created_vcpus);