fixup! KVM: arm64: Handle guest_memfd()-backed guest page faults in pKVM
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index e5200d4..b9ed69b 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -1589,19 +1589,24 @@ static int guest_memfd_abort_pkvm(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	struct kvm *kvm = vcpu->kvm;
 	struct kvm_s2_mmu *mmu =  &kvm->arch.mmu;
 	struct page *page = NULL;
+	kvm_pfn_t pfn;
 	int ret;
 
 	ret = topup_hyp_memcache(hyp_memcache, kvm_mmu_cache_min_pages(mmu));
 	if (ret)
 		return ret;
 
-	page = get_guest_memfd_page(kvm, memslot, gfn);
-	if (IS_ERR(page))
-		return PTR_ERR(page);
+	ret = kvm_gmem_get_pfn_locked(kvm, memslot, gfn, &pfn, NULL);
+	if (ret)
+		return ret;
+
+	page = pfn_to_page(pfn);
 
 	guest_page = kmalloc(sizeof(*guest_page), GFP_KERNEL_ACCOUNT);
-	if (!guest_page)
-		return -ENOMEM;
+	if (!guest_page) {
+		ret = -ENOMEM;
+		goto put_page;
+	}
 
 	ret = account_locked_vm(mm, 1, true);
 	if (ret)
@@ -1622,6 +1627,9 @@ static int guest_memfd_abort_pkvm(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 free_ppage:
 	if (ret)
 		kfree(guest_page);
+put_page:
+	unlock_page(page);
+	put_page(page);
 
 	return ret != -EAGAIN ? ret : 0;
 }