KVM: arm64: Add a range to the guest MMIO guard hypercalls
Extend the guest MMIO guard HVCs with a size argument. For ABI
compatibility with existing guests, a size of 0 is treated as a single
page, while support for the range argument is advertised in the meminfo
HVC.
Signed-off-by: Vincent Donnefort <vdonnefort@google.com>
diff --git a/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h b/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
index e6e6e1d..6de4462 100644
--- a/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
+++ b/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
@@ -78,8 +78,10 @@
u64 *shared);
int __pkvm_guest_unshare_host(struct pkvm_hyp_vcpu *hyp_vcpu, u64 ipa, u64 size,
u64 *unshared);
-int __pkvm_install_ioguard_page(struct pkvm_hyp_vcpu *hyp_vcpu, u64 ipa);
-int __pkvm_remove_ioguard_page(struct pkvm_hyp_vcpu *hyp_vcpu, u64 ipa);
+int __pkvm_install_ioguard_page(struct pkvm_hyp_vcpu *hyp_vcpu, u64 ipa,
+ size_t size, size_t *guarded);
+int __pkvm_remove_ioguard_page(struct pkvm_hyp_vcpu *hyp_vcpu, u64 ipa,
+ size_t size, size_t *unguarded);
bool __pkvm_check_ioguard_page(struct pkvm_hyp_vcpu *hyp_vcpu);
int __pkvm_guest_relinquish_to_host(struct pkvm_hyp_vcpu *vcpu,
u64 ipa, u64 *ppa);
diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
index 854e252..f3b0f75 100644
--- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c
+++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
@@ -2002,46 +2002,53 @@
pte == MMIO_NOTE);
}
-int __pkvm_install_ioguard_page(struct pkvm_hyp_vcpu *hyp_vcpu, u64 ipa)
+int __pkvm_install_ioguard_page(struct pkvm_hyp_vcpu *hyp_vcpu, u64 ipa,
+ size_t size, size_t *guarded)
{
+ struct guest_request_walker_data data = GUEST_WALKER_DATA_INIT(PKVM_NOPAGE);
struct pkvm_hyp_vm *vm = pkvm_hyp_vcpu_to_hyp_vm(hyp_vcpu);
- kvm_pte_t pte;
- u32 level;
+ struct kvm_pgtable_walker walker = {
+ .cb = guest_request_walker,
+ .flags = KVM_PGTABLE_WALK_LEAF,
+ .arg = (void *)&data,
+ };
int ret;
if (!test_bit(KVM_ARCH_FLAG_MMIO_GUARD, &vm->kvm.arch.flags))
return -EINVAL;
- if (ipa & ~PAGE_MASK)
+ if (!PAGE_ALIGNED(ipa) || !PAGE_ALIGNED(size))
return -EINVAL;
guest_lock_component(vm);
- ret = kvm_pgtable_get_leaf(&vm->pgt, ipa, &pte, &level);
- if (ret)
+ /*
+ * Check we either have NOMAP or NOMAP|MMIO in this range.
+ */
+ data.desired_mask = ~PKVM_MMIO;
+ ret = kvm_pgtable_walk(&vm->pgt, ipa, size, &walker);
+ if (ret == -E2BIG)
+ ret = 0;
+ else if (ret)
goto unlock;
- if (pte && BIT(ARM64_HW_PGTABLE_LEVEL_SHIFT(level)) == PAGE_SIZE) {
- /*
- * Already flagged as MMIO, let's accept it, and fail
- * otherwise
- */
- if (pte != MMIO_NOTE)
- ret = -EBUSY;
+ /*
+ * Intersection between the requested region and what has been verified
+ */
+ size = min(data.size - (size_t)(ipa - data.ipa_start), size);
- goto unlock;
- }
-
- ret = kvm_pgtable_stage2_annotate(&vm->pgt, ipa, PAGE_SIZE,
+ ret = kvm_pgtable_stage2_annotate(&vm->pgt, ipa, size,
&hyp_vcpu->vcpu.arch.pkvm_memcache,
MMIO_NOTE);
-
+ if (guarded)
+ *guarded = size;
unlock:
guest_unlock_component(vm);
return ret;
}
-int __pkvm_remove_ioguard_page(struct pkvm_hyp_vcpu *hyp_vcpu, u64 ipa)
+int __pkvm_remove_ioguard_page(struct pkvm_hyp_vcpu *hyp_vcpu, u64 ipa,
+ size_t size, size_t *unguarded)
{
struct pkvm_hyp_vm *vm = pkvm_hyp_vcpu_to_hyp_vm(hyp_vcpu);
@@ -2055,6 +2062,10 @@
ALIGN_DOWN(ipa, PAGE_SIZE), PAGE_SIZE));
guest_unlock_component(vm);
+
+ if (unguarded)
+ *unguarded = PAGE_SIZE;
+
return 0;
}
diff --git a/arch/arm64/kvm/hyp/nvhe/pkvm.c b/arch/arm64/kvm/hyp/nvhe/pkvm.c
index 6e74e07..5011191 100644
--- a/arch/arm64/kvm/hyp/nvhe/pkvm.c
+++ b/arch/arm64/kvm/hyp/nvhe/pkvm.c
@@ -1415,9 +1415,14 @@
{
u64 retval = SMCCC_RET_SUCCESS;
u64 ipa = smccc_get_arg1(&hyp_vcpu->vcpu);
+ size_t size = smccc_get_arg3(&hyp_vcpu->vcpu);
+ size_t guarded = 0;
int ret;
- ret = __pkvm_install_ioguard_page(hyp_vcpu, ipa);
+ if (!size)
+ size = PAGE_SIZE;
+
+ ret = __pkvm_install_ioguard_page(hyp_vcpu, ipa, size, &guarded);
if (ret == -ENOMEM) {
struct kvm_hyp_req *req;
@@ -1445,7 +1450,32 @@
if (ret)
retval = SMCCC_RET_INVALID_PARAMETER;
- smccc_set_retval(&hyp_vcpu->vcpu, retval, 0, 0, 0);
+ smccc_set_retval(&hyp_vcpu->vcpu, retval, guarded, 0, 0);
+ return true;
+}
+
+static bool pkvm_remove_ioguard_page(struct pkvm_hyp_vcpu *hyp_vcpu, u64 *exit_code)
+{
+ u64 retval = SMCCC_RET_SUCCESS;
+ u64 ipa = smccc_get_arg1(&hyp_vcpu->vcpu);
+ size_t size = smccc_get_arg2(&hyp_vcpu->vcpu);
+ u64 arg3 = smccc_get_arg3(&hyp_vcpu->vcpu);
+ size_t unguarded = 0;
+ int ret = -EINVAL;
+
+ if (arg3)
+ goto out_guest_err;
+
+ if (!size)
+ size = PAGE_SIZE;
+
+ ret = __pkvm_remove_ioguard_page(hyp_vcpu, ipa, size, &unguarded);
+
+out_guest_err:
+ if (ret)
+ retval = SMCCC_RET_INVALID_PARAMETER;
+
+ smccc_set_retval(&hyp_vcpu->vcpu, retval, unguarded, 0, 0);
return true;
}
@@ -1540,11 +1570,7 @@
case ARM_SMCCC_VENDOR_HYP_KVM_MMIO_GUARD_MAP_FUNC_ID:
return pkvm_install_ioguard_page(hyp_vcpu, exit_code);
case ARM_SMCCC_VENDOR_HYP_KVM_MMIO_GUARD_UNMAP_FUNC_ID:
- if (__pkvm_remove_ioguard_page(hyp_vcpu, vcpu_get_reg(vcpu, 1)))
- val[0] = SMCCC_RET_INVALID_PARAMETER;
- else
- val[0] = SMCCC_RET_SUCCESS;
- break;
+ return pkvm_remove_ioguard_page(hyp_vcpu, exit_code);
case ARM_SMCCC_VENDOR_HYP_KVM_MMIO_GUARD_INFO_FUNC_ID:
case ARM_SMCCC_VENDOR_HYP_KVM_HYP_MEMINFO_FUNC_ID:
return pkvm_meminfo_call(hyp_vcpu);