ANDROID: KVM: arm64: Range-based module_change_host_prot
Extend module_change_host_page_prot() to apply to a range of pages by
adding an nr_pages argument to the host_stage2_mod_prot module op. This
allows protection attributes to be changed for a contiguous range of
pages via a single module API call. The range must lie within a single
memory or MMIO block and, for memory, must be uniformly module-owned or
uniformly pristine.
Bug: 278749606
Bug: 308373293
Bug: 357781595
Change-Id: Iaae1de1724a8c2c739bcb4ed0de2293dae5096a4
Signed-off-by: Keir Fraser <keirf@google.com>
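For illustration, a minimal module-side usage sketch (not part of this
patch). It assumes "ops" is the struct pkvm_module_ops pointer handed
to the module at init time; only the host_stage2_mod_prot signature
comes from this change, everything else here is hypothetical.

	/* Make a multi-page buffer read-only for the host, then restore it. */
	static int protect_buffer(const struct pkvm_module_ops *ops,
				  u64 pfn, u64 nr_pages)
	{
		int ret;

		/* One call now covers the whole range, not one call per page. */
		ret = ops->host_stage2_mod_prot(pfn, KVM_PGTABLE_PROT_R, nr_pages);
		if (ret)
			return ret;

		/* ... the host can read, but not write, the range ... */

		/* RWX restores full host access and clears module ownership. */
		return ops->host_stage2_mod_prot(pfn, KVM_PGTABLE_PROT_RWX, nr_pages);
	}

The call fails with -EPERM if the range spans more than one memory or
MMIO block; a memory range must also be uniformly module-owned or
uniformly pristine.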
diff --git a/arch/arm64/include/asm/kvm_pkvm_module.h b/arch/arm64/include/asm/kvm_pkvm_module.h
index 5ba4d44..1e0c8d0 100644
--- a/arch/arm64/include/asm/kvm_pkvm_module.h
+++ b/arch/arm64/include/asm/kvm_pkvm_module.h
@@ -145,7 +145,7 @@
 	void (*update_hcr_el2)(unsigned long set_mask, unsigned long clear_mask);
 	void (*update_hfgwtr_el2)(unsigned long set_mask, unsigned long clear_mask);
 	int (*register_host_perm_fault_handler)(int (*cb)(struct user_pt_regs *regs, u64 esr, u64 addr));
-	int (*host_stage2_mod_prot)(u64 pfn, enum kvm_pgtable_prot prot);
+	int (*host_stage2_mod_prot)(u64 pfn, enum kvm_pgtable_prot prot, u64 nr_pages);
 	int (*host_stage2_get_leaf)(phys_addr_t phys, kvm_pte_t *ptep, s8 *level);
 	int (*register_host_smc_handler)(bool (*cb)(struct user_pt_regs *));
 	int (*register_default_trap_handler)(bool (*cb)(struct user_pt_regs *));
diff --git a/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h b/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
index b4abf6e..30bcc8b 100644
--- a/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
+++ b/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
@@ -105,7 +105,7 @@
 void destroy_hyp_vm_pgt(struct pkvm_hyp_vm *vm);
 void drain_hyp_pool(struct pkvm_hyp_vm *vm, struct kvm_hyp_memcache *mc);
 
-int module_change_host_page_prot(u64 pfn, enum kvm_pgtable_prot prot);
+int module_change_host_page_prot(u64 pfn, enum kvm_pgtable_prot prot, u64 nr_pages);
 
 void psci_mem_protect_inc(u64 n);
 void psci_mem_protect_dec(u64 n);
diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
index d5610cc..5debb32 100644
--- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c
+++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
@@ -1860,59 +1860,75 @@
 		 KVM_PGTABLE_PROT_NORMAL_NC |	\
 		 KVM_PGTABLE_PROT_PXN |		\
 		 KVM_PGTABLE_PROT_UXN)
-int module_change_host_page_prot(u64 pfn, enum kvm_pgtable_prot prot)
+
+int module_change_host_page_prot(u64 pfn, enum kvm_pgtable_prot prot, u64 nr_pages)
 {
-	u64 addr = hyp_pfn_to_phys(pfn);
+	u64 i, addr = hyp_pfn_to_phys(pfn);
+	u64 end = addr + nr_pages * PAGE_SIZE;
 	struct hyp_page *page = NULL;
-	kvm_pte_t pte;
-	s8 level;
+	struct kvm_mem_range range;
+	bool is_mmio;
 	int ret;
 
 	if ((prot & MODULE_PROT_ALLOWLIST) != prot)
 		return -EINVAL;
 
+	is_mmio = !find_mem_range(addr, &range);
+	if (end > range.end) {
+		/* Specified range not in a single mmio or memory block. */
+		return -EPERM;
+	}
+
 	host_lock_component();
-	ret = kvm_pgtable_get_leaf(&host_mmu.pgt, addr, &pte, &level);
-	if (ret)
-		goto unlock;
 
 	/*
 	 * There is no hyp_vmemmap covering MMIO regions, which makes tracking
 	 * of module-owned MMIO regions hard, so we trust the modules not to
 	 * mess things up.
 	 */
-	if (!addr_is_memory(addr))
+	if (is_mmio)
 		goto update;
 
-	ret = -EPERM;
+	/* Range is memory: we can track module ownership. */
 	page = hyp_phys_to_page(addr);
 
 	/*
-	 * Modules can only relax permissions of pages they own, and restrict
-	 * permissions of pristine pages.
+	 * Modules can only modify pages they already own, and pristine host
+	 * pages. The entire range must be consistently one or the other.
 	 */
-	if (prot == KVM_PGTABLE_PROT_RWX) {
-		if (!(page->flags & MODULE_OWNED_PAGE))
+	if (page->flags & MODULE_OWNED_PAGE) {
+		/* The entire range must be module-owned. */
+		ret = -EPERM;
+		for (i = 1; i < nr_pages; i++) {
+			if (!(page[i].flags & MODULE_OWNED_PAGE))
+				goto unlock;
+		}
+	} else {
+		/* The entire range must be pristine. */
+		ret = __host_check_page_state_range(
+			addr, nr_pages << PAGE_SHIFT, PKVM_PAGE_OWNED);
+		if (ret)
 			goto unlock;
-	} else if (host_get_page_state(pte, addr) != PKVM_PAGE_OWNED) {
-		goto unlock;
 	}
 
 update:
 	if (!prot) {
-		ret = host_stage2_set_owner_locked(addr, PAGE_SIZE,
-						   PKVM_ID_PROTECTED);
+		ret = host_stage2_set_owner_locked(
+			addr, nr_pages << PAGE_SHIFT, PKVM_ID_PROTECTED);
 	} else {
-		ret = host_stage2_idmap_locked(addr, PAGE_SIZE, prot);
+		ret = host_stage2_idmap_locked(
+			addr, nr_pages << PAGE_SHIFT, prot);
 	}
 
-	if (ret || !page)
+	if (WARN_ON(ret) || !page)
 		goto unlock;
 
-	if (prot != KVM_PGTABLE_PROT_RWX)
-		hyp_phys_to_page(addr)->flags |= MODULE_OWNED_PAGE;
-	else
-		hyp_phys_to_page(addr)->flags &= ~MODULE_OWNED_PAGE;
+	for (i = 0; i < nr_pages; i++) {
+		if (prot != KVM_PGTABLE_PROT_RWX)
+			page[i].flags |= MODULE_OWNED_PAGE;
+		else
+			page[i].flags &= ~MODULE_OWNED_PAGE;
+	}
 
 unlock:
 	host_unlock_component();