squash! KVM: arm64: Prefault entries when splitting a block mapping

- Don't prefault entries intersecting with the target range

Signed-off-by: Will Deacon <will@kernel.org>
diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c
index 42c8a9d..81ecd2d 100644
--- a/arch/arm64/kvm/hyp/pgtable.c
+++ b/arch/arm64/kvm/hyp/pgtable.c
@@ -918,24 +918,34 @@ static int stage2_map_walk_table_pre(u64 addr, u64 end, u32 level,
 	return 0;
 }
 
-static void stage2_map_prefault_idmap(u64 addr, u32 level, kvm_pte_t *ptep,
-				      kvm_pte_t attr)
+static void stage2_map_prefault_idmap(u64 addr, u64 end, u32 level,
+				      kvm_pte_t *ptep, kvm_pte_t attr)
 {
-	bool cont = kvm_contiguous_pte_alignment(level);
+	u64 addr_align, pte_align = kvm_contiguous_pte_alignment(level);
+	u64 curr = ALIGN_DOWN(addr, kvm_granule_size(level - 1));
 	u64 granule = kvm_granule_size(level);
 	int i;
 
 	if (!kvm_pte_valid(attr))
 		return;
 
-	for (i = 0; i < PTRS_PER_PTE; ++i, ++ptep, addr += granule) {
-		kvm_pte_t pte = kvm_init_valid_leaf_pte(addr, attr, level);
+	addr_align = pte_align / sizeof(kvm_pte_t) * granule;
 
-		if (cont)
+	for (i = 0; i < PTRS_PER_PTE; ++i, ++ptep, curr += granule) {
+		kvm_pte_t pte = kvm_init_valid_leaf_pte(curr, attr, level);
+
+		if (curr >= addr && curr < end)
+			continue;
+
+		if (addr_align &&
+		    (curr < ALIGN_DOWN(addr, addr_align) ||
+		     curr >= round_up(end, addr_align))) {
 			pte |= KVM_PTE_LEAF_ATTR_HI_S2_CONT;
+		}
 
 		WRITE_ONCE(*ptep, pte);
 	}
+
 }
 
 static int stage2_map_walk_leaf(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
@@ -986,10 +996,8 @@ static int stage2_map_walk_leaf(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
 	kvm_set_table_pte(ptep, childp, mm_ops);
 	mm_ops->get_page(ptep);
 
-	if (pgt->flags & KVM_PGTABLE_S2_IDMAP) {
-		addr = ALIGN_DOWN(addr, kvm_granule_size(level));
-		stage2_map_prefault_idmap(addr, level + 1, childp, pte);
-	}
+	if (pgt->flags & KVM_PGTABLE_S2_IDMAP)
+		stage2_map_prefault_idmap(addr, end, level + 1, childp, pte);
 
 	return 0;
 }