Thomas Gleixner | 20c8ccb | 2019-06-04 10:11:32 +0200 | [diff] [blame] | 1 | // SPDX-License-Identifier: GPL-2.0-only |
Andrea Arcangeli | c1a4de9 | 2015-09-04 15:47:04 -0700 | [diff] [blame] | 2 | /* |
| 3 | * mm/userfaultfd.c |
| 4 | * |
| 5 | * Copyright (C) 2015 Red Hat, Inc. |
Andrea Arcangeli | c1a4de9 | 2015-09-04 15:47:04 -0700 | [diff] [blame] | 6 | */ |
| 7 | |
| 8 | #include <linux/mm.h> |
Ingo Molnar | 174cd4b | 2017-02-02 19:15:33 +0100 | [diff] [blame] | 9 | #include <linux/sched/signal.h> |
Andrea Arcangeli | c1a4de9 | 2015-09-04 15:47:04 -0700 | [diff] [blame] | 10 | #include <linux/pagemap.h> |
| 11 | #include <linux/rmap.h> |
| 12 | #include <linux/swap.h> |
| 13 | #include <linux/swapops.h> |
| 14 | #include <linux/userfaultfd_k.h> |
| 15 | #include <linux/mmu_notifier.h> |
Mike Kravetz | 60d4d2d | 2017-02-22 15:42:55 -0800 | [diff] [blame] | 16 | #include <linux/hugetlb.h> |
Mike Rapoport | 26071ce | 2017-02-22 15:43:34 -0800 | [diff] [blame] | 17 | #include <linux/shmem_fs.h> |
Andrea Arcangeli | c1a4de9 | 2015-09-04 15:47:04 -0700 | [diff] [blame] | 18 | #include <asm/tlbflush.h> |
Nadav Amit | 4a18419 | 2022-05-09 18:20:50 -0700 | [diff] [blame] | 19 | #include <asm/tlb.h> |
Andrea Arcangeli | c1a4de9 | 2015-09-04 15:47:04 -0700 | [diff] [blame] | 20 | #include "internal.h" |
| 21 | |
Wei Yang | 643aa36 | 2019-11-30 17:57:55 -0800 | [diff] [blame] | 22 | static __always_inline |
| 23 | struct vm_area_struct *find_dst_vma(struct mm_struct *dst_mm, |
| 24 | unsigned long dst_start, |
| 25 | unsigned long len) |
| 26 | { |
| 27 | /* |
| 28 | * Make sure that the dst range is both valid and fully within a |
| 29 | * single existing vma. |
| 30 | */ |
| 31 | struct vm_area_struct *dst_vma; |
| 32 | |
| 33 | dst_vma = find_vma(dst_mm, dst_start); |
ZhangPeng | 686ea6e | 2023-04-17 08:39:19 +0800 | [diff] [blame] | 34 | if (!range_in_vma(dst_vma, dst_start, dst_start + len)) |
Wei Yang | 643aa36 | 2019-11-30 17:57:55 -0800 | [diff] [blame] | 35 | return NULL; |
| 36 | |
| 37 | /* |
| 38 | * Check the vma is registered in uffd, this is required to |
| 39 | * enforce the VM_MAYWRITE check done at uffd registration |
| 40 | * time. |
| 41 | */ |
| 42 | if (!dst_vma->vm_userfaultfd_ctx.ctx) |
| 43 | return NULL; |
| 44 | |
| 45 | return dst_vma; |
| 46 | } |
| 47 | |
Axel Rasmussen | 435cdb4 | 2023-07-07 14:55:35 -0700 | [diff] [blame] | 48 | /* Check if dst_addr is outside of file's size. Must be called with ptl held. */ |
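/* Holding the ptl keeps the i_size check coherent with a racing truncate/hole punch. */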
| 49 | static bool mfill_file_over_size(struct vm_area_struct *dst_vma, |
| 50 | unsigned long dst_addr) |
| 51 | { |
| 52 | struct inode *inode; |
| 53 | pgoff_t offset, max_off; |
| 54 | |
| 55 | if (!dst_vma->vm_file) |
| 56 | return false; |
| 57 | |
| 58 | inode = dst_vma->vm_file->f_inode; |
| 59 | offset = linear_page_index(dst_vma, dst_addr); |
| 60 | max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE); |
| 61 | return offset >= max_off; |
| 62 | } |
| 63 | |
Axel Rasmussen | 1531325 | 2021-06-30 18:49:24 -0700 | [diff] [blame] | 64 | /* |
| 65 | * Install PTEs, to map dst_addr (within dst_vma) to page. |
| 66 | * |
Axel Rasmussen | 7d64ae3 | 2021-06-30 18:49:31 -0700 | [diff] [blame] | 67 | * This function handles both MFILL_ATOMIC_COPY and MFILL_ATOMIC_CONTINUE for both shmem |
| 68 | * and anon, and for both shared and private VMAs. |
Axel Rasmussen | 1531325 | 2021-06-30 18:49:24 -0700 | [diff] [blame] | 69 | */ |
Axel Rasmussen | 61c5004 | 2023-03-14 15:12:48 -0700 | [diff] [blame] | 70 | int mfill_atomic_install_pte(pmd_t *dst_pmd, |
Axel Rasmussen | 7d64ae3 | 2021-06-30 18:49:31 -0700 | [diff] [blame] | 71 | struct vm_area_struct *dst_vma, |
| 72 | unsigned long dst_addr, struct page *page, |
Axel Rasmussen | d971293 | 2023-03-14 15:12:49 -0700 | [diff] [blame] | 73 | bool newly_allocated, uffd_flags_t flags) |
Axel Rasmussen | 1531325 | 2021-06-30 18:49:24 -0700 | [diff] [blame] | 74 | { |
| 75 | int ret; |
Axel Rasmussen | 61c5004 | 2023-03-14 15:12:48 -0700 | [diff] [blame] | 76 | struct mm_struct *dst_mm = dst_vma->vm_mm; |
Axel Rasmussen | 1531325 | 2021-06-30 18:49:24 -0700 | [diff] [blame] | 77 | pte_t _dst_pte, *dst_pte; |
| 78 | bool writable = dst_vma->vm_flags & VM_WRITE; |
| 79 | bool vm_shared = dst_vma->vm_flags & VM_SHARED; |
Peter Xu | 93b0d91 | 2022-11-02 14:41:52 -0400 | [diff] [blame] | 80 | bool page_in_cache = page_mapping(page); |
Axel Rasmussen | 1531325 | 2021-06-30 18:49:24 -0700 | [diff] [blame] | 81 | spinlock_t *ptl; |
Vishal Moola (Oracle) | 28965f0 | 2022-11-01 10:53:24 -0700 | [diff] [blame] | 82 | struct folio *folio; |
Axel Rasmussen | 1531325 | 2021-06-30 18:49:24 -0700 | [diff] [blame] | 83 | |
| 84 | _dst_pte = mk_pte(page, dst_vma->vm_page_prot); |
Peter Xu | 9ae0f87 | 2021-11-05 13:38:24 -0700 | [diff] [blame] | 85 | _dst_pte = pte_mkdirty(_dst_pte); |
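/*
 * A page-cache page mapped into a private VMA must start out read-only so
 * that a later write faults and triggers copy-on-write.
 */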
Axel Rasmussen | 1531325 | 2021-06-30 18:49:24 -0700 | [diff] [blame] | 86 | if (page_in_cache && !vm_shared) |
| 87 | writable = false; |
Peter Xu | 8ee79ed | 2022-05-12 20:22:52 -0700 | [diff] [blame] | 88 | if (writable) |
Rick Edgecombe | 161e393 | 2023-06-12 17:10:29 -0700 | [diff] [blame] | 89 | _dst_pte = pte_mkwrite(_dst_pte, dst_vma); |
Axel Rasmussen | d971293 | 2023-03-14 15:12:49 -0700 | [diff] [blame] | 90 | if (flags & MFILL_ATOMIC_WP) |
Peter Xu | f1eb1ba | 2022-12-14 15:15:33 -0500 | [diff] [blame] | 91 | _dst_pte = pte_mkuffd_wp(_dst_pte); |
Axel Rasmussen | 1531325 | 2021-06-30 18:49:24 -0700 | [diff] [blame] | 92 | |
Hugh Dickins | 3622d3c | 2023-06-08 18:26:04 -0700 | [diff] [blame] | 93 | ret = -EAGAIN; |
Axel Rasmussen | 1531325 | 2021-06-30 18:49:24 -0700 | [diff] [blame] | 94 | dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl); |
Hugh Dickins | 3622d3c | 2023-06-08 18:26:04 -0700 | [diff] [blame] | 95 | if (!dst_pte) |
| 96 | goto out; |
Axel Rasmussen | 1531325 | 2021-06-30 18:49:24 -0700 | [diff] [blame] | 97 | |
Axel Rasmussen | 435cdb4 | 2023-07-07 14:55:35 -0700 | [diff] [blame] | 98 | if (mfill_file_over_size(dst_vma, dst_addr)) { |
Axel Rasmussen | 1531325 | 2021-06-30 18:49:24 -0700 | [diff] [blame] | 99 | ret = -EFAULT; |
Axel Rasmussen | 435cdb4 | 2023-07-07 14:55:35 -0700 | [diff] [blame] | 100 | goto out_unlock; |
Axel Rasmussen | 1531325 | 2021-06-30 18:49:24 -0700 | [diff] [blame] | 101 | } |
| 102 | |
| 103 | ret = -EEXIST; |
Peter Xu | 8ee79ed | 2022-05-12 20:22:52 -0700 | [diff] [blame] | 104 | /* |
| 105 | * We allow overwriting a pte marker: consider the case where both |
| 106 | * MISSING|WP are registered, we first wr-protect a none pte (which has |
| 107 | * no page cache page backing it) and then access the page. |
| 108 | */ |
Ryan Roberts | c33c794 | 2023-06-12 16:15:45 +0100 | [diff] [blame] | 109 | if (!pte_none_mostly(ptep_get(dst_pte))) |
Axel Rasmussen | 1531325 | 2021-06-30 18:49:24 -0700 | [diff] [blame] | 110 | goto out_unlock; |
| 111 | |
Vishal Moola (Oracle) | 28965f0 | 2022-11-01 10:53:24 -0700 | [diff] [blame] | 112 | folio = page_folio(page); |
Hugh Dickins | cea86fe | 2022-02-14 18:26:39 -0800 | [diff] [blame] | 113 | if (page_in_cache) { |
| 114 | /* Usually, cache pages are already added to LRU */ |
| 115 | if (newly_allocated) |
Vishal Moola (Oracle) | 28965f0 | 2022-11-01 10:53:24 -0700 | [diff] [blame] | 116 | folio_add_lru(folio); |
David Hildenbrand | 7123e19 | 2023-12-20 23:44:35 +0100 | [diff] [blame] | 117 | folio_add_file_rmap_pte(folio, page, dst_vma); |
Hugh Dickins | cea86fe | 2022-02-14 18:26:39 -0800 | [diff] [blame] | 118 | } else { |
Matthew Wilcox (Oracle) | 2853b66 | 2023-12-11 16:22:09 +0000 | [diff] [blame] | 119 | folio_add_new_anon_rmap(folio, dst_vma, dst_addr); |
Vishal Moola (Oracle) | 28965f0 | 2022-11-01 10:53:24 -0700 | [diff] [blame] | 120 | folio_add_lru_vma(folio, dst_vma); |
Hugh Dickins | cea86fe | 2022-02-14 18:26:39 -0800 | [diff] [blame] | 121 | } |
Axel Rasmussen | 1531325 | 2021-06-30 18:49:24 -0700 | [diff] [blame] | 122 | |
| 123 | /* |
| 124 | * Must happen after rmap, as mm_counter() checks mapping (via |
| 125 | * PageAnon()), which is set by __folio_set_anon(). |
| 126 | */ |
| 127 | inc_mm_counter(dst_mm, mm_counter(page)); |
| 128 | |
Axel Rasmussen | 1531325 | 2021-06-30 18:49:24 -0700 | [diff] [blame] | 129 | set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte); |
| 130 | |
| 131 | /* No need to invalidate - it was non-present before */ |
| 132 | update_mmu_cache(dst_vma, dst_addr, dst_pte); |
| 133 | ret = 0; |
| 134 | out_unlock: |
| 135 | pte_unmap_unlock(dst_pte, ptl); |
Hugh Dickins | 3622d3c | 2023-06-08 18:26:04 -0700 | [diff] [blame] | 136 | out: |
Axel Rasmussen | 1531325 | 2021-06-30 18:49:24 -0700 | [diff] [blame] | 137 | return ret; |
| 138 | } |
| 139 | |
Axel Rasmussen | 61c5004 | 2023-03-14 15:12:48 -0700 | [diff] [blame] | 140 | static int mfill_atomic_pte_copy(pmd_t *dst_pmd, |
Axel Rasmussen | a734991 | 2023-03-14 15:12:47 -0700 | [diff] [blame] | 141 | struct vm_area_struct *dst_vma, |
| 142 | unsigned long dst_addr, |
| 143 | unsigned long src_addr, |
Axel Rasmussen | d971293 | 2023-03-14 15:12:49 -0700 | [diff] [blame] | 144 | uffd_flags_t flags, |
ZhangPeng | d7be6d7e | 2023-04-10 21:39:32 +0800 | [diff] [blame] | 145 | struct folio **foliop) |
Andrea Arcangeli | c1a4de9 | 2015-09-04 15:47:04 -0700 | [diff] [blame] | 146 | { |
ZhangPeng | 07e6d40 | 2023-04-10 21:39:27 +0800 | [diff] [blame] | 147 | void *kaddr; |
Andrea Arcangeli | c1a4de9 | 2015-09-04 15:47:04 -0700 | [diff] [blame] | 148 | int ret; |
ZhangPeng | 07e6d40 | 2023-04-10 21:39:27 +0800 | [diff] [blame] | 149 | struct folio *folio; |
Andrea Arcangeli | c1a4de9 | 2015-09-04 15:47:04 -0700 | [diff] [blame] | 150 | |
ZhangPeng | d7be6d7e | 2023-04-10 21:39:32 +0800 | [diff] [blame] | 151 | if (!*foliop) { |
Andrea Arcangeli | b6ebaed | 2015-09-04 15:47:08 -0700 | [diff] [blame] | 152 | ret = -ENOMEM; |
ZhangPeng | 07e6d40 | 2023-04-10 21:39:27 +0800 | [diff] [blame] | 153 | folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, dst_vma, |
| 154 | dst_addr, false); |
| 155 | if (!folio) |
Andrea Arcangeli | b6ebaed | 2015-09-04 15:47:08 -0700 | [diff] [blame] | 156 | goto out; |
Andrea Arcangeli | c1a4de9 | 2015-09-04 15:47:04 -0700 | [diff] [blame] | 157 | |
ZhangPeng | 07e6d40 | 2023-04-10 21:39:27 +0800 | [diff] [blame] | 158 | kaddr = kmap_local_folio(folio, 0); |
Ira Weiny | 5521de7 | 2022-10-23 21:34:52 -0700 | [diff] [blame] | 159 | /* |
| 160 | * The read mmap_lock is held here. Despite the |
| 161 | * mmap_lock being read-recursive, a deadlock is still |
| 162 | * possible if a writer has taken a lock. For example: |
| 163 | * |
| 164 | * process A thread 1 takes read lock on own mmap_lock |
| 165 | * process A thread 2 calls mmap, blocks taking write lock |
| 166 | * process B thread 1 takes page fault, read lock on own mmap lock |
| 167 | * process B thread 2 calls mmap, blocks taking write lock |
| 168 | * process A thread 1 blocks taking read lock on process B |
| 169 | * process B thread 1 blocks taking read lock on process A |
| 170 | * |
| 171 | * Disable page faults to prevent potential deadlock |
| 172 | * and retry the copy outside the mmap_lock. |
| 173 | */ |
| 174 | pagefault_disable(); |
ZhangPeng | 07e6d40 | 2023-04-10 21:39:27 +0800 | [diff] [blame] | 175 | ret = copy_from_user(kaddr, (const void __user *) src_addr, |
Andrea Arcangeli | b6ebaed | 2015-09-04 15:47:08 -0700 | [diff] [blame] | 176 | PAGE_SIZE); |
Ira Weiny | 5521de7 | 2022-10-23 21:34:52 -0700 | [diff] [blame] | 177 | pagefault_enable(); |
ZhangPeng | 07e6d40 | 2023-04-10 21:39:27 +0800 | [diff] [blame] | 178 | kunmap_local(kaddr); |
Andrea Arcangeli | b6ebaed | 2015-09-04 15:47:08 -0700 | [diff] [blame] | 179 | |
Michel Lespinasse | c1e8d7c | 2020-06-08 21:33:54 -0700 | [diff] [blame] | 180 | /* fallback to copy_from_user outside mmap_lock */ |
Andrea Arcangeli | b6ebaed | 2015-09-04 15:47:08 -0700 | [diff] [blame] | 181 | if (unlikely(ret)) { |
Andrea Arcangeli | 9e36825 | 2018-11-30 14:09:25 -0800 | [diff] [blame] | 182 | ret = -ENOENT; |
ZhangPeng | d7be6d7e | 2023-04-10 21:39:32 +0800 | [diff] [blame] | 183 | *foliop = folio; |
Andrea Arcangeli | b6ebaed | 2015-09-04 15:47:08 -0700 | [diff] [blame] | 184 | /* don't free the page */ |
| 185 | goto out; |
| 186 | } |
Muchun Song | 7c25a0b | 2022-03-22 14:42:08 -0700 | [diff] [blame] | 187 | |
ZhangPeng | 07e6d40 | 2023-04-10 21:39:27 +0800 | [diff] [blame] | 188 | flush_dcache_folio(folio); |
Andrea Arcangeli | b6ebaed | 2015-09-04 15:47:08 -0700 | [diff] [blame] | 189 | } else { |
ZhangPeng | d7be6d7e | 2023-04-10 21:39:32 +0800 | [diff] [blame] | 190 | folio = *foliop; |
| 191 | *foliop = NULL; |
Andrea Arcangeli | b6ebaed | 2015-09-04 15:47:08 -0700 | [diff] [blame] | 192 | } |
Andrea Arcangeli | c1a4de9 | 2015-09-04 15:47:04 -0700 | [diff] [blame] | 193 | |
| 194 | /* |
ZhangPeng | 07e6d40 | 2023-04-10 21:39:27 +0800 | [diff] [blame] | 195 | * The memory barrier inside __folio_mark_uptodate makes sure that |
Wei Yang | f4f5329 | 2019-11-30 17:58:17 -0800 | [diff] [blame] | 196 | * preceding stores to the page contents become visible before |
Andrea Arcangeli | c1a4de9 | 2015-09-04 15:47:04 -0700 | [diff] [blame] | 197 | * the set_pte_at() write. |
| 198 | */ |
ZhangPeng | 07e6d40 | 2023-04-10 21:39:27 +0800 | [diff] [blame] | 199 | __folio_mark_uptodate(folio); |
Andrea Arcangeli | c1a4de9 | 2015-09-04 15:47:04 -0700 | [diff] [blame] | 200 | |
| 201 | ret = -ENOMEM; |
ZhangPeng | 07e6d40 | 2023-04-10 21:39:27 +0800 | [diff] [blame] | 202 | if (mem_cgroup_charge(folio, dst_vma->vm_mm, GFP_KERNEL)) |
Andrea Arcangeli | c1a4de9 | 2015-09-04 15:47:04 -0700 | [diff] [blame] | 203 | goto out_release; |
| 204 | |
Axel Rasmussen | 61c5004 | 2023-03-14 15:12:48 -0700 | [diff] [blame] | 205 | ret = mfill_atomic_install_pte(dst_pmd, dst_vma, dst_addr, |
ZhangPeng | 07e6d40 | 2023-04-10 21:39:27 +0800 | [diff] [blame] | 206 | &folio->page, true, flags); |
Axel Rasmussen | 1531325 | 2021-06-30 18:49:24 -0700 | [diff] [blame] | 207 | if (ret) |
| 208 | goto out_release; |
Andrea Arcangeli | c1a4de9 | 2015-09-04 15:47:04 -0700 | [diff] [blame] | 209 | out: |
| 210 | return ret; |
Andrea Arcangeli | c1a4de9 | 2015-09-04 15:47:04 -0700 | [diff] [blame] | 211 | out_release: |
ZhangPeng | 07e6d40 | 2023-04-10 21:39:27 +0800 | [diff] [blame] | 212 | folio_put(folio); |
Andrea Arcangeli | c1a4de9 | 2015-09-04 15:47:04 -0700 | [diff] [blame] | 213 | goto out; |
Andrea Arcangeli | c1a4de9 | 2015-09-04 15:47:04 -0700 | [diff] [blame] | 214 | } |
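/*
 * Copy protocol, as implemented above: the first attempt runs with page
 * faults disabled while mmap_lock is held for read; if copy_from_user()
 * cannot finish, -ENOENT hands the preallocated folio back via *foliop so
 * the caller can drop mmap_lock, complete the copy, and retry with the
 * already-filled folio.
 */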
| 215 | |
Axel Rasmussen | 61c5004 | 2023-03-14 15:12:48 -0700 | [diff] [blame] | 216 | static int mfill_atomic_pte_zeropage(pmd_t *dst_pmd, |
Axel Rasmussen | a734991 | 2023-03-14 15:12:47 -0700 | [diff] [blame] | 217 | struct vm_area_struct *dst_vma, |
| 218 | unsigned long dst_addr) |
Andrea Arcangeli | c1a4de9 | 2015-09-04 15:47:04 -0700 | [diff] [blame] | 219 | { |
| 220 | pte_t _dst_pte, *dst_pte; |
| 221 | spinlock_t *ptl; |
| 222 | int ret; |
| 223 | |
| 224 | _dst_pte = pte_mkspecial(pfn_pte(my_zero_pfn(dst_addr), |
| 225 | dst_vma->vm_page_prot)); |
Hugh Dickins | 3622d3c | 2023-06-08 18:26:04 -0700 | [diff] [blame] | 226 | ret = -EAGAIN; |
Axel Rasmussen | 61c5004 | 2023-03-14 15:12:48 -0700 | [diff] [blame] | 227 | dst_pte = pte_offset_map_lock(dst_vma->vm_mm, dst_pmd, dst_addr, &ptl); |
Hugh Dickins | 3622d3c | 2023-06-08 18:26:04 -0700 | [diff] [blame] | 228 | if (!dst_pte) |
| 229 | goto out; |
Axel Rasmussen | 435cdb4 | 2023-07-07 14:55:35 -0700 | [diff] [blame] | 230 | if (mfill_file_over_size(dst_vma, dst_addr)) { |
Andrea Arcangeli | e2a50c1 | 2018-11-30 14:09:37 -0800 | [diff] [blame] | 231 | ret = -EFAULT; |
Axel Rasmussen | 435cdb4 | 2023-07-07 14:55:35 -0700 | [diff] [blame] | 232 | goto out_unlock; |
Andrea Arcangeli | e2a50c1 | 2018-11-30 14:09:37 -0800 | [diff] [blame] | 233 | } |
| 234 | ret = -EEXIST; |
Ryan Roberts | c33c794 | 2023-06-12 16:15:45 +0100 | [diff] [blame] | 235 | if (!pte_none(ptep_get(dst_pte))) |
Andrea Arcangeli | c1a4de9 | 2015-09-04 15:47:04 -0700 | [diff] [blame] | 236 | goto out_unlock; |
Axel Rasmussen | 61c5004 | 2023-03-14 15:12:48 -0700 | [diff] [blame] | 237 | set_pte_at(dst_vma->vm_mm, dst_addr, dst_pte, _dst_pte); |
Andrea Arcangeli | c1a4de9 | 2015-09-04 15:47:04 -0700 | [diff] [blame] | 238 | /* No need to invalidate - it was non-present before */ |
| 239 | update_mmu_cache(dst_vma, dst_addr, dst_pte); |
| 240 | ret = 0; |
| 241 | out_unlock: |
| 242 | pte_unmap_unlock(dst_pte, ptl); |
Hugh Dickins | 3622d3c | 2023-06-08 18:26:04 -0700 | [diff] [blame] | 243 | out: |
Andrea Arcangeli | c1a4de9 | 2015-09-04 15:47:04 -0700 | [diff] [blame] | 244 | return ret; |
| 245 | } |
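/*
 * Zeropage fills map the shared zero page through a special PTE, so there
 * is no folio to charge or add to the LRU and the PTE is never writable.
 */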
| 246 | |
Axel Rasmussen | 1531325 | 2021-06-30 18:49:24 -0700 | [diff] [blame] | 247 | /* Handles UFFDIO_CONTINUE for all shmem VMAs (shared or private). */ |
Axel Rasmussen | 61c5004 | 2023-03-14 15:12:48 -0700 | [diff] [blame] | 248 | static int mfill_atomic_pte_continue(pmd_t *dst_pmd, |
Axel Rasmussen | a734991 | 2023-03-14 15:12:47 -0700 | [diff] [blame] | 249 | struct vm_area_struct *dst_vma, |
| 250 | unsigned long dst_addr, |
Axel Rasmussen | d971293 | 2023-03-14 15:12:49 -0700 | [diff] [blame] | 251 | uffd_flags_t flags) |
Axel Rasmussen | 1531325 | 2021-06-30 18:49:24 -0700 | [diff] [blame] | 252 | { |
| 253 | struct inode *inode = file_inode(dst_vma->vm_file); |
| 254 | pgoff_t pgoff = linear_page_index(dst_vma, dst_addr); |
Matthew Wilcox (Oracle) | 12acf4f | 2022-09-02 20:46:28 +0100 | [diff] [blame] | 255 | struct folio *folio; |
Axel Rasmussen | 1531325 | 2021-06-30 18:49:24 -0700 | [diff] [blame] | 256 | struct page *page; |
| 257 | int ret; |
| 258 | |
Matthew Wilcox (Oracle) | 12acf4f | 2022-09-02 20:46:28 +0100 | [diff] [blame] | 259 | ret = shmem_get_folio(inode, pgoff, &folio, SGP_NOALLOC); |
| 260 | /* Our caller expects us to return -EFAULT if we failed to find folio */ |
Axel Rasmussen | 73f37db | 2022-06-10 10:38:12 -0700 | [diff] [blame] | 261 | if (ret == -ENOENT) |
| 262 | ret = -EFAULT; |
Axel Rasmussen | 1531325 | 2021-06-30 18:49:24 -0700 | [diff] [blame] | 263 | if (ret) |
| 264 | goto out; |
Matthew Wilcox (Oracle) | 12acf4f | 2022-09-02 20:46:28 +0100 | [diff] [blame] | 265 | if (!folio) { |
Axel Rasmussen | 1531325 | 2021-06-30 18:49:24 -0700 | [diff] [blame] | 266 | ret = -EFAULT; |
| 267 | goto out; |
| 268 | } |
| 269 | |
Matthew Wilcox (Oracle) | 12acf4f | 2022-09-02 20:46:28 +0100 | [diff] [blame] | 270 | page = folio_file_page(folio, pgoff); |
Yang Shi | a760542 | 2022-01-14 14:05:19 -0800 | [diff] [blame] | 271 | if (PageHWPoison(page)) { |
| 272 | ret = -EIO; |
| 273 | goto out_release; |
| 274 | } |
| 275 | |
Axel Rasmussen | 61c5004 | 2023-03-14 15:12:48 -0700 | [diff] [blame] | 276 | ret = mfill_atomic_install_pte(dst_pmd, dst_vma, dst_addr, |
Axel Rasmussen | d971293 | 2023-03-14 15:12:49 -0700 | [diff] [blame] | 277 | page, false, flags); |
Axel Rasmussen | 1531325 | 2021-06-30 18:49:24 -0700 | [diff] [blame] | 278 | if (ret) |
| 279 | goto out_release; |
| 280 | |
Matthew Wilcox (Oracle) | 12acf4f | 2022-09-02 20:46:28 +0100 | [diff] [blame] | 281 | folio_unlock(folio); |
Axel Rasmussen | 1531325 | 2021-06-30 18:49:24 -0700 | [diff] [blame] | 282 | ret = 0; |
| 283 | out: |
| 284 | return ret; |
| 285 | out_release: |
Matthew Wilcox (Oracle) | 12acf4f | 2022-09-02 20:46:28 +0100 | [diff] [blame] | 286 | folio_unlock(folio); |
| 287 | folio_put(folio); |
Axel Rasmussen | 1531325 | 2021-06-30 18:49:24 -0700 | [diff] [blame] | 288 | goto out; |
| 289 | } |
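/*
 * UFFDIO_CONTINUE resolves a minor fault: the folio must already be present
 * in the shmem page cache (SGP_NOALLOC), so only the PTE is installed here.
 */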
| 290 | |
Axel Rasmussen | fc71884 | 2023-07-07 14:55:36 -0700 | [diff] [blame] | 291 | /* Handles UFFDIO_POISON for all non-hugetlb VMAs. */ |
| 292 | static int mfill_atomic_pte_poison(pmd_t *dst_pmd, |
| 293 | struct vm_area_struct *dst_vma, |
| 294 | unsigned long dst_addr, |
| 295 | uffd_flags_t flags) |
| 296 | { |
| 297 | int ret; |
| 298 | struct mm_struct *dst_mm = dst_vma->vm_mm; |
| 299 | pte_t _dst_pte, *dst_pte; |
| 300 | spinlock_t *ptl; |
| 301 | |
| 302 | _dst_pte = make_pte_marker(PTE_MARKER_POISONED); |
Hugh Dickins | 597425d | 2023-07-11 18:27:17 -0700 | [diff] [blame] | 303 | ret = -EAGAIN; |
Axel Rasmussen | fc71884 | 2023-07-07 14:55:36 -0700 | [diff] [blame] | 304 | dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl); |
Hugh Dickins | 597425d | 2023-07-11 18:27:17 -0700 | [diff] [blame] | 305 | if (!dst_pte) |
| 306 | goto out; |
Axel Rasmussen | fc71884 | 2023-07-07 14:55:36 -0700 | [diff] [blame] | 307 | |
| 308 | if (mfill_file_over_size(dst_vma, dst_addr)) { |
| 309 | ret = -EFAULT; |
| 310 | goto out_unlock; |
| 311 | } |
| 312 | |
| 313 | ret = -EEXIST; |
| 314 | /* Refuse to overwrite any PTE, even a PTE marker (e.g. UFFD WP). */ |
Ryan Roberts | afccb08 | 2023-11-14 15:49:45 +0000 | [diff] [blame] | 315 | if (!pte_none(ptep_get(dst_pte))) |
Axel Rasmussen | fc71884 | 2023-07-07 14:55:36 -0700 | [diff] [blame] | 316 | goto out_unlock; |
| 317 | |
| 318 | set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte); |
| 319 | |
| 320 | /* No need to invalidate - it was non-present before */ |
| 321 | update_mmu_cache(dst_vma, dst_addr, dst_pte); |
| 322 | ret = 0; |
| 323 | out_unlock: |
| 324 | pte_unmap_unlock(dst_pte, ptl); |
Hugh Dickins | 597425d | 2023-07-11 18:27:17 -0700 | [diff] [blame] | 325 | out: |
Axel Rasmussen | fc71884 | 2023-07-07 14:55:36 -0700 | [diff] [blame] | 326 | return ret; |
| 327 | } |
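/*
 * The PTE_MARKER_POISONED entry is expected to make later faults on this
 * address fail as if the page were hardware-poisoned (SIGBUS to the task).
 */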
| 328 | |
Andrea Arcangeli | c1a4de9 | 2015-09-04 15:47:04 -0700 | [diff] [blame] | 329 | static pmd_t *mm_alloc_pmd(struct mm_struct *mm, unsigned long address) |
| 330 | { |
| 331 | pgd_t *pgd; |
Kirill A. Shutemov | c2febaf | 2017-03-09 17:24:07 +0300 | [diff] [blame] | 332 | p4d_t *p4d; |
Andrea Arcangeli | c1a4de9 | 2015-09-04 15:47:04 -0700 | [diff] [blame] | 333 | pud_t *pud; |
Andrea Arcangeli | c1a4de9 | 2015-09-04 15:47:04 -0700 | [diff] [blame] | 334 | |
| 335 | pgd = pgd_offset(mm, address); |
Kirill A. Shutemov | c2febaf | 2017-03-09 17:24:07 +0300 | [diff] [blame] | 336 | p4d = p4d_alloc(mm, pgd, address); |
| 337 | if (!p4d) |
| 338 | return NULL; |
| 339 | pud = pud_alloc(mm, p4d, address); |
| 340 | if (!pud) |
| 341 | return NULL; |
| 342 | /* |
| 343 | * Note that this is not necessarily run because the pmd was |
| 344 | * missing: the *pmd may already be established and in |
| 345 | * turn it may even be a trans_huge_pmd. |
| 346 | */ |
| 347 | return pmd_alloc(mm, pud, address); |
Andrea Arcangeli | c1a4de9 | 2015-09-04 15:47:04 -0700 | [diff] [blame] | 348 | } |
| 349 | |
Mike Kravetz | 60d4d2d | 2017-02-22 15:42:55 -0800 | [diff] [blame] | 350 | #ifdef CONFIG_HUGETLB_PAGE |
| 351 | /* |
Axel Rasmussen | a734991 | 2023-03-14 15:12:47 -0700 | [diff] [blame] | 352 | * mfill_atomic processing for HUGETLB vmas. Note that this routine is |
Michel Lespinasse | c1e8d7c | 2020-06-08 21:33:54 -0700 | [diff] [blame] | 353 | * called with mmap_lock held, it will release mmap_lock before returning. |
Mike Kravetz | 60d4d2d | 2017-02-22 15:42:55 -0800 | [diff] [blame] | 354 | */ |
Axel Rasmussen | 61c5004 | 2023-03-14 15:12:48 -0700 | [diff] [blame] | 355 | static __always_inline ssize_t mfill_atomic_hugetlb( |
Mike Kravetz | 60d4d2d | 2017-02-22 15:42:55 -0800 | [diff] [blame] | 356 | struct vm_area_struct *dst_vma, |
| 357 | unsigned long dst_start, |
| 358 | unsigned long src_start, |
| 359 | unsigned long len, |
Lokesh Gidra | 67695f1 | 2024-01-17 14:37:29 -0800 | [diff] [blame] | 360 | atomic_t *mmap_changing, |
Axel Rasmussen | d971293 | 2023-03-14 15:12:49 -0700 | [diff] [blame] | 361 | uffd_flags_t flags) |
Mike Kravetz | 60d4d2d | 2017-02-22 15:42:55 -0800 | [diff] [blame] | 362 | { |
Axel Rasmussen | 61c5004 | 2023-03-14 15:12:48 -0700 | [diff] [blame] | 363 | struct mm_struct *dst_mm = dst_vma->vm_mm; |
Mike Kravetz | 1c9e8de | 2017-02-22 15:43:43 -0800 | [diff] [blame] | 364 | int vm_shared = dst_vma->vm_flags & VM_SHARED; |
Mike Kravetz | 60d4d2d | 2017-02-22 15:42:55 -0800 | [diff] [blame] | 365 | ssize_t err; |
| 366 | pte_t *dst_pte; |
| 367 | unsigned long src_addr, dst_addr; |
| 368 | long copied; |
ZhangPeng | 0169fd5 | 2023-04-10 21:39:30 +0800 | [diff] [blame] | 369 | struct folio *folio; |
Mike Kravetz | 60d4d2d | 2017-02-22 15:42:55 -0800 | [diff] [blame] | 370 | unsigned long vma_hpagesize; |
| 371 | pgoff_t idx; |
| 372 | u32 hash; |
| 373 | struct address_space *mapping; |
| 374 | |
| 375 | /* |
| 376 | * There is no default zero huge page for all huge page sizes as |
| 377 | * supported by hugetlb. A PMD_SIZE huge page may exist as used |
| 378 | * by THP. Since we can not reliably insert a zero page, this |
| 379 | * feature is not supported. |
| 380 | */ |
Axel Rasmussen | 8a13897 | 2023-07-07 14:55:37 -0700 | [diff] [blame] | 381 | if (uffd_flags_mode_is(flags, MFILL_ATOMIC_ZEROPAGE)) { |
Michel Lespinasse | d8ed45c | 2020-06-08 21:33:25 -0700 | [diff] [blame] | 382 | mmap_read_unlock(dst_mm); |
Mike Kravetz | 60d4d2d | 2017-02-22 15:42:55 -0800 | [diff] [blame] | 383 | return -EINVAL; |
| 384 | } |
| 385 | |
| 386 | src_addr = src_start; |
| 387 | dst_addr = dst_start; |
| 388 | copied = 0; |
ZhangPeng | 0169fd5 | 2023-04-10 21:39:30 +0800 | [diff] [blame] | 389 | folio = NULL; |
Mike Kravetz | 60d4d2d | 2017-02-22 15:42:55 -0800 | [diff] [blame] | 390 | vma_hpagesize = vma_kernel_pagesize(dst_vma); |
| 391 | |
| 392 | /* |
| 393 | * Validate alignment based on huge page size |
| 394 | */ |
| 395 | err = -EINVAL; |
| 396 | if (dst_start & (vma_hpagesize - 1) || len & (vma_hpagesize - 1)) |
| 397 | goto out_unlock; |
| 398 | |
| 399 | retry: |
| 400 | /* |
Michel Lespinasse | c1e8d7c | 2020-06-08 21:33:54 -0700 | [diff] [blame] | 401 | * On routine entry dst_vma is set. If we had to drop mmap_lock and |
Mike Kravetz | 60d4d2d | 2017-02-22 15:42:55 -0800 | [diff] [blame] | 402 | * retry, dst_vma will be set to NULL and we must lookup again. |
| 403 | */ |
| 404 | if (!dst_vma) { |
Mike Rapoport | 27d0256 | 2017-02-24 14:58:28 -0800 | [diff] [blame] | 405 | err = -ENOENT; |
Wei Yang | 643aa36 | 2019-11-30 17:57:55 -0800 | [diff] [blame] | 406 | dst_vma = find_dst_vma(dst_mm, dst_start, len); |
Mike Kravetz | 60d4d2d | 2017-02-22 15:42:55 -0800 | [diff] [blame] | 407 | if (!dst_vma || !is_vm_hugetlb_page(dst_vma)) |
| 408 | goto out_unlock; |
Mike Kravetz | 1c9e8de | 2017-02-22 15:43:43 -0800 | [diff] [blame] | 409 | |
Mike Rapoport | 27d0256 | 2017-02-24 14:58:28 -0800 | [diff] [blame] | 410 | err = -EINVAL; |
| 411 | if (vma_hpagesize != vma_kernel_pagesize(dst_vma)) |
| 412 | goto out_unlock; |
| 413 | |
Mike Kravetz | 1c9e8de | 2017-02-22 15:43:43 -0800 | [diff] [blame] | 414 | vm_shared = dst_vma->vm_flags & VM_SHARED; |
Mike Kravetz | 60d4d2d | 2017-02-22 15:42:55 -0800 | [diff] [blame] | 415 | } |
| 416 | |
Mike Kravetz | 60d4d2d | 2017-02-22 15:42:55 -0800 | [diff] [blame] | 417 | /* |
Mike Kravetz | 1c9e8de | 2017-02-22 15:43:43 -0800 | [diff] [blame] | 418 | * If not shared, ensure the dst_vma has an anon_vma. |
Mike Kravetz | 60d4d2d | 2017-02-22 15:42:55 -0800 | [diff] [blame] | 419 | */ |
| 420 | err = -ENOMEM; |
Mike Kravetz | 1c9e8de | 2017-02-22 15:43:43 -0800 | [diff] [blame] | 421 | if (!vm_shared) { |
| 422 | if (unlikely(anon_vma_prepare(dst_vma))) |
| 423 | goto out_unlock; |
| 424 | } |
Mike Kravetz | 60d4d2d | 2017-02-22 15:42:55 -0800 | [diff] [blame] | 425 | |
Mike Kravetz | 60d4d2d | 2017-02-22 15:42:55 -0800 | [diff] [blame] | 426 | while (src_addr < src_start + len) { |
Mike Kravetz | 60d4d2d | 2017-02-22 15:42:55 -0800 | [diff] [blame] | 427 | BUG_ON(dst_addr >= dst_start + len); |
Mike Kravetz | 60d4d2d | 2017-02-22 15:42:55 -0800 | [diff] [blame] | 428 | |
| 429 | /* |
Mike Kravetz | 40549ba | 2022-09-14 15:18:09 -0700 | [diff] [blame] | 430 | * Serialize via vma_lock and hugetlb_fault_mutex. |
| 431 | * vma_lock ensures the dst_pte remains valid even |
| 432 | * in the case of shared pmds. fault mutex prevents |
| 433 | * races with other faulting threads. |
Mike Kravetz | 60d4d2d | 2017-02-22 15:42:55 -0800 | [diff] [blame] | 434 | */ |
Mike Kravetz | c0d0381 | 2020-04-01 21:11:05 -0700 | [diff] [blame] | 435 | idx = linear_page_index(dst_vma, dst_addr); |
Mike Kravetz | 3a47c54 | 2022-09-14 15:18:03 -0700 | [diff] [blame] | 436 | mapping = dst_vma->vm_file->f_mapping; |
Wei Yang | 188b04a | 2019-11-30 17:57:02 -0800 | [diff] [blame] | 437 | hash = hugetlb_fault_mutex_hash(mapping, idx); |
Mike Kravetz | 60d4d2d | 2017-02-22 15:42:55 -0800 | [diff] [blame] | 438 | mutex_lock(&hugetlb_fault_mutex_table[hash]); |
Mike Kravetz | 40549ba | 2022-09-14 15:18:09 -0700 | [diff] [blame] | 439 | hugetlb_vma_lock_read(dst_vma); |
Mike Kravetz | 60d4d2d | 2017-02-22 15:42:55 -0800 | [diff] [blame] | 440 | |
| 441 | err = -ENOMEM; |
Peter Xu | aec44e0 | 2021-05-04 18:33:00 -0700 | [diff] [blame] | 442 | dst_pte = huge_pte_alloc(dst_mm, dst_vma, dst_addr, vma_hpagesize); |
Mike Kravetz | 60d4d2d | 2017-02-22 15:42:55 -0800 | [diff] [blame] | 443 | if (!dst_pte) { |
Mike Kravetz | 40549ba | 2022-09-14 15:18:09 -0700 | [diff] [blame] | 444 | hugetlb_vma_unlock_read(dst_vma); |
Mike Kravetz | 60d4d2d | 2017-02-22 15:42:55 -0800 | [diff] [blame] | 445 | mutex_unlock(&hugetlb_fault_mutex_table[hash]); |
| 446 | goto out_unlock; |
| 447 | } |
| 448 | |
Axel Rasmussen | d971293 | 2023-03-14 15:12:49 -0700 | [diff] [blame] | 449 | if (!uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE) && |
Peter Xu | 6041c69 | 2022-05-12 20:22:54 -0700 | [diff] [blame] | 450 | !huge_pte_none_mostly(huge_ptep_get(dst_pte))) { |
Axel Rasmussen | f619147 | 2021-05-04 18:35:49 -0700 | [diff] [blame] | 451 | err = -EEXIST; |
Mike Kravetz | 40549ba | 2022-09-14 15:18:09 -0700 | [diff] [blame] | 452 | hugetlb_vma_unlock_read(dst_vma); |
Mike Kravetz | 60d4d2d | 2017-02-22 15:42:55 -0800 | [diff] [blame] | 453 | mutex_unlock(&hugetlb_fault_mutex_table[hash]); |
| 454 | goto out_unlock; |
| 455 | } |
| 456 | |
Axel Rasmussen | d971293 | 2023-03-14 15:12:49 -0700 | [diff] [blame] | 457 | err = hugetlb_mfill_atomic_pte(dst_pte, dst_vma, dst_addr, |
ZhangPeng | 0169fd5 | 2023-04-10 21:39:30 +0800 | [diff] [blame] | 458 | src_addr, flags, &folio); |
Mike Kravetz | 60d4d2d | 2017-02-22 15:42:55 -0800 | [diff] [blame] | 459 | |
Mike Kravetz | 40549ba | 2022-09-14 15:18:09 -0700 | [diff] [blame] | 460 | hugetlb_vma_unlock_read(dst_vma); |
Mike Kravetz | 60d4d2d | 2017-02-22 15:42:55 -0800 | [diff] [blame] | 461 | mutex_unlock(&hugetlb_fault_mutex_table[hash]); |
| 462 | |
| 463 | cond_resched(); |
| 464 | |
Andrea Arcangeli | 9e36825 | 2018-11-30 14:09:25 -0800 | [diff] [blame] | 465 | if (unlikely(err == -ENOENT)) { |
Michel Lespinasse | d8ed45c | 2020-06-08 21:33:25 -0700 | [diff] [blame] | 466 | mmap_read_unlock(dst_mm); |
ZhangPeng | 0169fd5 | 2023-04-10 21:39:30 +0800 | [diff] [blame] | 467 | BUG_ON(!folio); |
Mike Kravetz | 60d4d2d | 2017-02-22 15:42:55 -0800 | [diff] [blame] | 468 | |
ZhangPeng | 0169fd5 | 2023-04-10 21:39:30 +0800 | [diff] [blame] | 469 | err = copy_folio_from_user(folio, |
ZhangPeng | e87340c | 2023-04-10 21:39:29 +0800 | [diff] [blame] | 470 | (const void __user *)src_addr, true); |
Mike Kravetz | 60d4d2d | 2017-02-22 15:42:55 -0800 | [diff] [blame] | 471 | if (unlikely(err)) { |
| 472 | err = -EFAULT; |
| 473 | goto out; |
| 474 | } |
Michel Lespinasse | d8ed45c | 2020-06-08 21:33:25 -0700 | [diff] [blame] | 475 | mmap_read_lock(dst_mm); |
Lokesh Gidra | 67695f1 | 2024-01-17 14:37:29 -0800 | [diff] [blame] | 476 | /* |
| 477 | * If memory mappings are changing because of non-cooperative |
| 478 | * operation (e.g. mremap) running in parallel, bail out and |
| 479 | * request the user to retry later |
| 480 | */ |
| 481 | if (mmap_changing && atomic_read(mmap_changing)) { |
| 482 | err = -EAGAIN; |
| 483 | break; |
| 484 | } |
Mike Kravetz | 60d4d2d | 2017-02-22 15:42:55 -0800 | [diff] [blame] | 485 | |
| 486 | dst_vma = NULL; |
| 487 | goto retry; |
| 488 | } else |
ZhangPeng | 0169fd5 | 2023-04-10 21:39:30 +0800 | [diff] [blame] | 489 | BUG_ON(folio); |
Mike Kravetz | 60d4d2d | 2017-02-22 15:42:55 -0800 | [diff] [blame] | 490 | |
| 491 | if (!err) { |
| 492 | dst_addr += vma_hpagesize; |
| 493 | src_addr += vma_hpagesize; |
| 494 | copied += vma_hpagesize; |
| 495 | |
| 496 | if (fatal_signal_pending(current)) |
| 497 | err = -EINTR; |
| 498 | } |
| 499 | if (err) |
| 500 | break; |
| 501 | } |
| 502 | |
| 503 | out_unlock: |
Michel Lespinasse | d8ed45c | 2020-06-08 21:33:25 -0700 | [diff] [blame] | 504 | mmap_read_unlock(dst_mm); |
Mike Kravetz | 60d4d2d | 2017-02-22 15:42:55 -0800 | [diff] [blame] | 505 | out: |
ZhangPeng | 0169fd5 | 2023-04-10 21:39:30 +0800 | [diff] [blame] | 506 | if (folio) |
| 507 | folio_put(folio); |
Mike Kravetz | 60d4d2d | 2017-02-22 15:42:55 -0800 | [diff] [blame] | 508 | BUG_ON(copied < 0); |
| 509 | BUG_ON(err > 0); |
| 510 | BUG_ON(!copied && !err); |
| 511 | return copied ? copied : err; |
| 512 | } |
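/*
 * Like mfill_atomic() below, this returns the number of bytes filled, or a
 * negative error only when no progress was made; partial progress is
 * reported so the caller can retry the remainder.
 */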
| 513 | #else /* !CONFIG_HUGETLB_PAGE */ |
| 514 | /* fail at build time if gcc attempts to use this */ |
Axel Rasmussen | 61c5004 | 2023-03-14 15:12:48 -0700 | [diff] [blame] | 515 | extern ssize_t mfill_atomic_hugetlb(struct vm_area_struct *dst_vma, |
| 516 | unsigned long dst_start, |
| 517 | unsigned long src_start, |
| 518 | unsigned long len, |
Lokesh Gidra | 67695f1 | 2024-01-17 14:37:29 -0800 | [diff] [blame] | 519 | atomic_t *mmap_changing, |
Axel Rasmussen | d971293 | 2023-03-14 15:12:49 -0700 | [diff] [blame] | 520 | uffd_flags_t flags); |
Mike Kravetz | 60d4d2d | 2017-02-22 15:42:55 -0800 | [diff] [blame] | 521 | #endif /* CONFIG_HUGETLB_PAGE */ |
| 522 | |
Axel Rasmussen | 61c5004 | 2023-03-14 15:12:48 -0700 | [diff] [blame] | 523 | static __always_inline ssize_t mfill_atomic_pte(pmd_t *dst_pmd, |
Mike Rapoport | 3217d3c | 2017-09-06 16:23:06 -0700 | [diff] [blame] | 524 | struct vm_area_struct *dst_vma, |
| 525 | unsigned long dst_addr, |
| 526 | unsigned long src_addr, |
Axel Rasmussen | d971293 | 2023-03-14 15:12:49 -0700 | [diff] [blame] | 527 | uffd_flags_t flags, |
ZhangPeng | d7be6d7e | 2023-04-10 21:39:32 +0800 | [diff] [blame] | 528 | struct folio **foliop) |
Mike Rapoport | 3217d3c | 2017-09-06 16:23:06 -0700 | [diff] [blame] | 529 | { |
| 530 | ssize_t err; |
| 531 | |
Axel Rasmussen | d971293 | 2023-03-14 15:12:49 -0700 | [diff] [blame] | 532 | if (uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE)) { |
Axel Rasmussen | 61c5004 | 2023-03-14 15:12:48 -0700 | [diff] [blame] | 533 | return mfill_atomic_pte_continue(dst_pmd, dst_vma, |
Axel Rasmussen | d971293 | 2023-03-14 15:12:49 -0700 | [diff] [blame] | 534 | dst_addr, flags); |
Axel Rasmussen | fc71884 | 2023-07-07 14:55:36 -0700 | [diff] [blame] | 535 | } else if (uffd_flags_mode_is(flags, MFILL_ATOMIC_POISON)) { |
| 536 | return mfill_atomic_pte_poison(dst_pmd, dst_vma, |
| 537 | dst_addr, flags); |
Axel Rasmussen | 1531325 | 2021-06-30 18:49:24 -0700 | [diff] [blame] | 538 | } |
| 539 | |
Andrea Arcangeli | 5b51072 | 2018-11-30 14:09:28 -0800 | [diff] [blame] | 540 | /* |
| 541 | * The normal page fault path for a shmem will invoke the |
| 542 | * fault, fill the hole in the file and COW it right away. The |
| 543 | * result generates plain anonymous memory. So when we are |
| 544 | * asked to fill a hole in a MAP_PRIVATE shmem mapping, we'll |
| 545 | * generate anonymous memory directly without actually filling |
| 546 | * the hole. For the MAP_PRIVATE case the robustness check |
| 547 | * only happens in the pagetable (to verify it's still none) |
| 548 | * and not in the radix tree. |
| 549 | */ |
| 550 | if (!(dst_vma->vm_flags & VM_SHARED)) { |
Axel Rasmussen | d971293 | 2023-03-14 15:12:49 -0700 | [diff] [blame] | 551 | if (uffd_flags_mode_is(flags, MFILL_ATOMIC_COPY)) |
Axel Rasmussen | 61c5004 | 2023-03-14 15:12:48 -0700 | [diff] [blame] | 552 | err = mfill_atomic_pte_copy(dst_pmd, dst_vma, |
Axel Rasmussen | d971293 | 2023-03-14 15:12:49 -0700 | [diff] [blame] | 553 | dst_addr, src_addr, |
ZhangPeng | d7be6d7e | 2023-04-10 21:39:32 +0800 | [diff] [blame] | 554 | flags, foliop); |
Mike Rapoport | 3217d3c | 2017-09-06 16:23:06 -0700 | [diff] [blame] | 555 | else |
Axel Rasmussen | 61c5004 | 2023-03-14 15:12:48 -0700 | [diff] [blame] | 556 | err = mfill_atomic_pte_zeropage(dst_pmd, |
Mike Rapoport | 3217d3c | 2017-09-06 16:23:06 -0700 | [diff] [blame] | 557 | dst_vma, dst_addr); |
| 558 | } else { |
Axel Rasmussen | 61c5004 | 2023-03-14 15:12:48 -0700 | [diff] [blame] | 559 | err = shmem_mfill_atomic_pte(dst_pmd, dst_vma, |
Axel Rasmussen | 1531325 | 2021-06-30 18:49:24 -0700 | [diff] [blame] | 560 | dst_addr, src_addr, |
ZhangPeng | d7be6d7e | 2023-04-10 21:39:32 +0800 | [diff] [blame] | 561 | flags, foliop); |
Mike Rapoport | 3217d3c | 2017-09-06 16:23:06 -0700 | [diff] [blame] | 562 | } |
| 563 | |
| 564 | return err; |
| 565 | } |
| 566 | |
Axel Rasmussen | a734991 | 2023-03-14 15:12:47 -0700 | [diff] [blame] | 567 | static __always_inline ssize_t mfill_atomic(struct mm_struct *dst_mm, |
| 568 | unsigned long dst_start, |
| 569 | unsigned long src_start, |
| 570 | unsigned long len, |
Axel Rasmussen | a734991 | 2023-03-14 15:12:47 -0700 | [diff] [blame] | 571 | atomic_t *mmap_changing, |
Axel Rasmussen | d971293 | 2023-03-14 15:12:49 -0700 | [diff] [blame] | 572 | uffd_flags_t flags) |
Andrea Arcangeli | c1a4de9 | 2015-09-04 15:47:04 -0700 | [diff] [blame] | 573 | { |
| 574 | struct vm_area_struct *dst_vma; |
| 575 | ssize_t err; |
| 576 | pmd_t *dst_pmd; |
| 577 | unsigned long src_addr, dst_addr; |
Andrea Arcangeli | b6ebaed | 2015-09-04 15:47:08 -0700 | [diff] [blame] | 578 | long copied; |
ZhangPeng | d7be6d7e | 2023-04-10 21:39:32 +0800 | [diff] [blame] | 579 | struct folio *folio; |
Andrea Arcangeli | c1a4de9 | 2015-09-04 15:47:04 -0700 | [diff] [blame] | 580 | |
| 581 | /* |
| 582 | * Sanitize the command parameters: |
| 583 | */ |
| 584 | BUG_ON(dst_start & ~PAGE_MASK); |
| 585 | BUG_ON(len & ~PAGE_MASK); |
| 586 | |
| 587 | /* Does the address range wrap, or is the span zero-sized? */ |
| 588 | BUG_ON(src_start + len <= src_start); |
| 589 | BUG_ON(dst_start + len <= dst_start); |
| 590 | |
Andrea Arcangeli | b6ebaed | 2015-09-04 15:47:08 -0700 | [diff] [blame] | 591 | src_addr = src_start; |
| 592 | dst_addr = dst_start; |
| 593 | copied = 0; |
ZhangPeng | d7be6d7e | 2023-04-10 21:39:32 +0800 | [diff] [blame] | 594 | folio = NULL; |
Andrea Arcangeli | b6ebaed | 2015-09-04 15:47:08 -0700 | [diff] [blame] | 595 | retry: |
Michel Lespinasse | d8ed45c | 2020-06-08 21:33:25 -0700 | [diff] [blame] | 596 | mmap_read_lock(dst_mm); |
Andrea Arcangeli | c1a4de9 | 2015-09-04 15:47:04 -0700 | [diff] [blame] | 597 | |
| 598 | /* |
Mike Rapoport | df2cc96 | 2018-06-07 17:09:25 -0700 | [diff] [blame] | 599 | * If memory mappings are changing because of non-cooperative |
| 600 | * operation (e.g. mremap) running in parallel, bail out and |
| 601 | * request the user to retry later |
| 602 | */ |
| 603 | err = -EAGAIN; |
Nadav Amit | a759a90 | 2021-09-02 14:58:56 -0700 | [diff] [blame] | 604 | if (mmap_changing && atomic_read(mmap_changing)) |
Mike Rapoport | df2cc96 | 2018-06-07 17:09:25 -0700 | [diff] [blame] | 605 | goto out_unlock; |
| 606 | |
| 607 | /* |
Andrea Arcangeli | c1a4de9 | 2015-09-04 15:47:04 -0700 | [diff] [blame] | 608 | * Make sure the vma is not shared, that the dst range is |
| 609 | * both valid and fully within a single existing vma. |
| 610 | */ |
Mike Rapoport | 27d0256 | 2017-02-24 14:58:28 -0800 | [diff] [blame] | 611 | err = -ENOENT; |
Wei Yang | 643aa36 | 2019-11-30 17:57:55 -0800 | [diff] [blame] | 612 | dst_vma = find_dst_vma(dst_mm, dst_start, len); |
Mike Rapoport | 26071ce | 2017-02-22 15:43:34 -0800 | [diff] [blame] | 613 | if (!dst_vma) |
| 614 | goto out_unlock; |
Mike Rapoport | 27d0256 | 2017-02-24 14:58:28 -0800 | [diff] [blame] | 615 | |
| 616 | err = -EINVAL; |
| 617 | /* |
| 618 | * shmem_zero_setup is invoked in mmap for MAP_ANONYMOUS|MAP_SHARED but |
| 619 | * it will overwrite vm_ops, so vma_is_anonymous must return false. |
| 620 | */ |
| 621 | if (WARN_ON_ONCE(vma_is_anonymous(dst_vma) && |
| 622 | dst_vma->vm_flags & VM_SHARED)) |
| 623 | goto out_unlock; |
| 624 | |
| 625 | /* |
Andrea Arcangeli | 72981e0 | 2020-04-06 20:05:41 -0700 | [diff] [blame] | 626 | * validate 'mode' now that we know the dst_vma: don't allow |
| 627 | * a wrprotect copy if the userfaultfd didn't register as WP. |
| 628 | */ |
Axel Rasmussen | d971293 | 2023-03-14 15:12:49 -0700 | [diff] [blame] | 629 | if ((flags & MFILL_ATOMIC_WP) && !(dst_vma->vm_flags & VM_UFFD_WP)) |
Andrea Arcangeli | 72981e0 | 2020-04-06 20:05:41 -0700 | [diff] [blame] | 630 | goto out_unlock; |
| 631 | |
| 632 | /* |
Mike Rapoport | 27d0256 | 2017-02-24 14:58:28 -0800 | [diff] [blame] | 633 | * If this is a HUGETLB vma, pass off to appropriate routine |
| 634 | */ |
| 635 | if (is_vm_hugetlb_page(dst_vma)) |
Lokesh Gidra | 67695f1 | 2024-01-17 14:37:29 -0800 | [diff] [blame] | 636 | return mfill_atomic_hugetlb(dst_vma, dst_start, src_start, |
| 637 | len, mmap_changing, flags); |
Mike Rapoport | 27d0256 | 2017-02-24 14:58:28 -0800 | [diff] [blame] | 638 | |
Mike Rapoport | 26071ce | 2017-02-22 15:43:34 -0800 | [diff] [blame] | 639 | if (!vma_is_anonymous(dst_vma) && !vma_is_shmem(dst_vma)) |
Andrea Arcangeli | b6ebaed | 2015-09-04 15:47:08 -0700 | [diff] [blame] | 640 | goto out_unlock; |
Axel Rasmussen | d971293 | 2023-03-14 15:12:49 -0700 | [diff] [blame] | 641 | if (!vma_is_shmem(dst_vma) && |
| 642 | uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE)) |
Axel Rasmussen | f619147 | 2021-05-04 18:35:49 -0700 | [diff] [blame] | 643 | goto out_unlock; |
Andrea Arcangeli | c1a4de9 | 2015-09-04 15:47:04 -0700 | [diff] [blame] | 644 | |
| 645 | /* |
| 646 | * Ensure the dst_vma has an anon_vma or this page |
| 647 | * would get a NULL anon_vma when moved in the |
| 648 | * dst_vma. |
| 649 | */ |
| 650 | err = -ENOMEM; |
Andrea Arcangeli | 5b51072 | 2018-11-30 14:09:28 -0800 | [diff] [blame] | 651 | if (!(dst_vma->vm_flags & VM_SHARED) && |
| 652 | unlikely(anon_vma_prepare(dst_vma))) |
Andrea Arcangeli | b6ebaed | 2015-09-04 15:47:08 -0700 | [diff] [blame] | 653 | goto out_unlock; |
Andrea Arcangeli | c1a4de9 | 2015-09-04 15:47:04 -0700 | [diff] [blame] | 654 | |
Andrea Arcangeli | b6ebaed | 2015-09-04 15:47:08 -0700 | [diff] [blame] | 655 | while (src_addr < src_start + len) { |
Andrea Arcangeli | c1a4de9 | 2015-09-04 15:47:04 -0700 | [diff] [blame] | 656 | pmd_t dst_pmdval; |
Andrea Arcangeli | b6ebaed | 2015-09-04 15:47:08 -0700 | [diff] [blame] | 657 | |
Andrea Arcangeli | c1a4de9 | 2015-09-04 15:47:04 -0700 | [diff] [blame] | 658 | BUG_ON(dst_addr >= dst_start + len); |
Andrea Arcangeli | b6ebaed | 2015-09-04 15:47:08 -0700 | [diff] [blame] | 659 | |
Andrea Arcangeli | c1a4de9 | 2015-09-04 15:47:04 -0700 | [diff] [blame] | 660 | dst_pmd = mm_alloc_pmd(dst_mm, dst_addr); |
| 661 | if (unlikely(!dst_pmd)) { |
| 662 | err = -ENOMEM; |
| 663 | break; |
| 664 | } |
| 665 | |
Peter Zijlstra | dab6e71 | 2020-11-26 17:20:28 +0100 | [diff] [blame] | 666 | dst_pmdval = pmdp_get_lockless(dst_pmd); |
Andrea Arcangeli | c1a4de9 | 2015-09-04 15:47:04 -0700 | [diff] [blame] | 667 | /* |
| 668 | * If the dst_pmd is mapped as THP don't |
| 669 | * override it and just be strict. |
| 670 | */ |
| 671 | if (unlikely(pmd_trans_huge(dst_pmdval))) { |
| 672 | err = -EEXIST; |
| 673 | break; |
| 674 | } |
| 675 | if (unlikely(pmd_none(dst_pmdval)) && |
Joel Fernandes (Google) | 4cf5892 | 2019-01-03 15:28:34 -0800 | [diff] [blame] | 676 | unlikely(__pte_alloc(dst_mm, dst_pmd))) { |
Andrea Arcangeli | c1a4de9 | 2015-09-04 15:47:04 -0700 | [diff] [blame] | 677 | err = -ENOMEM; |
| 678 | break; |
| 679 | } |
| 680 | /* If a huge pmd materialized from under us, fail */ |
| 681 | if (unlikely(pmd_trans_huge(*dst_pmd))) { |
| 682 | err = -EFAULT; |
| 683 | break; |
| 684 | } |
| 685 | |
| 686 | BUG_ON(pmd_none(*dst_pmd)); |
| 687 | BUG_ON(pmd_trans_huge(*dst_pmd)); |
| 688 | |
Axel Rasmussen | 61c5004 | 2023-03-14 15:12:48 -0700 | [diff] [blame] | 689 | err = mfill_atomic_pte(dst_pmd, dst_vma, dst_addr, |
ZhangPeng | d7be6d7e | 2023-04-10 21:39:32 +0800 | [diff] [blame] | 690 | src_addr, flags, &folio); |
Andrea Arcangeli | c1a4de9 | 2015-09-04 15:47:04 -0700 | [diff] [blame] | 691 | cond_resched(); |
| 692 | |
Andrea Arcangeli | 9e36825 | 2018-11-30 14:09:25 -0800 | [diff] [blame] | 693 | if (unlikely(err == -ENOENT)) { |
ZhangPeng | d7be6d7e | 2023-04-10 21:39:32 +0800 | [diff] [blame] | 694 | void *kaddr; |
Andrea Arcangeli | b6ebaed | 2015-09-04 15:47:08 -0700 | [diff] [blame] | 695 | |
Michel Lespinasse | d8ed45c | 2020-06-08 21:33:25 -0700 | [diff] [blame] | 696 | mmap_read_unlock(dst_mm); |
ZhangPeng | d7be6d7e | 2023-04-10 21:39:32 +0800 | [diff] [blame] | 697 | BUG_ON(!folio); |
Andrea Arcangeli | b6ebaed | 2015-09-04 15:47:08 -0700 | [diff] [blame] | 698 | |
ZhangPeng | d7be6d7e | 2023-04-10 21:39:32 +0800 | [diff] [blame] | 699 | kaddr = kmap_local_folio(folio, 0); |
| 700 | err = copy_from_user(kaddr, |
Andrea Arcangeli | b6ebaed | 2015-09-04 15:47:08 -0700 | [diff] [blame] | 701 | (const void __user *) src_addr, |
| 702 | PAGE_SIZE); |
ZhangPeng | d7be6d7e | 2023-04-10 21:39:32 +0800 | [diff] [blame] | 703 | kunmap_local(kaddr); |
Andrea Arcangeli | b6ebaed | 2015-09-04 15:47:08 -0700 | [diff] [blame] | 704 | if (unlikely(err)) { |
| 705 | err = -EFAULT; |
| 706 | goto out; |
| 707 | } |
ZhangPeng | d7be6d7e | 2023-04-10 21:39:32 +0800 | [diff] [blame] | 708 | flush_dcache_folio(folio); |
Andrea Arcangeli | b6ebaed | 2015-09-04 15:47:08 -0700 | [diff] [blame] | 709 | goto retry; |
| 710 | } else |
ZhangPeng | d7be6d7e | 2023-04-10 21:39:32 +0800 | [diff] [blame] | 711 | BUG_ON(folio); |
Andrea Arcangeli | b6ebaed | 2015-09-04 15:47:08 -0700 | [diff] [blame] | 712 | |
Andrea Arcangeli | c1a4de9 | 2015-09-04 15:47:04 -0700 | [diff] [blame] | 713 | if (!err) { |
| 714 | dst_addr += PAGE_SIZE; |
| 715 | src_addr += PAGE_SIZE; |
| 716 | copied += PAGE_SIZE; |
| 717 | |
| 718 | if (fatal_signal_pending(current)) |
| 719 | err = -EINTR; |
| 720 | } |
| 721 | if (err) |
| 722 | break; |
| 723 | } |
| 724 | |
Andrea Arcangeli | b6ebaed | 2015-09-04 15:47:08 -0700 | [diff] [blame] | 725 | out_unlock: |
Michel Lespinasse | d8ed45c | 2020-06-08 21:33:25 -0700 | [diff] [blame] | 726 | mmap_read_unlock(dst_mm); |
Andrea Arcangeli | b6ebaed | 2015-09-04 15:47:08 -0700 | [diff] [blame] | 727 | out: |
ZhangPeng | d7be6d7e | 2023-04-10 21:39:32 +0800 | [diff] [blame] | 728 | if (folio) |
| 729 | folio_put(folio); |
Andrea Arcangeli | c1a4de9 | 2015-09-04 15:47:04 -0700 | [diff] [blame] | 730 | BUG_ON(copied < 0); |
| 731 | BUG_ON(err > 0); |
| 732 | BUG_ON(!copied && !err); |
| 733 | return copied ? copied : err; |
| 734 | } |
| 735 | |
Axel Rasmussen | a734991 | 2023-03-14 15:12:47 -0700 | [diff] [blame] | 736 | ssize_t mfill_atomic_copy(struct mm_struct *dst_mm, unsigned long dst_start, |
| 737 | unsigned long src_start, unsigned long len, |
Axel Rasmussen | d971293 | 2023-03-14 15:12:49 -0700 | [diff] [blame] | 738 | atomic_t *mmap_changing, uffd_flags_t flags) |
Andrea Arcangeli | c1a4de9 | 2015-09-04 15:47:04 -0700 | [diff] [blame] | 739 | { |
Axel Rasmussen | d971293 | 2023-03-14 15:12:49 -0700 | [diff] [blame] | 740 | return mfill_atomic(dst_mm, dst_start, src_start, len, mmap_changing, |
| 741 | uffd_flags_set_mode(flags, MFILL_ATOMIC_COPY)); |
Andrea Arcangeli | c1a4de9 | 2015-09-04 15:47:04 -0700 | [diff] [blame] | 742 | } |
| 743 | |
Axel Rasmussen | a734991 | 2023-03-14 15:12:47 -0700 | [diff] [blame] | 744 | ssize_t mfill_atomic_zeropage(struct mm_struct *dst_mm, unsigned long start, |
| 745 | unsigned long len, atomic_t *mmap_changing) |
Andrea Arcangeli | c1a4de9 | 2015-09-04 15:47:04 -0700 | [diff] [blame] | 746 | { |
Axel Rasmussen | d971293 | 2023-03-14 15:12:49 -0700 | [diff] [blame] | 747 | return mfill_atomic(dst_mm, start, 0, len, mmap_changing, |
| 748 | uffd_flags_set_mode(0, MFILL_ATOMIC_ZEROPAGE)); |
Axel Rasmussen | f619147 | 2021-05-04 18:35:49 -0700 | [diff] [blame] | 749 | } |
| 750 | |
Axel Rasmussen | a734991 | 2023-03-14 15:12:47 -0700 | [diff] [blame] | 751 | ssize_t mfill_atomic_continue(struct mm_struct *dst_mm, unsigned long start, |
Axel Rasmussen | 0289184 | 2023-03-14 15:12:50 -0700 | [diff] [blame] | 752 | unsigned long len, atomic_t *mmap_changing, |
| 753 | uffd_flags_t flags) |
Axel Rasmussen | f619147 | 2021-05-04 18:35:49 -0700 | [diff] [blame] | 754 | { |
Axel Rasmussen | d971293 | 2023-03-14 15:12:49 -0700 | [diff] [blame] | 755 | return mfill_atomic(dst_mm, start, 0, len, mmap_changing, |
Axel Rasmussen | 0289184 | 2023-03-14 15:12:50 -0700 | [diff] [blame] | 756 | uffd_flags_set_mode(flags, MFILL_ATOMIC_CONTINUE)); |
Andrea Arcangeli | c1a4de9 | 2015-09-04 15:47:04 -0700 | [diff] [blame] | 757 | } |
Shaohua Li | ffd0579 | 2020-04-06 20:06:09 -0700 | [diff] [blame] | 758 | |
Axel Rasmussen | fc71884 | 2023-07-07 14:55:36 -0700 | [diff] [blame] | 759 | ssize_t mfill_atomic_poison(struct mm_struct *dst_mm, unsigned long start, |
| 760 | unsigned long len, atomic_t *mmap_changing, |
| 761 | uffd_flags_t flags) |
| 762 | { |
| 763 | return mfill_atomic(dst_mm, start, 0, len, mmap_changing, |
| 764 | uffd_flags_set_mode(flags, MFILL_ATOMIC_POISON)); |
| 765 | } |
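/*
 * Illustrative only (assumed caller shape, not part of this file): the
 * userfaultfd ioctl handlers in fs/userfaultfd.c are expected to invoke
 * these wrappers roughly as follows for UFFDIO_COPY:
 *
 *	uffd_flags_t flags = 0;
 *
 *	if (uffdio_copy.mode & UFFDIO_COPY_MODE_WP)
 *		flags |= MFILL_ATOMIC_WP;
 *	if (mmget_not_zero(ctx->mm)) {
 *		ret = mfill_atomic_copy(ctx->mm, uffdio_copy.dst,
 *					uffdio_copy.src, uffdio_copy.len,
 *					&ctx->mmap_changing, flags);
 *		mmput(ctx->mm);
 *	}
 */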
| 766 | |
Axel Rasmussen | 61c5004 | 2023-03-14 15:12:48 -0700 | [diff] [blame] | 767 | long uffd_wp_range(struct vm_area_struct *dst_vma, |
Peter Xu | f369b07 | 2022-08-11 16:13:40 -0400 | [diff] [blame] | 768 | unsigned long start, unsigned long len, bool enable_wp) |
| 769 | { |
David Hildenbrand | 931298e | 2022-12-23 16:56:15 +0100 | [diff] [blame] | 770 | unsigned int mm_cp_flags; |
Peter Xu | f369b07 | 2022-08-11 16:13:40 -0400 | [diff] [blame] | 771 | struct mmu_gather tlb; |
Peter Xu | d175111 | 2023-01-04 17:52:07 -0500 | [diff] [blame] | 772 | long ret; |
Peter Xu | f369b07 | 2022-08-11 16:13:40 -0400 | [diff] [blame] | 773 | |
Muhammad Usama Anjum | a1b92a3 | 2023-02-17 15:55:58 +0500 | [diff] [blame] | 774 | VM_WARN_ONCE(start < dst_vma->vm_start || start + len > dst_vma->vm_end, |
| 775 | "The address range exceeds VMA boundary.\n"); |
Peter Xu | f369b07 | 2022-08-11 16:13:40 -0400 | [diff] [blame] | 776 | if (enable_wp) |
David Hildenbrand | 931298e | 2022-12-23 16:56:15 +0100 | [diff] [blame] | 777 | mm_cp_flags = MM_CP_UFFD_WP; |
Peter Xu | f369b07 | 2022-08-11 16:13:40 -0400 | [diff] [blame] | 778 | else |
David Hildenbrand | 931298e | 2022-12-23 16:56:15 +0100 | [diff] [blame] | 779 | mm_cp_flags = MM_CP_UFFD_WP_RESOLVE; |
Peter Xu | f369b07 | 2022-08-11 16:13:40 -0400 | [diff] [blame] | 780 | |
David Hildenbrand | 931298e | 2022-12-23 16:56:15 +0100 | [diff] [blame] | 781 | /* |
| 782 | * vma->vm_page_prot already reflects that uffd-wp is enabled for this |
| 783 | * VMA (see userfaultfd_set_vm_flags()) and that all PTEs are supposed |
| 784 | * to be write-protected as default whenever protection changes. |
| 785 | * Try upgrading write permissions manually. |
| 786 | */ |
| 787 | if (!enable_wp && vma_wants_manual_pte_write_upgrade(dst_vma)) |
| 788 | mm_cp_flags |= MM_CP_TRY_CHANGE_WRITABLE; |
Axel Rasmussen | 61c5004 | 2023-03-14 15:12:48 -0700 | [diff] [blame] | 789 | tlb_gather_mmu(&tlb, dst_vma->vm_mm); |
Peter Xu | d175111 | 2023-01-04 17:52:07 -0500 | [diff] [blame] | 790 | ret = change_protection(&tlb, dst_vma, start, start + len, mm_cp_flags); |
Peter Xu | f369b07 | 2022-08-11 16:13:40 -0400 | [diff] [blame] | 791 | tlb_finish_mmu(&tlb); |
Peter Xu | d175111 | 2023-01-04 17:52:07 -0500 | [diff] [blame] | 792 | |
| 793 | return ret; |
Peter Xu | f369b07 | 2022-08-11 16:13:40 -0400 | [diff] [blame] | 794 | } |
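/*
 * On success this propagates change_protection()'s return value, i.e. the
 * number of pages whose protection was changed; callers treat < 0 as error.
 */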
| 795 | |
Shaohua Li | ffd0579 | 2020-04-06 20:06:09 -0700 | [diff] [blame] | 796 | int mwriteprotect_range(struct mm_struct *dst_mm, unsigned long start, |
Nadav Amit | a759a90 | 2021-09-02 14:58:56 -0700 | [diff] [blame] | 797 | unsigned long len, bool enable_wp, |
| 798 | atomic_t *mmap_changing) |
Shaohua Li | ffd0579 | 2020-04-06 20:06:09 -0700 | [diff] [blame] | 799 | { |
Muhammad Usama Anjum | a1b92a3 | 2023-02-17 15:55:58 +0500 | [diff] [blame] | 800 | unsigned long end = start + len; |
| 801 | unsigned long _start, _end; |
Shaohua Li | ffd0579 | 2020-04-06 20:06:09 -0700 | [diff] [blame] | 802 | struct vm_area_struct *dst_vma; |
Peter Xu | 5a90d5a | 2022-05-12 20:22:54 -0700 | [diff] [blame] | 803 | unsigned long page_mask; |
Peter Xu | d175111 | 2023-01-04 17:52:07 -0500 | [diff] [blame] | 804 | long err; |
Muhammad Usama Anjum | a1b92a3 | 2023-02-17 15:55:58 +0500 | [diff] [blame] | 805 | VMA_ITERATOR(vmi, dst_mm, start); |
Shaohua Li | ffd0579 | 2020-04-06 20:06:09 -0700 | [diff] [blame] | 806 | |
| 807 | /* |
| 808 | * Sanitize the command parameters: |
| 809 | */ |
| 810 | BUG_ON(start & ~PAGE_MASK); |
| 811 | BUG_ON(len & ~PAGE_MASK); |
| 812 | |
| 813 | /* Does the address range wrap, or is the span zero-sized? */ |
| 814 | BUG_ON(start + len <= start); |
| 815 | |
Michel Lespinasse | d8ed45c | 2020-06-08 21:33:25 -0700 | [diff] [blame] | 816 | mmap_read_lock(dst_mm); |
Shaohua Li | ffd0579 | 2020-04-06 20:06:09 -0700 | [diff] [blame] | 817 | |
| 818 | /* |
| 819 | * If memory mappings are changing because of non-cooperative |
| 820 | * operation (e.g. mremap) running in parallel, bail out and |
| 821 | * request the user to retry later |
| 822 | */ |
| 823 | err = -EAGAIN; |
Nadav Amit | a759a90 | 2021-09-02 14:58:56 -0700 | [diff] [blame] | 824 | if (mmap_changing && atomic_read(mmap_changing)) |
Shaohua Li | ffd0579 | 2020-04-06 20:06:09 -0700 | [diff] [blame] | 825 | goto out_unlock; |
| 826 | |
| 827 | err = -ENOENT; |
Muhammad Usama Anjum | a1b92a3 | 2023-02-17 15:55:58 +0500 | [diff] [blame] | 828 | for_each_vma_range(vmi, dst_vma, end) { |
Peter Xu | b1f9e87 | 2022-05-12 20:22:56 -0700 | [diff] [blame] | 829 | |
Muhammad Usama Anjum | a1b92a3 | 2023-02-17 15:55:58 +0500 | [diff] [blame] | 830 | if (!userfaultfd_wp(dst_vma)) { |
| 831 | err = -ENOENT; |
| 832 | break; |
| 833 | } |
Shaohua Li | ffd0579 | 2020-04-06 20:06:09 -0700 | [diff] [blame] | 834 | |
Muhammad Usama Anjum | a1b92a3 | 2023-02-17 15:55:58 +0500 | [diff] [blame] | 835 | if (is_vm_hugetlb_page(dst_vma)) { |
| 836 | err = -EINVAL; |
| 837 | page_mask = vma_kernel_pagesize(dst_vma) - 1; |
| 838 | if ((start & page_mask) || (len & page_mask)) |
| 839 | break; |
| 840 | } |
Peter Xu | 5a90d5a | 2022-05-12 20:22:54 -0700 | [diff] [blame] | 841 | |
Muhammad Usama Anjum | a1b92a3 | 2023-02-17 15:55:58 +0500 | [diff] [blame] | 842 | _start = max(dst_vma->vm_start, start); |
| 843 | _end = min(dst_vma->vm_end, end); |
Shaohua Li | ffd0579 | 2020-04-06 20:06:09 -0700 | [diff] [blame] | 844 | |
Axel Rasmussen | 61c5004 | 2023-03-14 15:12:48 -0700 | [diff] [blame] | 845 | err = uffd_wp_range(dst_vma, _start, _end - _start, enable_wp); |
Muhammad Usama Anjum | a1b92a3 | 2023-02-17 15:55:58 +0500 | [diff] [blame] | 846 | |
| 847 | /* Return 0 on success, <0 on failures */ |
| 848 | if (err < 0) |
| 849 | break; |
Peter Xu | d175111 | 2023-01-04 17:52:07 -0500 | [diff] [blame] | 850 | err = 0; |
Muhammad Usama Anjum | a1b92a3 | 2023-02-17 15:55:58 +0500 | [diff] [blame] | 851 | } |
Shaohua Li | ffd0579 | 2020-04-06 20:06:09 -0700 | [diff] [blame] | 852 | out_unlock: |
Michel Lespinasse | d8ed45c | 2020-06-08 21:33:25 -0700 | [diff] [blame] | 853 | mmap_read_unlock(dst_mm); |
Shaohua Li | ffd0579 | 2020-04-06 20:06:09 -0700 | [diff] [blame] | 854 | return err; |
| 855 | } |
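/*
 * Every VMA covering [start, start + len) must have userfaultfd-wp
 * registered; the walk stops with -ENOENT at the first one that does not.
 */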
Andrea Arcangeli | adef440 | 2023-12-06 02:36:56 -0800 | [diff] [blame] | 856 | |
| 857 | |
| 858 | void double_pt_lock(spinlock_t *ptl1, |
| 859 | spinlock_t *ptl2) |
| 860 | __acquires(ptl1) |
| 861 | __acquires(ptl2) |
| 862 | { |
| 863 | spinlock_t *ptl_tmp; |
| 864 | |
| 865 | if (ptl1 > ptl2) { |
| 866 | /* exchange ptl1 and ptl2 */ |
| 867 | ptl_tmp = ptl1; |
| 868 | ptl1 = ptl2; |
| 869 | ptl2 = ptl_tmp; |
| 870 | } |
| 871 | /* lock in virtual address order to avoid lock inversion */ |
| 872 | spin_lock(ptl1); |
| 873 | if (ptl1 != ptl2) |
| 874 | spin_lock_nested(ptl2, SINGLE_DEPTH_NESTING); |
| 875 | else |
| 876 | __acquire(ptl2); |
| 877 | } |
| 878 | |
| 879 | void double_pt_unlock(spinlock_t *ptl1, |
| 880 | spinlock_t *ptl2) |
| 881 | __releases(ptl1) |
| 882 | __releases(ptl2) |
| 883 | { |
| 884 | spin_unlock(ptl1); |
| 885 | if (ptl1 != ptl2) |
| 886 | spin_unlock(ptl2); |
| 887 | else |
| 888 | __release(ptl2); |
| 889 | } |
| 890 | |
| 891 | |
| 892 | static int move_present_pte(struct mm_struct *mm, |
| 893 | struct vm_area_struct *dst_vma, |
| 894 | struct vm_area_struct *src_vma, |
| 895 | unsigned long dst_addr, unsigned long src_addr, |
| 896 | pte_t *dst_pte, pte_t *src_pte, |
| 897 | pte_t orig_dst_pte, pte_t orig_src_pte, |
| 898 | spinlock_t *dst_ptl, spinlock_t *src_ptl, |
| 899 | struct folio *src_folio) |
| 900 | { |
| 901 | int err = 0; |
| 902 | |
| 903 | double_pt_lock(dst_ptl, src_ptl); |
| 904 | |
Ryan Roberts | 56ae10c | 2024-01-23 14:17:55 +0000 | [diff] [blame^] | 905 | if (!pte_same(ptep_get(src_pte), orig_src_pte) || |
| 906 | !pte_same(ptep_get(dst_pte), orig_dst_pte)) { |
Andrea Arcangeli | adef440 | 2023-12-06 02:36:56 -0800 | [diff] [blame] | 907 | err = -EAGAIN; |
| 908 | goto out; |
| 909 | } |
| 910 | if (folio_test_large(src_folio) || |
| 911 | folio_maybe_dma_pinned(src_folio) || |
| 912 | !PageAnonExclusive(&src_folio->page)) { |
| 913 | err = -EBUSY; |
| 914 | goto out; |
| 915 | } |
| 916 | |
| 917 | folio_move_anon_rmap(src_folio, dst_vma); |
| 918 | WRITE_ONCE(src_folio->index, linear_page_index(dst_vma, dst_addr)); |
| 919 | |
| 920 | orig_src_pte = ptep_clear_flush(src_vma, src_addr, src_pte); |
| 921 | /* Folio got pinned from under us. Put it back and fail the move. */ |
| 922 | if (folio_maybe_dma_pinned(src_folio)) { |
| 923 | set_pte_at(mm, src_addr, src_pte, orig_src_pte); |
| 924 | err = -EBUSY; |
| 925 | goto out; |
| 926 | } |
| 927 | |
| 928 | orig_dst_pte = mk_pte(&src_folio->page, dst_vma->vm_page_prot); |
 | 929 | /* Follow mremap() behavior and treat the entry as dirty after the move */ |
| 930 | orig_dst_pte = pte_mkwrite(pte_mkdirty(orig_dst_pte), dst_vma); |
| 931 | |
| 932 | set_pte_at(mm, dst_addr, dst_pte, orig_dst_pte); |
| 933 | out: |
| 934 | double_pt_unlock(dst_ptl, src_ptl); |
| 935 | return err; |
| 936 | } |
| 937 | |
| 938 | static int move_swap_pte(struct mm_struct *mm, |
| 939 | unsigned long dst_addr, unsigned long src_addr, |
| 940 | pte_t *dst_pte, pte_t *src_pte, |
| 941 | pte_t orig_dst_pte, pte_t orig_src_pte, |
| 942 | spinlock_t *dst_ptl, spinlock_t *src_ptl) |
| 943 | { |
| 944 | if (!pte_swp_exclusive(orig_src_pte)) |
| 945 | return -EBUSY; |
| 946 | |
| 947 | double_pt_lock(dst_ptl, src_ptl); |
| 948 | |
Ryan Roberts | 56ae10c | 2024-01-23 14:17:55 +0000 | [diff] [blame^] | 949 | if (!pte_same(ptep_get(src_pte), orig_src_pte) || |
| 950 | !pte_same(ptep_get(dst_pte), orig_dst_pte)) { |
Andrea Arcangeli | adef440 | 2023-12-06 02:36:56 -0800 | [diff] [blame] | 951 | double_pt_unlock(dst_ptl, src_ptl); |
| 952 | return -EAGAIN; |
| 953 | } |
| 954 | |
| 955 | orig_src_pte = ptep_get_and_clear(mm, src_addr, src_pte); |
| 956 | set_pte_at(mm, dst_addr, dst_pte, orig_src_pte); |
| 957 | double_pt_unlock(dst_ptl, src_ptl); |
| 958 | |
| 959 | return 0; |
| 960 | } |
| 961 | |
| 962 | /* |
 | 963 | * The mmap_lock for reading is held by the caller. Just move the page |
 | 964 | * from src_pmd to dst_pmd if possible, and return 0 if the page was |
 | 965 | * moved, or a negative error code otherwise. |
| 966 | */ |
| 967 | static int move_pages_pte(struct mm_struct *mm, pmd_t *dst_pmd, pmd_t *src_pmd, |
| 968 | struct vm_area_struct *dst_vma, |
| 969 | struct vm_area_struct *src_vma, |
| 970 | unsigned long dst_addr, unsigned long src_addr, |
| 971 | __u64 mode) |
| 972 | { |
| 973 | swp_entry_t entry; |
| 974 | pte_t orig_src_pte, orig_dst_pte; |
| 975 | pte_t src_folio_pte; |
| 976 | spinlock_t *src_ptl, *dst_ptl; |
| 977 | pte_t *src_pte = NULL; |
| 978 | pte_t *dst_pte = NULL; |
| 979 | |
| 980 | struct folio *src_folio = NULL; |
| 981 | struct anon_vma *src_anon_vma = NULL; |
| 982 | struct mmu_notifier_range range; |
| 983 | int err = 0; |
| 984 | |
| 985 | flush_cache_range(src_vma, src_addr, src_addr + PAGE_SIZE); |
| 986 | mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, |
| 987 | src_addr, src_addr + PAGE_SIZE); |
| 988 | mmu_notifier_invalidate_range_start(&range); |
| 989 | retry: |
| 990 | dst_pte = pte_offset_map_nolock(mm, dst_pmd, dst_addr, &dst_ptl); |
| 991 | |
| 992 | /* Retry if a huge pmd materialized from under us */ |
| 993 | if (unlikely(!dst_pte)) { |
| 994 | err = -EAGAIN; |
| 995 | goto out; |
| 996 | } |
| 997 | |
| 998 | src_pte = pte_offset_map_nolock(mm, src_pmd, src_addr, &src_ptl); |
| 999 | |
| 1000 | /* |
| 1001 | * We held the mmap_lock for reading so MADV_DONTNEED |
| 1002 | * can zap transparent huge pages under us, or the |
| 1003 | * transparent huge page fault can establish new |
| 1004 | * transparent huge pages under us. |
| 1005 | */ |
| 1006 | if (unlikely(!src_pte)) { |
| 1007 | err = -EAGAIN; |
| 1008 | goto out; |
| 1009 | } |
| 1010 | |
| 1011 | /* Sanity checks before the operation */ |
| 1012 | if (WARN_ON_ONCE(pmd_none(*dst_pmd)) || WARN_ON_ONCE(pmd_none(*src_pmd)) || |
| 1013 | WARN_ON_ONCE(pmd_trans_huge(*dst_pmd)) || WARN_ON_ONCE(pmd_trans_huge(*src_pmd))) { |
| 1014 | err = -EINVAL; |
| 1015 | goto out; |
| 1016 | } |
| 1017 | |
| 1018 | spin_lock(dst_ptl); |
Ryan Roberts | 56ae10c | 2024-01-23 14:17:55 +0000 | [diff] [blame^] | 1019 | orig_dst_pte = ptep_get(dst_pte); |
Andrea Arcangeli | adef440 | 2023-12-06 02:36:56 -0800 | [diff] [blame] | 1020 | spin_unlock(dst_ptl); |
| 1021 | if (!pte_none(orig_dst_pte)) { |
| 1022 | err = -EEXIST; |
| 1023 | goto out; |
| 1024 | } |
| 1025 | |
| 1026 | spin_lock(src_ptl); |
Ryan Roberts | 56ae10c | 2024-01-23 14:17:55 +0000 | [diff] [blame^] | 1027 | orig_src_pte = ptep_get(src_pte); |
Andrea Arcangeli | adef440 | 2023-12-06 02:36:56 -0800 | [diff] [blame] | 1028 | spin_unlock(src_ptl); |
| 1029 | if (pte_none(orig_src_pte)) { |
| 1030 | if (!(mode & UFFDIO_MOVE_MODE_ALLOW_SRC_HOLES)) |
| 1031 | err = -ENOENT; |
| 1032 | else /* nothing to do to move a hole */ |
| 1033 | err = 0; |
| 1034 | goto out; |
| 1035 | } |
| 1036 | |
 | 1037 | /* If PTE changed after we locked the folio then start over */ |
| 1038 | if (src_folio && unlikely(!pte_same(src_folio_pte, orig_src_pte))) { |
| 1039 | err = -EAGAIN; |
| 1040 | goto out; |
| 1041 | } |
| 1042 | |
| 1043 | if (pte_present(orig_src_pte)) { |
| 1044 | /* |
 | 1045 | * Pin and lock both the source folio and anon_vma. Since we are in |
 | 1046 | * an RCU read section, we can't block, so on contention we have to |
 | 1047 | * unmap the ptes, obtain the lock and retry. |
| 1048 | */ |
| 1049 | if (!src_folio) { |
| 1050 | struct folio *folio; |
| 1051 | |
| 1052 | /* |
| 1053 | * Pin the page while holding the lock to be sure the |
| 1054 | * page isn't freed under us |
| 1055 | */ |
| 1056 | spin_lock(src_ptl); |
Ryan Roberts | 56ae10c | 2024-01-23 14:17:55 +0000 | [diff] [blame^] | 1057 | if (!pte_same(orig_src_pte, ptep_get(src_pte))) { |
Andrea Arcangeli | adef440 | 2023-12-06 02:36:56 -0800 | [diff] [blame] | 1058 | spin_unlock(src_ptl); |
| 1059 | err = -EAGAIN; |
| 1060 | goto out; |
| 1061 | } |
| 1062 | |
| 1063 | folio = vm_normal_folio(src_vma, src_addr, orig_src_pte); |
| 1064 | if (!folio || !PageAnonExclusive(&folio->page)) { |
| 1065 | spin_unlock(src_ptl); |
| 1066 | err = -EBUSY; |
| 1067 | goto out; |
| 1068 | } |
| 1069 | |
| 1070 | folio_get(folio); |
| 1071 | src_folio = folio; |
| 1072 | src_folio_pte = orig_src_pte; |
| 1073 | spin_unlock(src_ptl); |
| 1074 | |
| 1075 | if (!folio_trylock(src_folio)) { |
| 1076 | pte_unmap(&orig_src_pte); |
| 1077 | pte_unmap(&orig_dst_pte); |
| 1078 | src_pte = dst_pte = NULL; |
| 1079 | /* now we can block and wait */ |
| 1080 | folio_lock(src_folio); |
| 1081 | goto retry; |
| 1082 | } |
| 1083 | |
| 1084 | if (WARN_ON_ONCE(!folio_test_anon(src_folio))) { |
| 1085 | err = -EBUSY; |
| 1086 | goto out; |
| 1087 | } |
| 1088 | } |
| 1089 | |
| 1090 | /* at this point we have src_folio locked */ |
| 1091 | if (folio_test_large(src_folio)) { |
Suren Baghdasaryan | 982ae05 | 2024-01-02 15:32:56 -0800 | [diff] [blame] | 1092 | /* split_folio() can block */ |
| 1093 | pte_unmap(&orig_src_pte); |
| 1094 | pte_unmap(&orig_dst_pte); |
| 1095 | src_pte = dst_pte = NULL; |
Andrea Arcangeli | adef440 | 2023-12-06 02:36:56 -0800 | [diff] [blame] | 1096 | err = split_folio(src_folio); |
| 1097 | if (err) |
| 1098 | goto out; |
Suren Baghdasaryan | 982ae05 | 2024-01-02 15:32:56 -0800 | [diff] [blame] | 1099 | /* have to reacquire the folio after it got split */ |
| 1100 | folio_unlock(src_folio); |
| 1101 | folio_put(src_folio); |
| 1102 | src_folio = NULL; |
| 1103 | goto retry; |
Andrea Arcangeli | adef440 | 2023-12-06 02:36:56 -0800 | [diff] [blame] | 1104 | } |
| 1105 | |
| 1106 | if (!src_anon_vma) { |
| 1107 | /* |
| 1108 | * folio_referenced walks the anon_vma chain |
 | 1109 | * without the folio lock. Serialize against it with |
 | 1110 | * the anon_vma lock; the folio lock is not enough. |
| 1111 | */ |
| 1112 | src_anon_vma = folio_get_anon_vma(src_folio); |
| 1113 | if (!src_anon_vma) { |
| 1114 | /* page was unmapped from under us */ |
| 1115 | err = -EAGAIN; |
| 1116 | goto out; |
| 1117 | } |
| 1118 | if (!anon_vma_trylock_write(src_anon_vma)) { |
| 1119 | pte_unmap(&orig_src_pte); |
| 1120 | pte_unmap(&orig_dst_pte); |
| 1121 | src_pte = dst_pte = NULL; |
| 1122 | /* now we can block and wait */ |
| 1123 | anon_vma_lock_write(src_anon_vma); |
| 1124 | goto retry; |
| 1125 | } |
| 1126 | } |
| 1127 | |
| 1128 | err = move_present_pte(mm, dst_vma, src_vma, |
| 1129 | dst_addr, src_addr, dst_pte, src_pte, |
| 1130 | orig_dst_pte, orig_src_pte, |
| 1131 | dst_ptl, src_ptl, src_folio); |
| 1132 | } else { |
| 1133 | entry = pte_to_swp_entry(orig_src_pte); |
| 1134 | if (non_swap_entry(entry)) { |
| 1135 | if (is_migration_entry(entry)) { |
| 1136 | pte_unmap(&orig_src_pte); |
| 1137 | pte_unmap(&orig_dst_pte); |
| 1138 | src_pte = dst_pte = NULL; |
| 1139 | migration_entry_wait(mm, src_pmd, src_addr); |
| 1140 | err = -EAGAIN; |
| 1141 | } else |
| 1142 | err = -EFAULT; |
| 1143 | goto out; |
| 1144 | } |
| 1145 | |
| 1146 | err = move_swap_pte(mm, dst_addr, src_addr, |
| 1147 | dst_pte, src_pte, |
| 1148 | orig_dst_pte, orig_src_pte, |
| 1149 | dst_ptl, src_ptl); |
| 1150 | } |
| 1151 | |
| 1152 | out: |
| 1153 | if (src_anon_vma) { |
| 1154 | anon_vma_unlock_write(src_anon_vma); |
| 1155 | put_anon_vma(src_anon_vma); |
| 1156 | } |
| 1157 | if (src_folio) { |
| 1158 | folio_unlock(src_folio); |
| 1159 | folio_put(src_folio); |
| 1160 | } |
| 1161 | if (dst_pte) |
| 1162 | pte_unmap(dst_pte); |
| 1163 | if (src_pte) |
| 1164 | pte_unmap(src_pte); |
| 1165 | mmu_notifier_invalidate_range_end(&range); |
| 1166 | |
| 1167 | return err; |
| 1168 | } |
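Throughout move_pages_pte() the same pattern recurs: while the PTEs are mapped (a non-sleeping context), only trylock variants are used on the folio and the anon_vma; on contention the PTE maps are dropped, the lock is taken in a sleepable context, and control jumps back to the retry label so every earlier check is redone against possibly-changed PTEs. A condensed, purely illustrative outline of that shape (not compilable, all names are placeholders):

	/*
	 * retry:
	 *	map the ptes                       (may not block from here on)
	 *	if (!trylock(resource)) {
	 *		unmap the ptes             (blocking is allowed again)
	 *		lock(resource)             (sleep until available)
	 *		goto retry                 (revalidate everything)
	 *	}
	 *	... perform the move, then unlock and unmap ...
	 */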
| 1169 | |
| 1170 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
| 1171 | static inline bool move_splits_huge_pmd(unsigned long dst_addr, |
| 1172 | unsigned long src_addr, |
| 1173 | unsigned long src_end) |
| 1174 | { |
| 1175 | return (src_addr & ~HPAGE_PMD_MASK) || (dst_addr & ~HPAGE_PMD_MASK) || |
| 1176 | src_end - src_addr < HPAGE_PMD_SIZE; |
| 1177 | } |
| 1178 | #else |
| 1179 | static inline bool move_splits_huge_pmd(unsigned long dst_addr, |
| 1180 | unsigned long src_addr, |
| 1181 | unsigned long src_end) |
| 1182 | { |
| 1183 | /* This is unreachable anyway, just to avoid warnings when HPAGE_PMD_SIZE==0 */ |
| 1184 | return false; |
| 1185 | } |
| 1186 | #endif |
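move_splits_huge_pmd() is true whenever a PMD-level move would require a split: either address is not HPAGE_PMD_SIZE aligned, or less than one full PMD of source range remains. A few worked cases, assuming the common x86-64 value HPAGE_PMD_SIZE == 2 MiB (the addresses are made up for illustration):

	move_splits_huge_pmd(0x40000000, 0x80000000, 0x80200000); /* false: both aligned, full 2 MiB left */
	move_splits_huge_pmd(0x40001000, 0x80000000, 0x80200000); /* true:  dst_addr not 2 MiB aligned    */
	move_splits_huge_pmd(0x40000000, 0x80000000, 0x80100000); /* true:  only 1 MiB of source remains  */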
| 1187 | |
| 1188 | static inline bool vma_move_compatible(struct vm_area_struct *vma) |
| 1189 | { |
| 1190 | return !(vma->vm_flags & (VM_PFNMAP | VM_IO | VM_HUGETLB | |
| 1191 | VM_MIXEDMAP | VM_SHADOW_STACK)); |
| 1192 | } |
| 1193 | |
| 1194 | static int validate_move_areas(struct userfaultfd_ctx *ctx, |
| 1195 | struct vm_area_struct *src_vma, |
| 1196 | struct vm_area_struct *dst_vma) |
| 1197 | { |
| 1198 | /* Only allow moving if both have the same access and protection */ |
| 1199 | if ((src_vma->vm_flags & VM_ACCESS_FLAGS) != (dst_vma->vm_flags & VM_ACCESS_FLAGS) || |
| 1200 | pgprot_val(src_vma->vm_page_prot) != pgprot_val(dst_vma->vm_page_prot)) |
| 1201 | return -EINVAL; |
| 1202 | |
| 1203 | /* Only allow moving if both are mlocked or both aren't */ |
| 1204 | if ((src_vma->vm_flags & VM_LOCKED) != (dst_vma->vm_flags & VM_LOCKED)) |
| 1205 | return -EINVAL; |
| 1206 | |
| 1207 | /* |
| 1208 | * For now, we keep it simple and only move between writable VMAs. |
 | 1209 | * Access flags are equal, therefore checking only the source is enough. |
| 1210 | */ |
| 1211 | if (!(src_vma->vm_flags & VM_WRITE)) |
| 1212 | return -EINVAL; |
| 1213 | |
| 1214 | /* Check if vma flags indicate content which can be moved */ |
| 1215 | if (!vma_move_compatible(src_vma) || !vma_move_compatible(dst_vma)) |
| 1216 | return -EINVAL; |
| 1217 | |
| 1218 | /* Ensure dst_vma is registered in uffd we are operating on */ |
| 1219 | if (!dst_vma->vm_userfaultfd_ctx.ctx || |
| 1220 | dst_vma->vm_userfaultfd_ctx.ctx != ctx) |
| 1221 | return -EINVAL; |
| 1222 | |
| 1223 | /* Only allow moving across anonymous vmas */ |
| 1224 | if (!vma_is_anonymous(src_vma) || !vma_is_anonymous(dst_vma)) |
| 1225 | return -EINVAL; |
| 1226 | |
| 1227 | /* |
 | 1228 | * Ensure the dst_vma has an anon_vma or this page |
 | 1229 | * would get a NULL anon_vma when moved into the |
 | 1230 | * dst_vma. |
| 1231 | */ |
| 1232 | if (unlikely(anon_vma_prepare(dst_vma))) |
| 1233 | return -ENOMEM; |
| 1234 | |
| 1235 | return 0; |
| 1236 | } |
| 1237 | |
| 1238 | /** |
| 1239 | * move_pages - move arbitrary anonymous pages of an existing vma |
| 1240 | * @ctx: pointer to the userfaultfd context |
| 1241 | * @mm: the address space to move pages |
| 1242 | * @dst_start: start of the destination virtual memory range |
| 1243 | * @src_start: start of the source virtual memory range |
| 1244 | * @len: length of the virtual memory range |
| 1245 | * @mode: flags from uffdio_move.mode |
| 1246 | * |
| 1247 | * Must be called with mmap_lock held for read. |
| 1248 | * |
 | 1249 | * move_pages() remaps arbitrary anonymous pages atomically with zero |
 | 1250 | * copy. It only works on non-shared anonymous pages because those can |
 | 1251 | * be relocated without generating non-linear anon_vmas in the rmap |
 | 1252 | * code. |
| 1253 | * |
| 1254 | * It provides a zero copy mechanism to handle userspace page faults. |
| 1255 | * The source vma pages should have mapcount == 1, which can be |
| 1256 | * enforced by using madvise(MADV_DONTFORK) on src vma. |
| 1257 | * |
| 1258 | * The thread receiving the page during the userland page fault |
| 1259 | * will receive the faulting page in the source vma through the network, |
 | 1260 | * storage or any other I/O device (MADV_DONTFORK in the source vma |
 | 1261 | * prevents move_pages() from failing with -EBUSY if the process forks |
 | 1262 | * before move_pages() is called), then it will call move_pages() to |
 | 1263 | * map the page at the faulting address in the destination vma. |
| 1264 | * |
| 1265 | * This userfaultfd command works purely via pagetables, so it's the |
| 1266 | * most efficient way to move physical non shared anonymous pages |
| 1267 | * across different virtual addresses. Unlike mremap()/mmap()/munmap() |
| 1268 | * it does not create any new vmas. The mapping in the destination |
| 1269 | * address is atomic. |
| 1270 | * |
| 1271 | * It only works if the vma protection bits are identical from the |
| 1272 | * source and destination vma. |
| 1273 | * |
| 1274 | * It can remap non shared anonymous pages within the same vma too. |
| 1275 | * |
| 1276 | * If the source virtual memory range has any unmapped holes, or if |
| 1277 | * the destination virtual memory range is not a whole unmapped hole, |
| 1278 | * move_pages() will fail respectively with -ENOENT or -EEXIST. This |
| 1279 | * provides a very strict behavior to avoid any chance of memory |
| 1280 | * corruption going unnoticed if there are userland race conditions. |
| 1281 | * Only one thread should resolve the userland page fault at any given |
| 1282 | * time for any given faulting address. This means that if two threads |
| 1283 | * try to both call move_pages() on the same destination address at the |
| 1284 | * same time, the second thread will get an explicit error from this |
| 1285 | * command. |
| 1286 | * |
 | 1287 | * The command retval will return "len" if successful. The command |
| 1288 | * however can be interrupted by fatal signals or errors. If |
| 1289 | * interrupted it will return the number of bytes successfully |
| 1290 | * remapped before the interruption if any, or the negative error if |
| 1291 | * none. It will never return zero. Either it will return an error or |
| 1292 | * an amount of bytes successfully moved. If the retval reports a |
| 1293 | * "short" remap, the move_pages() command should be repeated by |
 | 1294 | * userland with src+retval, dst+retval, len-retval if it wants to know |
| 1295 | * about the error that interrupted it. |
| 1296 | * |
 | 1297 | * The UFFDIO_MOVE_MODE_ALLOW_SRC_HOLES flag can be specified to |
 | 1298 | * prevent -ENOENT errors from materializing if there are holes in the |
 | 1299 | * source virtual range that is being remapped. The holes will be |
 | 1300 | * accounted as successfully remapped in the retval of the |
 | 1301 | * command. This is mostly useful to remap hugepage naturally aligned |
 | 1302 | * virtual regions without knowing whether there are transparent |
 | 1303 | * hugepages in the regions or not, while avoiding the risk of having |
 | 1304 | * to split the hugepmd during the remap. |
| 1305 | * |
| 1306 | * If there's any rmap walk that is taking the anon_vma locks without |
| 1307 | * first obtaining the folio lock (the only current instance is |
| 1308 | * folio_referenced), they will have to verify if the folio->mapping |
| 1309 | * has changed after taking the anon_vma lock. If it changed they |
| 1310 | * should release the lock and retry obtaining a new anon_vma, because |
| 1311 | * it means the anon_vma was changed by move_pages() before the lock |
| 1312 | * could be obtained. This is the only additional complexity added to |
| 1313 | * the rmap code to provide this anonymous page remapping functionality. |
| 1314 | */ |
| 1315 | ssize_t move_pages(struct userfaultfd_ctx *ctx, struct mm_struct *mm, |
| 1316 | unsigned long dst_start, unsigned long src_start, |
| 1317 | unsigned long len, __u64 mode) |
| 1318 | { |
| 1319 | struct vm_area_struct *src_vma, *dst_vma; |
| 1320 | unsigned long src_addr, dst_addr; |
| 1321 | pmd_t *src_pmd, *dst_pmd; |
| 1322 | long err = -EINVAL; |
| 1323 | ssize_t moved = 0; |
| 1324 | |
| 1325 | /* Sanitize the command parameters. */ |
| 1326 | if (WARN_ON_ONCE(src_start & ~PAGE_MASK) || |
| 1327 | WARN_ON_ONCE(dst_start & ~PAGE_MASK) || |
| 1328 | WARN_ON_ONCE(len & ~PAGE_MASK)) |
| 1329 | goto out; |
| 1330 | |
| 1331 | /* Does the address range wrap, or is the span zero-sized? */ |
| 1332 | if (WARN_ON_ONCE(src_start + len <= src_start) || |
| 1333 | WARN_ON_ONCE(dst_start + len <= dst_start)) |
| 1334 | goto out; |
| 1335 | |
| 1336 | /* |
| 1337 | * Make sure the vma is not shared, that the src and dst remap |
| 1338 | * ranges are both valid and fully within a single existing |
| 1339 | * vma. |
| 1340 | */ |
| 1341 | src_vma = find_vma(mm, src_start); |
| 1342 | if (!src_vma || (src_vma->vm_flags & VM_SHARED)) |
| 1343 | goto out; |
| 1344 | if (src_start < src_vma->vm_start || |
| 1345 | src_start + len > src_vma->vm_end) |
| 1346 | goto out; |
| 1347 | |
| 1348 | dst_vma = find_vma(mm, dst_start); |
| 1349 | if (!dst_vma || (dst_vma->vm_flags & VM_SHARED)) |
| 1350 | goto out; |
| 1351 | if (dst_start < dst_vma->vm_start || |
| 1352 | dst_start + len > dst_vma->vm_end) |
| 1353 | goto out; |
| 1354 | |
| 1355 | err = validate_move_areas(ctx, src_vma, dst_vma); |
| 1356 | if (err) |
| 1357 | goto out; |
| 1358 | |
| 1359 | for (src_addr = src_start, dst_addr = dst_start; |
| 1360 | src_addr < src_start + len;) { |
| 1361 | spinlock_t *ptl; |
| 1362 | pmd_t dst_pmdval; |
| 1363 | unsigned long step_size; |
| 1364 | |
| 1365 | /* |
 | 1366 | * Below works because an anonymous area would not have a |
| 1367 | * transparent huge PUD. If file-backed support is added, |
| 1368 | * that case would need to be handled here. |
| 1369 | */ |
| 1370 | src_pmd = mm_find_pmd(mm, src_addr); |
| 1371 | if (unlikely(!src_pmd)) { |
| 1372 | if (!(mode & UFFDIO_MOVE_MODE_ALLOW_SRC_HOLES)) { |
| 1373 | err = -ENOENT; |
| 1374 | break; |
| 1375 | } |
| 1376 | src_pmd = mm_alloc_pmd(mm, src_addr); |
| 1377 | if (unlikely(!src_pmd)) { |
| 1378 | err = -ENOMEM; |
| 1379 | break; |
| 1380 | } |
| 1381 | } |
| 1382 | dst_pmd = mm_alloc_pmd(mm, dst_addr); |
| 1383 | if (unlikely(!dst_pmd)) { |
| 1384 | err = -ENOMEM; |
| 1385 | break; |
| 1386 | } |
| 1387 | |
| 1388 | dst_pmdval = pmdp_get_lockless(dst_pmd); |
| 1389 | /* |
| 1390 | * If the dst_pmd is mapped as THP don't override it and just |
 | 1391 | * be strict. If dst_pmd changes into THP after this check, the |
| 1392 | * move_pages_huge_pmd() will detect the change and retry |
| 1393 | * while move_pages_pte() will detect the change and fail. |
| 1394 | */ |
| 1395 | if (unlikely(pmd_trans_huge(dst_pmdval))) { |
| 1396 | err = -EEXIST; |
| 1397 | break; |
| 1398 | } |
| 1399 | |
| 1400 | ptl = pmd_trans_huge_lock(src_pmd, src_vma); |
| 1401 | if (ptl) { |
| 1402 | if (pmd_devmap(*src_pmd)) { |
| 1403 | spin_unlock(ptl); |
| 1404 | err = -ENOENT; |
| 1405 | break; |
| 1406 | } |
Suren Baghdasaryan | 5d4747a | 2024-01-11 17:39:35 -0800 | [diff] [blame] | 1407 | /* Avoid moving zeropages for now */ |
| 1408 | if (is_huge_zero_pmd(*src_pmd)) { |
| 1409 | spin_unlock(ptl); |
| 1410 | err = -EBUSY; |
| 1411 | break; |
| 1412 | } |
Andrea Arcangeli | adef440 | 2023-12-06 02:36:56 -0800 | [diff] [blame] | 1413 | |
| 1414 | /* Check if we can move the pmd without splitting it. */ |
| 1415 | if (move_splits_huge_pmd(dst_addr, src_addr, src_start + len) || |
| 1416 | !pmd_none(dst_pmdval)) { |
| 1417 | struct folio *folio = pfn_folio(pmd_pfn(*src_pmd)); |
| 1418 | |
| 1419 | if (!folio || !PageAnonExclusive(&folio->page)) { |
| 1420 | spin_unlock(ptl); |
| 1421 | err = -EBUSY; |
| 1422 | break; |
| 1423 | } |
| 1424 | |
| 1425 | spin_unlock(ptl); |
| 1426 | split_huge_pmd(src_vma, src_pmd, src_addr); |
| 1427 | /* The folio will be split by move_pages_pte() */ |
| 1428 | continue; |
| 1429 | } |
| 1430 | |
| 1431 | err = move_pages_huge_pmd(mm, dst_pmd, src_pmd, |
| 1432 | dst_pmdval, dst_vma, src_vma, |
| 1433 | dst_addr, src_addr); |
| 1434 | step_size = HPAGE_PMD_SIZE; |
| 1435 | } else { |
| 1436 | if (pmd_none(*src_pmd)) { |
| 1437 | if (!(mode & UFFDIO_MOVE_MODE_ALLOW_SRC_HOLES)) { |
| 1438 | err = -ENOENT; |
| 1439 | break; |
| 1440 | } |
| 1441 | if (unlikely(__pte_alloc(mm, src_pmd))) { |
| 1442 | err = -ENOMEM; |
| 1443 | break; |
| 1444 | } |
| 1445 | } |
| 1446 | |
| 1447 | if (unlikely(pte_alloc(mm, dst_pmd))) { |
| 1448 | err = -ENOMEM; |
| 1449 | break; |
| 1450 | } |
| 1451 | |
| 1452 | err = move_pages_pte(mm, dst_pmd, src_pmd, |
| 1453 | dst_vma, src_vma, |
| 1454 | dst_addr, src_addr, mode); |
| 1455 | step_size = PAGE_SIZE; |
| 1456 | } |
| 1457 | |
| 1458 | cond_resched(); |
| 1459 | |
| 1460 | if (fatal_signal_pending(current)) { |
| 1461 | /* Do not override an error */ |
| 1462 | if (!err || err == -EAGAIN) |
| 1463 | err = -EINTR; |
| 1464 | break; |
| 1465 | } |
| 1466 | |
| 1467 | if (err) { |
| 1468 | if (err == -EAGAIN) |
| 1469 | continue; |
| 1470 | break; |
| 1471 | } |
| 1472 | |
| 1473 | /* Proceed to the next page */ |
| 1474 | dst_addr += step_size; |
| 1475 | src_addr += step_size; |
| 1476 | moved += step_size; |
| 1477 | } |
| 1478 | |
| 1479 | out: |
| 1480 | VM_WARN_ON(moved < 0); |
| 1481 | VM_WARN_ON(err > 0); |
| 1482 | VM_WARN_ON(!moved && !err); |
| 1483 | return moved ? moved : err; |
| 1484 | } |
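A hedged userspace sketch of the retry protocol documented above: issue the UFFDIO_MOVE ioctl and, on a short move, advance src, dst and len by the byte count the kernel reports back in uffdio_move.move before retrying. The helper name and the assumption that uffd, dst, src and len are page aligned and properly registered are illustrative, not part of this file:

	#include <errno.h>
	#include <sys/ioctl.h>
	#include <linux/userfaultfd.h>

	/* Illustrative sketch: move a range with UFFDIO_MOVE, retrying short moves. */
	static long move_range(int uffd, unsigned long dst, unsigned long src,
			       unsigned long len)
	{
		while (len) {
			struct uffdio_move mv = {
				.dst  = dst,
				.src  = src,
				.len  = len,
				.mode = UFFDIO_MOVE_MODE_ALLOW_SRC_HOLES,
			};

			if (ioctl(uffd, UFFDIO_MOVE, &mv) == 0)
				return 0;	/* the whole range was moved */
			if (mv.move <= 0) {
				/* Nothing was moved; a real caller may retry on EAGAIN. */
				return -errno;
			}
			/* Short move: skip what was already moved and repeat. */
			dst += mv.move;
			src += mv.move;
			len -= mv.move;
		}
		return 0;
	}

Passing UFFDIO_MOVE_MODE_ALLOW_SRC_HOLES here matches the documentation above: holes in the source count as moved bytes, which avoids -ENOENT and avoids splitting huge pmds when moving hugepage-aligned regions whose contents are unknown.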