// SPDX-License-Identifier: GPL-2.0-only
/*
 *  mm/userfaultfd.c
 *
 *  Copyright (C) 2015  Red Hat, Inc.
 */

#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/userfaultfd_k.h>
#include <linux/mmu_notifier.h>
#include <linux/hugetlb.h>
#include <linux/shmem_fs.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include "internal.h"

static __always_inline
bool validate_dst_vma(struct vm_area_struct *dst_vma, unsigned long dst_end)
{
	/* Make sure that the dst range is fully within dst_vma. */
	if (dst_end > dst_vma->vm_end)
		return false;

	/*
	 * Check that the vma is registered in uffd; this is required to
	 * enforce the VM_MAYWRITE check done at uffd registration time.
	 */
	if (!dst_vma->vm_userfaultfd_ctx.ctx)
		return false;

	return true;
}

static __always_inline
struct vm_area_struct *find_vma_and_prepare_anon(struct mm_struct *mm,
						 unsigned long addr)
{
	struct vm_area_struct *vma;

	mmap_assert_locked(mm);
	vma = vma_lookup(mm, addr);
	if (!vma)
		vma = ERR_PTR(-ENOENT);
	else if (!(vma->vm_flags & VM_SHARED) &&
		 unlikely(anon_vma_prepare(vma)))
		vma = ERR_PTR(-ENOMEM);

	return vma;
}

#ifdef CONFIG_PER_VMA_LOCK
/*
 * uffd_lock_vma() - Look up and lock the vma corresponding to @address.
 * @mm: mm to search the vma in.
 * @address: address that the vma should contain.
 *
 * Should be called without holding mmap_lock.
 *
 * Return: A locked vma containing @address, -ENOENT if no vma is found, or
 * -ENOMEM if the anon_vma couldn't be allocated.
 */
static struct vm_area_struct *uffd_lock_vma(struct mm_struct *mm,
					    unsigned long address)
{
	struct vm_area_struct *vma;

	vma = lock_vma_under_rcu(mm, address);
	if (vma) {
		/*
		 * We know we're going to need to use anon_vma, so check
		 * that early.
		 */
		if (!(vma->vm_flags & VM_SHARED) && unlikely(!vma->anon_vma))
			vma_end_read(vma);
		else
			return vma;
	}

	mmap_read_lock(mm);
	vma = find_vma_and_prepare_anon(mm, address);
	if (!IS_ERR(vma)) {
		/*
		 * We cannot use vma_start_read() as it may fail due to a
		 * false locked result (see the comment in vma_start_read()).
		 * We can avoid that by directly locking vm_lock under
		 * mmap_lock, which guarantees that nobody can lock the
		 * vma for write (vma_start_write()) under us.
		 */
		down_read(&vma->vm_lock->lock);
	}

	mmap_read_unlock(mm);
	return vma;
}
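
/*
 * Illustrative usage sketch (not part of this file): a caller of
 * uffd_lock_vma() pairs it with vma_end_read(), as uffd_mfill_lock()
 * below does:
 *
 *	vma = uffd_lock_vma(mm, addr);
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 *	// ... operate on the read-locked vma ...
 *	vma_end_read(vma);
 */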

static struct vm_area_struct *uffd_mfill_lock(struct mm_struct *dst_mm,
					      unsigned long dst_start,
					      unsigned long len)
{
	struct vm_area_struct *dst_vma;

	dst_vma = uffd_lock_vma(dst_mm, dst_start);
	if (IS_ERR(dst_vma) || validate_dst_vma(dst_vma, dst_start + len))
		return dst_vma;

	vma_end_read(dst_vma);
	return ERR_PTR(-ENOENT);
}

static void uffd_mfill_unlock(struct vm_area_struct *vma)
{
	vma_end_read(vma);
}

#else

static struct vm_area_struct *uffd_mfill_lock(struct mm_struct *dst_mm,
					      unsigned long dst_start,
					      unsigned long len)
{
	struct vm_area_struct *dst_vma;

	mmap_read_lock(dst_mm);
	dst_vma = find_vma_and_prepare_anon(dst_mm, dst_start);
	if (IS_ERR(dst_vma))
		goto out_unlock;

	if (validate_dst_vma(dst_vma, dst_start + len))
		return dst_vma;

	dst_vma = ERR_PTR(-ENOENT);
out_unlock:
	mmap_read_unlock(dst_mm);
	return dst_vma;
}

static void uffd_mfill_unlock(struct vm_area_struct *vma)
{
	mmap_read_unlock(vma->vm_mm);
}
#endif

/* Check if dst_addr is beyond the file's size. Must be called with ptl held. */
static bool mfill_file_over_size(struct vm_area_struct *dst_vma,
				 unsigned long dst_addr)
{
	struct inode *inode;
	pgoff_t offset, max_off;

	if (!dst_vma->vm_file)
		return false;

	inode = dst_vma->vm_file->f_inode;
	offset = linear_page_index(dst_vma, dst_addr);
	max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
	return offset >= max_off;
}
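
/*
 * Worked example (illustrative, assuming 4 KiB pages): for a file with
 * i_size == 5000 bytes, max_off = DIV_ROUND_UP(5000, 4096) = 2, so page
 * indices 0 and 1 are within the file and any offset >= 2 is over size.
 */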

/*
 * Install PTEs, to map dst_addr (within dst_vma) to page.
 *
 * This function handles both MCOPY_ATOMIC_NORMAL and _CONTINUE for both shmem
 * and anon, and for both shared and private VMAs.
 */
int mfill_atomic_install_pte(pmd_t *dst_pmd,
			     struct vm_area_struct *dst_vma,
			     unsigned long dst_addr, struct page *page,
			     bool newly_allocated, uffd_flags_t flags)
{
	int ret;
	struct mm_struct *dst_mm = dst_vma->vm_mm;
	pte_t _dst_pte, *dst_pte;
	bool writable = dst_vma->vm_flags & VM_WRITE;
	bool vm_shared = dst_vma->vm_flags & VM_SHARED;
	spinlock_t *ptl;
	struct folio *folio = page_folio(page);
	bool page_in_cache = folio_mapping(folio);

	_dst_pte = mk_pte(page, dst_vma->vm_page_prot);
	_dst_pte = pte_mkdirty(_dst_pte);
	if (page_in_cache && !vm_shared)
		writable = false;
	if (writable)
		_dst_pte = pte_mkwrite(_dst_pte, dst_vma);
	if (flags & MFILL_ATOMIC_WP)
		_dst_pte = pte_mkuffd_wp(_dst_pte);

	ret = -EAGAIN;
	dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
	if (!dst_pte)
		goto out;

	if (mfill_file_over_size(dst_vma, dst_addr)) {
		ret = -EFAULT;
		goto out_unlock;
	}

	ret = -EEXIST;
	/*
	 * We allow overwriting a pte marker: consider the case when both
	 * MISSING|WP are registered; we first wr-protect a none pte which
	 * has no page cache page backing it, and then access the page.
	 */
	if (!pte_none_mostly(ptep_get(dst_pte)))
		goto out_unlock;

	if (page_in_cache) {
		/* Usually, cache pages are already added to LRU */
		if (newly_allocated)
			folio_add_lru(folio);
		folio_add_file_rmap_pte(folio, page, dst_vma);
	} else {
		folio_add_new_anon_rmap(folio, dst_vma, dst_addr);
		folio_add_lru_vma(folio, dst_vma);
	}

	/*
	 * Must happen after rmap, as mm_counter() checks mapping (via
	 * PageAnon()), which is set by __page_set_anon_rmap().
	 */
	inc_mm_counter(dst_mm, mm_counter(folio));

	set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);

	/* No need to invalidate - it was non-present before */
	update_mmu_cache(dst_vma, dst_addr, dst_pte);
	ret = 0;
out_unlock:
	pte_unmap_unlock(dst_pte, ptl);
out:
	return ret;
}

static int mfill_atomic_pte_copy(pmd_t *dst_pmd,
				 struct vm_area_struct *dst_vma,
				 unsigned long dst_addr,
				 unsigned long src_addr,
				 uffd_flags_t flags,
				 struct folio **foliop)
{
	void *kaddr;
	int ret;
	struct folio *folio;

	if (!*foliop) {
		ret = -ENOMEM;
		folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, dst_vma,
					dst_addr, false);
		if (!folio)
			goto out;

		kaddr = kmap_local_folio(folio, 0);
		/*
		 * The read mmap_lock is held here. Even though the
		 * mmap_lock is read-recursive, a deadlock is still
		 * possible if a writer has taken a lock. For example:
		 *
		 * process A thread 1 takes read lock on own mmap_lock
		 * process A thread 2 calls mmap, blocks taking write lock
		 * process B thread 1 takes page fault, read lock on own mmap lock
		 * process B thread 2 calls mmap, blocks taking write lock
		 * process A thread 1 blocks taking read lock on process B
		 * process B thread 1 blocks taking read lock on process A
		 *
		 * Disable page faults to prevent this potential deadlock
		 * and retry the copy outside the mmap_lock.
		 */
		pagefault_disable();
		ret = copy_from_user(kaddr, (const void __user *) src_addr,
				     PAGE_SIZE);
		pagefault_enable();
		kunmap_local(kaddr);

		/* fallback to copy_from_user outside mmap_lock */
		if (unlikely(ret)) {
			ret = -ENOENT;
			*foliop = folio;
			/* don't free the page */
			goto out;
		}

		flush_dcache_folio(folio);
	} else {
		folio = *foliop;
		*foliop = NULL;
	}

	/*
	 * The memory barrier inside __folio_mark_uptodate makes sure that
	 * preceding stores to the page contents become visible before
	 * the set_pte_at() write.
	 */
	__folio_mark_uptodate(folio);

	ret = -ENOMEM;
	if (mem_cgroup_charge(folio, dst_vma->vm_mm, GFP_KERNEL))
		goto out_release;

	ret = mfill_atomic_install_pte(dst_pmd, dst_vma, dst_addr,
				       &folio->page, true, flags);
	if (ret)
		goto out_release;
out:
	return ret;
out_release:
	folio_put(folio);
	goto out;
}
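
/*
 * Illustrative only: userspace reaches the copy path above via the
 * UFFDIO_COPY ioctl. A minimal sketch, assuming `uffd` is a userfaultfd
 * file descriptor already registered over the destination range and
 * `fault_addr`, `src_buf` and `page_size` are caller-provided:
 *
 *	struct uffdio_copy copy = {
 *		.dst = (unsigned long) fault_addr & ~(page_size - 1),
 *		.src = (unsigned long) src_buf,
 *		.len = page_size,
 *		.mode = 0,	// or UFFDIO_COPY_MODE_WP for a wr-protected copy
 *	};
 *	if (ioctl(uffd, UFFDIO_COPY, &copy) == -1)
 *		err(EXIT_FAILURE, "UFFDIO_COPY");
 */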

static int mfill_atomic_pte_zeroed_folio(pmd_t *dst_pmd,
					 struct vm_area_struct *dst_vma,
					 unsigned long dst_addr)
{
	struct folio *folio;
	int ret = -ENOMEM;

	folio = vma_alloc_zeroed_movable_folio(dst_vma, dst_addr);
	if (!folio)
		return ret;

	if (mem_cgroup_charge(folio, dst_vma->vm_mm, GFP_KERNEL))
		goto out_put;

	/*
	 * The memory barrier inside __folio_mark_uptodate makes sure that
	 * zeroing out the folio becomes visible before mapping the page
	 * using set_pte_at(). See do_anonymous_page().
	 */
	__folio_mark_uptodate(folio);

	ret = mfill_atomic_install_pte(dst_pmd, dst_vma, dst_addr,
				       &folio->page, true, 0);
	if (ret)
		goto out_put;

	return 0;
out_put:
	folio_put(folio);
	return ret;
}

static int mfill_atomic_pte_zeropage(pmd_t *dst_pmd,
				     struct vm_area_struct *dst_vma,
				     unsigned long dst_addr)
{
	pte_t _dst_pte, *dst_pte;
	spinlock_t *ptl;
	int ret;

	if (mm_forbids_zeropage(dst_vma->vm_mm))
		return mfill_atomic_pte_zeroed_folio(dst_pmd, dst_vma, dst_addr);

	_dst_pte = pte_mkspecial(pfn_pte(my_zero_pfn(dst_addr),
					 dst_vma->vm_page_prot));
	ret = -EAGAIN;
	dst_pte = pte_offset_map_lock(dst_vma->vm_mm, dst_pmd, dst_addr, &ptl);
	if (!dst_pte)
		goto out;
	if (mfill_file_over_size(dst_vma, dst_addr)) {
		ret = -EFAULT;
		goto out_unlock;
	}
	ret = -EEXIST;
	if (!pte_none(ptep_get(dst_pte)))
		goto out_unlock;
	set_pte_at(dst_vma->vm_mm, dst_addr, dst_pte, _dst_pte);
	/* No need to invalidate - it was non-present before */
	update_mmu_cache(dst_vma, dst_addr, dst_pte);
	ret = 0;
out_unlock:
	pte_unmap_unlock(dst_pte, ptl);
out:
	return ret;
}
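
/*
 * Illustrative only: the zeropage path above is driven from userspace by
 * the UFFDIO_ZEROPAGE ioctl. A minimal sketch, with the same assumed
 * `uffd`/`fault_addr`/`page_size` setup as the UFFDIO_COPY example:
 *
 *	struct uffdio_zeropage zp = {
 *		.range = { .start = fault_addr & ~(page_size - 1),
 *			   .len = page_size },
 *		.mode = 0,
 *	};
 *	if (ioctl(uffd, UFFDIO_ZEROPAGE, &zp) == -1)
 *		err(EXIT_FAILURE, "UFFDIO_ZEROPAGE");
 */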
| 381 | |
Axel Rasmussen | 1531325 | 2021-06-30 18:49:24 -0700 | [diff] [blame] | 382 | /* Handles UFFDIO_CONTINUE for all shmem VMAs (shared or private). */ |
Axel Rasmussen | 61c5004 | 2023-03-14 15:12:48 -0700 | [diff] [blame] | 383 | static int mfill_atomic_pte_continue(pmd_t *dst_pmd, |
Axel Rasmussen | a734991 | 2023-03-14 15:12:47 -0700 | [diff] [blame] | 384 | struct vm_area_struct *dst_vma, |
| 385 | unsigned long dst_addr, |
Axel Rasmussen | d971293 | 2023-03-14 15:12:49 -0700 | [diff] [blame] | 386 | uffd_flags_t flags) |
Axel Rasmussen | 1531325 | 2021-06-30 18:49:24 -0700 | [diff] [blame] | 387 | { |
| 388 | struct inode *inode = file_inode(dst_vma->vm_file); |
| 389 | pgoff_t pgoff = linear_page_index(dst_vma, dst_addr); |
Matthew Wilcox (Oracle) | 12acf4f | 2022-09-02 20:46:28 +0100 | [diff] [blame] | 390 | struct folio *folio; |
Axel Rasmussen | 1531325 | 2021-06-30 18:49:24 -0700 | [diff] [blame] | 391 | struct page *page; |
| 392 | int ret; |
| 393 | |
Matthew Wilcox (Oracle) | 12acf4f | 2022-09-02 20:46:28 +0100 | [diff] [blame] | 394 | ret = shmem_get_folio(inode, pgoff, &folio, SGP_NOALLOC); |
| 395 | /* Our caller expects us to return -EFAULT if we failed to find folio */ |
Axel Rasmussen | 73f37db | 2022-06-10 10:38:12 -0700 | [diff] [blame] | 396 | if (ret == -ENOENT) |
| 397 | ret = -EFAULT; |
Axel Rasmussen | 1531325 | 2021-06-30 18:49:24 -0700 | [diff] [blame] | 398 | if (ret) |
| 399 | goto out; |
Matthew Wilcox (Oracle) | 12acf4f | 2022-09-02 20:46:28 +0100 | [diff] [blame] | 400 | if (!folio) { |
Axel Rasmussen | 1531325 | 2021-06-30 18:49:24 -0700 | [diff] [blame] | 401 | ret = -EFAULT; |
| 402 | goto out; |
| 403 | } |
| 404 | |
Matthew Wilcox (Oracle) | 12acf4f | 2022-09-02 20:46:28 +0100 | [diff] [blame] | 405 | page = folio_file_page(folio, pgoff); |
Yang Shi | a760542 | 2022-01-14 14:05:19 -0800 | [diff] [blame] | 406 | if (PageHWPoison(page)) { |
| 407 | ret = -EIO; |
| 408 | goto out_release; |
| 409 | } |
| 410 | |
Axel Rasmussen | 61c5004 | 2023-03-14 15:12:48 -0700 | [diff] [blame] | 411 | ret = mfill_atomic_install_pte(dst_pmd, dst_vma, dst_addr, |
Axel Rasmussen | d971293 | 2023-03-14 15:12:49 -0700 | [diff] [blame] | 412 | page, false, flags); |
Axel Rasmussen | 1531325 | 2021-06-30 18:49:24 -0700 | [diff] [blame] | 413 | if (ret) |
| 414 | goto out_release; |
| 415 | |
Matthew Wilcox (Oracle) | 12acf4f | 2022-09-02 20:46:28 +0100 | [diff] [blame] | 416 | folio_unlock(folio); |
Axel Rasmussen | 1531325 | 2021-06-30 18:49:24 -0700 | [diff] [blame] | 417 | ret = 0; |
| 418 | out: |
| 419 | return ret; |
| 420 | out_release: |
Matthew Wilcox (Oracle) | 12acf4f | 2022-09-02 20:46:28 +0100 | [diff] [blame] | 421 | folio_unlock(folio); |
| 422 | folio_put(folio); |
Axel Rasmussen | 1531325 | 2021-06-30 18:49:24 -0700 | [diff] [blame] | 423 | goto out; |
| 424 | } |
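
/*
 * Illustrative only: this path resolves minor faults. A userspace sketch,
 * assuming the range was registered with UFFDIO_REGISTER_MODE_MINOR and
 * the page contents were already populated through a second mapping:
 *
 *	struct uffdio_continue cont = {
 *		.range = { .start = fault_addr & ~(page_size - 1),
 *			   .len = page_size },
 *		.mode = 0,
 *	};
 *	if (ioctl(uffd, UFFDIO_CONTINUE, &cont) == -1)
 *		err(EXIT_FAILURE, "UFFDIO_CONTINUE");
 */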

/* Handles UFFDIO_POISON for all non-hugetlb VMAs. */
static int mfill_atomic_pte_poison(pmd_t *dst_pmd,
				   struct vm_area_struct *dst_vma,
				   unsigned long dst_addr,
				   uffd_flags_t flags)
{
	int ret;
	struct mm_struct *dst_mm = dst_vma->vm_mm;
	pte_t _dst_pte, *dst_pte;
	spinlock_t *ptl;

	_dst_pte = make_pte_marker(PTE_MARKER_POISONED);
	ret = -EAGAIN;
	dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
	if (!dst_pte)
		goto out;

	if (mfill_file_over_size(dst_vma, dst_addr)) {
		ret = -EFAULT;
		goto out_unlock;
	}

	ret = -EEXIST;
	/* Refuse to overwrite any PTE, even a PTE marker (e.g. UFFD WP). */
	if (!pte_none(ptep_get(dst_pte)))
		goto out_unlock;

	set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);

	/* No need to invalidate - it was non-present before */
	update_mmu_cache(dst_vma, dst_addr, dst_pte);
	ret = 0;
out_unlock:
	pte_unmap_unlock(dst_pte, ptl);
out:
	return ret;
}
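
/*
 * Illustrative only: userspace installs such poison markers with the
 * UFFDIO_POISON ioctl; subsequent accesses to the range then raise
 * SIGBUS. A minimal sketch, with the same assumed setup as above:
 *
 *	struct uffdio_poison poison = {
 *		.range = { .start = fault_addr & ~(page_size - 1),
 *			   .len = page_size },
 *		.mode = 0,
 *	};
 *	if (ioctl(uffd, UFFDIO_POISON, &poison) == -1)
 *		err(EXIT_FAILURE, "UFFDIO_POISON");
 */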

static pmd_t *mm_alloc_pmd(struct mm_struct *mm, unsigned long address)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;

	pgd = pgd_offset(mm, address);
	p4d = p4d_alloc(mm, pgd, address);
	if (!p4d)
		return NULL;
	pud = pud_alloc(mm, p4d, address);
	if (!pud)
		return NULL;
	/*
	 * Note that we are not called here because the pmd was known to
	 * be missing: *pmd may already be established, and in turn it may
	 * also be a trans_huge_pmd.
	 */
	return pmd_alloc(mm, pud, address);
}

#ifdef CONFIG_HUGETLB_PAGE
/*
 * mfill_atomic processing for HUGETLB vmas. Note that this routine is
 * called with either the vma lock or mmap_lock held; it releases the
 * lock before returning.
 */
static __always_inline ssize_t mfill_atomic_hugetlb(
					      struct userfaultfd_ctx *ctx,
					      struct vm_area_struct *dst_vma,
					      unsigned long dst_start,
					      unsigned long src_start,
					      unsigned long len,
					      uffd_flags_t flags)
{
	struct mm_struct *dst_mm = dst_vma->vm_mm;
	ssize_t err;
	pte_t *dst_pte;
	unsigned long src_addr, dst_addr;
	long copied;
	struct folio *folio;
	unsigned long vma_hpagesize;
	pgoff_t idx;
	u32 hash;
	struct address_space *mapping;

	/*
	 * There is no default zero huge page for all huge page sizes
	 * supported by hugetlb. PMD_SIZE huge pages may exist, as used
	 * by THP, but since we cannot reliably insert a zero page this
	 * feature is not supported.
	 */
	if (uffd_flags_mode_is(flags, MFILL_ATOMIC_ZEROPAGE)) {
		up_read(&ctx->map_changing_lock);
		uffd_mfill_unlock(dst_vma);
		return -EINVAL;
	}

	src_addr = src_start;
	dst_addr = dst_start;
	copied = 0;
	folio = NULL;
	vma_hpagesize = vma_kernel_pagesize(dst_vma);

	/*
	 * Validate alignment based on the huge page size
	 */
	err = -EINVAL;
	if (dst_start & (vma_hpagesize - 1) || len & (vma_hpagesize - 1))
		goto out_unlock;

retry:
	/*
	 * On routine entry dst_vma is set. If we had to drop mmap_lock and
	 * retry, dst_vma will be set to NULL and we must look it up again.
	 */
	if (!dst_vma) {
		dst_vma = uffd_mfill_lock(dst_mm, dst_start, len);
		if (IS_ERR(dst_vma)) {
			err = PTR_ERR(dst_vma);
			goto out;
		}

		err = -ENOENT;
		if (!is_vm_hugetlb_page(dst_vma))
			goto out_unlock_vma;

		err = -EINVAL;
		if (vma_hpagesize != vma_kernel_pagesize(dst_vma))
			goto out_unlock_vma;

		/*
		 * If memory mappings are changing because of a non-cooperative
		 * operation (e.g. mremap) running in parallel, bail out and
		 * request the user to retry later
		 */
		down_read(&ctx->map_changing_lock);
		err = -EAGAIN;
		if (atomic_read(&ctx->mmap_changing))
			goto out_unlock;
	}

	while (src_addr < src_start + len) {
		BUG_ON(dst_addr >= dst_start + len);

		/*
		 * Serialize via vma_lock and hugetlb_fault_mutex.
		 * vma_lock ensures the dst_pte remains valid even
		 * in the case of shared pmds. The fault mutex prevents
		 * races with other faulting threads.
		 */
		idx = linear_page_index(dst_vma, dst_addr);
		mapping = dst_vma->vm_file->f_mapping;
		hash = hugetlb_fault_mutex_hash(mapping, idx);
		mutex_lock(&hugetlb_fault_mutex_table[hash]);
		hugetlb_vma_lock_read(dst_vma);

		err = -ENOMEM;
		dst_pte = huge_pte_alloc(dst_mm, dst_vma, dst_addr, vma_hpagesize);
		if (!dst_pte) {
			hugetlb_vma_unlock_read(dst_vma);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			goto out_unlock;
		}

		if (!uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE) &&
		    !huge_pte_none_mostly(huge_ptep_get(dst_pte))) {
			err = -EEXIST;
			hugetlb_vma_unlock_read(dst_vma);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			goto out_unlock;
		}

		err = hugetlb_mfill_atomic_pte(dst_pte, dst_vma, dst_addr,
					       src_addr, flags, &folio);

		hugetlb_vma_unlock_read(dst_vma);
		mutex_unlock(&hugetlb_fault_mutex_table[hash]);

		cond_resched();

		if (unlikely(err == -ENOENT)) {
			up_read(&ctx->map_changing_lock);
			uffd_mfill_unlock(dst_vma);
			BUG_ON(!folio);

			err = copy_folio_from_user(folio,
						   (const void __user *)src_addr, true);
			if (unlikely(err)) {
				err = -EFAULT;
				goto out;
			}

			dst_vma = NULL;
			goto retry;
		} else
			BUG_ON(folio);

		if (!err) {
			dst_addr += vma_hpagesize;
			src_addr += vma_hpagesize;
			copied += vma_hpagesize;

			if (fatal_signal_pending(current))
				err = -EINTR;
		}
		if (err)
			break;
	}

out_unlock:
	up_read(&ctx->map_changing_lock);
out_unlock_vma:
	uffd_mfill_unlock(dst_vma);
out:
	if (folio)
		folio_put(folio);
	BUG_ON(copied < 0);
	BUG_ON(err > 0);
	BUG_ON(!copied && !err);
	return copied ? copied : err;
}
#else /* !CONFIG_HUGETLB_PAGE */
/* fail at build time if gcc attempts to use this */
extern ssize_t mfill_atomic_hugetlb(struct userfaultfd_ctx *ctx,
				    struct vm_area_struct *dst_vma,
				    unsigned long dst_start,
				    unsigned long src_start,
				    unsigned long len,
				    uffd_flags_t flags);
#endif /* CONFIG_HUGETLB_PAGE */
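
/*
 * Illustrative only: the alignment check in mfill_atomic_hugetlb() means
 * that, e.g., with 2 MiB hugetlb pages (vma_hpagesize == 0x200000), both
 * dst_start and len must be multiples of 0x200000 or the fill fails
 * with -EINVAL.
 */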

static __always_inline ssize_t mfill_atomic_pte(pmd_t *dst_pmd,
						struct vm_area_struct *dst_vma,
						unsigned long dst_addr,
						unsigned long src_addr,
						uffd_flags_t flags,
						struct folio **foliop)
{
	ssize_t err;

	if (uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE)) {
		return mfill_atomic_pte_continue(dst_pmd, dst_vma,
						 dst_addr, flags);
	} else if (uffd_flags_mode_is(flags, MFILL_ATOMIC_POISON)) {
		return mfill_atomic_pte_poison(dst_pmd, dst_vma,
					       dst_addr, flags);
	}

	/*
	 * The normal page fault path for a shmem mapping will invoke the
	 * fault, fill the hole in the file and COW it right away. The
	 * result generates plain anonymous memory. So when we are
	 * asked to fill a hole in a MAP_PRIVATE shmem mapping, we'll
	 * generate anonymous memory directly without actually filling
	 * the hole. For the MAP_PRIVATE case the robustness check
	 * only happens in the pagetable (to verify it's still none)
	 * and not in the radix tree.
	 */
	if (!(dst_vma->vm_flags & VM_SHARED)) {
		if (uffd_flags_mode_is(flags, MFILL_ATOMIC_COPY))
			err = mfill_atomic_pte_copy(dst_pmd, dst_vma,
						    dst_addr, src_addr,
						    flags, foliop);
		else
			err = mfill_atomic_pte_zeropage(dst_pmd,
							dst_vma, dst_addr);
	} else {
		err = shmem_mfill_atomic_pte(dst_pmd, dst_vma,
					     dst_addr, src_addr,
					     flags, foliop);
	}

	return err;
}
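
/*
 * Illustrative only: the dispatch above keys off the mode bits packed
 * into a single uffd_flags_t. A hedged sketch of how a caller builds
 * them (see the real definitions in linux/userfaultfd_k.h), mirroring
 * what mfill_atomic_copy() below does:
 *
 *	uffd_flags_t flags = uffd_flags_set_mode(0, MFILL_ATOMIC_COPY);
 *	flags |= MFILL_ATOMIC_WP;	// modifier: install wr-protected ptes
 *	err = mfill_atomic_pte(dst_pmd, dst_vma, dst_addr, src_addr,
 *			       flags, &folio);
 */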

static __always_inline ssize_t mfill_atomic(struct userfaultfd_ctx *ctx,
					    unsigned long dst_start,
					    unsigned long src_start,
					    unsigned long len,
					    uffd_flags_t flags)
{
	struct mm_struct *dst_mm = ctx->mm;
	struct vm_area_struct *dst_vma;
	ssize_t err;
	pmd_t *dst_pmd;
	unsigned long src_addr, dst_addr;
	long copied;
	struct folio *folio;

	/*
	 * Sanitize the command parameters:
	 */
	BUG_ON(dst_start & ~PAGE_MASK);
	BUG_ON(len & ~PAGE_MASK);

	/* Does the address range wrap, or is the span zero-sized? */
	BUG_ON(src_start + len <= src_start);
	BUG_ON(dst_start + len <= dst_start);

	src_addr = src_start;
	dst_addr = dst_start;
	copied = 0;
	folio = NULL;
retry:
	/*
	 * Make sure the vma is not shared, and that the dst range is
	 * both valid and fully within a single existing vma.
	 */
	dst_vma = uffd_mfill_lock(dst_mm, dst_start, len);
	if (IS_ERR(dst_vma)) {
		err = PTR_ERR(dst_vma);
		goto out;
	}

	/*
	 * If memory mappings are changing because of a non-cooperative
	 * operation (e.g. mremap) running in parallel, bail out and
	 * request the user to retry later
	 */
	down_read(&ctx->map_changing_lock);
	err = -EAGAIN;
	if (atomic_read(&ctx->mmap_changing))
		goto out_unlock;

	err = -EINVAL;
	/*
	 * shmem_zero_setup is invoked in mmap for MAP_ANONYMOUS|MAP_SHARED but
	 * it will overwrite vm_ops, so vma_is_anonymous must return false.
	 */
	if (WARN_ON_ONCE(vma_is_anonymous(dst_vma) &&
	    dst_vma->vm_flags & VM_SHARED))
		goto out_unlock;

	/*
	 * Validate 'mode' now that we know the dst_vma: don't allow
	 * a wrprotect copy if the userfaultfd didn't register as WP.
	 */
	if ((flags & MFILL_ATOMIC_WP) && !(dst_vma->vm_flags & VM_UFFD_WP))
		goto out_unlock;

	/*
	 * If this is a HUGETLB vma, pass off to the appropriate routine
	 */
	if (is_vm_hugetlb_page(dst_vma))
		return mfill_atomic_hugetlb(ctx, dst_vma, dst_start,
					    src_start, len, flags);

	if (!vma_is_anonymous(dst_vma) && !vma_is_shmem(dst_vma))
		goto out_unlock;
	if (!vma_is_shmem(dst_vma) &&
	    uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE))
		goto out_unlock;

	while (src_addr < src_start + len) {
		pmd_t dst_pmdval;

		BUG_ON(dst_addr >= dst_start + len);

		dst_pmd = mm_alloc_pmd(dst_mm, dst_addr);
		if (unlikely(!dst_pmd)) {
			err = -ENOMEM;
			break;
		}

		dst_pmdval = pmdp_get_lockless(dst_pmd);
		/*
		 * If the dst_pmd is mapped as THP don't
		 * override it and just be strict.
		 */
		if (unlikely(pmd_trans_huge(dst_pmdval))) {
			err = -EEXIST;
			break;
		}
		if (unlikely(pmd_none(dst_pmdval)) &&
		    unlikely(__pte_alloc(dst_mm, dst_pmd))) {
			err = -ENOMEM;
			break;
		}
		/* If a huge pmd materialized from under us, fail */
		if (unlikely(pmd_trans_huge(*dst_pmd))) {
			err = -EFAULT;
			break;
		}

		BUG_ON(pmd_none(*dst_pmd));
		BUG_ON(pmd_trans_huge(*dst_pmd));

		err = mfill_atomic_pte(dst_pmd, dst_vma, dst_addr,
				       src_addr, flags, &folio);
		cond_resched();

		if (unlikely(err == -ENOENT)) {
			void *kaddr;

			up_read(&ctx->map_changing_lock);
			uffd_mfill_unlock(dst_vma);
			BUG_ON(!folio);

			kaddr = kmap_local_folio(folio, 0);
			err = copy_from_user(kaddr,
					     (const void __user *) src_addr,
					     PAGE_SIZE);
			kunmap_local(kaddr);
			if (unlikely(err)) {
				err = -EFAULT;
				goto out;
			}
			flush_dcache_folio(folio);
			goto retry;
		} else
			BUG_ON(folio);

		if (!err) {
			dst_addr += PAGE_SIZE;
			src_addr += PAGE_SIZE;
			copied += PAGE_SIZE;

			if (fatal_signal_pending(current))
				err = -EINTR;
		}
		if (err)
			break;
	}

out_unlock:
	up_read(&ctx->map_changing_lock);
	uffd_mfill_unlock(dst_vma);
out:
	if (folio)
		folio_put(folio);
	BUG_ON(copied < 0);
	BUG_ON(err > 0);
	BUG_ON(!copied && !err);
	return copied ? copied : err;
}

ssize_t mfill_atomic_copy(struct userfaultfd_ctx *ctx, unsigned long dst_start,
			  unsigned long src_start, unsigned long len,
			  uffd_flags_t flags)
{
	return mfill_atomic(ctx, dst_start, src_start, len,
			    uffd_flags_set_mode(flags, MFILL_ATOMIC_COPY));
}

ssize_t mfill_atomic_zeropage(struct userfaultfd_ctx *ctx,
			      unsigned long start,
			      unsigned long len)
{
	return mfill_atomic(ctx, start, 0, len,
			    uffd_flags_set_mode(0, MFILL_ATOMIC_ZEROPAGE));
}

ssize_t mfill_atomic_continue(struct userfaultfd_ctx *ctx, unsigned long start,
			      unsigned long len, uffd_flags_t flags)
{
	/*
	 * A caller might reasonably assume that UFFDIO_CONTINUE contains an
	 * smp_wmb() to ensure that any writes to the about-to-be-mapped page by
	 * the thread doing the UFFDIO_CONTINUE are guaranteed to be visible to
	 * subsequent loads from the page through the newly mapped address range.
	 */
	smp_wmb();

	return mfill_atomic(ctx, start, 0, len,
			    uffd_flags_set_mode(flags, MFILL_ATOMIC_CONTINUE));
}
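
/*
 * Illustrative only: the barrier above pairs with the minor-fault flow in
 * userspace. A hedged sketch of the expected order of operations, where
 * `alias_addr` is an assumed second, non-registered mapping of the same
 * page and `cont` is a struct uffdio_continue as in the earlier example:
 *
 *	// 1. populate the contents through the second mapping
 *	memcpy(alias_addr, data, page_size);
 *	// 2. map the now-populated page at the faulting address;
 *	//    UFFDIO_CONTINUE orders the memcpy before the PTE install
 *	ioctl(uffd, UFFDIO_CONTINUE, &cont);
 */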

ssize_t mfill_atomic_poison(struct userfaultfd_ctx *ctx, unsigned long start,
			    unsigned long len, uffd_flags_t flags)
{
	return mfill_atomic(ctx, start, 0, len,
			    uffd_flags_set_mode(flags, MFILL_ATOMIC_POISON));
}

long uffd_wp_range(struct vm_area_struct *dst_vma,
		   unsigned long start, unsigned long len, bool enable_wp)
{
	unsigned int mm_cp_flags;
	struct mmu_gather tlb;
	long ret;

	VM_WARN_ONCE(start < dst_vma->vm_start || start + len > dst_vma->vm_end,
			"The address range exceeds VMA boundary.\n");
	if (enable_wp)
		mm_cp_flags = MM_CP_UFFD_WP;
	else
		mm_cp_flags = MM_CP_UFFD_WP_RESOLVE;

	/*
	 * vma->vm_page_prot already reflects that uffd-wp is enabled for this
	 * VMA (see userfaultfd_set_vm_flags()) and that all PTEs are supposed
	 * to be write-protected as default whenever protection changes.
	 * Try upgrading write permissions manually.
	 */
	if (!enable_wp && vma_wants_manual_pte_write_upgrade(dst_vma))
		mm_cp_flags |= MM_CP_TRY_CHANGE_WRITABLE;
	tlb_gather_mmu(&tlb, dst_vma->vm_mm);
	ret = change_protection(&tlb, dst_vma, start, start + len, mm_cp_flags);
	tlb_finish_mmu(&tlb);

	return ret;
}
| 928 | |
Lokesh Gidra | 5e4c24a | 2024-02-15 10:27:54 -0800 | [diff] [blame] | 929 | int mwriteprotect_range(struct userfaultfd_ctx *ctx, unsigned long start, |
| 930 | unsigned long len, bool enable_wp) |
Shaohua Li | ffd0579 | 2020-04-06 20:06:09 -0700 | [diff] [blame] | 931 | { |
Lokesh Gidra | 5e4c24a | 2024-02-15 10:27:54 -0800 | [diff] [blame] | 932 | struct mm_struct *dst_mm = ctx->mm; |
Muhammad Usama Anjum | a1b92a3 | 2023-02-17 15:55:58 +0500 | [diff] [blame] | 933 | unsigned long end = start + len; |
| 934 | unsigned long _start, _end; |
Shaohua Li | ffd0579 | 2020-04-06 20:06:09 -0700 | [diff] [blame] | 935 | struct vm_area_struct *dst_vma; |
Peter Xu | 5a90d5a | 2022-05-12 20:22:54 -0700 | [diff] [blame] | 936 | unsigned long page_mask; |
Peter Xu | d175111 | 2023-01-04 17:52:07 -0500 | [diff] [blame] | 937 | long err; |
Muhammad Usama Anjum | a1b92a3 | 2023-02-17 15:55:58 +0500 | [diff] [blame] | 938 | VMA_ITERATOR(vmi, dst_mm, start); |
Shaohua Li | ffd0579 | 2020-04-06 20:06:09 -0700 | [diff] [blame] | 939 | |
| 940 | /* |
| 941 | * Sanitize the command parameters: |
| 942 | */ |
| 943 | BUG_ON(start & ~PAGE_MASK); |
| 944 | BUG_ON(len & ~PAGE_MASK); |
| 945 | |
| 946 | /* Does the address range wrap, or is the span zero-sized? */ |
| 947 | BUG_ON(start + len <= start); |
| 948 | |
Michel Lespinasse | d8ed45c | 2020-06-08 21:33:25 -0700 | [diff] [blame] | 949 | mmap_read_lock(dst_mm); |
Shaohua Li | ffd0579 | 2020-04-06 20:06:09 -0700 | [diff] [blame] | 950 | |
| 951 | /* |
| 952 | * If memory mappings are changing because of non-cooperative |
| 953 | * operation (e.g. mremap) running in parallel, bail out and |
| 954 | * request the user to retry later |
| 955 | */ |
Lokesh Gidra | 5e4c24a | 2024-02-15 10:27:54 -0800 | [diff] [blame] | 956 | down_read(&ctx->map_changing_lock); |
Shaohua Li | ffd0579 | 2020-04-06 20:06:09 -0700 | [diff] [blame] | 957 | err = -EAGAIN; |
Lokesh Gidra | 5e4c24a | 2024-02-15 10:27:54 -0800 | [diff] [blame] | 958 | if (atomic_read(&ctx->mmap_changing)) |
Shaohua Li | ffd0579 | 2020-04-06 20:06:09 -0700 | [diff] [blame] | 959 | goto out_unlock; |
| 960 | |
| 961 | err = -ENOENT; |
Muhammad Usama Anjum | a1b92a3 | 2023-02-17 15:55:58 +0500 | [diff] [blame] | 962 | for_each_vma_range(vmi, dst_vma, end) { |
Peter Xu | b1f9e87 | 2022-05-12 20:22:56 -0700 | [diff] [blame] | 963 | |
Muhammad Usama Anjum | a1b92a3 | 2023-02-17 15:55:58 +0500 | [diff] [blame] | 964 | if (!userfaultfd_wp(dst_vma)) { |
| 965 | err = -ENOENT; |
| 966 | break; |
| 967 | } |
Shaohua Li | ffd0579 | 2020-04-06 20:06:09 -0700 | [diff] [blame] | 968 | |
Muhammad Usama Anjum | a1b92a3 | 2023-02-17 15:55:58 +0500 | [diff] [blame] | 969 | if (is_vm_hugetlb_page(dst_vma)) { |
| 970 | err = -EINVAL; |
| 971 | page_mask = vma_kernel_pagesize(dst_vma) - 1; |
| 972 | if ((start & page_mask) || (len & page_mask)) |
| 973 | break; |
| 974 | } |
Peter Xu | 5a90d5a | 2022-05-12 20:22:54 -0700 | [diff] [blame] | 975 | |
Muhammad Usama Anjum | a1b92a3 | 2023-02-17 15:55:58 +0500 | [diff] [blame] | 976 | _start = max(dst_vma->vm_start, start); |
| 977 | _end = min(dst_vma->vm_end, end); |
Shaohua Li | ffd0579 | 2020-04-06 20:06:09 -0700 | [diff] [blame] | 978 | |
Axel Rasmussen | 61c5004 | 2023-03-14 15:12:48 -0700 | [diff] [blame] | 979 | err = uffd_wp_range(dst_vma, _start, _end - _start, enable_wp); |
Muhammad Usama Anjum | a1b92a3 | 2023-02-17 15:55:58 +0500 | [diff] [blame] | 980 | |
| 981 | /* Return 0 on success, <0 on failures */ |
| 982 | if (err < 0) |
| 983 | break; |
Peter Xu | d175111 | 2023-01-04 17:52:07 -0500 | [diff] [blame] | 984 | err = 0; |
Muhammad Usama Anjum | a1b92a3 | 2023-02-17 15:55:58 +0500 | [diff] [blame] | 985 | } |
Shaohua Li | ffd0579 | 2020-04-06 20:06:09 -0700 | [diff] [blame] | 986 | out_unlock: |
Lokesh Gidra | 5e4c24a | 2024-02-15 10:27:54 -0800 | [diff] [blame] | 987 | up_read(&ctx->map_changing_lock); |
Michel Lespinasse | d8ed45c | 2020-06-08 21:33:25 -0700 | [diff] [blame] | 988 | mmap_read_unlock(dst_mm); |
Shaohua Li | ffd0579 | 2020-04-06 20:06:09 -0700 | [diff] [blame] | 989 | return err; |
| 990 | } |
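
/*
 * Illustrative userspace sketch of how mwriteprotect_range() is reached
 * (hypothetical helper, not part of this file): UFFDIO_WRITEPROTECT
 * toggles write protection on a page-aligned range of a region that was
 * registered with UFFDIO_REGISTER_MODE_WP; clearing the mode bit is how a
 * write-protect fault is resolved. A -1/EAGAIN result mirrors the
 * mmap_changing check above and means "retry later".
 */
#include <sys/ioctl.h>
#include <linux/userfaultfd.h>

static int set_wp(int uffd, unsigned long start, unsigned long len, int enable)
{
	struct uffdio_writeprotect wp = {
		.range = { .start = start, .len = len },
		.mode = enable ? UFFDIO_WRITEPROTECT_MODE_WP : 0,
	};

	return ioctl(uffd, UFFDIO_WRITEPROTECT, &wp);
}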
Andrea Arcangeli | adef440 | 2023-12-06 02:36:56 -0800 | [diff] [blame] | 991 | |
| 993 | void double_pt_lock(spinlock_t *ptl1, |
| 994 | spinlock_t *ptl2) |
| 995 | __acquires(ptl1) |
| 996 | __acquires(ptl2) |
| 997 | { |
| 998 | spinlock_t *ptl_tmp; |
| 999 | |
| 1000 | if (ptl1 > ptl2) { |
| 1001 | /* exchange ptl1 and ptl2 */ |
| 1002 | ptl_tmp = ptl1; |
| 1003 | ptl1 = ptl2; |
| 1004 | ptl2 = ptl_tmp; |
| 1005 | } |
| 1006 | /* lock in virtual address order to avoid lock inversion */ |
| 1007 | spin_lock(ptl1); |
| 1008 | if (ptl1 != ptl2) |
| 1009 | spin_lock_nested(ptl2, SINGLE_DEPTH_NESTING); |
| 1010 | else |
| 1011 | __acquire(ptl2); |
| 1012 | } |
| 1013 | |
| 1014 | void double_pt_unlock(spinlock_t *ptl1, |
| 1015 | spinlock_t *ptl2) |
| 1016 | __releases(ptl1) |
| 1017 | __releases(ptl2) |
| 1018 | { |
| 1019 | spin_unlock(ptl1); |
| 1020 | if (ptl1 != ptl2) |
| 1021 | spin_unlock(ptl2); |
| 1022 | else |
| 1023 | __release(ptl2); |
| 1024 | } |
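
/*
 * The two helpers above avoid ABBA deadlocks by always acquiring the pair
 * in a single global (address) order. A minimal pthread analogue of the
 * same discipline, purely illustrative and not part of this file:
 */
#include <pthread.h>

static void double_lock_ordered(pthread_mutex_t *a, pthread_mutex_t *b)
{
	if (a > b) {		/* order by address, as double_pt_lock() does */
		pthread_mutex_t *tmp = a;

		a = b;
		b = tmp;
	}
	pthread_mutex_lock(a);
	if (a != b)		/* src and dst ptes may share one lock */
		pthread_mutex_lock(b);
}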
| 1025 | |
| 1027 | static int move_present_pte(struct mm_struct *mm, |
| 1028 | struct vm_area_struct *dst_vma, |
| 1029 | struct vm_area_struct *src_vma, |
| 1030 | unsigned long dst_addr, unsigned long src_addr, |
| 1031 | pte_t *dst_pte, pte_t *src_pte, |
| 1032 | pte_t orig_dst_pte, pte_t orig_src_pte, |
| 1033 | spinlock_t *dst_ptl, spinlock_t *src_ptl, |
| 1034 | struct folio *src_folio) |
| 1035 | { |
| 1036 | int err = 0; |
| 1037 | |
| 1038 | double_pt_lock(dst_ptl, src_ptl); |
| 1039 | |
Ryan Roberts | 56ae10c | 2024-01-23 14:17:55 +0000 | [diff] [blame] | 1040 | if (!pte_same(ptep_get(src_pte), orig_src_pte) || |
| 1041 | !pte_same(ptep_get(dst_pte), orig_dst_pte)) { |
Andrea Arcangeli | adef440 | 2023-12-06 02:36:56 -0800 | [diff] [blame] | 1042 | err = -EAGAIN; |
| 1043 | goto out; |
| 1044 | } |
| 1045 | if (folio_test_large(src_folio) || |
| 1046 | folio_maybe_dma_pinned(src_folio) || |
| 1047 | !PageAnonExclusive(&src_folio->page)) { |
| 1048 | err = -EBUSY; |
| 1049 | goto out; |
| 1050 | } |
| 1051 | |
Andrea Arcangeli | adef440 | 2023-12-06 02:36:56 -0800 | [diff] [blame] | 1052 | orig_src_pte = ptep_clear_flush(src_vma, src_addr, src_pte); |
| 1053 | /* Folio got pinned from under us. Put it back and fail the move. */ |
| 1054 | if (folio_maybe_dma_pinned(src_folio)) { |
| 1055 | set_pte_at(mm, src_addr, src_pte, orig_src_pte); |
| 1056 | err = -EBUSY; |
| 1057 | goto out; |
| 1058 | } |
| 1059 | |
Qi Zheng | d7a0883 | 2024-02-22 16:08:15 +0800 | [diff] [blame] | 1060 | folio_move_anon_rmap(src_folio, dst_vma); |
Suren Baghdasaryan | b5ba3a6 | 2024-04-14 19:08:21 -0700 | [diff] [blame] | 1061 | src_folio->index = linear_page_index(dst_vma, dst_addr); |
Qi Zheng | d7a0883 | 2024-02-22 16:08:15 +0800 | [diff] [blame] | 1062 | |
Andrea Arcangeli | adef440 | 2023-12-06 02:36:56 -0800 | [diff] [blame] | 1063 | orig_dst_pte = mk_pte(&src_folio->page, dst_vma->vm_page_prot); |
| 1064 | /* Follow mremap() behavior and treat the entry as dirty after the move */ |
| 1065 | orig_dst_pte = pte_mkwrite(pte_mkdirty(orig_dst_pte), dst_vma); |
| 1066 | |
| 1067 | set_pte_at(mm, dst_addr, dst_pte, orig_dst_pte); |
| 1068 | out: |
| 1069 | double_pt_unlock(dst_ptl, src_ptl); |
| 1070 | return err; |
| 1071 | } |
| 1072 | |
| 1073 | static int move_swap_pte(struct mm_struct *mm, |
| 1074 | unsigned long dst_addr, unsigned long src_addr, |
| 1075 | pte_t *dst_pte, pte_t *src_pte, |
| 1076 | pte_t orig_dst_pte, pte_t orig_src_pte, |
| 1077 | spinlock_t *dst_ptl, spinlock_t *src_ptl) |
| 1078 | { |
| 1079 | if (!pte_swp_exclusive(orig_src_pte)) |
| 1080 | return -EBUSY; |
| 1081 | |
| 1082 | double_pt_lock(dst_ptl, src_ptl); |
| 1083 | |
Ryan Roberts | 56ae10c | 2024-01-23 14:17:55 +0000 | [diff] [blame] | 1084 | if (!pte_same(ptep_get(src_pte), orig_src_pte) || |
| 1085 | !pte_same(ptep_get(dst_pte), orig_dst_pte)) { |
Andrea Arcangeli | adef440 | 2023-12-06 02:36:56 -0800 | [diff] [blame] | 1086 | double_pt_unlock(dst_ptl, src_ptl); |
| 1087 | return -EAGAIN; |
| 1088 | } |
| 1089 | |
| 1090 | orig_src_pte = ptep_get_and_clear(mm, src_addr, src_pte); |
| 1091 | set_pte_at(mm, dst_addr, dst_pte, orig_src_pte); |
| 1092 | double_pt_unlock(dst_ptl, src_ptl); |
| 1093 | |
| 1094 | return 0; |
| 1095 | } |
| 1096 | |
Suren Baghdasaryan | eb1521d | 2024-01-31 09:56:18 -0800 | [diff] [blame] | 1097 | static int move_zeropage_pte(struct mm_struct *mm, |
| 1098 | struct vm_area_struct *dst_vma, |
| 1099 | struct vm_area_struct *src_vma, |
| 1100 | unsigned long dst_addr, unsigned long src_addr, |
| 1101 | pte_t *dst_pte, pte_t *src_pte, |
| 1102 | pte_t orig_dst_pte, pte_t orig_src_pte, |
| 1103 | spinlock_t *dst_ptl, spinlock_t *src_ptl) |
| 1104 | { |
| 1105 | pte_t zero_pte; |
| 1106 | |
| 1107 | double_pt_lock(dst_ptl, src_ptl); |
| 1108 | if (!pte_same(ptep_get(src_pte), orig_src_pte) || |
| 1109 | !pte_same(ptep_get(dst_pte), orig_dst_pte)) { |
| 1110 | double_pt_unlock(dst_ptl, src_ptl); |
| 1111 | return -EAGAIN; |
| 1112 | } |
| 1113 | |
| 1114 | zero_pte = pte_mkspecial(pfn_pte(my_zero_pfn(dst_addr), |
| 1115 | dst_vma->vm_page_prot)); |
| 1116 | ptep_clear_flush(src_vma, src_addr, src_pte); |
| 1117 | set_pte_at(mm, dst_addr, dst_pte, zero_pte); |
| 1118 | double_pt_unlock(dst_ptl, src_ptl); |
| 1119 | |
| 1120 | return 0; |
| 1121 | } |
| 1122 | |
Andrea Arcangeli | adef440 | 2023-12-06 02:36:56 -0800 | [diff] [blame] | 1124 | /* |
| 1125 | * The mmap_lock for reading is held by the caller. Just move the page |
| 1126 | * from src_pmd to dst_pmd if possible, and return 0 if the page |
| 1127 | * was moved (a negative errno otherwise). |
| 1128 | */ |
| 1129 | static int move_pages_pte(struct mm_struct *mm, pmd_t *dst_pmd, pmd_t *src_pmd, |
| 1130 | struct vm_area_struct *dst_vma, |
| 1131 | struct vm_area_struct *src_vma, |
| 1132 | unsigned long dst_addr, unsigned long src_addr, |
| 1133 | __u64 mode) |
| 1134 | { |
| 1135 | swp_entry_t entry; |
| 1136 | pte_t orig_src_pte, orig_dst_pte; |
| 1137 | pte_t src_folio_pte; |
| 1138 | spinlock_t *src_ptl, *dst_ptl; |
| 1139 | pte_t *src_pte = NULL; |
| 1140 | pte_t *dst_pte = NULL; |
| 1141 | |
| 1142 | struct folio *src_folio = NULL; |
| 1143 | struct anon_vma *src_anon_vma = NULL; |
| 1144 | struct mmu_notifier_range range; |
| 1145 | int err = 0; |
| 1146 | |
| 1147 | flush_cache_range(src_vma, src_addr, src_addr + PAGE_SIZE); |
| 1148 | mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, |
| 1149 | src_addr, src_addr + PAGE_SIZE); |
| 1150 | mmu_notifier_invalidate_range_start(&range); |
| 1151 | retry: |
| 1152 | dst_pte = pte_offset_map_nolock(mm, dst_pmd, dst_addr, &dst_ptl); |
| 1153 | |
| 1154 | /* Retry if a huge pmd materialized from under us */ |
| 1155 | if (unlikely(!dst_pte)) { |
| 1156 | err = -EAGAIN; |
| 1157 | goto out; |
| 1158 | } |
| 1159 | |
| 1160 | src_pte = pte_offset_map_nolock(mm, src_pmd, src_addr, &src_ptl); |
| 1161 | |
| 1162 | /* |
| 1163 | * We held the mmap_lock for reading so MADV_DONTNEED |
| 1164 | * can zap transparent huge pages under us, or the |
| 1165 | * transparent huge page fault can establish new |
| 1166 | * transparent huge pages under us. |
| 1167 | */ |
| 1168 | if (unlikely(!src_pte)) { |
| 1169 | err = -EAGAIN; |
| 1170 | goto out; |
| 1171 | } |
| 1172 | |
| 1173 | /* Sanity checks before the operation */ |
| 1174 | if (WARN_ON_ONCE(pmd_none(*dst_pmd)) || WARN_ON_ONCE(pmd_none(*src_pmd)) || |
| 1175 | WARN_ON_ONCE(pmd_trans_huge(*dst_pmd)) || WARN_ON_ONCE(pmd_trans_huge(*src_pmd))) { |
| 1176 | err = -EINVAL; |
| 1177 | goto out; |
| 1178 | } |
| 1179 | |
| 1180 | spin_lock(dst_ptl); |
Ryan Roberts | 56ae10c | 2024-01-23 14:17:55 +0000 | [diff] [blame] | 1181 | orig_dst_pte = ptep_get(dst_pte); |
Andrea Arcangeli | adef440 | 2023-12-06 02:36:56 -0800 | [diff] [blame] | 1182 | spin_unlock(dst_ptl); |
| 1183 | if (!pte_none(orig_dst_pte)) { |
| 1184 | err = -EEXIST; |
| 1185 | goto out; |
| 1186 | } |
| 1187 | |
| 1188 | spin_lock(src_ptl); |
Ryan Roberts | 56ae10c | 2024-01-23 14:17:55 +0000 | [diff] [blame] | 1189 | orig_src_pte = ptep_get(src_pte); |
Andrea Arcangeli | adef440 | 2023-12-06 02:36:56 -0800 | [diff] [blame] | 1190 | spin_unlock(src_ptl); |
| 1191 | if (pte_none(orig_src_pte)) { |
| 1192 | if (!(mode & UFFDIO_MOVE_MODE_ALLOW_SRC_HOLES)) |
| 1193 | err = -ENOENT; |
| 1194 | else /* nothing to do to move a hole */ |
| 1195 | err = 0; |
| 1196 | goto out; |
| 1197 | } |
| 1198 | |
| 1199 | /* If PTE changed after we locked the folio then start over */ |
| 1200 | if (src_folio && unlikely(!pte_same(src_folio_pte, orig_src_pte))) { |
| 1201 | err = -EAGAIN; |
| 1202 | goto out; |
| 1203 | } |
| 1204 | |
| 1205 | if (pte_present(orig_src_pte)) { |
Suren Baghdasaryan | eb1521d | 2024-01-31 09:56:18 -0800 | [diff] [blame] | 1206 | if (is_zero_pfn(pte_pfn(orig_src_pte))) { |
| 1207 | err = move_zeropage_pte(mm, dst_vma, src_vma, |
| 1208 | dst_addr, src_addr, dst_pte, src_pte, |
| 1209 | orig_dst_pte, orig_src_pte, |
| 1210 | dst_ptl, src_ptl); |
| 1211 | goto out; |
| 1212 | } |
| 1213 | |
Andrea Arcangeli | adef440 | 2023-12-06 02:36:56 -0800 | [diff] [blame] | 1214 | /* |
| 1215 | * Pin and lock both the source folio and anon_vma. Since we are in |
| 1216 | * an RCU read section, we can't block, so on contention we have to |
| 1217 | * unmap the ptes, obtain the lock and retry. |
| 1218 | */ |
| 1219 | if (!src_folio) { |
| 1220 | struct folio *folio; |
| 1221 | |
| 1222 | /* |
| 1223 | * Pin the page while holding the lock to be sure the |
| 1224 | * page isn't freed under us |
| 1225 | */ |
| 1226 | spin_lock(src_ptl); |
Ryan Roberts | 56ae10c | 2024-01-23 14:17:55 +0000 | [diff] [blame] | 1227 | if (!pte_same(orig_src_pte, ptep_get(src_pte))) { |
Andrea Arcangeli | adef440 | 2023-12-06 02:36:56 -0800 | [diff] [blame] | 1228 | spin_unlock(src_ptl); |
| 1229 | err = -EAGAIN; |
| 1230 | goto out; |
| 1231 | } |
| 1232 | |
| 1233 | folio = vm_normal_folio(src_vma, src_addr, orig_src_pte); |
| 1234 | if (!folio || !PageAnonExclusive(&folio->page)) { |
| 1235 | spin_unlock(src_ptl); |
| 1236 | err = -EBUSY; |
| 1237 | goto out; |
| 1238 | } |
| 1239 | |
| 1240 | folio_get(folio); |
| 1241 | src_folio = folio; |
| 1242 | src_folio_pte = orig_src_pte; |
| 1243 | spin_unlock(src_ptl); |
| 1244 | |
| 1245 | if (!folio_trylock(src_folio)) { |
| 1246 | pte_unmap(&orig_src_pte); |
| 1247 | pte_unmap(&orig_dst_pte); |
| 1248 | src_pte = dst_pte = NULL; |
| 1249 | /* now we can block and wait */ |
| 1250 | folio_lock(src_folio); |
| 1251 | goto retry; |
| 1252 | } |
| 1253 | |
| 1254 | if (WARN_ON_ONCE(!folio_test_anon(src_folio))) { |
| 1255 | err = -EBUSY; |
| 1256 | goto out; |
| 1257 | } |
| 1258 | } |
| 1259 | |
| 1260 | /* at this point we have src_folio locked */ |
| 1261 | if (folio_test_large(src_folio)) { |
Suren Baghdasaryan | 982ae05 | 2024-01-02 15:32:56 -0800 | [diff] [blame] | 1262 | /* split_folio() can block */ |
| 1263 | pte_unmap(&orig_src_pte); |
| 1264 | pte_unmap(&orig_dst_pte); |
| 1265 | src_pte = dst_pte = NULL; |
Andrea Arcangeli | adef440 | 2023-12-06 02:36:56 -0800 | [diff] [blame] | 1266 | err = split_folio(src_folio); |
| 1267 | if (err) |
| 1268 | goto out; |
Suren Baghdasaryan | 982ae05 | 2024-01-02 15:32:56 -0800 | [diff] [blame] | 1269 | /* have to reacquire the folio after it got split */ |
| 1270 | folio_unlock(src_folio); |
| 1271 | folio_put(src_folio); |
| 1272 | src_folio = NULL; |
| 1273 | goto retry; |
Andrea Arcangeli | adef440 | 2023-12-06 02:36:56 -0800 | [diff] [blame] | 1274 | } |
| 1275 | |
| 1276 | if (!src_anon_vma) { |
| 1277 | /* |
| 1278 | * folio_referenced walks the anon_vma chain |
| 1279 | * without the folio lock. Serialize against it with |
| 1280 | * the anon_vma lock; the folio lock is not enough. |
| 1281 | */ |
| 1282 | src_anon_vma = folio_get_anon_vma(src_folio); |
| 1283 | if (!src_anon_vma) { |
| 1284 | /* page was unmapped from under us */ |
| 1285 | err = -EAGAIN; |
| 1286 | goto out; |
| 1287 | } |
| 1288 | if (!anon_vma_trylock_write(src_anon_vma)) { |
| 1289 | pte_unmap(&orig_src_pte); |
| 1290 | pte_unmap(&orig_dst_pte); |
| 1291 | src_pte = dst_pte = NULL; |
| 1292 | /* now we can block and wait */ |
| 1293 | anon_vma_lock_write(src_anon_vma); |
| 1294 | goto retry; |
| 1295 | } |
| 1296 | } |
| 1297 | |
| 1298 | err = move_present_pte(mm, dst_vma, src_vma, |
| 1299 | dst_addr, src_addr, dst_pte, src_pte, |
| 1300 | orig_dst_pte, orig_src_pte, |
| 1301 | dst_ptl, src_ptl, src_folio); |
| 1302 | } else { |
| 1303 | entry = pte_to_swp_entry(orig_src_pte); |
| 1304 | if (non_swap_entry(entry)) { |
| 1305 | if (is_migration_entry(entry)) { |
| 1306 | pte_unmap(&orig_src_pte); |
| 1307 | pte_unmap(&orig_dst_pte); |
| 1308 | src_pte = dst_pte = NULL; |
| 1309 | migration_entry_wait(mm, src_pmd, src_addr); |
| 1310 | err = -EAGAIN; |
| 1311 | } else |
| 1312 | err = -EFAULT; |
| 1313 | goto out; |
| 1314 | } |
| 1315 | |
| 1316 | err = move_swap_pte(mm, dst_addr, src_addr, |
| 1317 | dst_pte, src_pte, |
| 1318 | orig_dst_pte, orig_src_pte, |
| 1319 | dst_ptl, src_ptl); |
| 1320 | } |
| 1321 | |
| 1322 | out: |
| 1323 | if (src_anon_vma) { |
| 1324 | anon_vma_unlock_write(src_anon_vma); |
| 1325 | put_anon_vma(src_anon_vma); |
| 1326 | } |
| 1327 | if (src_folio) { |
| 1328 | folio_unlock(src_folio); |
| 1329 | folio_put(src_folio); |
| 1330 | } |
| 1331 | if (dst_pte) |
| 1332 | pte_unmap(dst_pte); |
| 1333 | if (src_pte) |
| 1334 | pte_unmap(src_pte); |
| 1335 | mmu_notifier_invalidate_range_end(&range); |
| 1336 | |
| 1337 | return err; |
| 1338 | } |
| 1339 | |
| 1340 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
| 1341 | static inline bool move_splits_huge_pmd(unsigned long dst_addr, |
| 1342 | unsigned long src_addr, |
| 1343 | unsigned long src_end) |
| 1344 | { |
| 1345 | return (src_addr & ~HPAGE_PMD_MASK) || (dst_addr & ~HPAGE_PMD_MASK) || |
| 1346 | src_end - src_addr < HPAGE_PMD_SIZE; |
| 1347 | } |
| 1348 | #else |
| 1349 | static inline bool move_splits_huge_pmd(unsigned long dst_addr, |
| 1350 | unsigned long src_addr, |
| 1351 | unsigned long src_end) |
| 1352 | { |
| 1353 | /* This is unreachable anyway, just to avoid warnings when HPAGE_PMD_SIZE==0 */ |
| 1354 | return false; |
| 1355 | } |
| 1356 | #endif |
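
/*
 * The check above is plain alignment arithmetic. An illustrative
 * standalone version (not part of this file), assuming a 2MiB PMD size:
 * moving src 0x400000..0x600000 to dst 0x500000 would split, because dst
 * is not PMD-aligned even though src is.
 */
#include <stdbool.h>

#define EXAMPLE_PMD_SIZE	(2UL << 20)	/* assumed 2MiB huge page */
#define EXAMPLE_PMD_MASK	(~(EXAMPLE_PMD_SIZE - 1))

static bool example_move_splits(unsigned long dst_addr, unsigned long src_addr,
				unsigned long src_end)
{
	return (src_addr & ~EXAMPLE_PMD_MASK) ||	/* src misaligned */
	       (dst_addr & ~EXAMPLE_PMD_MASK) ||	/* dst misaligned */
	       src_end - src_addr < EXAMPLE_PMD_SIZE;	/* tail shorter than one PMD */
}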
| 1357 | |
| 1358 | static inline bool vma_move_compatible(struct vm_area_struct *vma) |
| 1359 | { |
| 1360 | return !(vma->vm_flags & (VM_PFNMAP | VM_IO | VM_HUGETLB | |
| 1361 | VM_MIXEDMAP | VM_SHADOW_STACK)); |
| 1362 | } |
| 1363 | |
| 1364 | static int validate_move_areas(struct userfaultfd_ctx *ctx, |
| 1365 | struct vm_area_struct *src_vma, |
| 1366 | struct vm_area_struct *dst_vma) |
| 1367 | { |
| 1368 | /* Only allow moving if both have the same access and protection */ |
| 1369 | if ((src_vma->vm_flags & VM_ACCESS_FLAGS) != (dst_vma->vm_flags & VM_ACCESS_FLAGS) || |
| 1370 | pgprot_val(src_vma->vm_page_prot) != pgprot_val(dst_vma->vm_page_prot)) |
| 1371 | return -EINVAL; |
| 1372 | |
| 1373 | /* Only allow moving if both are mlocked or both aren't */ |
| 1374 | if ((src_vma->vm_flags & VM_LOCKED) != (dst_vma->vm_flags & VM_LOCKED)) |
| 1375 | return -EINVAL; |
| 1376 | |
| 1377 | /* |
| 1378 | * For now, we keep it simple and only move between writable VMAs. |
| 1379 | * Access flags are equal, therefore checking only the source is enough. |
| 1380 | */ |
| 1381 | if (!(src_vma->vm_flags & VM_WRITE)) |
| 1382 | return -EINVAL; |
| 1383 | |
| 1384 | /* Check if vma flags indicate content which can be moved */ |
| 1385 | if (!vma_move_compatible(src_vma) || !vma_move_compatible(dst_vma)) |
| 1386 | return -EINVAL; |
| 1387 | |
| 1388 | /* Ensure dst_vma is registered in uffd we are operating on */ |
| 1389 | if (!dst_vma->vm_userfaultfd_ctx.ctx || |
| 1390 | dst_vma->vm_userfaultfd_ctx.ctx != ctx) |
| 1391 | return -EINVAL; |
| 1392 | |
| 1393 | /* Only allow moving across anonymous vmas */ |
| 1394 | if (!vma_is_anonymous(src_vma) || !vma_is_anonymous(dst_vma)) |
| 1395 | return -EINVAL; |
| 1396 | |
Andrea Arcangeli | adef440 | 2023-12-06 02:36:56 -0800 | [diff] [blame] | 1397 | return 0; |
| 1398 | } |
| 1399 | |
Lokesh Gidra | 867a43a | 2024-02-15 10:27:56 -0800 | [diff] [blame] | 1400 | static __always_inline |
| 1401 | int find_vmas_mm_locked(struct mm_struct *mm, |
| 1402 | unsigned long dst_start, |
| 1403 | unsigned long src_start, |
| 1404 | struct vm_area_struct **dst_vmap, |
| 1405 | struct vm_area_struct **src_vmap) |
| 1406 | { |
| 1407 | struct vm_area_struct *vma; |
| 1408 | |
| 1409 | mmap_assert_locked(mm); |
| 1410 | vma = find_vma_and_prepare_anon(mm, dst_start); |
| 1411 | if (IS_ERR(vma)) |
| 1412 | return PTR_ERR(vma); |
| 1413 | |
| 1414 | *dst_vmap = vma; |
| 1415 | /* Skip finding src_vma if src_start is in dst_vma */ |
| 1416 | if (src_start >= vma->vm_start && src_start < vma->vm_end) |
| 1417 | goto out_success; |
| 1418 | |
| 1419 | vma = vma_lookup(mm, src_start); |
| 1420 | if (!vma) |
| 1421 | return -ENOENT; |
| 1422 | out_success: |
| 1423 | *src_vmap = vma; |
| 1424 | return 0; |
| 1425 | } |
| 1426 | |
| 1427 | #ifdef CONFIG_PER_VMA_LOCK |
| 1428 | static int uffd_move_lock(struct mm_struct *mm, |
| 1429 | unsigned long dst_start, |
| 1430 | unsigned long src_start, |
| 1431 | struct vm_area_struct **dst_vmap, |
| 1432 | struct vm_area_struct **src_vmap) |
| 1433 | { |
| 1434 | struct vm_area_struct *vma; |
| 1435 | int err; |
| 1436 | |
Matthew Wilcox (Oracle) | 73b4a0c | 2024-04-26 15:45:02 +0100 | [diff] [blame] | 1437 | vma = uffd_lock_vma(mm, dst_start); |
Lokesh Gidra | 867a43a | 2024-02-15 10:27:56 -0800 | [diff] [blame] | 1438 | if (IS_ERR(vma)) |
| 1439 | return PTR_ERR(vma); |
| 1440 | |
| 1441 | *dst_vmap = vma; |
| 1442 | /* |
| 1443 | * Skip finding src_vma if src_start is in dst_vma. This also ensures |
| 1444 | * that we don't lock the same vma twice. |
| 1445 | */ |
| 1446 | if (src_start >= vma->vm_start && src_start < vma->vm_end) { |
| 1447 | *src_vmap = vma; |
| 1448 | return 0; |
| 1449 | } |
| 1450 | |
| 1451 | /* |
Matthew Wilcox (Oracle) | 73b4a0c | 2024-04-26 15:45:02 +0100 | [diff] [blame] | 1452 | * Using uffd_lock_vma() to get src_vma can lead to following deadlock: |
Lokesh Gidra | 867a43a | 2024-02-15 10:27:56 -0800 | [diff] [blame] | 1453 | * |
| 1454 | * Thread1 Thread2 |
| 1455 | * ------- ------- |
| 1456 | * vma_start_read(dst_vma) |
| 1457 | * mmap_write_lock(mm) |
| 1458 | * vma_start_write(src_vma) |
| 1459 | * vma_start_read(src_vma) |
| 1460 | * mmap_read_lock(mm) |
| 1461 | * vma_start_write(dst_vma) |
| 1462 | */ |
| 1463 | *src_vmap = lock_vma_under_rcu(mm, src_start); |
| 1464 | if (likely(*src_vmap)) |
| 1465 | return 0; |
| 1466 | |
| 1467 | /* Undo any locking and retry in mmap_lock critical section */ |
| 1468 | vma_end_read(*dst_vmap); |
| 1469 | |
| 1470 | mmap_read_lock(mm); |
| 1471 | err = find_vmas_mm_locked(mm, dst_start, src_start, dst_vmap, src_vmap); |
| 1472 | if (!err) { |
| 1473 | /* |
Matthew Wilcox (Oracle) | 73b4a0c | 2024-04-26 15:45:02 +0100 | [diff] [blame] | 1474 | * See comment in uffd_lock_vma() as to why not using |
Lokesh Gidra | 867a43a | 2024-02-15 10:27:56 -0800 | [diff] [blame] | 1475 | * vma_start_read() here. |
| 1476 | */ |
| 1477 | down_read(&(*dst_vmap)->vm_lock->lock); |
| 1478 | if (*dst_vmap != *src_vmap) |
Lokesh Gidra | 30af24f | 2024-03-21 16:58:18 -0700 | [diff] [blame] | 1479 | down_read_nested(&(*src_vmap)->vm_lock->lock, |
| 1480 | SINGLE_DEPTH_NESTING); |
Lokesh Gidra | 867a43a | 2024-02-15 10:27:56 -0800 | [diff] [blame] | 1481 | } |
| 1482 | mmap_read_unlock(mm); |
| 1483 | return err; |
| 1484 | } |
| 1485 | |
| 1486 | static void uffd_move_unlock(struct vm_area_struct *dst_vma, |
| 1487 | struct vm_area_struct *src_vma) |
| 1488 | { |
| 1489 | vma_end_read(src_vma); |
| 1490 | if (src_vma != dst_vma) |
| 1491 | vma_end_read(dst_vma); |
| 1492 | } |
| 1493 | |
| 1494 | #else |
| 1495 | |
| 1496 | static int uffd_move_lock(struct mm_struct *mm, |
| 1497 | unsigned long dst_start, |
| 1498 | unsigned long src_start, |
| 1499 | struct vm_area_struct **dst_vmap, |
| 1500 | struct vm_area_struct **src_vmap) |
| 1501 | { |
| 1502 | int err; |
| 1503 | |
| 1504 | mmap_read_lock(mm); |
| 1505 | err = find_vmas_mm_locked(mm, dst_start, src_start, dst_vmap, src_vmap); |
| 1506 | if (err) |
| 1507 | mmap_read_unlock(mm); |
| 1508 | return err; |
| 1509 | } |
| 1510 | |
| 1511 | static void uffd_move_unlock(struct vm_area_struct *dst_vma, |
| 1512 | struct vm_area_struct *src_vma) |
| 1513 | { |
| 1514 | mmap_assert_locked(src_vma->vm_mm); |
| 1515 | mmap_read_unlock(dst_vma->vm_mm); |
| 1516 | } |
| 1517 | #endif |
| 1518 | |
Andrea Arcangeli | adef440 | 2023-12-06 02:36:56 -0800 | [diff] [blame] | 1519 | /** |
| 1520 | * move_pages - move arbitrary anonymous pages of an existing vma |
| 1521 | * @ctx: pointer to the userfaultfd context |
Andrea Arcangeli | adef440 | 2023-12-06 02:36:56 -0800 | [diff] [blame] | 1522 | * @dst_start: start of the destination virtual memory range |
| 1523 | * @src_start: start of the source virtual memory range |
| 1524 | * @len: length of the virtual memory range |
| 1525 | * @mode: flags from uffdio_move.mode |
| 1526 | * |
Lokesh Gidra | 867a43a | 2024-02-15 10:27:56 -0800 | [diff] [blame] | 1527 | * It will either use the mmap_lock in read mode or per-vma locks |
Andrea Arcangeli | adef440 | 2023-12-06 02:36:56 -0800 | [diff] [blame] | 1528 | * |
| 1529 | * move_pages() remaps arbitrary anonymous pages atomically with zero |
| 1530 | * copy. It only works on non-shared anonymous pages because those can |
| 1531 | * be relocated without generating non linear anon_vmas in the rmap |
| 1532 | * code. |
| 1533 | * |
| 1534 | * It provides a zero copy mechanism to handle userspace page faults. |
| 1535 | * The source vma pages should have mapcount == 1, which can be |
| 1536 | * enforced by using madvise(MADV_DONTFORK) on src vma. |
| 1537 | * |
| 1538 | * The thread handling the userland page fault will receive the |
| 1539 | * faulting page in the source vma through the network, storage or |
| 1540 | * any other I/O device (MADV_DONTFORK in the source vma prevents |
| 1541 | * move_pages() from failing with -EBUSY if the process forks before |
| 1542 | * move_pages() is called), then it will call move_pages() to map the |
| 1543 | * page at the faulting address in the destination vma. |
| 1544 | * |
| 1545 | * This userfaultfd command works purely via pagetables, so it's the |
| 1546 | * most efficient way to move physical non-shared anonymous pages |
| 1547 | * across different virtual addresses. Unlike mremap()/mmap()/munmap() |
| 1548 | * it does not create any new vmas. The mapping in the destination |
| 1549 | * address is atomic. |
| 1550 | * |
| 1551 | * It only works if the vma protection bits are identical from the |
| 1552 | * source and destination vma. |
| 1553 | * |
| 1554 | * It can remap non-shared anonymous pages within the same vma too. |
| 1555 | * |
| 1556 | * If the source virtual memory range has any unmapped holes, or if |
| 1557 | * the destination virtual memory range is not a whole unmapped hole, |
| 1558 | * move_pages() will fail with -ENOENT or -EEXIST respectively. This |
| 1559 | * provides a very strict behavior to avoid any chance of memory |
| 1560 | * corruption going unnoticed if there are userland race conditions. |
| 1561 | * Only one thread should resolve the userland page fault at any given |
| 1562 | * time for any given faulting address. This means that if two threads |
| 1563 | * try to both call move_pages() on the same destination address at the |
| 1564 | * same time, the second thread will get an explicit error from this |
| 1565 | * command. |
| 1566 | * |
| 1567 | * The command retval will be "len" if successful. The command can, |
| 1568 | * however, be interrupted by fatal signals or errors. If |
| 1569 | * interrupted it will return the number of bytes successfully |
| 1570 | * remapped before the interruption if any, or the negative error if |
| 1571 | * none. It will never return zero. Either it will return an error or |
| 1572 | * a count of bytes successfully moved. If the retval reports a |
| 1573 | * "short" remap, the move_pages() command should be repeated by |
| 1574 | * userland with src+retval, dst+retval, len-retval if it wants to know |
| 1575 | * about the error that interrupted it. |
| 1576 | * |
| 1577 | * The UFFDIO_MOVE_MODE_ALLOW_SRC_HOLES flag can be specified to |
| 1578 | * prevent -ENOENT errors from materializing if there are holes in the |
| 1579 | * source virtual range that is being remapped. The holes will be |
| 1580 | * accounted as successfully remapped in the retval of the |
| 1581 | * command. This is mostly useful to remap hugepage naturally aligned |
| 1582 | * virtual regions without knowing if there are transparent hugepages |
| 1583 | * in the regions or not, while avoiding the risk of having to split |
| 1584 | * the hugepmd during the remap. |
| 1585 | * |
| 1586 | * If there's any rmap walk that is taking the anon_vma locks without |
| 1587 | * first obtaining the folio lock (the only current instance is |
| 1588 | * folio_referenced), they will have to verify if the folio->mapping |
| 1589 | * has changed after taking the anon_vma lock. If it changed they |
| 1590 | * should release the lock and retry obtaining a new anon_vma, because |
| 1591 | * it means the anon_vma was changed by move_pages() before the lock |
| 1592 | * could be obtained. This is the only additional complexity added to |
| 1593 | * the rmap code to provide this anonymous page remapping functionality. |
| 1594 | */ |
Lokesh Gidra | 867a43a | 2024-02-15 10:27:56 -0800 | [diff] [blame] | 1595 | ssize_t move_pages(struct userfaultfd_ctx *ctx, unsigned long dst_start, |
| 1596 | unsigned long src_start, unsigned long len, __u64 mode) |
Andrea Arcangeli | adef440 | 2023-12-06 02:36:56 -0800 | [diff] [blame] | 1597 | { |
Lokesh Gidra | 867a43a | 2024-02-15 10:27:56 -0800 | [diff] [blame] | 1598 | struct mm_struct *mm = ctx->mm; |
Andrea Arcangeli | adef440 | 2023-12-06 02:36:56 -0800 | [diff] [blame] | 1599 | struct vm_area_struct *src_vma, *dst_vma; |
| 1600 | unsigned long src_addr, dst_addr; |
| 1601 | pmd_t *src_pmd, *dst_pmd; |
| 1602 | long err = -EINVAL; |
| 1603 | ssize_t moved = 0; |
| 1604 | |
| 1605 | /* Sanitize the command parameters. */ |
| 1606 | if (WARN_ON_ONCE(src_start & ~PAGE_MASK) || |
| 1607 | WARN_ON_ONCE(dst_start & ~PAGE_MASK) || |
| 1608 | WARN_ON_ONCE(len & ~PAGE_MASK)) |
| 1609 | goto out; |
| 1610 | |
| 1611 | /* Does the address range wrap, or is the span zero-sized? */ |
| 1612 | if (WARN_ON_ONCE(src_start + len <= src_start) || |
| 1613 | WARN_ON_ONCE(dst_start + len <= dst_start)) |
| 1614 | goto out; |
| 1615 | |
Lokesh Gidra | 867a43a | 2024-02-15 10:27:56 -0800 | [diff] [blame] | 1616 | err = uffd_move_lock(mm, dst_start, src_start, &dst_vma, &src_vma); |
| 1617 | if (err) |
| 1618 | goto out; |
| 1619 | |
| 1620 | /* Re-check after taking map_changing_lock */ |
| 1621 | err = -EAGAIN; |
| 1622 | down_read(&ctx->map_changing_lock); |
| 1623 | if (unlikely(atomic_read(&ctx->mmap_changing))) |
| 1624 | goto out_unlock; |
Andrea Arcangeli | adef440 | 2023-12-06 02:36:56 -0800 | [diff] [blame] | 1625 | /* |
| 1626 | * Make sure the vma is not shared, that the src and dst remap |
| 1627 | * ranges are both valid and fully within a single existing |
| 1628 | * vma. |
| 1629 | */ |
Lokesh Gidra | 867a43a | 2024-02-15 10:27:56 -0800 | [diff] [blame] | 1630 | err = -EINVAL; |
| 1631 | if (src_vma->vm_flags & VM_SHARED) |
| 1632 | goto out_unlock; |
| 1633 | if (src_start + len > src_vma->vm_end) |
| 1634 | goto out_unlock; |
Andrea Arcangeli | adef440 | 2023-12-06 02:36:56 -0800 | [diff] [blame] | 1635 | |
Lokesh Gidra | 867a43a | 2024-02-15 10:27:56 -0800 | [diff] [blame] | 1636 | if (dst_vma->vm_flags & VM_SHARED) |
| 1637 | goto out_unlock; |
| 1638 | if (dst_start + len > dst_vma->vm_end) |
| 1639 | goto out_unlock; |
Andrea Arcangeli | adef440 | 2023-12-06 02:36:56 -0800 | [diff] [blame] | 1640 | |
| 1641 | err = validate_move_areas(ctx, src_vma, dst_vma); |
| 1642 | if (err) |
Lokesh Gidra | 867a43a | 2024-02-15 10:27:56 -0800 | [diff] [blame] | 1643 | goto out_unlock; |
Andrea Arcangeli | adef440 | 2023-12-06 02:36:56 -0800 | [diff] [blame] | 1644 | |
| 1645 | for (src_addr = src_start, dst_addr = dst_start; |
| 1646 | src_addr < src_start + len;) { |
| 1647 | spinlock_t *ptl; |
| 1648 | pmd_t dst_pmdval; |
| 1649 | unsigned long step_size; |
| 1650 | |
| 1651 | /* |
| 1652 | * Below works because an anonymous area would not have a |
| 1653 | * transparent huge PUD. If file-backed support is added, |
| 1654 | * that case would need to be handled here. |
| 1655 | */ |
| 1656 | src_pmd = mm_find_pmd(mm, src_addr); |
| 1657 | if (unlikely(!src_pmd)) { |
| 1658 | if (!(mode & UFFDIO_MOVE_MODE_ALLOW_SRC_HOLES)) { |
| 1659 | err = -ENOENT; |
| 1660 | break; |
| 1661 | } |
| 1662 | src_pmd = mm_alloc_pmd(mm, src_addr); |
| 1663 | if (unlikely(!src_pmd)) { |
| 1664 | err = -ENOMEM; |
| 1665 | break; |
| 1666 | } |
| 1667 | } |
| 1668 | dst_pmd = mm_alloc_pmd(mm, dst_addr); |
| 1669 | if (unlikely(!dst_pmd)) { |
| 1670 | err = -ENOMEM; |
| 1671 | break; |
| 1672 | } |
| 1673 | |
| 1674 | dst_pmdval = pmdp_get_lockless(dst_pmd); |
| 1675 | /* |
| 1676 | * If the dst_pmd is mapped as THP don't override it and just |
| 1677 | * be strict. If dst_pmd changes into THP after this check, the |
| 1678 | * move_pages_huge_pmd() will detect the change and retry |
| 1679 | * while move_pages_pte() will detect the change and fail. |
| 1680 | */ |
| 1681 | if (unlikely(pmd_trans_huge(dst_pmdval))) { |
| 1682 | err = -EEXIST; |
| 1683 | break; |
| 1684 | } |
| 1685 | |
| 1686 | ptl = pmd_trans_huge_lock(src_pmd, src_vma); |
| 1687 | if (ptl) { |
| 1688 | if (pmd_devmap(*src_pmd)) { |
| 1689 | spin_unlock(ptl); |
| 1690 | err = -ENOENT; |
| 1691 | break; |
| 1692 | } |
| 1693 | |
| 1694 | /* Check if we can move the pmd without splitting it. */ |
| 1695 | if (move_splits_huge_pmd(dst_addr, src_addr, src_start + len) || |
| 1696 | !pmd_none(dst_pmdval)) { |
Matthew Wilcox (Oracle) | e06d03d | 2024-03-26 20:28:23 +0000 | [diff] [blame] | 1697 | struct folio *folio = pmd_folio(*src_pmd); |
Andrea Arcangeli | adef440 | 2023-12-06 02:36:56 -0800 | [diff] [blame] | 1698 | |
Matthew Wilcox (Oracle) | 5beaee5 | 2024-03-26 20:28:22 +0000 | [diff] [blame] | 1699 | if (!folio || (!is_huge_zero_folio(folio) && |
Suren Baghdasaryan | eb1521d | 2024-01-31 09:56:18 -0800 | [diff] [blame] | 1700 | !PageAnonExclusive(&folio->page))) { |
Andrea Arcangeli | adef440 | 2023-12-06 02:36:56 -0800 | [diff] [blame] | 1701 | spin_unlock(ptl); |
| 1702 | err = -EBUSY; |
| 1703 | break; |
| 1704 | } |
| 1705 | |
| 1706 | spin_unlock(ptl); |
| 1707 | split_huge_pmd(src_vma, src_pmd, src_addr); |
| 1708 | /* The folio will be split by move_pages_pte() */ |
| 1709 | continue; |
| 1710 | } |
| 1711 | |
| 1712 | err = move_pages_huge_pmd(mm, dst_pmd, src_pmd, |
| 1713 | dst_pmdval, dst_vma, src_vma, |
| 1714 | dst_addr, src_addr); |
| 1715 | step_size = HPAGE_PMD_SIZE; |
| 1716 | } else { |
| 1717 | if (pmd_none(*src_pmd)) { |
| 1718 | if (!(mode & UFFDIO_MOVE_MODE_ALLOW_SRC_HOLES)) { |
| 1719 | err = -ENOENT; |
| 1720 | break; |
| 1721 | } |
| 1722 | if (unlikely(__pte_alloc(mm, src_pmd))) { |
| 1723 | err = -ENOMEM; |
| 1724 | break; |
| 1725 | } |
| 1726 | } |
| 1727 | |
| 1728 | if (unlikely(pte_alloc(mm, dst_pmd))) { |
| 1729 | err = -ENOMEM; |
| 1730 | break; |
| 1731 | } |
| 1732 | |
| 1733 | err = move_pages_pte(mm, dst_pmd, src_pmd, |
| 1734 | dst_vma, src_vma, |
| 1735 | dst_addr, src_addr, mode); |
| 1736 | step_size = PAGE_SIZE; |
| 1737 | } |
| 1738 | |
| 1739 | cond_resched(); |
| 1740 | |
| 1741 | if (fatal_signal_pending(current)) { |
| 1742 | /* Do not override an error */ |
| 1743 | if (!err || err == -EAGAIN) |
| 1744 | err = -EINTR; |
| 1745 | break; |
| 1746 | } |
| 1747 | |
| 1748 | if (err) { |
| 1749 | if (err == -EAGAIN) |
| 1750 | continue; |
| 1751 | break; |
| 1752 | } |
| 1753 | |
| 1754 | /* Proceed to the next page */ |
| 1755 | dst_addr += step_size; |
| 1756 | src_addr += step_size; |
| 1757 | moved += step_size; |
| 1758 | } |
| 1759 | |
Lokesh Gidra | 867a43a | 2024-02-15 10:27:56 -0800 | [diff] [blame] | 1760 | out_unlock: |
| 1761 | up_read(&ctx->map_changing_lock); |
| 1762 | uffd_move_unlock(dst_vma, src_vma); |
Andrea Arcangeli | adef440 | 2023-12-06 02:36:56 -0800 | [diff] [blame] | 1763 | out: |
| 1764 | VM_WARN_ON(moved < 0); |
| 1765 | VM_WARN_ON(err > 0); |
| 1766 | VM_WARN_ON(!moved && !err); |
| 1767 | return moved ? moved : err; |
| 1768 | } |
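
/*
 * Illustrative userspace retry loop for the "short" remap protocol
 * documented above (hypothetical helper, not part of this file): on a
 * partial move the kernel reports the moved byte count in uffdio_move.move
 * and the ioctl() fails, so userland advances src/dst/len by that amount
 * and repeats. Assumes a uffd opened with the UFFD_FEATURE_MOVE feature.
 */
#include <sys/ioctl.h>
#include <linux/userfaultfd.h>

static int move_all(int uffd, unsigned long dst, unsigned long src,
		    unsigned long len)
{
	while (len) {
		struct uffdio_move mv = {
			.dst = dst,
			.src = src,
			.len = len,
			.mode = 0,	/* or UFFDIO_MOVE_MODE_ALLOW_SRC_HOLES */
		};

		if (ioctl(uffd, UFFDIO_MOVE, &mv) != -1)
			return 0;	/* whole range moved */
		if (mv.move <= 0)
			return -1;	/* nothing moved; errno has the cause */
		/* short move: skip past what succeeded and retry */
		dst += mv.move;
		src += mv.move;
		len -= mv.move;
	}
	return 0;
}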