// SPDX-License-Identifier: GPL-2.0-only
/*
 * fs/userfaultfd.c
 *
 * Copyright (C) 2007 Davide Libenzi <davidel@xmailserver.org>
 * Copyright (C) 2008-2009 Red Hat, Inc.
 * Copyright (C) 2015 Red Hat, Inc.
 *
 * Some part derived from fs/eventfd.c (anon inode setup) and
 * mm/ksm.c (mm hashing).
 */

#include <linux/list.h>
#include <linux/hashtable.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/mm.h>
#include <linux/mm_inline.h>
#include <linux/mmu_notifier.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/file.h>
#include <linux/bug.h>
#include <linux/anon_inodes.h>
#include <linux/syscalls.h>
#include <linux/userfaultfd_k.h>
#include <linux/mempolicy.h>
#include <linux/ioctl.h>
#include <linux/security.h>
#include <linux/hugetlb.h>
#include <linux/swapops.h>

int sysctl_unprivileged_userfaultfd __read_mostly;

static struct kmem_cache *userfaultfd_ctx_cachep __read_mostly;

/*
 * Start with fault_pending_wqh and fault_wqh so they're more likely
 * to be in the same cacheline.
 *
 * Locking order:
 *	fd_wqh.lock
 *		fault_pending_wqh.lock
 *			fault_wqh.lock
 *		event_wqh.lock
 *
 * To avoid deadlocks, IRQs must be disabled when taking any of the above locks,
 * since fd_wqh.lock is taken by aio_poll() while it's holding a lock that's
 * also taken in IRQ context.
 */
struct userfaultfd_ctx {
	/* waitqueue head for the pending (i.e. not read) userfaults */
	wait_queue_head_t fault_pending_wqh;
	/* waitqueue head for the userfaults */
	wait_queue_head_t fault_wqh;
	/* waitqueue head for the pseudo fd to wakeup poll/read */
	wait_queue_head_t fd_wqh;
	/* waitqueue head for events */
	wait_queue_head_t event_wqh;
	/* a refile sequence protected by fault_pending_wqh lock */
	seqcount_spinlock_t refile_seq;
	/* pseudo fd refcounting */
	refcount_t refcount;
	/* userfaultfd syscall flags */
	unsigned int flags;
	/* features requested from the userspace */
	unsigned int features;
	/* released */
	bool released;
	/* memory mappings are changing because of non-cooperative event */
	atomic_t mmap_changing;
	/* mm with one or more vmas attached to this userfaultfd_ctx */
	struct mm_struct *mm;
};

struct userfaultfd_fork_ctx {
	struct userfaultfd_ctx *orig;
	struct userfaultfd_ctx *new;
	struct list_head list;
};

struct userfaultfd_unmap_ctx {
	struct userfaultfd_ctx *ctx;
	unsigned long start;
	unsigned long end;
	struct list_head list;
};

struct userfaultfd_wait_queue {
	struct uffd_msg msg;
	wait_queue_entry_t wq;
	struct userfaultfd_ctx *ctx;
	bool waken;
};

struct userfaultfd_wake_range {
	unsigned long start;
	unsigned long len;
};

/* internal indication that UFFD_API ioctl was successfully executed */
#define UFFD_FEATURE_INITIALIZED		(1u << 31)

static bool userfaultfd_is_initialized(struct userfaultfd_ctx *ctx)
{
	return ctx->features & UFFD_FEATURE_INITIALIZED;
}

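/*
 * Waitqueue callback for fault waitqueue entries: wake a blocked faulting
 * task only if its fault address falls in the requested wake range (len == 0
 * means wake all), and auto-remove the entry from the queue on wakeup.
 */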
static int userfaultfd_wake_function(wait_queue_entry_t *wq, unsigned mode,
				     int wake_flags, void *key)
{
	struct userfaultfd_wake_range *range = key;
	int ret;
	struct userfaultfd_wait_queue *uwq;
	unsigned long start, len;

	uwq = container_of(wq, struct userfaultfd_wait_queue, wq);
	ret = 0;
	/* len == 0 means wake all */
	start = range->start;
	len = range->len;
	if (len && (start > uwq->msg.arg.pagefault.address ||
		    start + len <= uwq->msg.arg.pagefault.address))
		goto out;
	WRITE_ONCE(uwq->waken, true);
	/*
	 * The Program-Order guarantees provided by the scheduler
	 * ensure uwq->waken is visible before the task is woken.
	 */
	ret = wake_up_state(wq->private, mode);
	if (ret) {
		/*
		 * Wake only once, autoremove behavior.
		 *
		 * After the effect of list_del_init is visible to the other
		 * CPUs, the waitqueue may disappear from under us, see the
		 * !list_empty_careful() in handle_userfault().
		 *
		 * try_to_wake_up() has an implicit smp_mb(), and the
		 * wq->private is read before calling the extern function
		 * "wake_up_state" (which in turn calls try_to_wake_up).
		 */
		list_del_init(&wq->entry);
	}
out:
	return ret;
}

/**
 * userfaultfd_ctx_get - Acquires a reference to the internal userfaultfd
 * context.
 * @ctx: [in] Pointer to the userfaultfd context.
 */
static void userfaultfd_ctx_get(struct userfaultfd_ctx *ctx)
{
	refcount_inc(&ctx->refcount);
}

/**
 * userfaultfd_ctx_put - Releases a reference to the internal userfaultfd
 * context.
 * @ctx: [in] Pointer to userfaultfd context.
 *
 * The userfaultfd context reference must have been previously acquired either
 * with userfaultfd_ctx_get() or userfaultfd_ctx_fdget().
 */
static void userfaultfd_ctx_put(struct userfaultfd_ctx *ctx)
{
	if (refcount_dec_and_test(&ctx->refcount)) {
		VM_BUG_ON(spin_is_locked(&ctx->fault_pending_wqh.lock));
		VM_BUG_ON(waitqueue_active(&ctx->fault_pending_wqh));
		VM_BUG_ON(spin_is_locked(&ctx->fault_wqh.lock));
		VM_BUG_ON(waitqueue_active(&ctx->fault_wqh));
		VM_BUG_ON(spin_is_locked(&ctx->event_wqh.lock));
		VM_BUG_ON(waitqueue_active(&ctx->event_wqh));
		VM_BUG_ON(spin_is_locked(&ctx->fd_wqh.lock));
		VM_BUG_ON(waitqueue_active(&ctx->fd_wqh));
		mmdrop(ctx->mm);
		kmem_cache_free(userfaultfd_ctx_cachep, ctx);
	}
}

static inline void msg_init(struct uffd_msg *msg)
{
	BUILD_BUG_ON(sizeof(struct uffd_msg) != 32);
	/*
	 * Must use memset to zero out the padding or kernel data is
	 * leaked to userland.
	 */
	memset(msg, 0, sizeof(struct uffd_msg));
}

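/*
 * Build the uffd_msg describing a page fault: the (optionally page-aligned)
 * fault address, the flags telling why the fault happened, and the faulting
 * thread id when UFFD_FEATURE_THREAD_ID was requested.
 */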
static inline struct uffd_msg userfault_msg(unsigned long address,
					    unsigned int flags,
					    unsigned long reason,
					    unsigned int features)
{
	struct uffd_msg msg;
	msg_init(&msg);
	msg.event = UFFD_EVENT_PAGEFAULT;

	if (!(features & UFFD_FEATURE_EXACT_ADDRESS))
		address &= PAGE_MASK;
	msg.arg.pagefault.address = address;
	/*
	 * These flags indicate why the userfault occurred:
	 * - UFFD_PAGEFAULT_FLAG_WP indicates a write protect fault.
	 * - UFFD_PAGEFAULT_FLAG_MINOR indicates a minor fault.
	 * - Neither of these flags being set indicates a MISSING fault.
	 *
	 * Separately, UFFD_PAGEFAULT_FLAG_WRITE indicates it was a write
	 * fault. Otherwise, it was a read fault.
	 */
	if (flags & FAULT_FLAG_WRITE)
		msg.arg.pagefault.flags |= UFFD_PAGEFAULT_FLAG_WRITE;
	if (reason & VM_UFFD_WP)
		msg.arg.pagefault.flags |= UFFD_PAGEFAULT_FLAG_WP;
	if (reason & VM_UFFD_MINOR)
		msg.arg.pagefault.flags |= UFFD_PAGEFAULT_FLAG_MINOR;
	if (features & UFFD_FEATURE_THREAD_ID)
		msg.arg.pagefault.feat.ptid = task_pid_vnr(current);
	return msg;
}

#ifdef CONFIG_HUGETLB_PAGE
/*
 * Same functionality as userfaultfd_must_wait below with modifications for
 * hugepmd ranges.
 */
static inline bool userfaultfd_huge_must_wait(struct userfaultfd_ctx *ctx,
					      struct vm_area_struct *vma,
					      unsigned long address,
					      unsigned long flags,
					      unsigned long reason)
{
	struct mm_struct *mm = ctx->mm;
	pte_t *ptep, pte;
	bool ret = true;

	mmap_assert_locked(mm);

	ptep = huge_pte_offset(mm, address, vma_mmu_pagesize(vma));

	if (!ptep)
		goto out;

	ret = false;
	pte = huge_ptep_get(ptep);

	/*
	 * Lockless access: we're in a wait_event so it's ok if it
	 * changes under us.  PTE markers should be handled the same as none
	 * ptes here.
	 */
	if (huge_pte_none_mostly(pte))
		ret = true;
	if (!huge_pte_write(pte) && (reason & VM_UFFD_WP))
		ret = true;
out:
	return ret;
}
#else
static inline bool userfaultfd_huge_must_wait(struct userfaultfd_ctx *ctx,
					      struct vm_area_struct *vma,
					      unsigned long address,
					      unsigned long flags,
					      unsigned long reason)
{
	return false;	/* should never get here */
}
#endif /* CONFIG_HUGETLB_PAGE */

/*
 * Verify the pagetables are still not ok after having registered into
 * the fault_pending_wqh to avoid userland having to UFFDIO_WAKE any
 * userfault that has already been resolved, if userfaultfd_read and
 * UFFDIO_COPY|ZEROPAGE are being run simultaneously on two different
 * threads.
 */
static inline bool userfaultfd_must_wait(struct userfaultfd_ctx *ctx,
					 unsigned long address,
					 unsigned long flags,
					 unsigned long reason)
{
	struct mm_struct *mm = ctx->mm;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd, _pmd;
	pte_t *pte;
	bool ret = true;

	mmap_assert_locked(mm);

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		goto out;
	p4d = p4d_offset(pgd, address);
	if (!p4d_present(*p4d))
		goto out;
	pud = pud_offset(p4d, address);
	if (!pud_present(*pud))
		goto out;
	pmd = pmd_offset(pud, address);
	/*
	 * READ_ONCE must function as a barrier with narrower scope
	 * and it must be equivalent to:
	 *	_pmd = *pmd; barrier();
	 *
	 * This is to deal with the instability (as in
	 * pmd_trans_unstable) of the pmd.
	 */
	_pmd = READ_ONCE(*pmd);
	if (pmd_none(_pmd))
		goto out;

	ret = false;
	if (!pmd_present(_pmd))
		goto out;

	if (pmd_trans_huge(_pmd)) {
		if (!pmd_write(_pmd) && (reason & VM_UFFD_WP))
			ret = true;
		goto out;
	}

	/*
	 * the pmd is stable (as in !pmd_trans_unstable) so we can re-read it
	 * and use the standard pte_offset_map() instead of parsing _pmd.
	 */
	pte = pte_offset_map(pmd, address);
	/*
	 * Lockless access: we're in a wait_event so it's ok if it
	 * changes under us.  PTE markers should be handled the same as none
	 * ptes here.
	 */
	if (pte_none_mostly(*pte))
		ret = true;
	if (!pte_write(*pte) && (reason & VM_UFFD_WP))
		ret = true;
	pte_unmap(pte);

out:
	return ret;
}

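/*
 * Map the fault flags to the task state used while waiting for the fault to
 * be resolved by userspace.
 */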
static inline unsigned int userfaultfd_get_blocking_state(unsigned int flags)
{
	if (flags & FAULT_FLAG_INTERRUPTIBLE)
		return TASK_INTERRUPTIBLE;

	if (flags & FAULT_FLAG_KILLABLE)
		return TASK_KILLABLE;

	return TASK_UNINTERRUPTIBLE;
}

/*
 * The locking rules involved in returning VM_FAULT_RETRY depending on
 * FAULT_FLAG_ALLOW_RETRY, FAULT_FLAG_RETRY_NOWAIT and
 * FAULT_FLAG_KILLABLE are not straightforward. The "Caution"
 * recommendation in __lock_page_or_retry is not an understatement.
 *
 * If FAULT_FLAG_ALLOW_RETRY is set, the mmap_lock must be released
 * before returning VM_FAULT_RETRY only if FAULT_FLAG_RETRY_NOWAIT is
 * not set.
 *
 * If FAULT_FLAG_ALLOW_RETRY is set but FAULT_FLAG_KILLABLE is not
 * set, VM_FAULT_RETRY can still be returned if and only if there are
 * fatal_signal_pending()s, and the mmap_lock must be released before
 * returning it.
 */
vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason)
{
	struct mm_struct *mm = vmf->vma->vm_mm;
	struct userfaultfd_ctx *ctx;
	struct userfaultfd_wait_queue uwq;
	vm_fault_t ret = VM_FAULT_SIGBUS;
	bool must_wait;
	unsigned int blocking_state;

	/*
	 * We don't do userfault handling for the final child pid update.
	 *
	 * We also don't do userfault handling during
	 * coredumping. hugetlbfs has the special
	 * follow_hugetlb_page() to skip missing pages in the
	 * FOLL_DUMP case, anon memory also checks for FOLL_DUMP with
	 * the no_page_table() helper in follow_page_mask(), but the
	 * shmem_vm_ops->fault method is invoked even during
	 * coredumping without mmap_lock and it ends up here.
	 */
	if (current->flags & (PF_EXITING|PF_DUMPCORE))
		goto out;

	/*
	 * Coredumping runs without mmap_lock so we can only check that
	 * the mmap_lock is held, if PF_DUMPCORE was not set.
	 */
	mmap_assert_locked(mm);

	ctx = vmf->vma->vm_userfaultfd_ctx.ctx;
	if (!ctx)
		goto out;

	BUG_ON(ctx->mm != mm);

	/* Any unrecognized flag is a bug. */
	VM_BUG_ON(reason & ~__VM_UFFD_FLAGS);
	/* 0 or > 1 flags set is a bug; we expect exactly 1. */
	VM_BUG_ON(!reason || (reason & (reason - 1)));

	if (ctx->features & UFFD_FEATURE_SIGBUS)
		goto out;
	if ((vmf->flags & FAULT_FLAG_USER) == 0 &&
	    ctx->flags & UFFD_USER_MODE_ONLY) {
		printk_once(KERN_WARNING "uffd: Set unprivileged_userfaultfd "
			"sysctl knob to 1 if kernel faults must be handled "
			"without obtaining CAP_SYS_PTRACE capability\n");
		goto out;
	}

	/*
	 * If it's already released don't get it. This avoids looping
	 * in __get_user_pages if userfaultfd_release waits on the
	 * caller of handle_userfault to release the mmap_lock.
	 */
	if (unlikely(READ_ONCE(ctx->released))) {
		/*
		 * Don't return VM_FAULT_SIGBUS in this case, so a non
		 * cooperative manager can close the uffd after the
		 * last UFFDIO_COPY, without risking to trigger an
		 * involuntary SIGBUS if the process was starting the
		 * userfaultfd while the userfaultfd was still armed
		 * (but after the last UFFDIO_COPY). If the uffd
		 * wasn't already closed when the userfault reached
		 * this point, that would normally be solved by
		 * userfaultfd_must_wait returning 'false'.
		 *
		 * If we were to return VM_FAULT_SIGBUS here, the non
		 * cooperative manager would be instead forced to
		 * always call UFFDIO_UNREGISTER before it can safely
		 * close the uffd.
		 */
		ret = VM_FAULT_NOPAGE;
		goto out;
	}

	/*
	 * Check that we can return VM_FAULT_RETRY.
	 *
	 * NOTE: it should become possible to return VM_FAULT_RETRY
	 * even if FAULT_FLAG_TRIED is set without leading to gup()
	 * -EBUSY failures, if the userfaultfd is to be extended for
	 * VM_UFFD_WP tracking and we intend to arm the userfault
	 * without first stopping userland access to the memory. For
	 * VM_UFFD_MISSING userfaults this is enough for now.
	 */
	if (unlikely(!(vmf->flags & FAULT_FLAG_ALLOW_RETRY))) {
		/*
		 * Validate the invariant that nowait must allow retry
		 * to be sure not to return SIGBUS erroneously on
		 * nowait invocations.
		 */
		BUG_ON(vmf->flags & FAULT_FLAG_RETRY_NOWAIT);
#ifdef CONFIG_DEBUG_VM
		if (printk_ratelimit()) {
			printk(KERN_WARNING
			       "FAULT_FLAG_ALLOW_RETRY missing %x\n",
			       vmf->flags);
			dump_stack();
		}
#endif
		goto out;
	}

	/*
	 * Handle nowait, not much to do other than tell it to retry
	 * and wait.
	 */
	ret = VM_FAULT_RETRY;
	if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
		goto out;

	/* take the reference before dropping the mmap_lock */
	userfaultfd_ctx_get(ctx);

	init_waitqueue_func_entry(&uwq.wq, userfaultfd_wake_function);
	uwq.wq.private = current;
	uwq.msg = userfault_msg(vmf->real_address, vmf->flags, reason,
			ctx->features);
	uwq.ctx = ctx;
	uwq.waken = false;

	blocking_state = userfaultfd_get_blocking_state(vmf->flags);

	spin_lock_irq(&ctx->fault_pending_wqh.lock);
	/*
	 * After the __add_wait_queue the uwq is visible to userland
	 * through poll/read().
	 */
	__add_wait_queue(&ctx->fault_pending_wqh, &uwq.wq);
	/*
	 * The smp_mb() after __set_current_state prevents the reads
	 * following the spin_unlock to happen before the list_add in
	 * __add_wait_queue.
	 */
	set_current_state(blocking_state);
	spin_unlock_irq(&ctx->fault_pending_wqh.lock);

	if (!is_vm_hugetlb_page(vmf->vma))
		must_wait = userfaultfd_must_wait(ctx, vmf->address, vmf->flags,
						  reason);
	else
		must_wait = userfaultfd_huge_must_wait(ctx, vmf->vma,
						       vmf->address,
						       vmf->flags, reason);
	mmap_read_unlock(mm);

	if (likely(must_wait && !READ_ONCE(ctx->released))) {
		wake_up_poll(&ctx->fd_wqh, EPOLLIN);
		schedule();
	}

	__set_current_state(TASK_RUNNING);

	/*
	 * Here we race with the list_del; list_add in
	 * userfaultfd_ctx_read(), however because we don't ever run
	 * list_del_init() to refile across the two lists, the prev
	 * and next pointers will never point to self. list_add also
	 * would never let any of the two pointers to point to
	 * self. So list_empty_careful won't risk to see both pointers
	 * pointing to self at any time during the list refile. The
	 * only case where list_del_init() is called is the full
	 * removal in the wake function and there we don't re-list_add
	 * and it's fine not to block on the spinlock. The uwq on this
	 * kernel stack can be released after the list_del_init.
	 */
	if (!list_empty_careful(&uwq.wq.entry)) {
		spin_lock_irq(&ctx->fault_pending_wqh.lock);
		/*
		 * No need of list_del_init(), the uwq on the stack
		 * will be freed shortly anyway.
		 */
		list_del(&uwq.wq.entry);
		spin_unlock_irq(&ctx->fault_pending_wqh.lock);
	}

	/*
	 * ctx may go away after this if the userfault pseudo fd is
	 * already released.
	 */
	userfaultfd_ctx_put(ctx);

out:
	return ret;
}

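/*
 * Queue a non-cooperative event (fork, remap, remove, unmap) on event_wqh and
 * sleep until the uffd reader acknowledges it (msg.event is cleared), the
 * context is released, or a fatal signal is pending.  On return the
 * mmap_changing counter bumped by the caller is dropped again.
 */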
static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
					      struct userfaultfd_wait_queue *ewq)
{
	struct userfaultfd_ctx *release_new_ctx;

	if (WARN_ON_ONCE(current->flags & PF_EXITING))
		goto out;

	ewq->ctx = ctx;
	init_waitqueue_entry(&ewq->wq, current);
	release_new_ctx = NULL;

	spin_lock_irq(&ctx->event_wqh.lock);
	/*
	 * After the __add_wait_queue the uwq is visible to userland
	 * through poll/read().
	 */
	__add_wait_queue(&ctx->event_wqh, &ewq->wq);
	for (;;) {
		set_current_state(TASK_KILLABLE);
		if (ewq->msg.event == 0)
			break;
		if (READ_ONCE(ctx->released) ||
		    fatal_signal_pending(current)) {
			/*
			 * &ewq->wq may be queued in fork_event, but
			 * __remove_wait_queue ignores the head
			 * parameter. It would be a problem if it
			 * didn't.
			 */
			__remove_wait_queue(&ctx->event_wqh, &ewq->wq);
			if (ewq->msg.event == UFFD_EVENT_FORK) {
				struct userfaultfd_ctx *new;

				new = (struct userfaultfd_ctx *)
					(unsigned long)
					ewq->msg.arg.reserved.reserved1;
				release_new_ctx = new;
			}
			break;
		}

		spin_unlock_irq(&ctx->event_wqh.lock);

		wake_up_poll(&ctx->fd_wqh, EPOLLIN);
		schedule();

		spin_lock_irq(&ctx->event_wqh.lock);
	}
	__set_current_state(TASK_RUNNING);
	spin_unlock_irq(&ctx->event_wqh.lock);

	if (release_new_ctx) {
		struct vm_area_struct *vma;
		struct mm_struct *mm = release_new_ctx->mm;

		/* the various vma->vm_userfaultfd_ctx still points to it */
		mmap_write_lock(mm);
		for (vma = mm->mmap; vma; vma = vma->vm_next)
			if (vma->vm_userfaultfd_ctx.ctx == release_new_ctx) {
				vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
				vma->vm_flags &= ~__VM_UFFD_FLAGS;
			}
		mmap_write_unlock(mm);

		userfaultfd_ctx_put(release_new_ctx);
	}

	/*
	 * ctx may go away after this if the userfault pseudo fd is
	 * already released.
	 */
out:
	atomic_dec(&ctx->mmap_changing);
	VM_BUG_ON(atomic_read(&ctx->mmap_changing) < 0);
	userfaultfd_ctx_put(ctx);
}

static void userfaultfd_event_complete(struct userfaultfd_ctx *ctx,
				       struct userfaultfd_wait_queue *ewq)
{
	ewq->msg.event = 0;
	wake_up_locked(&ctx->event_wqh);
	__remove_wait_queue(&ctx->event_wqh, &ewq->wq);
}

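/*
 * Called while duplicating the parent's vmas at fork time: when the parent
 * vma has a userfaultfd context with UFFD_FEATURE_EVENT_FORK enabled, create
 * (or reuse) a context for the child and record a fork_ctx so that
 * dup_userfaultfd_complete() can report UFFD_EVENT_FORK later; otherwise the
 * child vma loses its userfaultfd state.
 */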
int dup_userfaultfd(struct vm_area_struct *vma, struct list_head *fcs)
{
	struct userfaultfd_ctx *ctx = NULL, *octx;
	struct userfaultfd_fork_ctx *fctx;

	octx = vma->vm_userfaultfd_ctx.ctx;
	if (!octx || !(octx->features & UFFD_FEATURE_EVENT_FORK)) {
		vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
		vma->vm_flags &= ~__VM_UFFD_FLAGS;
		return 0;
	}

	list_for_each_entry(fctx, fcs, list)
		if (fctx->orig == octx) {
			ctx = fctx->new;
			break;
		}

	if (!ctx) {
		fctx = kmalloc(sizeof(*fctx), GFP_KERNEL);
		if (!fctx)
			return -ENOMEM;

		ctx = kmem_cache_alloc(userfaultfd_ctx_cachep, GFP_KERNEL);
		if (!ctx) {
			kfree(fctx);
			return -ENOMEM;
		}

		refcount_set(&ctx->refcount, 1);
		ctx->flags = octx->flags;
		ctx->features = octx->features;
		ctx->released = false;
		atomic_set(&ctx->mmap_changing, 0);
		ctx->mm = vma->vm_mm;
		mmgrab(ctx->mm);

		userfaultfd_ctx_get(octx);
		atomic_inc(&octx->mmap_changing);
		fctx->orig = octx;
		fctx->new = ctx;
		list_add_tail(&fctx->list, fcs);
	}

	vma->vm_userfaultfd_ctx.ctx = ctx;
	return 0;
}

static void dup_fctx(struct userfaultfd_fork_ctx *fctx)
{
	struct userfaultfd_ctx *ctx = fctx->orig;
	struct userfaultfd_wait_queue ewq;

	msg_init(&ewq.msg);

	ewq.msg.event = UFFD_EVENT_FORK;
	ewq.msg.arg.reserved.reserved1 = (unsigned long)fctx->new;

	userfaultfd_event_wait_completion(ctx, &ewq);
}

void dup_userfaultfd_complete(struct list_head *fcs)
{
	struct userfaultfd_fork_ctx *fctx, *n;

	list_for_each_entry_safe(fctx, n, fcs, list) {
		dup_fctx(fctx);
		list_del(&fctx->list);
		kfree(fctx);
	}
}

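/*
 * Called before mremap() moves a vma: keep the context alive (take a
 * reference and bump mmap_changing) so UFFD_EVENT_REMAP can be reported by
 * mremap_userfaultfd_complete(), or drop the uffd state from the vma when
 * the feature was not requested.
 */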
void mremap_userfaultfd_prep(struct vm_area_struct *vma,
			     struct vm_userfaultfd_ctx *vm_ctx)
{
	struct userfaultfd_ctx *ctx;

	ctx = vma->vm_userfaultfd_ctx.ctx;

	if (!ctx)
		return;

	if (ctx->features & UFFD_FEATURE_EVENT_REMAP) {
		vm_ctx->ctx = ctx;
		userfaultfd_ctx_get(ctx);
		atomic_inc(&ctx->mmap_changing);
	} else {
		/* Drop uffd context if remap feature not enabled */
		vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
		vma->vm_flags &= ~__VM_UFFD_FLAGS;
	}
}

void mremap_userfaultfd_complete(struct vm_userfaultfd_ctx *vm_ctx,
				 unsigned long from, unsigned long to,
				 unsigned long len)
{
	struct userfaultfd_ctx *ctx = vm_ctx->ctx;
	struct userfaultfd_wait_queue ewq;

	if (!ctx)
		return;

	if (to & ~PAGE_MASK) {
		userfaultfd_ctx_put(ctx);
		return;
	}

	msg_init(&ewq.msg);

	ewq.msg.event = UFFD_EVENT_REMAP;
	ewq.msg.arg.remap.from = from;
	ewq.msg.arg.remap.to = to;
	ewq.msg.arg.remap.len = len;

	userfaultfd_event_wait_completion(ctx, &ewq);
}

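/*
 * Report UFFD_EVENT_REMOVE for a range that is about to be zapped (e.g. by
 * madvise).  Returns true when nothing needs to be reported and the caller
 * still holds mmap_lock; returns false after dropping mmap_lock and waiting
 * for the event to be read.
 */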
bool userfaultfd_remove(struct vm_area_struct *vma,
			unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	struct userfaultfd_ctx *ctx;
	struct userfaultfd_wait_queue ewq;

	ctx = vma->vm_userfaultfd_ctx.ctx;
	if (!ctx || !(ctx->features & UFFD_FEATURE_EVENT_REMOVE))
		return true;

	userfaultfd_ctx_get(ctx);
	atomic_inc(&ctx->mmap_changing);
	mmap_read_unlock(mm);

	msg_init(&ewq.msg);

	ewq.msg.event = UFFD_EVENT_REMOVE;
	ewq.msg.arg.remove.start = start;
	ewq.msg.arg.remove.end = end;

	userfaultfd_event_wait_completion(ctx, &ewq);

	return false;
}

static bool has_unmap_ctx(struct userfaultfd_ctx *ctx, struct list_head *unmaps,
			  unsigned long start, unsigned long end)
{
	struct userfaultfd_unmap_ctx *unmap_ctx;

	list_for_each_entry(unmap_ctx, unmaps, list)
		if (unmap_ctx->ctx == ctx && unmap_ctx->start == start &&
		    unmap_ctx->end == end)
			return true;

	return false;
}

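/*
 * Before an unmap, collect one userfaultfd_unmap_ctx per context that
 * requested UFFD_FEATURE_EVENT_UNMAP for the range, so that
 * userfaultfd_unmap_complete() can deliver UFFD_EVENT_UNMAP once the vmas
 * are gone.
 */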
int userfaultfd_unmap_prep(struct vm_area_struct *vma,
			   unsigned long start, unsigned long end,
			   struct list_head *unmaps)
{
	for ( ; vma && vma->vm_start < end; vma = vma->vm_next) {
		struct userfaultfd_unmap_ctx *unmap_ctx;
		struct userfaultfd_ctx *ctx = vma->vm_userfaultfd_ctx.ctx;

		if (!ctx || !(ctx->features & UFFD_FEATURE_EVENT_UNMAP) ||
		    has_unmap_ctx(ctx, unmaps, start, end))
			continue;

		unmap_ctx = kzalloc(sizeof(*unmap_ctx), GFP_KERNEL);
		if (!unmap_ctx)
			return -ENOMEM;

		userfaultfd_ctx_get(ctx);
		atomic_inc(&ctx->mmap_changing);
		unmap_ctx->ctx = ctx;
		unmap_ctx->start = start;
		unmap_ctx->end = end;
		list_add_tail(&unmap_ctx->list, unmaps);
	}

	return 0;
}

void userfaultfd_unmap_complete(struct mm_struct *mm, struct list_head *uf)
{
	struct userfaultfd_unmap_ctx *ctx, *n;
	struct userfaultfd_wait_queue ewq;

	list_for_each_entry_safe(ctx, n, uf, list) {
		msg_init(&ewq.msg);

		ewq.msg.event = UFFD_EVENT_UNMAP;
		ewq.msg.arg.remove.start = ctx->start;
		ewq.msg.arg.remove.end = ctx->end;

		userfaultfd_event_wait_completion(ctx->ctx, &ewq);

		list_del(&ctx->list);
		kfree(ctx);
	}
}

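/*
 * ->release() for the userfaultfd file: mark the context released, strip the
 * uffd state from every vma of the mm, then wake all pending faults, events
 * and pollers so no task stays blocked on a dead userfaultfd.
 */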
static int userfaultfd_release(struct inode *inode, struct file *file)
{
	struct userfaultfd_ctx *ctx = file->private_data;
	struct mm_struct *mm = ctx->mm;
	struct vm_area_struct *vma, *prev;
	/* len == 0 means wake all */
	struct userfaultfd_wake_range range = { .len = 0, };
	unsigned long new_flags;

	WRITE_ONCE(ctx->released, true);

	if (!mmget_not_zero(mm))
		goto wakeup;

	/*
	 * Flush page faults out of all CPUs. NOTE: all page faults
	 * must be retried without returning VM_FAULT_SIGBUS if
	 * userfaultfd_ctx_get() succeeds but vma->vm_userfaultfd_ctx
	 * changes while handle_userfault released the mmap_lock. So
	 * it's critical that released is set to true (above), before
	 * taking the mmap_lock for writing.
	 */
	mmap_write_lock(mm);
	prev = NULL;
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		cond_resched();
		BUG_ON(!!vma->vm_userfaultfd_ctx.ctx ^
		       !!(vma->vm_flags & __VM_UFFD_FLAGS));
		if (vma->vm_userfaultfd_ctx.ctx != ctx) {
			prev = vma;
			continue;
		}
		new_flags = vma->vm_flags & ~__VM_UFFD_FLAGS;
		prev = vma_merge(mm, prev, vma->vm_start, vma->vm_end,
				 new_flags, vma->anon_vma,
				 vma->vm_file, vma->vm_pgoff,
				 vma_policy(vma),
				 NULL_VM_UFFD_CTX, anon_vma_name(vma));
		if (prev)
			vma = prev;
		else
			prev = vma;
		vma->vm_flags = new_flags;
		vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
	}
	mmap_write_unlock(mm);
	mmput(mm);
wakeup:
	/*
	 * After no new page faults can wait on this fault_*wqh, flush
	 * the last page faults that may have been already waiting on
	 * the fault_*wqh.
	 */
	spin_lock_irq(&ctx->fault_pending_wqh.lock);
	__wake_up_locked_key(&ctx->fault_pending_wqh, TASK_NORMAL, &range);
	__wake_up(&ctx->fault_wqh, TASK_NORMAL, 1, &range);
	spin_unlock_irq(&ctx->fault_pending_wqh.lock);

	/* Flush pending events that may still wait on event_wqh */
	wake_up_all(&ctx->event_wqh);

	wake_up_poll(&ctx->fd_wqh, EPOLLHUP);
	userfaultfd_ctx_put(ctx);
	return 0;
}

/* fault_pending_wqh.lock must be held by the caller */
static inline struct userfaultfd_wait_queue *find_userfault_in(
		wait_queue_head_t *wqh)
{
	wait_queue_entry_t *wq;
	struct userfaultfd_wait_queue *uwq;

	lockdep_assert_held(&wqh->lock);

	uwq = NULL;
	if (!waitqueue_active(wqh))
		goto out;
	/* walk in reverse to provide FIFO behavior to read userfaults */
	wq = list_last_entry(&wqh->head, typeof(*wq), entry);
	uwq = container_of(wq, struct userfaultfd_wait_queue, wq);
out:
	return uwq;
}

static inline struct userfaultfd_wait_queue *find_userfault(
		struct userfaultfd_ctx *ctx)
{
	return find_userfault_in(&ctx->fault_pending_wqh);
}

static inline struct userfaultfd_wait_queue *find_userfault_evt(
		struct userfaultfd_ctx *ctx)
{
	return find_userfault_in(&ctx->event_wqh);
}

Al Viro | 076ccb7 | 2017-07-03 01:02:18 -0400 | [diff] [blame] | 947 | static __poll_t userfaultfd_poll(struct file *file, poll_table *wait) |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 948 | { |
| 949 | struct userfaultfd_ctx *ctx = file->private_data; |
Al Viro | 076ccb7 | 2017-07-03 01:02:18 -0400 | [diff] [blame] | 950 | __poll_t ret; |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 951 | |
| 952 | poll_wait(file, &ctx->fd_wqh, wait); |
| 953 | |
Nadav Amit | 22e5fe2a | 2021-09-02 14:58:59 -0700 | [diff] [blame] | 954 | if (!userfaultfd_is_initialized(ctx)) |
Linus Torvalds | a9a0884 | 2018-02-11 14:34:03 -0800 | [diff] [blame] | 955 | return EPOLLERR; |
Pavel Emelyanov | 9cd75c3 | 2017-02-22 15:42:21 -0800 | [diff] [blame] | 956 | |
Nadav Amit | 22e5fe2a | 2021-09-02 14:58:59 -0700 | [diff] [blame] | 957 | /* |
| 958 | * poll() never guarantees that read won't block. |
| 959 | * userfaults can be woken before they're read(). |
| 960 | */ |
| 961 | if (unlikely(!(file->f_flags & O_NONBLOCK))) |
Linus Torvalds | a9a0884 | 2018-02-11 14:34:03 -0800 | [diff] [blame] | 962 | return EPOLLERR; |
Nadav Amit | 22e5fe2a | 2021-09-02 14:58:59 -0700 | [diff] [blame] | 963 | /* |
| 964 | * Lockless access to see if there are pending faults. |
| 965 | * __pollwait()'s last action is add_wait_queue(), but |
| 966 | * the spin_unlock could allow the waitqueue_active() to |
| 967 | * pass above the actual list_add inside the |
| 968 | * add_wait_queue() critical section. So use a full |
| 969 | * memory barrier to serialize the list_add write of |
| 970 | * add_wait_queue() with the waitqueue_active() read |
| 971 | * below. |
| 972 | */ |
| 973 | ret = 0; |
| 974 | smp_mb(); |
| 975 | if (waitqueue_active(&ctx->fault_pending_wqh)) |
| 976 | ret = EPOLLIN; |
| 977 | else if (waitqueue_active(&ctx->event_wqh)) |
| 978 | ret = EPOLLIN; |
| 979 | |
| 980 | return ret; |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 981 | } |
| 982 | |
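/*
 * Illustrative userspace sketch (not part of this file): how a monitor
 * thread typically waits for events with poll(2).  Note that
 * userfaultfd_poll() above returns EPOLLERR unless the descriptor was
 * opened with O_NONBLOCK and UFFDIO_API has already been performed.
 * The "uffd" variable is assumed to be such an initialized descriptor.
 *
 *	#include <poll.h>
 *	#include <unistd.h>
 *	#include <linux/userfaultfd.h>
 *
 *	static int wait_for_uffd_event(int uffd, struct uffd_msg *msg)
 *	{
 *		struct pollfd pfd = { .fd = uffd, .events = POLLIN };
 *
 *		if (poll(&pfd, 1, -1) < 0)
 *			return -1;
 *		// read() may still fail with EAGAIN: a fault can be woken
 *		// between poll() and read(), as the comment above notes.
 *		return read(uffd, msg, sizeof(*msg)) == sizeof(*msg) ? 0 : -1;
 *	}
 */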
Pavel Emelyanov | 893e26e | 2017-02-22 15:42:27 -0800 | [diff] [blame] | 983 | static const struct file_operations userfaultfd_fops; |
| 984 | |
Daniel Colascione | b537900 | 2021-01-08 14:22:23 -0800 | [diff] [blame] | 985 | static int resolve_userfault_fork(struct userfaultfd_ctx *new, |
| 986 | struct inode *inode, |
Pavel Emelyanov | 893e26e | 2017-02-22 15:42:27 -0800 | [diff] [blame] | 987 | struct uffd_msg *msg) |
| 988 | { |
| 989 | int fd; |
Pavel Emelyanov | 893e26e | 2017-02-22 15:42:27 -0800 | [diff] [blame] | 990 | |
Daniel Colascione | b537900 | 2021-01-08 14:22:23 -0800 | [diff] [blame] | 991 | fd = anon_inode_getfd_secure("[userfaultfd]", &userfaultfd_fops, new, |
| 992 | O_RDWR | (new->flags & UFFD_SHARED_FCNTL_FLAGS), inode); |
Pavel Emelyanov | 893e26e | 2017-02-22 15:42:27 -0800 | [diff] [blame] | 993 | if (fd < 0) |
| 994 | return fd; |
| 995 | |
Pavel Emelyanov | 893e26e | 2017-02-22 15:42:27 -0800 | [diff] [blame] | 996 | msg->arg.reserved.reserved1 = 0; |
| 997 | msg->arg.fork.ufd = fd; |
Pavel Emelyanov | 893e26e | 2017-02-22 15:42:27 -0800 | [diff] [blame] | 998 | return 0; |
| 999 | } |
| 1000 | |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1001 | static ssize_t userfaultfd_ctx_read(struct userfaultfd_ctx *ctx, int no_wait, |
Daniel Colascione | b537900 | 2021-01-08 14:22:23 -0800 | [diff] [blame] | 1002 | struct uffd_msg *msg, struct inode *inode) |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1003 | { |
| 1004 | ssize_t ret; |
| 1005 | DECLARE_WAITQUEUE(wait, current); |
Andrea Arcangeli | 15b726e | 2015-09-04 15:46:44 -0700 | [diff] [blame] | 1006 | struct userfaultfd_wait_queue *uwq; |
Pavel Emelyanov | 893e26e | 2017-02-22 15:42:27 -0800 | [diff] [blame] | 1007 | /* |
| 1008 | * Handling a fork event requires sleeping operations, so |
| 1009 | * we drop the event_wqh lock, do those ops, then |
| 1010 | * take it back and wake up the waiter. While the lock is |
| 1011 | * dropped the ewq may go away, so we keep track of it |
| 1012 | * carefully. |
| 1013 | */ |
| 1014 | LIST_HEAD(fork_event); |
| 1015 | struct userfaultfd_ctx *fork_nctx = NULL; |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1016 | |
Andrea Arcangeli | 15b726e | 2015-09-04 15:46:44 -0700 | [diff] [blame] | 1017 | /* always take the fd_wqh lock before the fault_pending_wqh lock */ |
Christoph Hellwig | ae62c16 | 2018-10-26 15:02:19 -0700 | [diff] [blame] | 1018 | spin_lock_irq(&ctx->fd_wqh.lock); |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1019 | __add_wait_queue(&ctx->fd_wqh, &wait); |
| 1020 | for (;;) { |
| 1021 | set_current_state(TASK_INTERRUPTIBLE); |
Andrea Arcangeli | 15b726e | 2015-09-04 15:46:44 -0700 | [diff] [blame] | 1022 | spin_lock(&ctx->fault_pending_wqh.lock); |
| 1023 | uwq = find_userfault(ctx); |
| 1024 | if (uwq) { |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1025 | /* |
Andrea Arcangeli | 2c5b7e1 | 2015-09-04 15:47:23 -0700 | [diff] [blame] | 1026 | * Use a seqcount to repeat the lockless check |
| 1027 | * in wake_userfault() to avoid missing |
| 1028 | * wakeups because during the refile both |
| 1029 | * waitqueues could become empty if this is the |
| 1030 | * only userfault. |
| 1031 | */ |
| 1032 | write_seqcount_begin(&ctx->refile_seq); |
| 1033 | |
| 1034 | /* |
Andrea Arcangeli | 15b726e | 2015-09-04 15:46:44 -0700 | [diff] [blame] | 1035 | * The fault_pending_wqh.lock prevents the uwq |
| 1036 | * from disappearing from under us. |
| 1037 | * |
| 1038 | * Refile this userfault from |
| 1039 | * fault_pending_wqh to fault_wqh; it's not |
| 1040 | * pending anymore after we read it. |
| 1041 | * |
| 1042 | * Use list_del() by hand (as |
| 1043 | * userfaultfd_wake_function also uses |
| 1044 | * list_del_init() by hand) to be sure nobody |
| 1045 | * changes __remove_wait_queue() to use |
| 1046 | * list_del_init() in turn breaking the |
| 1047 | * !list_empty_careful() check in |
Ingo Molnar | 2055da9 | 2017-06-20 12:06:46 +0200 | [diff] [blame] | 1048 | * handle_userfault(). The uwq->wq.head list |
Andrea Arcangeli | 15b726e | 2015-09-04 15:46:44 -0700 | [diff] [blame] | 1049 | * must never be empty at any time during the |
| 1050 | * refile, or the waitqueue could disappear |
| 1051 | * from under us. The "wait_queue_head_t" |
| 1052 | * parameter of __remove_wait_queue() is unused |
| 1053 | * anyway. |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1054 | */ |
Ingo Molnar | 2055da9 | 2017-06-20 12:06:46 +0200 | [diff] [blame] | 1055 | list_del(&uwq->wq.entry); |
Matthew Wilcox | c430d1e | 2018-08-21 21:56:30 -0700 | [diff] [blame] | 1056 | add_wait_queue(&ctx->fault_wqh, &uwq->wq); |
Andrea Arcangeli | 15b726e | 2015-09-04 15:46:44 -0700 | [diff] [blame] | 1057 | |
Andrea Arcangeli | 2c5b7e1 | 2015-09-04 15:47:23 -0700 | [diff] [blame] | 1058 | write_seqcount_end(&ctx->refile_seq); |
| 1059 | |
Andrea Arcangeli | a9b85f9 | 2015-09-04 15:46:37 -0700 | [diff] [blame] | 1060 | /* careful to always initialize msg if ret == 0 */ |
| 1061 | *msg = uwq->msg; |
Andrea Arcangeli | 15b726e | 2015-09-04 15:46:44 -0700 | [diff] [blame] | 1062 | spin_unlock(&ctx->fault_pending_wqh.lock); |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1063 | ret = 0; |
| 1064 | break; |
| 1065 | } |
Andrea Arcangeli | 15b726e | 2015-09-04 15:46:44 -0700 | [diff] [blame] | 1066 | spin_unlock(&ctx->fault_pending_wqh.lock); |
Pavel Emelyanov | 9cd75c3 | 2017-02-22 15:42:21 -0800 | [diff] [blame] | 1067 | |
| 1068 | spin_lock(&ctx->event_wqh.lock); |
| 1069 | uwq = find_userfault_evt(ctx); |
| 1070 | if (uwq) { |
| 1071 | *msg = uwq->msg; |
| 1072 | |
Pavel Emelyanov | 893e26e | 2017-02-22 15:42:27 -0800 | [diff] [blame] | 1073 | if (uwq->msg.event == UFFD_EVENT_FORK) { |
| 1074 | fork_nctx = (struct userfaultfd_ctx *) |
| 1075 | (unsigned long) |
| 1076 | uwq->msg.arg.reserved.reserved1; |
Ingo Molnar | 2055da9 | 2017-06-20 12:06:46 +0200 | [diff] [blame] | 1077 | list_move(&uwq->wq.entry, &fork_event); |
Andrea Arcangeli | 384632e | 2017-10-03 16:15:38 -0700 | [diff] [blame] | 1078 | /* |
| 1079 | * fork_nctx can be freed as soon as |
| 1080 | * we drop the lock, unless we take a |
| 1081 | * reference on it. |
| 1082 | */ |
| 1083 | userfaultfd_ctx_get(fork_nctx); |
Pavel Emelyanov | 893e26e | 2017-02-22 15:42:27 -0800 | [diff] [blame] | 1084 | spin_unlock(&ctx->event_wqh.lock); |
| 1085 | ret = 0; |
| 1086 | break; |
| 1087 | } |
| 1088 | |
Pavel Emelyanov | 9cd75c3 | 2017-02-22 15:42:21 -0800 | [diff] [blame] | 1089 | userfaultfd_event_complete(ctx, uwq); |
| 1090 | spin_unlock(&ctx->event_wqh.lock); |
| 1091 | ret = 0; |
| 1092 | break; |
| 1093 | } |
| 1094 | spin_unlock(&ctx->event_wqh.lock); |
| 1095 | |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1096 | if (signal_pending(current)) { |
| 1097 | ret = -ERESTARTSYS; |
| 1098 | break; |
| 1099 | } |
| 1100 | if (no_wait) { |
| 1101 | ret = -EAGAIN; |
| 1102 | break; |
| 1103 | } |
Christoph Hellwig | ae62c16 | 2018-10-26 15:02:19 -0700 | [diff] [blame] | 1104 | spin_unlock_irq(&ctx->fd_wqh.lock); |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1105 | schedule(); |
Christoph Hellwig | ae62c16 | 2018-10-26 15:02:19 -0700 | [diff] [blame] | 1106 | spin_lock_irq(&ctx->fd_wqh.lock); |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1107 | } |
| 1108 | __remove_wait_queue(&ctx->fd_wqh, &wait); |
| 1109 | __set_current_state(TASK_RUNNING); |
Christoph Hellwig | ae62c16 | 2018-10-26 15:02:19 -0700 | [diff] [blame] | 1110 | spin_unlock_irq(&ctx->fd_wqh.lock); |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1111 | |
Pavel Emelyanov | 893e26e | 2017-02-22 15:42:27 -0800 | [diff] [blame] | 1112 | if (!ret && msg->event == UFFD_EVENT_FORK) { |
Daniel Colascione | b537900 | 2021-01-08 14:22:23 -0800 | [diff] [blame] | 1113 | ret = resolve_userfault_fork(fork_nctx, inode, msg); |
Eric Biggers | cbcfa13 | 2019-07-04 15:14:39 -0700 | [diff] [blame] | 1114 | spin_lock_irq(&ctx->event_wqh.lock); |
Andrea Arcangeli | 384632e | 2017-10-03 16:15:38 -0700 | [diff] [blame] | 1115 | if (!list_empty(&fork_event)) { |
| 1116 | /* |
| 1117 | * The fork thread didn't abort, so we can |
| 1118 | * drop the temporary refcount. |
| 1119 | */ |
| 1120 | userfaultfd_ctx_put(fork_nctx); |
Pavel Emelyanov | 893e26e | 2017-02-22 15:42:27 -0800 | [diff] [blame] | 1121 | |
Andrea Arcangeli | 384632e | 2017-10-03 16:15:38 -0700 | [diff] [blame] | 1122 | uwq = list_first_entry(&fork_event, |
| 1123 | typeof(*uwq), |
| 1124 | wq.entry); |
| 1125 | /* |
| 1126 | * If the fork_event list wasn't empty, then in turn |
| 1127 | * the event wasn't already released by fork |
| 1128 | * (the event is allocated on the fork thread's kernel |
| 1129 | * stack), so put the event back in its place in |
| 1130 | * the event_wqh. The fork_event head will be freed |
| 1131 | * as soon as we return, so the event cannot |
| 1132 | * stay queued there no matter the current |
| 1133 | * "ret" value. |
| 1134 | */ |
| 1135 | list_del(&uwq->wq.entry); |
| 1136 | __add_wait_queue(&ctx->event_wqh, &uwq->wq); |
| 1137 | |
| 1138 | /* |
| 1139 | * Leave the event in the waitqueue and report |
| 1140 | * error to userland if we failed to resolve |
| 1141 | * the userfault fork. |
| 1142 | */ |
| 1143 | if (likely(!ret)) |
Pavel Emelyanov | 893e26e | 2017-02-22 15:42:27 -0800 | [diff] [blame] | 1144 | userfaultfd_event_complete(ctx, uwq); |
Andrea Arcangeli | 384632e | 2017-10-03 16:15:38 -0700 | [diff] [blame] | 1145 | } else { |
| 1146 | /* |
| 1147 | * Here the fork thread aborted and the |
| 1148 | * refcount from the fork thread on fork_nctx |
| 1149 | * has already been released. We still hold |
| 1150 | * the reference we took before releasing the |
| 1151 | * lock above. If resolve_userfault_fork |
| 1152 | * failed we have to drop it because the |
| 1153 | * fork_nctx has to be freed in that case. If |
| 1154 | * it succeeded we keep holding it because the new |
| 1155 | * uffd references it. |
| 1156 | */ |
| 1157 | if (ret) |
| 1158 | userfaultfd_ctx_put(fork_nctx); |
Pavel Emelyanov | 893e26e | 2017-02-22 15:42:27 -0800 | [diff] [blame] | 1159 | } |
Eric Biggers | cbcfa13 | 2019-07-04 15:14:39 -0700 | [diff] [blame] | 1160 | spin_unlock_irq(&ctx->event_wqh.lock); |
Pavel Emelyanov | 893e26e | 2017-02-22 15:42:27 -0800 | [diff] [blame] | 1161 | } |
| 1162 | |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1163 | return ret; |
| 1164 | } |
| 1165 | |
| 1166 | static ssize_t userfaultfd_read(struct file *file, char __user *buf, |
| 1167 | size_t count, loff_t *ppos) |
| 1168 | { |
| 1169 | struct userfaultfd_ctx *ctx = file->private_data; |
| 1170 | ssize_t _ret, ret = 0; |
Andrea Arcangeli | a9b85f9 | 2015-09-04 15:46:37 -0700 | [diff] [blame] | 1171 | struct uffd_msg msg; |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1172 | int no_wait = file->f_flags & O_NONBLOCK; |
Daniel Colascione | b537900 | 2021-01-08 14:22:23 -0800 | [diff] [blame] | 1173 | struct inode *inode = file_inode(file); |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1174 | |
Nadav Amit | 22e5fe2a | 2021-09-02 14:58:59 -0700 | [diff] [blame] | 1175 | if (!userfaultfd_is_initialized(ctx)) |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1176 | return -EINVAL; |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1177 | |
| 1178 | for (;;) { |
Andrea Arcangeli | a9b85f9 | 2015-09-04 15:46:37 -0700 | [diff] [blame] | 1179 | if (count < sizeof(msg)) |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1180 | return ret ? ret : -EINVAL; |
Daniel Colascione | b537900 | 2021-01-08 14:22:23 -0800 | [diff] [blame] | 1181 | _ret = userfaultfd_ctx_read(ctx, no_wait, &msg, inode); |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1182 | if (_ret < 0) |
| 1183 | return ret ? ret : _ret; |
Andrea Arcangeli | a9b85f9 | 2015-09-04 15:46:37 -0700 | [diff] [blame] | 1184 | if (copy_to_user((__u64 __user *) buf, &msg, sizeof(msg))) |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1185 | return ret ? ret : -EFAULT; |
Andrea Arcangeli | a9b85f9 | 2015-09-04 15:46:37 -0700 | [diff] [blame] | 1186 | ret += sizeof(msg); |
| 1187 | buf += sizeof(msg); |
| 1188 | count -= sizeof(msg); |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1189 | /* |
| 1190 | * Allow reading more than one fault at a time, but only |
| 1191 | * block while waiting for the very first one. |
| 1192 | */ |
| 1193 | no_wait = O_NONBLOCK; |
| 1194 | } |
| 1195 | } |
| 1196 | |
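/*
 * Illustrative userspace sketch (not part of this file): the read side
 * that userfaultfd_read()/userfaultfd_ctx_read() above serve.  One
 * read() returns one struct uffd_msg; a UFFD_EVENT_FORK message carries
 * a new userfaultfd for the child in msg.arg.fork.ufd (the fd installed
 * by resolve_userfault_fork()).  "uffd" is assumed to be an initialized,
 * UFFD_FEATURE_EVENT_FORK-enabled descriptor; handle_missing_fault() and
 * track_new_uffd() are hypothetical helpers of the monitor.
 *
 *	#include <unistd.h>
 *	#include <linux/userfaultfd.h>
 *
 *	static void handle_one_msg(int uffd)
 *	{
 *		struct uffd_msg msg;
 *
 *		if (read(uffd, &msg, sizeof(msg)) != sizeof(msg))
 *			return;			// EAGAIN, EINTR, ...
 *		switch (msg.event) {
 *		case UFFD_EVENT_PAGEFAULT:
 *			handle_missing_fault(uffd, msg.arg.pagefault.address);
 *			break;
 *		case UFFD_EVENT_FORK:
 *			// start tracking the child's address space too
 *			track_new_uffd((int)msg.arg.fork.ufd);
 *			break;
 *		}
 *	}
 */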
| 1197 | static void __wake_userfault(struct userfaultfd_ctx *ctx, |
| 1198 | struct userfaultfd_wake_range *range) |
| 1199 | { |
Eric Biggers | cbcfa13 | 2019-07-04 15:14:39 -0700 | [diff] [blame] | 1200 | spin_lock_irq(&ctx->fault_pending_wqh.lock); |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1201 | /* wake all in the range and autoremove */ |
Andrea Arcangeli | 15b726e | 2015-09-04 15:46:44 -0700 | [diff] [blame] | 1202 | if (waitqueue_active(&ctx->fault_pending_wqh)) |
Andrea Arcangeli | ac5be6b | 2015-09-22 14:58:49 -0700 | [diff] [blame] | 1203 | __wake_up_locked_key(&ctx->fault_pending_wqh, TASK_NORMAL, |
Andrea Arcangeli | 15b726e | 2015-09-04 15:46:44 -0700 | [diff] [blame] | 1204 | range); |
| 1205 | if (waitqueue_active(&ctx->fault_wqh)) |
Matthew Wilcox | c430d1e | 2018-08-21 21:56:30 -0700 | [diff] [blame] | 1206 | __wake_up(&ctx->fault_wqh, TASK_NORMAL, 1, range); |
Eric Biggers | cbcfa13 | 2019-07-04 15:14:39 -0700 | [diff] [blame] | 1207 | spin_unlock_irq(&ctx->fault_pending_wqh.lock); |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1208 | } |
| 1209 | |
| 1210 | static __always_inline void wake_userfault(struct userfaultfd_ctx *ctx, |
| 1211 | struct userfaultfd_wake_range *range) |
| 1212 | { |
Andrea Arcangeli | 2c5b7e1 | 2015-09-04 15:47:23 -0700 | [diff] [blame] | 1213 | unsigned seq; |
| 1214 | bool need_wakeup; |
| 1215 | |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1216 | /* |
| 1217 | * To be sure waitqueue_active() is not reordered by the CPU |
| 1218 | * before the pagetable update, use an explicit SMP memory |
Michel Lespinasse | 3e4e28c | 2020-06-08 21:33:51 -0700 | [diff] [blame] | 1219 | * barrier here. PT lock release or mmap_read_unlock(mm) only |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1220 | * have release semantics, which can still allow the |
| 1221 | * waitqueue_active() read to be reordered before the pte update. |
| 1222 | */ |
| 1223 | smp_mb(); |
| 1224 | |
| 1225 | /* |
| 1226 | * Use waitqueue_active because the address space is very |
| 1227 | * frequently changed atomically even when there are no |
| 1228 | * userfaults yet. So we take the spinlock only when we're |
| 1229 | * sure we have userfaults to wake. |
| 1230 | */ |
Andrea Arcangeli | 2c5b7e1 | 2015-09-04 15:47:23 -0700 | [diff] [blame] | 1231 | do { |
| 1232 | seq = read_seqcount_begin(&ctx->refile_seq); |
| 1233 | need_wakeup = waitqueue_active(&ctx->fault_pending_wqh) || |
| 1234 | waitqueue_active(&ctx->fault_wqh); |
| 1235 | cond_resched(); |
| 1236 | } while (read_seqcount_retry(&ctx->refile_seq, seq)); |
| 1237 | if (need_wakeup) |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1238 | __wake_userfault(ctx, range); |
| 1239 | } |
| 1240 | |
| 1241 | static __always_inline int validate_range(struct mm_struct *mm, |
Peter Collingbourne | e71e2ac | 2021-07-23 15:50:01 -0700 | [diff] [blame] | 1242 | __u64 start, __u64 len) |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1243 | { |
| 1244 | __u64 task_size = mm->task_size; |
| 1245 | |
Peter Collingbourne | e71e2ac | 2021-07-23 15:50:01 -0700 | [diff] [blame] | 1246 | if (start & ~PAGE_MASK) |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1247 | return -EINVAL; |
| 1248 | if (len & ~PAGE_MASK) |
| 1249 | return -EINVAL; |
| 1250 | if (!len) |
| 1251 | return -EINVAL; |
Peter Collingbourne | e71e2ac | 2021-07-23 15:50:01 -0700 | [diff] [blame] | 1252 | if (start < mmap_min_addr) |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1253 | return -EINVAL; |
Peter Collingbourne | e71e2ac | 2021-07-23 15:50:01 -0700 | [diff] [blame] | 1254 | if (start >= task_size) |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1255 | return -EINVAL; |
Peter Collingbourne | e71e2ac | 2021-07-23 15:50:01 -0700 | [diff] [blame] | 1256 | if (len > task_size - start) |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1257 | return -EINVAL; |
| 1258 | return 0; |
| 1259 | } |
| 1260 | |
| 1261 | static int userfaultfd_register(struct userfaultfd_ctx *ctx, |
| 1262 | unsigned long arg) |
| 1263 | { |
| 1264 | struct mm_struct *mm = ctx->mm; |
| 1265 | struct vm_area_struct *vma, *prev, *cur; |
| 1266 | int ret; |
| 1267 | struct uffdio_register uffdio_register; |
| 1268 | struct uffdio_register __user *user_uffdio_register; |
| 1269 | unsigned long vm_flags, new_flags; |
| 1270 | bool found; |
Mike Rapoport | ce53e8e | 2017-09-06 16:23:12 -0700 | [diff] [blame] | 1271 | bool basic_ioctls; |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1272 | unsigned long start, end, vma_end; |
| 1273 | |
| 1274 | user_uffdio_register = (struct uffdio_register __user *) arg; |
| 1275 | |
| 1276 | ret = -EFAULT; |
| 1277 | if (copy_from_user(&uffdio_register, user_uffdio_register, |
| 1278 | sizeof(uffdio_register)-sizeof(__u64))) |
| 1279 | goto out; |
| 1280 | |
| 1281 | ret = -EINVAL; |
| 1282 | if (!uffdio_register.mode) |
| 1283 | goto out; |
Axel Rasmussen | 7677f7f | 2021-05-04 18:35:36 -0700 | [diff] [blame] | 1284 | if (uffdio_register.mode & ~UFFD_API_REGISTER_MODES) |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1285 | goto out; |
| 1286 | vm_flags = 0; |
| 1287 | if (uffdio_register.mode & UFFDIO_REGISTER_MODE_MISSING) |
| 1288 | vm_flags |= VM_UFFD_MISSING; |
Peter Xu | 00b151f2 | 2021-06-30 18:49:06 -0700 | [diff] [blame] | 1289 | if (uffdio_register.mode & UFFDIO_REGISTER_MODE_WP) { |
| 1290 | #ifndef CONFIG_HAVE_ARCH_USERFAULTFD_WP |
| 1291 | goto out; |
| 1292 | #endif |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1293 | vm_flags |= VM_UFFD_WP; |
Peter Xu | 00b151f2 | 2021-06-30 18:49:06 -0700 | [diff] [blame] | 1294 | } |
Axel Rasmussen | 7677f7f | 2021-05-04 18:35:36 -0700 | [diff] [blame] | 1295 | if (uffdio_register.mode & UFFDIO_REGISTER_MODE_MINOR) { |
| 1296 | #ifndef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR |
| 1297 | goto out; |
| 1298 | #endif |
| 1299 | vm_flags |= VM_UFFD_MINOR; |
| 1300 | } |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1301 | |
Peter Collingbourne | e71e2ac | 2021-07-23 15:50:01 -0700 | [diff] [blame] | 1302 | ret = validate_range(mm, uffdio_register.range.start, |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1303 | uffdio_register.range.len); |
| 1304 | if (ret) |
| 1305 | goto out; |
| 1306 | |
| 1307 | start = uffdio_register.range.start; |
| 1308 | end = start + uffdio_register.range.len; |
| 1309 | |
Oleg Nesterov | d2005e3 | 2016-05-20 16:58:36 -0700 | [diff] [blame] | 1310 | ret = -ENOMEM; |
| 1311 | if (!mmget_not_zero(mm)) |
| 1312 | goto out; |
| 1313 | |
Michel Lespinasse | d8ed45c | 2020-06-08 21:33:25 -0700 | [diff] [blame] | 1314 | mmap_write_lock(mm); |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1315 | vma = find_vma_prev(mm, start, &prev); |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1316 | if (!vma) |
| 1317 | goto out_unlock; |
| 1318 | |
| 1319 | /* check that there's at least one vma in the range */ |
| 1320 | ret = -EINVAL; |
| 1321 | if (vma->vm_start >= end) |
| 1322 | goto out_unlock; |
| 1323 | |
| 1324 | /* |
Mike Kravetz | cab350a | 2017-02-22 15:43:04 -0800 | [diff] [blame] | 1325 | * If the first vma contains huge pages, make sure start address |
| 1326 | * is aligned to huge page size. |
| 1327 | */ |
| 1328 | if (is_vm_hugetlb_page(vma)) { |
| 1329 | unsigned long vma_hpagesize = vma_kernel_pagesize(vma); |
| 1330 | |
| 1331 | if (start & (vma_hpagesize - 1)) |
| 1332 | goto out_unlock; |
| 1333 | } |
| 1334 | |
| 1335 | /* |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1336 | * Search for incompatible vmas. |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1337 | */ |
| 1338 | found = false; |
Mike Rapoport | ce53e8e | 2017-09-06 16:23:12 -0700 | [diff] [blame] | 1339 | basic_ioctls = false; |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1340 | for (cur = vma; cur && cur->vm_start < end; cur = cur->vm_next) { |
| 1341 | cond_resched(); |
| 1342 | |
| 1343 | BUG_ON(!!cur->vm_userfaultfd_ctx.ctx ^ |
Axel Rasmussen | 7677f7f | 2021-05-04 18:35:36 -0700 | [diff] [blame] | 1344 | !!(cur->vm_flags & __VM_UFFD_FLAGS)); |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1345 | |
| 1346 | /* check for incompatible vmas */ |
| 1347 | ret = -EINVAL; |
Andrea Arcangeli | 63b2d41 | 2020-04-06 20:06:12 -0700 | [diff] [blame] | 1348 | if (!vma_can_userfault(cur, vm_flags)) |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1349 | goto out_unlock; |
Andrea Arcangeli | 29ec9066 | 2018-11-30 14:09:32 -0800 | [diff] [blame] | 1350 | |
| 1351 | /* |
| 1352 | * UFFDIO_COPY will fill file holes even without |
| 1353 | * PROT_WRITE. This check enforces that if this is a |
| 1354 | * MAP_SHARED mapping, the process has write permission to the backing |
| 1355 | * file. If VM_MAYWRITE is set it also enforces that on a |
| 1356 | * MAP_SHARED vma: there is no F_SEAL_WRITE and no further |
| 1357 | * F_SEAL_WRITE can be taken until the vma is destroyed. |
| 1358 | */ |
| 1359 | ret = -EPERM; |
| 1360 | if (unlikely(!(cur->vm_flags & VM_MAYWRITE))) |
| 1361 | goto out_unlock; |
| 1362 | |
Mike Kravetz | cab350a | 2017-02-22 15:43:04 -0800 | [diff] [blame] | 1363 | /* |
| 1364 | * If this vma contains the ending address, and it contains huge |
| 1365 | * pages, check the alignment of the end address. |
| 1366 | */ |
| 1367 | if (is_vm_hugetlb_page(cur) && end <= cur->vm_end && |
| 1368 | end > cur->vm_start) { |
| 1369 | unsigned long vma_hpagesize = vma_kernel_pagesize(cur); |
| 1370 | |
| 1371 | ret = -EINVAL; |
| 1372 | |
| 1373 | if (end & (vma_hpagesize - 1)) |
| 1374 | goto out_unlock; |
| 1375 | } |
Andrea Arcangeli | 63b2d41 | 2020-04-06 20:06:12 -0700 | [diff] [blame] | 1376 | if ((vm_flags & VM_UFFD_WP) && !(cur->vm_flags & VM_MAYWRITE)) |
| 1377 | goto out_unlock; |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1378 | |
| 1379 | /* |
| 1380 | * Check that this vma isn't already owned by a |
| 1381 | * different userfaultfd. We can't allow more than one |
| 1382 | * userfaultfd to own a single vma simultaneously or we |
| 1383 | * wouldn't know which one to deliver the userfaults to. |
| 1384 | */ |
| 1385 | ret = -EBUSY; |
| 1386 | if (cur->vm_userfaultfd_ctx.ctx && |
| 1387 | cur->vm_userfaultfd_ctx.ctx != ctx) |
| 1388 | goto out_unlock; |
| 1389 | |
Mike Kravetz | cab350a | 2017-02-22 15:43:04 -0800 | [diff] [blame] | 1390 | /* |
| 1391 | * Note vmas containing huge pages |
| 1392 | */ |
Mike Rapoport | ce53e8e | 2017-09-06 16:23:12 -0700 | [diff] [blame] | 1393 | if (is_vm_hugetlb_page(cur)) |
| 1394 | basic_ioctls = true; |
Mike Kravetz | cab350a | 2017-02-22 15:43:04 -0800 | [diff] [blame] | 1395 | |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1396 | found = true; |
| 1397 | } |
| 1398 | BUG_ON(!found); |
| 1399 | |
| 1400 | if (vma->vm_start < start) |
| 1401 | prev = vma; |
| 1402 | |
| 1403 | ret = 0; |
| 1404 | do { |
| 1405 | cond_resched(); |
| 1406 | |
Andrea Arcangeli | 63b2d41 | 2020-04-06 20:06:12 -0700 | [diff] [blame] | 1407 | BUG_ON(!vma_can_userfault(vma, vm_flags)); |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1408 | BUG_ON(vma->vm_userfaultfd_ctx.ctx && |
| 1409 | vma->vm_userfaultfd_ctx.ctx != ctx); |
Andrea Arcangeli | 29ec9066 | 2018-11-30 14:09:32 -0800 | [diff] [blame] | 1410 | WARN_ON(!(vma->vm_flags & VM_MAYWRITE)); |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1411 | |
| 1412 | /* |
| 1413 | * Nothing to do: this vma is already registered into this |
| 1414 | * userfaultfd and with the right tracking mode too. |
| 1415 | */ |
| 1416 | if (vma->vm_userfaultfd_ctx.ctx == ctx && |
| 1417 | (vma->vm_flags & vm_flags) == vm_flags) |
| 1418 | goto skip; |
| 1419 | |
| 1420 | if (vma->vm_start > start) |
| 1421 | start = vma->vm_start; |
| 1422 | vma_end = min(end, vma->vm_end); |
| 1423 | |
Axel Rasmussen | 7677f7f | 2021-05-04 18:35:36 -0700 | [diff] [blame] | 1424 | new_flags = (vma->vm_flags & ~__VM_UFFD_FLAGS) | vm_flags; |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1425 | prev = vma_merge(mm, prev, start, vma_end, new_flags, |
| 1426 | vma->anon_vma, vma->vm_file, vma->vm_pgoff, |
| 1427 | vma_policy(vma), |
Colin Cross | 9a10064 | 2022-01-14 14:05:59 -0800 | [diff] [blame] | 1428 | ((struct vm_userfaultfd_ctx){ ctx }), |
Suren Baghdasaryan | 5c26f6a | 2022-03-04 20:28:51 -0800 | [diff] [blame] | 1429 | anon_vma_name(vma)); |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1430 | if (prev) { |
| 1431 | vma = prev; |
| 1432 | goto next; |
| 1433 | } |
| 1434 | if (vma->vm_start < start) { |
| 1435 | ret = split_vma(mm, vma, start, 1); |
| 1436 | if (ret) |
| 1437 | break; |
| 1438 | } |
| 1439 | if (vma->vm_end > end) { |
| 1440 | ret = split_vma(mm, vma, end, 0); |
| 1441 | if (ret) |
| 1442 | break; |
| 1443 | } |
| 1444 | next: |
| 1445 | /* |
| 1446 | * In the vma_merge() successful mprotect-like case 8: |
| 1447 | * the next vma was merged into the current one and |
| 1448 | * the current one has not been updated yet. |
| 1449 | */ |
| 1450 | vma->vm_flags = new_flags; |
| 1451 | vma->vm_userfaultfd_ctx.ctx = ctx; |
| 1452 | |
Peter Xu | 6dfeaff | 2021-05-04 18:33:13 -0700 | [diff] [blame] | 1453 | if (is_vm_hugetlb_page(vma) && uffd_disable_huge_pmd_share(vma)) |
| 1454 | hugetlb_unshare_all_pmds(vma); |
| 1455 | |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1456 | skip: |
| 1457 | prev = vma; |
| 1458 | start = vma->vm_end; |
| 1459 | vma = vma->vm_next; |
| 1460 | } while (vma && vma->vm_start < end); |
| 1461 | out_unlock: |
Michel Lespinasse | d8ed45c | 2020-06-08 21:33:25 -0700 | [diff] [blame] | 1462 | mmap_write_unlock(mm); |
Oleg Nesterov | d2005e3 | 2016-05-20 16:58:36 -0700 | [diff] [blame] | 1463 | mmput(mm); |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1464 | if (!ret) { |
Peter Xu | 1481930 | 2020-04-06 20:06:29 -0700 | [diff] [blame] | 1465 | __u64 ioctls_out; |
| 1466 | |
| 1467 | ioctls_out = basic_ioctls ? UFFD_API_RANGE_IOCTLS_BASIC : |
| 1468 | UFFD_API_RANGE_IOCTLS; |
| 1469 | |
| 1470 | /* |
| 1471 | * Declare the WP ioctl only if the WP mode is |
| 1472 | * specified and all checks passed with the range |
| 1473 | */ |
| 1474 | if (!(uffdio_register.mode & UFFDIO_REGISTER_MODE_WP)) |
| 1475 | ioctls_out &= ~((__u64)1 << _UFFDIO_WRITEPROTECT); |
| 1476 | |
Axel Rasmussen | f619147 | 2021-05-04 18:35:49 -0700 | [diff] [blame] | 1477 | /* CONTINUE ioctl is only supported for MINOR ranges. */ |
| 1478 | if (!(uffdio_register.mode & UFFDIO_REGISTER_MODE_MINOR)) |
| 1479 | ioctls_out &= ~((__u64)1 << _UFFDIO_CONTINUE); |
| 1480 | |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1481 | /* |
| 1482 | * Now that we scanned all vmas we can already tell |
| 1483 | * userland which ioctl methods are guaranteed to |
| 1484 | * succeed on this range. |
| 1485 | */ |
Peter Xu | 1481930 | 2020-04-06 20:06:29 -0700 | [diff] [blame] | 1486 | if (put_user(ioctls_out, &user_uffdio_register->ioctls)) |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1487 | ret = -EFAULT; |
| 1488 | } |
| 1489 | out: |
| 1490 | return ret; |
| 1491 | } |
| 1492 | |
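/*
 * Illustrative userspace sketch (not part of this file): the caller's
 * side of the UFFDIO_REGISTER path handled above.  After the ioctl
 * succeeds, the kernel reports in uffdio_register.ioctls which range
 * ioctls are guaranteed to work on the range (the put_user() of
 * ioctls_out above).  "uffd", "addr" and "len" are assumed; they must
 * be page aligned (huge page aligned for hugetlbfs vmas).
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/userfaultfd.h>
 *
 *	static int register_missing(int uffd, void *addr, unsigned long len)
 *	{
 *		struct uffdio_register reg = {
 *			.range = { .start = (unsigned long)addr, .len = len },
 *			.mode  = UFFDIO_REGISTER_MODE_MISSING,
 *		};
 *
 *		if (ioctl(uffd, UFFDIO_REGISTER, &reg) == -1)
 *			return -1;
 *		// make sure UFFDIO_COPY can be used to resolve the faults
 *		return (reg.ioctls & (1ULL << _UFFDIO_COPY)) ? 0 : -1;
 *	}
 */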
| 1493 | static int userfaultfd_unregister(struct userfaultfd_ctx *ctx, |
| 1494 | unsigned long arg) |
| 1495 | { |
| 1496 | struct mm_struct *mm = ctx->mm; |
| 1497 | struct vm_area_struct *vma, *prev, *cur; |
| 1498 | int ret; |
| 1499 | struct uffdio_range uffdio_unregister; |
| 1500 | unsigned long new_flags; |
| 1501 | bool found; |
| 1502 | unsigned long start, end, vma_end; |
| 1503 | const void __user *buf = (void __user *)arg; |
| 1504 | |
| 1505 | ret = -EFAULT; |
| 1506 | if (copy_from_user(&uffdio_unregister, buf, sizeof(uffdio_unregister))) |
| 1507 | goto out; |
| 1508 | |
Peter Collingbourne | e71e2ac | 2021-07-23 15:50:01 -0700 | [diff] [blame] | 1509 | ret = validate_range(mm, uffdio_unregister.start, |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1510 | uffdio_unregister.len); |
| 1511 | if (ret) |
| 1512 | goto out; |
| 1513 | |
| 1514 | start = uffdio_unregister.start; |
| 1515 | end = start + uffdio_unregister.len; |
| 1516 | |
Oleg Nesterov | d2005e3 | 2016-05-20 16:58:36 -0700 | [diff] [blame] | 1517 | ret = -ENOMEM; |
| 1518 | if (!mmget_not_zero(mm)) |
| 1519 | goto out; |
| 1520 | |
Michel Lespinasse | d8ed45c | 2020-06-08 21:33:25 -0700 | [diff] [blame] | 1521 | mmap_write_lock(mm); |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1522 | vma = find_vma_prev(mm, start, &prev); |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1523 | if (!vma) |
| 1524 | goto out_unlock; |
| 1525 | |
| 1526 | /* check that there's at least one vma in the range */ |
| 1527 | ret = -EINVAL; |
| 1528 | if (vma->vm_start >= end) |
| 1529 | goto out_unlock; |
| 1530 | |
| 1531 | /* |
Mike Kravetz | cab350a | 2017-02-22 15:43:04 -0800 | [diff] [blame] | 1532 | * If the first vma contains huge pages, make sure start address |
| 1533 | * is aligned to huge page size. |
| 1534 | */ |
| 1535 | if (is_vm_hugetlb_page(vma)) { |
| 1536 | unsigned long vma_hpagesize = vma_kernel_pagesize(vma); |
| 1537 | |
| 1538 | if (start & (vma_hpagesize - 1)) |
| 1539 | goto out_unlock; |
| 1540 | } |
| 1541 | |
| 1542 | /* |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1543 | * Search for incompatible vmas. |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1544 | */ |
| 1545 | found = false; |
| 1546 | ret = -EINVAL; |
| 1547 | for (cur = vma; cur && cur->vm_start < end; cur = cur->vm_next) { |
| 1548 | cond_resched(); |
| 1549 | |
| 1550 | BUG_ON(!!cur->vm_userfaultfd_ctx.ctx ^ |
Axel Rasmussen | 7677f7f | 2021-05-04 18:35:36 -0700 | [diff] [blame] | 1551 | !!(cur->vm_flags & __VM_UFFD_FLAGS)); |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1552 | |
| 1553 | /* |
| 1554 | * Check for incompatible vmas. Not strictly required |
| 1555 | * here, as incompatible vmas cannot have a |
| 1556 | * userfaultfd_ctx registered on them, but this |
| 1557 | * provides stricter behavior so that |
| 1558 | * unregistration errors are noticed. |
| 1559 | */ |
Andrea Arcangeli | 63b2d41 | 2020-04-06 20:06:12 -0700 | [diff] [blame] | 1560 | if (!vma_can_userfault(cur, cur->vm_flags)) |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1561 | goto out_unlock; |
| 1562 | |
| 1563 | found = true; |
| 1564 | } |
| 1565 | BUG_ON(!found); |
| 1566 | |
| 1567 | if (vma->vm_start < start) |
| 1568 | prev = vma; |
| 1569 | |
| 1570 | ret = 0; |
| 1571 | do { |
| 1572 | cond_resched(); |
| 1573 | |
Andrea Arcangeli | 63b2d41 | 2020-04-06 20:06:12 -0700 | [diff] [blame] | 1574 | BUG_ON(!vma_can_userfault(vma, vma->vm_flags)); |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1575 | |
| 1576 | /* |
| 1577 | * Nothing to do: this vma is not registered with any |
| 1578 | * userfaultfd, so there is nothing to unregister. |
| 1579 | */ |
| 1580 | if (!vma->vm_userfaultfd_ctx.ctx) |
| 1581 | goto skip; |
| 1582 | |
Andrea Arcangeli | 01e881f | 2018-12-14 14:17:17 -0800 | [diff] [blame] | 1583 | WARN_ON(!(vma->vm_flags & VM_MAYWRITE)); |
| 1584 | |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1585 | if (vma->vm_start > start) |
| 1586 | start = vma->vm_start; |
| 1587 | vma_end = min(end, vma->vm_end); |
| 1588 | |
Andrea Arcangeli | 09fa529 | 2017-02-22 15:42:46 -0800 | [diff] [blame] | 1589 | if (userfaultfd_missing(vma)) { |
| 1590 | /* |
| 1591 | * Wake any concurrent pending userfaults while |
| 1592 | * we unregister, so they will not hang |
| 1593 | * permanently and userland does not have to call |
| 1594 | * UFFDIO_WAKE explicitly. |
| 1595 | */ |
| 1596 | struct userfaultfd_wake_range range; |
| 1597 | range.start = start; |
| 1598 | range.len = vma_end - start; |
| 1599 | wake_userfault(vma->vm_userfaultfd_ctx.ctx, &range); |
| 1600 | } |
| 1601 | |
Axel Rasmussen | 7677f7f | 2021-05-04 18:35:36 -0700 | [diff] [blame] | 1602 | new_flags = vma->vm_flags & ~__VM_UFFD_FLAGS; |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1603 | prev = vma_merge(mm, prev, start, vma_end, new_flags, |
| 1604 | vma->anon_vma, vma->vm_file, vma->vm_pgoff, |
| 1605 | vma_policy(vma), |
Suren Baghdasaryan | 5c26f6a | 2022-03-04 20:28:51 -0800 | [diff] [blame] | 1606 | NULL_VM_UFFD_CTX, anon_vma_name(vma)); |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1607 | if (prev) { |
| 1608 | vma = prev; |
| 1609 | goto next; |
| 1610 | } |
| 1611 | if (vma->vm_start < start) { |
| 1612 | ret = split_vma(mm, vma, start, 1); |
| 1613 | if (ret) |
| 1614 | break; |
| 1615 | } |
| 1616 | if (vma->vm_end > end) { |
| 1617 | ret = split_vma(mm, vma, end, 0); |
| 1618 | if (ret) |
| 1619 | break; |
| 1620 | } |
| 1621 | next: |
| 1622 | /* |
| 1623 | * In the vma_merge() successful mprotect-like case 8: |
| 1624 | * the next vma was merged into the current one and |
| 1625 | * the current one has not been updated yet. |
| 1626 | */ |
| 1627 | vma->vm_flags = new_flags; |
| 1628 | vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX; |
| 1629 | |
| 1630 | skip: |
| 1631 | prev = vma; |
| 1632 | start = vma->vm_end; |
| 1633 | vma = vma->vm_next; |
| 1634 | } while (vma && vma->vm_start < end); |
| 1635 | out_unlock: |
Michel Lespinasse | d8ed45c | 2020-06-08 21:33:25 -0700 | [diff] [blame] | 1636 | mmap_write_unlock(mm); |
Oleg Nesterov | d2005e3 | 2016-05-20 16:58:36 -0700 | [diff] [blame] | 1637 | mmput(mm); |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1638 | out: |
| 1639 | return ret; |
| 1640 | } |
| 1641 | |
| 1642 | /* |
Andrea Arcangeli | ba85c70 | 2015-09-04 15:46:41 -0700 | [diff] [blame] | 1643 | * userfaultfd_wake may be used in combination with the |
| 1644 | * UFFDIO_*_MODE_DONTWAKE flags to wake up userfaults in batches. |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1645 | */ |
| 1646 | static int userfaultfd_wake(struct userfaultfd_ctx *ctx, |
| 1647 | unsigned long arg) |
| 1648 | { |
| 1649 | int ret; |
| 1650 | struct uffdio_range uffdio_wake; |
| 1651 | struct userfaultfd_wake_range range; |
| 1652 | const void __user *buf = (void __user *)arg; |
| 1653 | |
| 1654 | ret = -EFAULT; |
| 1655 | if (copy_from_user(&uffdio_wake, buf, sizeof(uffdio_wake))) |
| 1656 | goto out; |
| 1657 | |
Peter Collingbourne | e71e2ac | 2021-07-23 15:50:01 -0700 | [diff] [blame] | 1658 | ret = validate_range(ctx->mm, uffdio_wake.start, uffdio_wake.len); |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1659 | if (ret) |
| 1660 | goto out; |
| 1661 | |
| 1662 | range.start = uffdio_wake.start; |
| 1663 | range.len = uffdio_wake.len; |
| 1664 | |
| 1665 | /* |
| 1666 | * len == 0 means wake all and we don't want to wake all here, |
| 1667 | * so check it again to be sure. |
| 1668 | */ |
| 1669 | VM_BUG_ON(!range.len); |
| 1670 | |
| 1671 | wake_userfault(ctx, &range); |
| 1672 | ret = 0; |
| 1673 | |
| 1674 | out: |
| 1675 | return ret; |
| 1676 | } |
| 1677 | |
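/*
 * Illustrative userspace sketch (not part of this file): the batching
 * pattern described in the comment above userfaultfd_wake().  Faults
 * are resolved with UFFDIO_COPY_MODE_DONTWAKE and a single UFFDIO_WAKE
 * then wakes the whole range.  "uffd", "dst", "src" and "page_size" are
 * assumed, and uffd_copy_page() stands for a UFFDIO_COPY wrapper like
 * the sketch after userfaultfd_copy() below.
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/userfaultfd.h>
 *
 *	static int copy_batch_then_wake(int uffd, unsigned long dst,
 *					char *src, unsigned long page_size,
 *					int nr_pages)
 *	{
 *		struct uffdio_range range = {
 *			.start = dst,
 *			.len = nr_pages * page_size,
 *		};
 *
 *		for (int i = 0; i < nr_pages; i++)
 *			if (uffd_copy_page(uffd, dst + i * page_size,
 *					   src + i * page_size, page_size,
 *					   UFFDIO_COPY_MODE_DONTWAKE))
 *				return -1;
 *		return ioctl(uffd, UFFDIO_WAKE, &range);
 *	}
 */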
Andrea Arcangeli | ad465cae | 2015-09-04 15:47:11 -0700 | [diff] [blame] | 1678 | static int userfaultfd_copy(struct userfaultfd_ctx *ctx, |
| 1679 | unsigned long arg) |
| 1680 | { |
| 1681 | __s64 ret; |
| 1682 | struct uffdio_copy uffdio_copy; |
| 1683 | struct uffdio_copy __user *user_uffdio_copy; |
| 1684 | struct userfaultfd_wake_range range; |
| 1685 | |
| 1686 | user_uffdio_copy = (struct uffdio_copy __user *) arg; |
| 1687 | |
Mike Rapoport | df2cc96 | 2018-06-07 17:09:25 -0700 | [diff] [blame] | 1688 | ret = -EAGAIN; |
Nadav Amit | a759a90 | 2021-09-02 14:58:56 -0700 | [diff] [blame] | 1689 | if (atomic_read(&ctx->mmap_changing)) |
Mike Rapoport | df2cc96 | 2018-06-07 17:09:25 -0700 | [diff] [blame] | 1690 | goto out; |
| 1691 | |
Andrea Arcangeli | ad465cae | 2015-09-04 15:47:11 -0700 | [diff] [blame] | 1692 | ret = -EFAULT; |
| 1693 | if (copy_from_user(&uffdio_copy, user_uffdio_copy, |
| 1694 | /* don't copy "copy" last field */ |
| 1695 | sizeof(uffdio_copy)-sizeof(__s64))) |
| 1696 | goto out; |
| 1697 | |
Peter Collingbourne | e71e2ac | 2021-07-23 15:50:01 -0700 | [diff] [blame] | 1698 | ret = validate_range(ctx->mm, uffdio_copy.dst, uffdio_copy.len); |
Andrea Arcangeli | ad465cae | 2015-09-04 15:47:11 -0700 | [diff] [blame] | 1699 | if (ret) |
| 1700 | goto out; |
| 1701 | /* |
| 1702 | * double check for wraparound just in case. copy_from_user() |
| 1703 | * will later check that uffdio_copy.src + uffdio_copy.len fits |
| 1704 | * in the userland range. |
| 1705 | */ |
| 1706 | ret = -EINVAL; |
| 1707 | if (uffdio_copy.src + uffdio_copy.len <= uffdio_copy.src) |
| 1708 | goto out; |
Andrea Arcangeli | 72981e0 | 2020-04-06 20:05:41 -0700 | [diff] [blame] | 1709 | if (uffdio_copy.mode & ~(UFFDIO_COPY_MODE_DONTWAKE|UFFDIO_COPY_MODE_WP)) |
Andrea Arcangeli | ad465cae | 2015-09-04 15:47:11 -0700 | [diff] [blame] | 1710 | goto out; |
Oleg Nesterov | d2005e3 | 2016-05-20 16:58:36 -0700 | [diff] [blame] | 1711 | if (mmget_not_zero(ctx->mm)) { |
| 1712 | ret = mcopy_atomic(ctx->mm, uffdio_copy.dst, uffdio_copy.src, |
Andrea Arcangeli | 72981e0 | 2020-04-06 20:05:41 -0700 | [diff] [blame] | 1713 | uffdio_copy.len, &ctx->mmap_changing, |
| 1714 | uffdio_copy.mode); |
Oleg Nesterov | d2005e3 | 2016-05-20 16:58:36 -0700 | [diff] [blame] | 1715 | mmput(ctx->mm); |
Mike Rapoport | 9633318 | 2017-02-24 14:58:31 -0800 | [diff] [blame] | 1716 | } else { |
Mike Rapoport | e86b298 | 2017-08-10 15:24:32 -0700 | [diff] [blame] | 1717 | return -ESRCH; |
Oleg Nesterov | d2005e3 | 2016-05-20 16:58:36 -0700 | [diff] [blame] | 1718 | } |
Andrea Arcangeli | ad465cae | 2015-09-04 15:47:11 -0700 | [diff] [blame] | 1719 | if (unlikely(put_user(ret, &user_uffdio_copy->copy))) |
| 1720 | return -EFAULT; |
| 1721 | if (ret < 0) |
| 1722 | goto out; |
| 1723 | BUG_ON(!ret); |
| 1724 | /* len == 0 would wake all */ |
| 1725 | range.len = ret; |
| 1726 | if (!(uffdio_copy.mode & UFFDIO_COPY_MODE_DONTWAKE)) { |
| 1727 | range.start = uffdio_copy.dst; |
| 1728 | wake_userfault(ctx, &range); |
| 1729 | } |
| 1730 | ret = range.len == uffdio_copy.len ? 0 : -EAGAIN; |
| 1731 | out: |
| 1732 | return ret; |
| 1733 | } |
| 1734 | |
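/*
 * Illustrative userspace sketch (not part of this file): resolving a
 * missing fault with the UFFDIO_COPY ioctl handled above.  On success
 * copy.copy holds the number of bytes copied; EAGAIN roughly means the
 * address space is changing (mmap_changing), so the monitor should read
 * the pending event and retry.  "uffd", "dst", "src", "len" and "mode"
 * are assumed; dst and len must be page aligned.
 *
 *	#include <errno.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/userfaultfd.h>
 *
 *	static int uffd_copy_page(int uffd, unsigned long dst, void *src,
 *				  unsigned long len, __u64 mode)
 *	{
 *		struct uffdio_copy copy = {
 *			.dst = dst,
 *			.src = (unsigned long)src,
 *			.len = len,
 *			.mode = mode,	// 0, _MODE_DONTWAKE and/or _MODE_WP
 *		};
 *
 *		if (ioctl(uffd, UFFDIO_COPY, &copy) == 0)
 *			return 0;
 *		// EAGAIN: the mm is changing; the monitor is expected to
 *		// read the pending event and retry the copy afterwards.
 *		return errno == EAGAIN ? -EAGAIN : -1;
 *	}
 */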
| 1735 | static int userfaultfd_zeropage(struct userfaultfd_ctx *ctx, |
| 1736 | unsigned long arg) |
| 1737 | { |
| 1738 | __s64 ret; |
| 1739 | struct uffdio_zeropage uffdio_zeropage; |
| 1740 | struct uffdio_zeropage __user *user_uffdio_zeropage; |
| 1741 | struct userfaultfd_wake_range range; |
| 1742 | |
| 1743 | user_uffdio_zeropage = (struct uffdio_zeropage __user *) arg; |
| 1744 | |
Mike Rapoport | df2cc96 | 2018-06-07 17:09:25 -0700 | [diff] [blame] | 1745 | ret = -EAGAIN; |
Nadav Amit | a759a90 | 2021-09-02 14:58:56 -0700 | [diff] [blame] | 1746 | if (atomic_read(&ctx->mmap_changing)) |
Mike Rapoport | df2cc96 | 2018-06-07 17:09:25 -0700 | [diff] [blame] | 1747 | goto out; |
| 1748 | |
Andrea Arcangeli | ad465cae | 2015-09-04 15:47:11 -0700 | [diff] [blame] | 1749 | ret = -EFAULT; |
| 1750 | if (copy_from_user(&uffdio_zeropage, user_uffdio_zeropage, |
| 1751 | /* don't copy "zeropage" last field */ |
| 1752 | sizeof(uffdio_zeropage)-sizeof(__s64))) |
| 1753 | goto out; |
| 1754 | |
Peter Collingbourne | e71e2ac | 2021-07-23 15:50:01 -0700 | [diff] [blame] | 1755 | ret = validate_range(ctx->mm, uffdio_zeropage.range.start, |
Andrea Arcangeli | ad465cae | 2015-09-04 15:47:11 -0700 | [diff] [blame] | 1756 | uffdio_zeropage.range.len); |
| 1757 | if (ret) |
| 1758 | goto out; |
| 1759 | ret = -EINVAL; |
| 1760 | if (uffdio_zeropage.mode & ~UFFDIO_ZEROPAGE_MODE_DONTWAKE) |
| 1761 | goto out; |
| 1762 | |
Oleg Nesterov | d2005e3 | 2016-05-20 16:58:36 -0700 | [diff] [blame] | 1763 | if (mmget_not_zero(ctx->mm)) { |
| 1764 | ret = mfill_zeropage(ctx->mm, uffdio_zeropage.range.start, |
Mike Rapoport | df2cc96 | 2018-06-07 17:09:25 -0700 | [diff] [blame] | 1765 | uffdio_zeropage.range.len, |
| 1766 | &ctx->mmap_changing); |
Oleg Nesterov | d2005e3 | 2016-05-20 16:58:36 -0700 | [diff] [blame] | 1767 | mmput(ctx->mm); |
Mike Rapoport | 9d95aa4 | 2017-08-02 13:32:15 -0700 | [diff] [blame] | 1768 | } else { |
Mike Rapoport | e86b298 | 2017-08-10 15:24:32 -0700 | [diff] [blame] | 1769 | return -ESRCH; |
Oleg Nesterov | d2005e3 | 2016-05-20 16:58:36 -0700 | [diff] [blame] | 1770 | } |
Andrea Arcangeli | ad465cae | 2015-09-04 15:47:11 -0700 | [diff] [blame] | 1771 | if (unlikely(put_user(ret, &user_uffdio_zeropage->zeropage))) |
| 1772 | return -EFAULT; |
| 1773 | if (ret < 0) |
| 1774 | goto out; |
| 1775 | /* len == 0 would wake all */ |
| 1776 | BUG_ON(!ret); |
| 1777 | range.len = ret; |
| 1778 | if (!(uffdio_zeropage.mode & UFFDIO_ZEROPAGE_MODE_DONTWAKE)) { |
| 1779 | range.start = uffdio_zeropage.range.start; |
| 1780 | wake_userfault(ctx, &range); |
| 1781 | } |
| 1782 | ret = range.len == uffdio_zeropage.range.len ? 0 : -EAGAIN; |
| 1783 | out: |
| 1784 | return ret; |
| 1785 | } |
| 1786 | |
Andrea Arcangeli | 63b2d41 | 2020-04-06 20:06:12 -0700 | [diff] [blame] | 1787 | static int userfaultfd_writeprotect(struct userfaultfd_ctx *ctx, |
| 1788 | unsigned long arg) |
| 1789 | { |
| 1790 | int ret; |
| 1791 | struct uffdio_writeprotect uffdio_wp; |
| 1792 | struct uffdio_writeprotect __user *user_uffdio_wp; |
| 1793 | struct userfaultfd_wake_range range; |
Peter Xu | 23080e2 | 2020-04-06 20:06:20 -0700 | [diff] [blame] | 1794 | bool mode_wp, mode_dontwake; |
Andrea Arcangeli | 63b2d41 | 2020-04-06 20:06:12 -0700 | [diff] [blame] | 1795 | |
Nadav Amit | a759a90 | 2021-09-02 14:58:56 -0700 | [diff] [blame] | 1796 | if (atomic_read(&ctx->mmap_changing)) |
Andrea Arcangeli | 63b2d41 | 2020-04-06 20:06:12 -0700 | [diff] [blame] | 1797 | return -EAGAIN; |
| 1798 | |
| 1799 | user_uffdio_wp = (struct uffdio_writeprotect __user *) arg; |
| 1800 | |
| 1801 | if (copy_from_user(&uffdio_wp, user_uffdio_wp, |
| 1802 | sizeof(struct uffdio_writeprotect))) |
| 1803 | return -EFAULT; |
| 1804 | |
Peter Collingbourne | e71e2ac | 2021-07-23 15:50:01 -0700 | [diff] [blame] | 1805 | ret = validate_range(ctx->mm, uffdio_wp.range.start, |
Andrea Arcangeli | 63b2d41 | 2020-04-06 20:06:12 -0700 | [diff] [blame] | 1806 | uffdio_wp.range.len); |
| 1807 | if (ret) |
| 1808 | return ret; |
| 1809 | |
| 1810 | if (uffdio_wp.mode & ~(UFFDIO_WRITEPROTECT_MODE_DONTWAKE | |
| 1811 | UFFDIO_WRITEPROTECT_MODE_WP)) |
| 1812 | return -EINVAL; |
Peter Xu | 23080e2 | 2020-04-06 20:06:20 -0700 | [diff] [blame] | 1813 | |
| 1814 | mode_wp = uffdio_wp.mode & UFFDIO_WRITEPROTECT_MODE_WP; |
| 1815 | mode_dontwake = uffdio_wp.mode & UFFDIO_WRITEPROTECT_MODE_DONTWAKE; |
| 1816 | |
| 1817 | if (mode_wp && mode_dontwake) |
Andrea Arcangeli | 63b2d41 | 2020-04-06 20:06:12 -0700 | [diff] [blame] | 1818 | return -EINVAL; |
| 1819 | |
Nadav Amit | cb185d5 | 2021-10-18 15:15:25 -0700 | [diff] [blame] | 1820 | if (mmget_not_zero(ctx->mm)) { |
| 1821 | ret = mwriteprotect_range(ctx->mm, uffdio_wp.range.start, |
| 1822 | uffdio_wp.range.len, mode_wp, |
| 1823 | &ctx->mmap_changing); |
| 1824 | mmput(ctx->mm); |
| 1825 | } else { |
| 1826 | return -ESRCH; |
| 1827 | } |
| 1828 | |
Andrea Arcangeli | 63b2d41 | 2020-04-06 20:06:12 -0700 | [diff] [blame] | 1829 | if (ret) |
| 1830 | return ret; |
| 1831 | |
Peter Xu | 23080e2 | 2020-04-06 20:06:20 -0700 | [diff] [blame] | 1832 | if (!mode_wp && !mode_dontwake) { |
Andrea Arcangeli | 63b2d41 | 2020-04-06 20:06:12 -0700 | [diff] [blame] | 1833 | range.start = uffdio_wp.range.start; |
| 1834 | range.len = uffdio_wp.range.len; |
| 1835 | wake_userfault(ctx, &range); |
| 1836 | } |
| 1837 | return ret; |
| 1838 | } |
| 1839 | |
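/*
 * Illustrative userspace sketch (not part of this file): toggling
 * write-protection on a range registered with UFFDIO_REGISTER_MODE_WP,
 * as handled by userfaultfd_writeprotect() above.  Setting MODE_WP
 * installs the protection; clearing it (without DONTWAKE) also wakes
 * any blocked faults, matching the wake_userfault() call above.
 * "uffd", "start" and "len" are assumed to describe a registered,
 * page-aligned range.
 *
 *	#include <stdbool.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/userfaultfd.h>
 *
 *	static int uffd_wp(int uffd, unsigned long start, unsigned long len,
 *			   bool protect)
 *	{
 *		struct uffdio_writeprotect wp = {
 *			.range = { .start = start, .len = len },
 *			.mode  = protect ? UFFDIO_WRITEPROTECT_MODE_WP : 0,
 *		};
 *
 *		return ioctl(uffd, UFFDIO_WRITEPROTECT, &wp);
 *	}
 */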
Axel Rasmussen | f619147 | 2021-05-04 18:35:49 -0700 | [diff] [blame] | 1840 | static int userfaultfd_continue(struct userfaultfd_ctx *ctx, unsigned long arg) |
| 1841 | { |
| 1842 | __s64 ret; |
| 1843 | struct uffdio_continue uffdio_continue; |
| 1844 | struct uffdio_continue __user *user_uffdio_continue; |
| 1845 | struct userfaultfd_wake_range range; |
| 1846 | |
| 1847 | user_uffdio_continue = (struct uffdio_continue __user *)arg; |
| 1848 | |
| 1849 | ret = -EAGAIN; |
Nadav Amit | a759a90 | 2021-09-02 14:58:56 -0700 | [diff] [blame] | 1850 | if (atomic_read(&ctx->mmap_changing)) |
Axel Rasmussen | f619147 | 2021-05-04 18:35:49 -0700 | [diff] [blame] | 1851 | goto out; |
| 1852 | |
| 1853 | ret = -EFAULT; |
| 1854 | if (copy_from_user(&uffdio_continue, user_uffdio_continue, |
| 1855 | /* don't copy the output fields */ |
| 1856 | sizeof(uffdio_continue) - (sizeof(__s64)))) |
| 1857 | goto out; |
| 1858 | |
Peter Collingbourne | e71e2ac | 2021-07-23 15:50:01 -0700 | [diff] [blame] | 1859 | ret = validate_range(ctx->mm, uffdio_continue.range.start, |
Axel Rasmussen | f619147 | 2021-05-04 18:35:49 -0700 | [diff] [blame] | 1860 | uffdio_continue.range.len); |
| 1861 | if (ret) |
| 1862 | goto out; |
| 1863 | |
| 1864 | ret = -EINVAL; |
| 1865 | /* double check for wraparound just in case. */ |
| 1866 | if (uffdio_continue.range.start + uffdio_continue.range.len <= |
| 1867 | uffdio_continue.range.start) { |
| 1868 | goto out; |
| 1869 | } |
| 1870 | if (uffdio_continue.mode & ~UFFDIO_CONTINUE_MODE_DONTWAKE) |
| 1871 | goto out; |
| 1872 | |
| 1873 | if (mmget_not_zero(ctx->mm)) { |
| 1874 | ret = mcopy_continue(ctx->mm, uffdio_continue.range.start, |
| 1875 | uffdio_continue.range.len, |
| 1876 | &ctx->mmap_changing); |
| 1877 | mmput(ctx->mm); |
| 1878 | } else { |
| 1879 | return -ESRCH; |
| 1880 | } |
| 1881 | |
| 1882 | if (unlikely(put_user(ret, &user_uffdio_continue->mapped))) |
| 1883 | return -EFAULT; |
| 1884 | if (ret < 0) |
| 1885 | goto out; |
| 1886 | |
| 1887 | /* len == 0 would wake all */ |
| 1888 | BUG_ON(!ret); |
| 1889 | range.len = ret; |
| 1890 | if (!(uffdio_continue.mode & UFFDIO_CONTINUE_MODE_DONTWAKE)) { |
| 1891 | range.start = uffdio_continue.range.start; |
| 1892 | wake_userfault(ctx, &range); |
| 1893 | } |
| 1894 | ret = range.len == uffdio_continue.range.len ? 0 : -EAGAIN; |
| 1895 | |
| 1896 | out: |
| 1897 | return ret; |
| 1898 | } |
| 1899 | |
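/*
 * Illustrative userspace sketch (not part of this file): resolving a
 * minor fault with UFFDIO_CONTINUE as handled above.  The monitor first
 * populates the page cache through a second mapping of the same file,
 * then asks the kernel to simply map the already-present page at the
 * faulting address.  "uffd", "addr" and "page_size" are assumed; the
 * range must have been registered with UFFDIO_REGISTER_MODE_MINOR.
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/userfaultfd.h>
 *
 *	static int uffd_continue(int uffd, unsigned long addr,
 *				 unsigned long page_size)
 *	{
 *		struct uffdio_continue cont = {
 *			.range = {
 *				.start = addr & ~(page_size - 1),
 *				.len = page_size,
 *			},
 *			.mode = 0,	// or UFFDIO_CONTINUE_MODE_DONTWAKE
 *		};
 *
 *		return ioctl(uffd, UFFDIO_CONTINUE, &cont);
 *	}
 */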
Pavel Emelyanov | 9cd75c3 | 2017-02-22 15:42:21 -0800 | [diff] [blame] | 1900 | static inline unsigned int uffd_ctx_features(__u64 user_features) |
| 1901 | { |
| 1902 | /* |
Nadav Amit | 22e5fe2a | 2021-09-02 14:58:59 -0700 | [diff] [blame] | 1903 | * For the current set of features the bits just coincide. Set |
| 1904 | * UFFD_FEATURE_INITIALIZED to mark the features as enabled. |
Pavel Emelyanov | 9cd75c3 | 2017-02-22 15:42:21 -0800 | [diff] [blame] | 1905 | */ |
Nadav Amit | 22e5fe2a | 2021-09-02 14:58:59 -0700 | [diff] [blame] | 1906 | return (unsigned int)user_features | UFFD_FEATURE_INITIALIZED; |
Pavel Emelyanov | 9cd75c3 | 2017-02-22 15:42:21 -0800 | [diff] [blame] | 1907 | } |
| 1908 | |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1909 | /* |
| 1910 | * userland asks for a certain API version and we return which bits |
| 1911 | * and ioctl commands are implemented in this kernel for that API |
| 1912 | * version, or -EINVAL if it is unknown. |
| 1913 | */ |
| 1914 | static int userfaultfd_api(struct userfaultfd_ctx *ctx, |
| 1915 | unsigned long arg) |
| 1916 | { |
| 1917 | struct uffdio_api uffdio_api; |
| 1918 | void __user *buf = (void __user *)arg; |
Nadav Amit | 22e5fe2a | 2021-09-02 14:58:59 -0700 | [diff] [blame] | 1919 | unsigned int ctx_features; |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1920 | int ret; |
Andrea Arcangeli | 6560314 | 2017-02-22 15:42:24 -0800 | [diff] [blame] | 1921 | __u64 features; |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1922 | |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1923 | ret = -EFAULT; |
Andrea Arcangeli | a9b85f9 | 2015-09-04 15:46:37 -0700 | [diff] [blame] | 1924 | if (copy_from_user(&uffdio_api, buf, sizeof(uffdio_api))) |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1925 | goto out; |
Andrea Arcangeli | 6560314 | 2017-02-22 15:42:24 -0800 | [diff] [blame] | 1926 | features = uffdio_api.features; |
Mike Rapoport | 3c1c24d | 2019-11-30 17:58:01 -0800 | [diff] [blame] | 1927 | ret = -EINVAL; |
| 1928 | if (uffdio_api.api != UFFD_API || (features & ~UFFD_API_FEATURES)) |
| 1929 | goto err_out; |
| 1930 | ret = -EPERM; |
| 1931 | if ((features & UFFD_FEATURE_EVENT_FORK) && !capable(CAP_SYS_PTRACE)) |
| 1932 | goto err_out; |
Andrea Arcangeli | 6560314 | 2017-02-22 15:42:24 -0800 | [diff] [blame] | 1933 | /* report all available features and ioctls to userland */ |
| 1934 | uffdio_api.features = UFFD_API_FEATURES; |
Axel Rasmussen | 7677f7f | 2021-05-04 18:35:36 -0700 | [diff] [blame] | 1935 | #ifndef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR |
Axel Rasmussen | 964ab00 | 2021-06-30 18:49:27 -0700 | [diff] [blame] | 1936 | uffdio_api.features &= |
| 1937 | ~(UFFD_FEATURE_MINOR_HUGETLBFS | UFFD_FEATURE_MINOR_SHMEM); |
Axel Rasmussen | 7677f7f | 2021-05-04 18:35:36 -0700 | [diff] [blame] | 1938 | #endif |
Peter Xu | 00b151f2 | 2021-06-30 18:49:06 -0700 | [diff] [blame] | 1939 | #ifndef CONFIG_HAVE_ARCH_USERFAULTFD_WP |
| 1940 | uffdio_api.features &= ~UFFD_FEATURE_PAGEFAULT_FLAG_WP; |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1941 | #endif |
Peter Xu | b1f9e87 | 2022-05-12 20:22:56 -0700 | [diff] [blame] | 1942 | #ifndef CONFIG_PTE_MARKER_UFFD_WP |
| 1943 | uffdio_api.features &= ~UFFD_FEATURE_WP_HUGETLBFS_SHMEM; |
| 1944 | #endif |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1945 | uffdio_api.ioctls = UFFD_API_IOCTLS; |
| 1946 | ret = -EFAULT; |
| 1947 | if (copy_to_user(buf, &uffdio_api, sizeof(uffdio_api))) |
| 1948 | goto out; |
Nadav Amit | 22e5fe2a | 2021-09-02 14:58:59 -0700 | [diff] [blame] | 1949 | |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1950 | /* only enable the requested features for this uffd context */ |
Nadav Amit | 22e5fe2a | 2021-09-02 14:58:59 -0700 | [diff] [blame] | 1951 | ctx_features = uffd_ctx_features(features); |
| 1952 | ret = -EINVAL; |
| 1953 | if (cmpxchg(&ctx->features, 0, ctx_features) != 0) |
| 1954 | goto err_out; |
| 1955 | |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1956 | ret = 0; |
| 1957 | out: |
| 1958 | return ret; |
| 1959 | err_out: |
| 1960 | memset(&uffdio_api, 0, sizeof(uffdio_api)); |
| 1961 | if (copy_to_user(buf, &uffdio_api, sizeof(uffdio_api))) |
| 1962 | ret = -EFAULT; |
| 1963 | goto out; |
| 1964 | } |
| 1965 | |
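/*
 * Illustrative userspace sketch (not part of this file): the UFFDIO_API
 * handshake that userfaultfd_api() above implements.  The caller asks
 * for UFFD_API plus the features it wants; on return uffdio_api.features
 * reports every feature this kernel supports, and the context is marked
 * initialized so the other ioctls become available.  The descriptor is
 * assumed to come straight from the userfaultfd() syscall.
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/userfaultfd.h>
 *
 *	static int uffd_handshake(int uffd, __u64 wanted_features)
 *	{
 *		struct uffdio_api api = {
 *			.api = UFFD_API,
 *			.features = wanted_features,
 *		};
 *
 *		if (ioctl(uffd, UFFDIO_API, &api) == -1)
 *			return -1;	// EINVAL: unknown API or features
 *		// api.features now reports the supported feature set,
 *		// api.ioctls the supported UFFD_API_IOCTLS mask.
 *		return 0;
 *	}
 */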
| 1966 | static long userfaultfd_ioctl(struct file *file, unsigned cmd, |
Andrea Arcangeli | e6485a4 | 2015-09-04 15:47:15 -0700 | [diff] [blame] | 1967 | unsigned long arg) |
| 1968 | { |
| 1969 | int ret = -EINVAL; |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1970 | struct userfaultfd_ctx *ctx = file->private_data; |
| 1971 | |
Nadav Amit | 22e5fe2a | 2021-09-02 14:58:59 -0700 | [diff] [blame] | 1972 | if (cmd != UFFDIO_API && !userfaultfd_is_initialized(ctx)) |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1973 | return -EINVAL; |
| 1974 | |
| 1975 | switch(cmd) { |
| 1976 | case UFFDIO_API: |
| 1977 | ret = userfaultfd_api(ctx, arg); |
| 1978 | break; |
| 1979 | case UFFDIO_REGISTER: |
| 1980 | ret = userfaultfd_register(ctx, arg); |
| 1981 | break; |
| 1982 | case UFFDIO_UNREGISTER: |
| 1983 | ret = userfaultfd_unregister(ctx, arg); |
| 1984 | break; |
| 1985 | case UFFDIO_WAKE: |
Andrea Arcangeli | ad465cae | 2015-09-04 15:47:11 -0700 | [diff] [blame] | 1986 | ret = userfaultfd_wake(ctx, arg); |
| 1987 | break; |
| 1988 | case UFFDIO_COPY: |
| 1989 | ret = userfaultfd_copy(ctx, arg); |
| 1990 | break; |
| 1991 | case UFFDIO_ZEROPAGE: |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1992 | ret = userfaultfd_zeropage(ctx, arg); |
| 1993 | break; |
Andrea Arcangeli | 63b2d41 | 2020-04-06 20:06:12 -0700 | [diff] [blame] | 1994 | case UFFDIO_WRITEPROTECT: |
| 1995 | ret = userfaultfd_writeprotect(ctx, arg); |
| 1996 | break; |
Axel Rasmussen | f619147 | 2021-05-04 18:35:49 -0700 | [diff] [blame] | 1997 | case UFFDIO_CONTINUE: |
| 1998 | ret = userfaultfd_continue(ctx, arg); |
| 1999 | break; |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 2000 | } |
| 2001 | return ret; |
| 2002 | } |
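/*
 * Illustrative use of the dispatcher above from userspace (a sketch assuming
 * a 4 KiB page size; the helper names are invented): every command except
 * UFFDIO_API returns -EINVAL until the handshake has completed.  A typical
 * sequence registers a range for missing faults and later resolves a fault
 * by copying a prepared page into place:
 *
 *	#include <linux/userfaultfd.h>
 *	#include <sys/ioctl.h>
 *
 *	static int uffd_register_missing(int uffd, void *area, unsigned long len)
 *	{
 *		struct uffdio_register reg = {
 *			.range = { .start = (unsigned long)area, .len = len },
 *			.mode = UFFDIO_REGISTER_MODE_MISSING,
 *		};
 *		return ioctl(uffd, UFFDIO_REGISTER, &reg);
 *	}
 *
 *	static int uffd_resolve_missing(int uffd, void *page, unsigned long fault_addr)
 *	{
 *		struct uffdio_copy copy = {
 *			.dst = fault_addr & ~0xfffUL,	// page-align (4 KiB assumed)
 *			.src = (unsigned long)page,
 *			.len = 4096,
 *			.mode = 0,
 *		};
 *		return ioctl(uffd, UFFDIO_COPY, &copy);
 *	}
 */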
| 2003 | |
| 2004 | #ifdef CONFIG_PROC_FS |
| 2005 | static void userfaultfd_show_fdinfo(struct seq_file *m, struct file *f) |
| 2006 | { |
| 2007 | struct userfaultfd_ctx *ctx = f->private_data; |
Ingo Molnar | ac6424b | 2017-06-20 12:06:13 +0200 | [diff] [blame] | 2008 | wait_queue_entry_t *wq; |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 2009 | unsigned long pending = 0, total = 0; |
| 2010 | |
Eric Biggers | cbcfa13 | 2019-07-04 15:14:39 -0700 | [diff] [blame] | 2011 | spin_lock_irq(&ctx->fault_pending_wqh.lock); |
Ingo Molnar | 2055da9 | 2017-06-20 12:06:46 +0200 | [diff] [blame] | 2012 | list_for_each_entry(wq, &ctx->fault_pending_wqh.head, entry) { |
Andrea Arcangeli | 15b726e | 2015-09-04 15:46:44 -0700 | [diff] [blame] | 2013 | pending++; |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 2014 | total++; |
| 2015 | } |
Ingo Molnar | 2055da9 | 2017-06-20 12:06:46 +0200 | [diff] [blame] | 2016 | list_for_each_entry(wq, &ctx->fault_wqh.head, entry) { |
Andrea Arcangeli | 15b726e | 2015-09-04 15:46:44 -0700 | [diff] [blame] | 2017 | total++; |
| 2018 | } |
Eric Biggers | cbcfa13 | 2019-07-04 15:14:39 -0700 | [diff] [blame] | 2019 | spin_unlock_irq(&ctx->fault_pending_wqh.lock); |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 2020 | |
| 2021 | /* |
| 2022 | 	 * If more protocols are added, they will all be shown |
| 2023 | 	 * separated by a space, like this: |
| 2024 | * protocols: aa:... bb:... |
| 2025 | */ |
| 2026 | seq_printf(m, "pending:\t%lu\ntotal:\t%lu\nAPI:\t%Lx:%x:%Lx\n", |
Mike Rapoport | 045098e | 2017-04-07 16:04:42 -0700 | [diff] [blame] | 2027 | pending, total, UFFD_API, ctx->features, |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 2028 | UFFD_API_IOCTLS|UFFD_API_RANGE_IOCTLS); |
| 2029 | } |
| 2030 | #endif |
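/*
 * The fdinfo hook above can be observed from userspace; a small sketch
 * (helper name invented, error handling trimmed) that dumps the pending:,
 * total: and API: lines alongside the generic fdinfo fields:
 *
 *	#include <stdio.h>
 *
 *	static void dump_uffd_fdinfo(int uffd)
 *	{
 *		char path[64], line[128];
 *		FILE *f;
 *
 *		snprintf(path, sizeof(path), "/proc/self/fdinfo/%d", uffd);
 *		f = fopen(path, "r");
 *		if (!f)
 *			return;
 *		while (fgets(line, sizeof(line), f))
 *			fputs(line, stdout);
 *		fclose(f);
 *	}
 */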
| 2031 | |
| 2032 | static const struct file_operations userfaultfd_fops = { |
| 2033 | #ifdef CONFIG_PROC_FS |
| 2034 | .show_fdinfo = userfaultfd_show_fdinfo, |
| 2035 | #endif |
| 2036 | .release = userfaultfd_release, |
| 2037 | .poll = userfaultfd_poll, |
| 2038 | .read = userfaultfd_read, |
| 2039 | .unlocked_ioctl = userfaultfd_ioctl, |
Arnd Bergmann | 1832f2d | 2018-09-11 21:59:08 +0200 | [diff] [blame] | 2040 | .compat_ioctl = compat_ptr_ioctl, |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 2041 | .llseek = noop_llseek, |
| 2042 | }; |
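/*
 * The fops above plug the fd into an ordinary poll()/read() event loop.
 * A hedged userspace sketch of draining one fault event (helper name
 * invented; assumes the monitor runs in a thread other than the one that
 * faulted):
 *
 *	#include <linux/userfaultfd.h>
 *	#include <poll.h>
 *	#include <unistd.h>
 *
 *	static int uffd_wait_for_fault(int uffd, unsigned long *addr)
 *	{
 *		struct pollfd pfd = { .fd = uffd, .events = POLLIN };
 *		struct uffd_msg msg;
 *
 *		if (poll(&pfd, 1, -1) == -1)
 *			return -1;
 *		if (read(uffd, &msg, sizeof(msg)) != sizeof(msg))
 *			return -1;
 *		if (msg.event != UFFD_EVENT_PAGEFAULT)
 *			return -1;	// e.g. fork/remap/remove events
 *		*addr = (unsigned long)msg.arg.pagefault.address;
 *		return 0;
 *	}
 */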
| 2043 | |
Andrea Arcangeli | 3004ec9 | 2015-09-04 15:46:48 -0700 | [diff] [blame] | 2044 | static void init_once_userfaultfd_ctx(void *mem) |
| 2045 | { |
| 2046 | struct userfaultfd_ctx *ctx = (struct userfaultfd_ctx *) mem; |
| 2047 | |
| 2048 | init_waitqueue_head(&ctx->fault_pending_wqh); |
| 2049 | init_waitqueue_head(&ctx->fault_wqh); |
Pavel Emelyanov | 9cd75c3 | 2017-02-22 15:42:21 -0800 | [diff] [blame] | 2050 | init_waitqueue_head(&ctx->event_wqh); |
Andrea Arcangeli | 3004ec9 | 2015-09-04 15:46:48 -0700 | [diff] [blame] | 2051 | init_waitqueue_head(&ctx->fd_wqh); |
Ahmed S. Darwish | 2ca97ac | 2020-07-20 17:55:28 +0200 | [diff] [blame] | 2052 | seqcount_spinlock_init(&ctx->refile_seq, &ctx->fault_pending_wqh.lock); |
Andrea Arcangeli | 3004ec9 | 2015-09-04 15:46:48 -0700 | [diff] [blame] | 2053 | } |
| 2054 | |
Eric Biggers | 284cd24 | 2018-01-31 16:19:48 -0800 | [diff] [blame] | 2055 | SYSCALL_DEFINE1(userfaultfd, int, flags) |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 2056 | { |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 2057 | struct userfaultfd_ctx *ctx; |
Eric Biggers | 284cd24 | 2018-01-31 16:19:48 -0800 | [diff] [blame] | 2058 | int fd; |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 2059 | |
Lokesh Gidra | d0d4730 | 2020-12-14 19:13:54 -0800 | [diff] [blame] | 2060 | if (!sysctl_unprivileged_userfaultfd && |
| 2061 | (flags & UFFD_USER_MODE_ONLY) == 0 && |
| 2062 | !capable(CAP_SYS_PTRACE)) { |
| 2063 | printk_once(KERN_WARNING "uffd: Set unprivileged_userfaultfd " |
| 2064 | "sysctl knob to 1 if kernel faults must be handled " |
| 2065 | "without obtaining CAP_SYS_PTRACE capability\n"); |
Peter Xu | cefdca0 | 2019-05-13 17:16:41 -0700 | [diff] [blame] | 2066 | return -EPERM; |
Lokesh Gidra | d0d4730 | 2020-12-14 19:13:54 -0800 | [diff] [blame] | 2067 | } |
Peter Xu | cefdca0 | 2019-05-13 17:16:41 -0700 | [diff] [blame] | 2068 | |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 2069 | BUG_ON(!current->mm); |
| 2070 | |
| 2071 | /* Check the UFFD_* constants for consistency. */ |
Lokesh Gidra | 37cd057 | 2020-12-14 19:13:49 -0800 | [diff] [blame] | 2072 | BUILD_BUG_ON(UFFD_USER_MODE_ONLY & UFFD_SHARED_FCNTL_FLAGS); |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 2073 | BUILD_BUG_ON(UFFD_CLOEXEC != O_CLOEXEC); |
| 2074 | BUILD_BUG_ON(UFFD_NONBLOCK != O_NONBLOCK); |
| 2075 | |
Lokesh Gidra | 37cd057 | 2020-12-14 19:13:49 -0800 | [diff] [blame] | 2076 | if (flags & ~(UFFD_SHARED_FCNTL_FLAGS | UFFD_USER_MODE_ONLY)) |
Eric Biggers | 284cd24 | 2018-01-31 16:19:48 -0800 | [diff] [blame] | 2077 | return -EINVAL; |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 2078 | |
Andrea Arcangeli | 3004ec9 | 2015-09-04 15:46:48 -0700 | [diff] [blame] | 2079 | ctx = kmem_cache_alloc(userfaultfd_ctx_cachep, GFP_KERNEL); |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 2080 | if (!ctx) |
Eric Biggers | 284cd24 | 2018-01-31 16:19:48 -0800 | [diff] [blame] | 2081 | return -ENOMEM; |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 2082 | |
Eric Biggers | ca88042 | 2018-12-28 00:34:43 -0800 | [diff] [blame] | 2083 | refcount_set(&ctx->refcount, 1); |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 2084 | ctx->flags = flags; |
Pavel Emelyanov | 9cd75c3 | 2017-02-22 15:42:21 -0800 | [diff] [blame] | 2085 | ctx->features = 0; |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 2086 | ctx->released = false; |
Nadav Amit | a759a90 | 2021-09-02 14:58:56 -0700 | [diff] [blame] | 2087 | atomic_set(&ctx->mmap_changing, 0); |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 2088 | ctx->mm = current->mm; |
| 2089 | 	/* prevent the mm struct from being freed */ |
Vegard Nossum | f1f1007 | 2017-02-27 14:30:07 -0800 | [diff] [blame] | 2090 | mmgrab(ctx->mm); |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 2091 | |
Daniel Colascione | b537900 | 2021-01-08 14:22:23 -0800 | [diff] [blame] | 2092 | fd = anon_inode_getfd_secure("[userfaultfd]", &userfaultfd_fops, ctx, |
| 2093 | O_RDWR | (flags & UFFD_SHARED_FCNTL_FLAGS), NULL); |
Eric Biggers | 284cd24 | 2018-01-31 16:19:48 -0800 | [diff] [blame] | 2094 | if (fd < 0) { |
Oleg Nesterov | d2005e3 | 2016-05-20 16:58:36 -0700 | [diff] [blame] | 2095 | mmdrop(ctx->mm); |
Andrea Arcangeli | 3004ec9 | 2015-09-04 15:46:48 -0700 | [diff] [blame] | 2096 | kmem_cache_free(userfaultfd_ctx_cachep, ctx); |
Eric Biggers | c03e946f | 2015-09-17 16:01:54 -0700 | [diff] [blame] | 2097 | } |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 2098 | return fd; |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 2099 | } |
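/*
 * Creating the fd from userspace (a sketch; glibc has traditionally shipped
 * no wrapper for this syscall, so the raw syscall() form is shown).  With
 * the unprivileged_userfaultfd sysctl left at 0, callers must either hold
 * CAP_SYS_PTRACE or pass UFFD_USER_MODE_ONLY to restrict handling to
 * user-mode faults:
 *
 *	#include <linux/userfaultfd.h>
 *	#include <fcntl.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	static int uffd_open(void)
 *	{
 *		return (int)syscall(__NR_userfaultfd,
 *				    O_CLOEXEC | O_NONBLOCK | UFFD_USER_MODE_ONLY);
 *	}
 */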
Andrea Arcangeli | 3004ec9 | 2015-09-04 15:46:48 -0700 | [diff] [blame] | 2100 | |
| 2101 | static int __init userfaultfd_init(void) |
| 2102 | { |
| 2103 | userfaultfd_ctx_cachep = kmem_cache_create("userfaultfd_ctx_cache", |
| 2104 | sizeof(struct userfaultfd_ctx), |
| 2105 | 0, |
| 2106 | SLAB_HWCACHE_ALIGN|SLAB_PANIC, |
| 2107 | init_once_userfaultfd_ctx); |
| 2108 | return 0; |
| 2109 | } |
| 2110 | __initcall(userfaultfd_init); |