// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/spinlock.h>

#include <linux/mm.h>
#include <linux/memremap.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/secretmem.h>

#include <linux/sched/signal.h>
#include <linux/rwsem.h>
#include <linux/hugetlb.h>
#include <linux/migrate.h>
#include <linux/mm_inline.h>
#include <linux/sched/mm.h>

#include <asm/mmu_context.h>
#include <asm/tlbflush.h>

#include "internal.h"

struct follow_page_context {
        struct dev_pagemap *pgmap;
        unsigned int page_mask;
};

/*
 * Return the folio with ref appropriately incremented,
 * or NULL if that failed.
 */
static inline struct folio *try_get_folio(struct page *page, int refs)
{
        struct folio *folio;

retry:
        folio = page_folio(page);
        if (WARN_ON_ONCE(folio_ref_count(folio) < 0))
                return NULL;
        if (unlikely(!folio_ref_try_add_rcu(folio, refs)))
                return NULL;

        /*
         * At this point we have a stable reference to the folio; but it
         * could be that between calling page_folio() and the refcount
         * increment, the folio was split, in which case we'd end up
         * holding a reference on a folio that has nothing to do with the
         * page we were given anymore.
         * So now that the folio is stable, recheck that the page still
         * belongs to this folio.
         */
        if (unlikely(page_folio(page) != folio)) {
                folio_put_refs(folio, refs);
                goto retry;
        }

        return folio;
}

/**
 * try_grab_folio() - Attempt to get or pin a folio.
 * @page:  pointer to page to be grabbed
 * @refs:  the value to (effectively) add to the folio's refcount
 * @flags: gup flags: these are the FOLL_* flag values.
 *
 * "grab" names in this file mean, "look at flags to decide whether to use
 * FOLL_PIN or FOLL_GET behavior, when incrementing the folio's refcount".
 *
 * Either FOLL_PIN or FOLL_GET (or neither) must be set, but not both at the
 * same time. (That's true throughout the get_user_pages*() and
 * pin_user_pages*() APIs.) Cases:
 *
 *    FOLL_GET: folio's refcount will be incremented by @refs.
 *
 *    FOLL_PIN on large folios: folio's refcount will be incremented by
 *    @refs, and its compound_pincount will be incremented by @refs.
 *
 *    FOLL_PIN on single-page folios: folio's refcount will be incremented by
 *    @refs * GUP_PIN_COUNTING_BIAS.
 *
 * Return: The folio containing @page (with refcount appropriately
 * incremented) for success, or NULL upon failure. If neither FOLL_GET
 * nor FOLL_PIN was set, that's considered failure, and furthermore,
 * a likely bug in the caller, so a warning is also emitted.
 */
struct folio *try_grab_folio(struct page *page, int refs, unsigned int flags)
{
        if (flags & FOLL_GET)
                return try_get_folio(page, refs);
        else if (flags & FOLL_PIN) {
                struct folio *folio;

                /*
                 * Can't do FOLL_LONGTERM + FOLL_PIN gup fast path if not in a
                 * right zone, so fail and let the caller fall back to the slow
                 * path.
                 */
                if (unlikely((flags & FOLL_LONGTERM) &&
                             !is_pinnable_page(page)))
                        return NULL;

                /*
                 * CAUTION: Don't use compound_head() on the page before this
                 * point, the result won't be stable.
                 */
                folio = try_get_folio(page, refs);
                if (!folio)
                        return NULL;

                /*
                 * When pinning a large folio, use an exact count to track it.
                 *
                 * However, be sure to *also* increment the normal folio
                 * refcount field at least once, so that the folio really
                 * is pinned. That's why the refcount from the earlier
                 * try_get_folio() is left intact.
                 */
                if (folio_test_large(folio))
                        atomic_add(refs, folio_pincount_ptr(folio));
                else
                        folio_ref_add(folio,
                                      refs * (GUP_PIN_COUNTING_BIAS - 1));
                node_stat_mod_folio(folio, NR_FOLL_PIN_ACQUIRED, refs);

                return folio;
        }

        WARN_ON_ONCE(1);
        return NULL;
}
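
/*
 * Illustrative sketch (editorial addition, not part of the upstream file):
 * the refcount arithmetic try_grab_folio() performs for FOLL_PIN. A
 * single-page folio absorbs the pin into its refcount, scaled by
 * GUP_PIN_COUNTING_BIAS (1 << 10); a large folio keeps an exact pin count
 * in compound_pincount instead. Values below assume the folio's refcount
 * started at 1 and its pincount at 0.
 *
 *      folio = try_grab_folio(page, 1, FOLL_PIN);
 *      // small folio: folio_ref_count(folio) == 1 + 1024
 *      // large folio: folio_ref_count(folio) == 1 + 1,
 *      //              compound_pincount == 1
 *      gup_put_folio(folio, 1, FOLL_PIN);  // undoes exactly the above
 */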

static void gup_put_folio(struct folio *folio, int refs, unsigned int flags)
{
        if (flags & FOLL_PIN) {
                node_stat_mod_folio(folio, NR_FOLL_PIN_RELEASED, refs);
                if (folio_test_large(folio))
                        atomic_sub(refs, folio_pincount_ptr(folio));
                else
                        refs *= GUP_PIN_COUNTING_BIAS;
        }

        folio_put_refs(folio, refs);
}

/**
 * try_grab_page() - elevate a page's refcount by a flag-dependent amount
 * @page:  pointer to page to be grabbed
 * @flags: gup flags: these are the FOLL_* flag values.
 *
 * This might not do anything at all, depending on the flags argument.
 *
 * "grab" names in this file mean, "look at flags to decide whether to use
 * FOLL_PIN or FOLL_GET behavior, when incrementing the page's refcount".
 *
 * Either FOLL_PIN or FOLL_GET (or neither) may be set, but not both at the
 * same time. Cases: please see the try_grab_folio() documentation, with
 * "refs=1".
 *
 * Return: true for success, or if no action was required (if neither FOLL_PIN
 * nor FOLL_GET was set, nothing is done). False for failure: FOLL_GET or
 * FOLL_PIN was set, but the page could not be grabbed.
 */
bool __must_check try_grab_page(struct page *page, unsigned int flags)
{
        struct folio *folio = page_folio(page);

        WARN_ON_ONCE((flags & (FOLL_GET | FOLL_PIN)) == (FOLL_GET | FOLL_PIN));
        if (WARN_ON_ONCE(folio_ref_count(folio) <= 0))
                return false;

        if (flags & FOLL_GET)
                folio_ref_inc(folio);
        else if (flags & FOLL_PIN) {
                /*
                 * Similar to try_grab_folio(): be sure to *also*
                 * increment the normal page refcount field at least once,
                 * so that the page really is pinned.
                 */
                if (folio_test_large(folio)) {
                        folio_ref_add(folio, 1);
                        atomic_add(1, folio_pincount_ptr(folio));
                } else {
                        folio_ref_add(folio, GUP_PIN_COUNTING_BIAS);
                }

                node_stat_mod_folio(folio, NR_FOLL_PIN_ACQUIRED, 1);
        }

        return true;
}

/**
 * unpin_user_page() - release a dma-pinned page
 * @page: pointer to page to be released
 *
 * Pages that were pinned via pin_user_pages*() must be released via either
 * unpin_user_page(), or one of the unpin_user_pages*() routines. This is so
 * that such pages can be separately tracked and uniquely handled. In
 * particular, interactions with RDMA and filesystems need special handling.
 */
void unpin_user_page(struct page *page)
{
        gup_put_folio(page_folio(page), 1, FOLL_PIN);
}
EXPORT_SYMBOL(unpin_user_page);
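
/*
 * Hedged usage sketch (editorial addition, hypothetical caller code): the
 * typical driver pattern for DMA into user memory is pin, do the I/O, then
 * unpin each page. Only the pin/unpin APIs below are real; do_dma_to() and
 * NR are placeholders.
 *
 *      struct page *pages[NR];
 *      int i, rc;
 *
 *      rc = pin_user_pages_fast(user_addr, NR, FOLL_WRITE, pages);
 *      if (rc < 0)
 *              return rc;              // nothing was pinned
 *      do_dma_to(pages, rc);           // hypothetical helper
 *      for (i = 0; i < rc; i++)
 *              unpin_user_page(pages[i]);
 */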

static inline struct folio *gup_folio_range_next(struct page *start,
                unsigned long npages, unsigned long i, unsigned int *ntails)
{
        struct page *next = nth_page(start, i);
        struct folio *folio = page_folio(next);
        unsigned int nr = 1;

        if (folio_test_large(folio))
                nr = min_t(unsigned int, npages - i,
                           folio_nr_pages(folio) - folio_page_idx(folio, next));

        *ntails = nr;
        return folio;
}

static inline struct folio *gup_folio_next(struct page **list,
                unsigned long npages, unsigned long i, unsigned int *ntails)
{
        struct folio *folio = page_folio(list[i]);
        unsigned int nr;

        for (nr = i + 1; nr < npages; nr++) {
                if (page_folio(list[nr]) != folio)
                        break;
        }

        *ntails = nr - i;
        return folio;
}

/**
 * unpin_user_pages_dirty_lock() - release and optionally dirty gup-pinned pages
 * @pages:  array of pages to be maybe marked dirty, and definitely released.
 * @npages: number of pages in the @pages array.
 * @make_dirty: whether to mark the pages dirty
 *
 * "gup-pinned page" refers to a page that has had one of the pin_user_pages()
 * variants called on that page.
 *
 * For each page in the @pages array, make that page (or its head page, if a
 * compound page) dirty, if @make_dirty is true, and if the page was previously
 * listed as clean. In any case, releases all pages using unpin_user_page(),
 * possibly via unpin_user_pages(), for the non-dirty case.
 *
 * Please see the unpin_user_page() documentation for details.
 *
 * set_page_dirty_lock() is used internally. If instead, set_page_dirty() is
 * required, then the caller should a) verify that this is really correct,
 * because _lock() is usually required, and b) hand code it:
 * set_page_dirty_lock(), unpin_user_page().
 *
 */
void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages,
                                 bool make_dirty)
{
        unsigned long i;
        struct folio *folio;
        unsigned int nr;

        if (!make_dirty) {
                unpin_user_pages(pages, npages);
                return;
        }

        for (i = 0; i < npages; i += nr) {
                folio = gup_folio_next(pages, npages, i, &nr);
                /*
                 * Checking PageDirty at this point may race with
                 * clear_page_dirty_for_io(), but that's OK. Two key
                 * cases:
                 *
                 * 1) This code sees the page as already dirty, so it
                 * skips the call to set_page_dirty(). That could happen
                 * because clear_page_dirty_for_io() called
                 * page_mkclean(), followed by set_page_dirty().
                 * However, now the page is going to get written back,
                 * which meets the original intention of setting it
                 * dirty, so all is well: clear_page_dirty_for_io() goes
                 * on to call TestClearPageDirty(), and write the page
                 * back.
                 *
                 * 2) This code sees the page as clean, so it calls
                 * set_page_dirty(). The page stays dirty, despite being
                 * written back, so it gets written back again in the
                 * next writeback cycle. This is harmless.
                 */
                if (!folio_test_dirty(folio)) {
                        folio_lock(folio);
                        folio_mark_dirty(folio);
                        folio_unlock(folio);
                }
                gup_put_folio(folio, nr, FOLL_PIN);
        }
}
EXPORT_SYMBOL(unpin_user_pages_dirty_lock);
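
/*
 * Hedged usage sketch (editorial addition): after a device has DMA-written
 * into pinned user pages, release them and propagate the dirty state in one
 * call. Here "pages"/"npages" are assumed to come from an earlier
 * pin_user_pages*() call made with FOLL_WRITE.
 *
 *      unpin_user_pages_dirty_lock(pages, npages, true);
 *
 * Passing make_dirty == false makes this equivalent to unpin_user_pages().
 */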

/**
 * unpin_user_page_range_dirty_lock() - release and optionally dirty
 * gup-pinned page range
 *
 * @page:  the starting page of a range maybe marked dirty, and definitely released.
 * @npages: number of consecutive pages to release.
 * @make_dirty: whether to mark the pages dirty
 *
 * "gup-pinned page range" refers to a range of pages that has had one of the
 * pin_user_pages() variants called on that page.
 *
 * For the page ranges defined by [page .. page+npages], make that range (or
 * its head pages, if a compound page) dirty, if @make_dirty is true, and if
 * the page range was previously listed as clean.
 *
 * set_page_dirty_lock() is used internally. If instead, set_page_dirty() is
 * required, then the caller should a) verify that this is really correct,
 * because _lock() is usually required, and b) hand code it:
 * set_page_dirty_lock(), unpin_user_page().
 *
 */
void unpin_user_page_range_dirty_lock(struct page *page, unsigned long npages,
                                      bool make_dirty)
{
        unsigned long i;
        struct folio *folio;
        unsigned int nr;

        for (i = 0; i < npages; i += nr) {
                folio = gup_folio_range_next(page, npages, i, &nr);
                if (make_dirty && !folio_test_dirty(folio)) {
                        folio_lock(folio);
                        folio_mark_dirty(folio);
                        folio_unlock(folio);
                }
                gup_put_folio(folio, nr, FOLL_PIN);
        }
}
EXPORT_SYMBOL(unpin_user_page_range_dirty_lock);

/**
 * unpin_user_pages() - release an array of gup-pinned pages.
 * @pages:  array of pages to be released.
 * @npages: number of pages in the @pages array.
 *
 * For each page in the @pages array, release the page using unpin_user_page().
 *
 * Please see the unpin_user_page() documentation for details.
 */
void unpin_user_pages(struct page **pages, unsigned long npages)
{
        unsigned long i;
        struct folio *folio;
        unsigned int nr;

        /*
         * If this WARN_ON() fires, then the system *might* be leaking pages
         * (by leaving them pinned), but probably not. More likely, gup/pup
         * returned a hard -ERRNO error to the caller, who erroneously passed
         * it here.
         */
        if (WARN_ON(IS_ERR_VALUE(npages)))
                return;

        for (i = 0; i < npages; i += nr) {
                folio = gup_folio_next(pages, npages, i, &nr);
                gup_put_folio(folio, nr, FOLL_PIN);
        }
}
EXPORT_SYMBOL(unpin_user_pages);
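
/*
 * Hedged sketch (editorial addition, hypothetical caller code) of the
 * pitfall the IS_ERR_VALUE() check above guards against:
 *
 *      int rc = pin_user_pages_fast(addr, nr, FOLL_WRITE, pages);
 *      if (rc < 0)
 *              return rc;      // do NOT pass the errno to unpin_user_pages()
 *      ...
 *      unpin_user_pages(pages, rc);    // rc is the count actually pinned
 *
 * A negative errno passed as @npages would look like an enormous unsigned
 * count; IS_ERR_VALUE() catches that and bails out with a warning.
 */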

/*
 * Set the MMF_HAS_PINNED flag if not set yet; after set it'll be there for
 * the mm's lifecycle. Avoid setting the bit unless necessary, or it might
 * cause write cache bouncing on large SMP machines for concurrent pinned
 * gups.
 */
static inline void mm_set_has_pinned_flag(unsigned long *mm_flags)
{
        if (!test_bit(MMF_HAS_PINNED, mm_flags))
                set_bit(MMF_HAS_PINNED, mm_flags);
}

#ifdef CONFIG_MMU
static struct page *no_page_table(struct vm_area_struct *vma,
                unsigned int flags)
{
        /*
         * When core dumping an enormous anonymous area that nobody
         * has touched so far, we don't want to allocate unnecessary pages or
         * page tables.  Return error instead of NULL to skip handle_mm_fault,
         * then get_dump_page() will return NULL to leave a hole in the dump.
         * But we can only make this optimization where a hole would surely
         * be zero-filled if handle_mm_fault() actually did handle it.
         */
        if ((flags & FOLL_DUMP) &&
            (vma_is_anonymous(vma) || !vma->vm_ops->fault))
                return ERR_PTR(-EFAULT);
        return NULL;
}

static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address,
                pte_t *pte, unsigned int flags)
{
        if (flags & FOLL_TOUCH) {
                pte_t entry = *pte;

                if (flags & FOLL_WRITE)
                        entry = pte_mkdirty(entry);
                entry = pte_mkyoung(entry);

                if (!pte_same(*pte, entry)) {
                        set_pte_at(vma->vm_mm, address, pte, entry);
                        update_mmu_cache(vma, address, pte);
                }
        }

        /* Proper page table entry exists, but no corresponding struct page */
        return -EEXIST;
}

/*
 * FOLL_FORCE can write to even unwritable pte's, but only
 * after we've gone through a COW cycle and they are dirty.
 */
static inline bool can_follow_write_pte(pte_t pte, unsigned int flags)
{
        return pte_write(pte) ||
                ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte));
}
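
/*
 * Hedged illustration (editorial addition) of how the check above plays
 * out. A debugger writing through a read-only private mapping uses
 * FOLL_FORCE | FOLL_WRITE: the first pass finds !pte_write() and no
 * FOLL_COW, so the caller faults in a COW copy via faultin_page(), which
 * sets FOLL_COW on success. On the retry the pte is still unwritable but
 * now dirty, so
 *
 *      pte_write(pte) ||
 *              ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte))
 *
 * evaluates true and the forced write is allowed to proceed.
 */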

static struct page *follow_page_pte(struct vm_area_struct *vma,
                unsigned long address, pmd_t *pmd, unsigned int flags,
                struct dev_pagemap **pgmap)
{
        struct mm_struct *mm = vma->vm_mm;
        struct page *page;
        spinlock_t *ptl;
        pte_t *ptep, pte;
        int ret;

        /* FOLL_GET and FOLL_PIN are mutually exclusive. */
        if (WARN_ON_ONCE((flags & (FOLL_PIN | FOLL_GET)) ==
                         (FOLL_PIN | FOLL_GET)))
                return ERR_PTR(-EINVAL);
retry:
        if (unlikely(pmd_bad(*pmd)))
                return no_page_table(vma, flags);

        ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
        pte = *ptep;
        if (!pte_present(pte)) {
                swp_entry_t entry;
                /*
                 * KSM's break_ksm() relies upon recognizing a ksm page
                 * even while it is being migrated, so for that case we
                 * need migration_entry_wait().
                 */
                if (likely(!(flags & FOLL_MIGRATION)))
                        goto no_page;
                if (pte_none(pte))
                        goto no_page;
                entry = pte_to_swp_entry(pte);
                if (!is_migration_entry(entry))
                        goto no_page;
                pte_unmap_unlock(ptep, ptl);
                migration_entry_wait(mm, pmd, address);
                goto retry;
        }
        if ((flags & FOLL_NUMA) && pte_protnone(pte))
                goto no_page;
        if ((flags & FOLL_WRITE) && !can_follow_write_pte(pte, flags)) {
                pte_unmap_unlock(ptep, ptl);
                return NULL;
        }

        page = vm_normal_page(vma, address, pte);
        if (!page && pte_devmap(pte) && (flags & (FOLL_GET | FOLL_PIN))) {
                /*
                 * Only return device mapping pages in the FOLL_GET or FOLL_PIN
                 * case since they are only valid while holding the pgmap
                 * reference.
                 */
                *pgmap = get_dev_pagemap(pte_pfn(pte), *pgmap);
                if (*pgmap)
                        page = pte_page(pte);
                else
                        goto no_page;
        } else if (unlikely(!page)) {
                if (flags & FOLL_DUMP) {
                        /* Avoid special (like zero) pages in core dumps */
                        page = ERR_PTR(-EFAULT);
                        goto out;
                }

                if (is_zero_pfn(pte_pfn(pte))) {
                        page = pte_page(pte);
                } else {
                        ret = follow_pfn_pte(vma, address, ptep, flags);
                        page = ERR_PTR(ret);
                        goto out;
                }
        }

        /* try_grab_page() does nothing unless FOLL_GET or FOLL_PIN is set. */
        if (unlikely(!try_grab_page(page, flags))) {
                page = ERR_PTR(-ENOMEM);
                goto out;
        }
        /*
         * We need to make the page accessible if and only if we are going
         * to access its content (the FOLL_PIN case).  Please see
         * Documentation/core-api/pin_user_pages.rst for details.
         */
        if (flags & FOLL_PIN) {
                ret = arch_make_page_accessible(page);
                if (ret) {
                        unpin_user_page(page);
                        page = ERR_PTR(ret);
                        goto out;
                }
        }
        if (flags & FOLL_TOUCH) {
                if ((flags & FOLL_WRITE) &&
                    !pte_dirty(pte) && !PageDirty(page))
                        set_page_dirty(page);
                /*
                 * pte_mkyoung() would be more correct here, but atomic care
                 * is needed to avoid losing the dirty bit: it is easier to use
                 * mark_page_accessed().
                 */
                mark_page_accessed(page);
        }
out:
        pte_unmap_unlock(ptep, ptl);
        return page;
no_page:
        pte_unmap_unlock(ptep, ptl);
        if (!pte_none(pte))
                return NULL;
        return no_page_table(vma, flags);
}

static struct page *follow_pmd_mask(struct vm_area_struct *vma,
                                    unsigned long address, pud_t *pudp,
                                    unsigned int flags,
                                    struct follow_page_context *ctx)
{
        pmd_t *pmd, pmdval;
        spinlock_t *ptl;
        struct page *page;
        struct mm_struct *mm = vma->vm_mm;

        pmd = pmd_offset(pudp, address);
        /*
         * The READ_ONCE() will stabilize the pmdval in a register or
         * on the stack so that it will stop changing under the code.
         */
        pmdval = READ_ONCE(*pmd);
        if (pmd_none(pmdval))
                return no_page_table(vma, flags);
        if (pmd_huge(pmdval) && is_vm_hugetlb_page(vma)) {
                page = follow_huge_pmd(mm, address, pmd, flags);
                if (page)
                        return page;
                return no_page_table(vma, flags);
        }
        if (is_hugepd(__hugepd(pmd_val(pmdval)))) {
                page = follow_huge_pd(vma, address,
                                      __hugepd(pmd_val(pmdval)), flags,
                                      PMD_SHIFT);
                if (page)
                        return page;
                return no_page_table(vma, flags);
        }
retry:
        if (!pmd_present(pmdval)) {
                /*
                 * Should never reach here, if thp migration is not supported;
                 * Otherwise, it must be a thp migration entry.
                 */
                VM_BUG_ON(!thp_migration_supported() ||
                          !is_pmd_migration_entry(pmdval));

                if (likely(!(flags & FOLL_MIGRATION)))
                        return no_page_table(vma, flags);

                pmd_migration_entry_wait(mm, pmd);
                pmdval = READ_ONCE(*pmd);
                /*
                 * MADV_DONTNEED may convert the pmd to null because
                 * mmap_lock is held in read mode
                 */
                if (pmd_none(pmdval))
                        return no_page_table(vma, flags);
                goto retry;
        }
        if (pmd_devmap(pmdval)) {
                ptl = pmd_lock(mm, pmd);
                page = follow_devmap_pmd(vma, address, pmd, flags, &ctx->pgmap);
                spin_unlock(ptl);
                if (page)
                        return page;
        }
        if (likely(!pmd_trans_huge(pmdval)))
                return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);

        if ((flags & FOLL_NUMA) && pmd_protnone(pmdval))
                return no_page_table(vma, flags);

retry_locked:
        ptl = pmd_lock(mm, pmd);
        if (unlikely(pmd_none(*pmd))) {
                spin_unlock(ptl);
                return no_page_table(vma, flags);
        }
        if (unlikely(!pmd_present(*pmd))) {
                spin_unlock(ptl);
                if (likely(!(flags & FOLL_MIGRATION)))
                        return no_page_table(vma, flags);
                pmd_migration_entry_wait(mm, pmd);
                goto retry_locked;
        }
        if (unlikely(!pmd_trans_huge(*pmd))) {
                spin_unlock(ptl);
                return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
        }
        if (flags & FOLL_SPLIT_PMD) {
                int ret;
                page = pmd_page(*pmd);
                if (is_huge_zero_page(page)) {
                        spin_unlock(ptl);
                        ret = 0;
                        split_huge_pmd(vma, pmd, address);
                        if (pmd_trans_unstable(pmd))
                                ret = -EBUSY;
                } else {
                        spin_unlock(ptl);
                        split_huge_pmd(vma, pmd, address);
                        ret = pte_alloc(mm, pmd) ? -ENOMEM : 0;
                }

                return ret ? ERR_PTR(ret) :
                        follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
        }
        page = follow_trans_huge_pmd(vma, address, pmd, flags);
        spin_unlock(ptl);
        ctx->page_mask = HPAGE_PMD_NR - 1;
        return page;
}

static struct page *follow_pud_mask(struct vm_area_struct *vma,
                                    unsigned long address, p4d_t *p4dp,
                                    unsigned int flags,
                                    struct follow_page_context *ctx)
{
        pud_t *pud;
        spinlock_t *ptl;
        struct page *page;
        struct mm_struct *mm = vma->vm_mm;

        pud = pud_offset(p4dp, address);
        if (pud_none(*pud))
                return no_page_table(vma, flags);
        if (pud_huge(*pud) && is_vm_hugetlb_page(vma)) {
                page = follow_huge_pud(mm, address, pud, flags);
                if (page)
                        return page;
                return no_page_table(vma, flags);
        }
        if (is_hugepd(__hugepd(pud_val(*pud)))) {
                page = follow_huge_pd(vma, address,
                                      __hugepd(pud_val(*pud)), flags,
                                      PUD_SHIFT);
                if (page)
                        return page;
                return no_page_table(vma, flags);
        }
        if (pud_devmap(*pud)) {
                ptl = pud_lock(mm, pud);
                page = follow_devmap_pud(vma, address, pud, flags, &ctx->pgmap);
                spin_unlock(ptl);
                if (page)
                        return page;
        }
        if (unlikely(pud_bad(*pud)))
                return no_page_table(vma, flags);

        return follow_pmd_mask(vma, address, pud, flags, ctx);
}

static struct page *follow_p4d_mask(struct vm_area_struct *vma,
                                    unsigned long address, pgd_t *pgdp,
                                    unsigned int flags,
                                    struct follow_page_context *ctx)
{
        p4d_t *p4d;
        struct page *page;

        p4d = p4d_offset(pgdp, address);
        if (p4d_none(*p4d))
                return no_page_table(vma, flags);
        BUILD_BUG_ON(p4d_huge(*p4d));
        if (unlikely(p4d_bad(*p4d)))
                return no_page_table(vma, flags);

        if (is_hugepd(__hugepd(p4d_val(*p4d)))) {
                page = follow_huge_pd(vma, address,
                                      __hugepd(p4d_val(*p4d)), flags,
                                      P4D_SHIFT);
                if (page)
                        return page;
                return no_page_table(vma, flags);
        }
        return follow_pud_mask(vma, address, p4d, flags, ctx);
}

/**
 * follow_page_mask - look up a page descriptor from a user-virtual address
 * @vma: vm_area_struct mapping @address
 * @address: virtual address to look up
 * @flags: flags modifying lookup behaviour
 * @ctx: contains dev_pagemap for %ZONE_DEVICE memory pinning and a
 *       pointer to output page_mask
 *
 * @flags can have FOLL_ flags set, defined in <linux/mm.h>
 *
 * When getting pages from ZONE_DEVICE memory, the @ctx->pgmap caches
 * the device's dev_pagemap metadata to avoid repeating expensive lookups.
 *
 * On output, the @ctx->page_mask is set according to the size of the page.
 *
 * Return: the mapped (struct page *), %NULL if no mapping exists, or
 * an error pointer if there is a mapping to something not represented
 * by a page descriptor (see also vm_normal_page()).
 */
static struct page *follow_page_mask(struct vm_area_struct *vma,
                              unsigned long address, unsigned int flags,
                              struct follow_page_context *ctx)
{
        pgd_t *pgd;
        struct page *page;
        struct mm_struct *mm = vma->vm_mm;

        ctx->page_mask = 0;

        /* make this handle hugepd */
        page = follow_huge_addr(mm, address, flags & FOLL_WRITE);
        if (!IS_ERR(page)) {
                WARN_ON_ONCE(flags & (FOLL_GET | FOLL_PIN));
                return page;
        }

        pgd = pgd_offset(mm, address);

        if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
                return no_page_table(vma, flags);

        if (pgd_huge(*pgd)) {
                page = follow_huge_pgd(mm, address, pgd, flags);
                if (page)
                        return page;
                return no_page_table(vma, flags);
        }
        if (is_hugepd(__hugepd(pgd_val(*pgd)))) {
                page = follow_huge_pd(vma, address,
                                      __hugepd(pgd_val(*pgd)), flags,
                                      PGDIR_SHIFT);
                if (page)
                        return page;
                return no_page_table(vma, flags);
        }

        return follow_p4d_mask(vma, address, pgd, flags, ctx);
}

struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
                         unsigned int foll_flags)
{
        struct follow_page_context ctx = { NULL };
        struct page *page;

        if (vma_is_secretmem(vma))
                return NULL;

        page = follow_page_mask(vma, address, foll_flags, &ctx);
        if (ctx.pgmap)
                put_dev_pagemap(ctx.pgmap);
        return page;
}
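
/*
 * Hedged usage sketch (editorial addition, hypothetical caller): looking
 * up a single mapped page under the mmap_lock. FOLL_GET makes
 * follow_page() take a reference the caller must drop; FOLL_DUMP turns
 * "no page" cases into an error pointer instead of NULL.
 *
 *      mmap_read_lock(mm);
 *      page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);
 *      if (!IS_ERR_OR_NULL(page)) {
 *              // ... inspect the page ...
 *              put_page(page);
 *      }
 *      mmap_read_unlock(mm);
 */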
Kirill A. Shutemov | f2b495c | 2014-06-04 16:08:11 -0700 | [diff] [blame] | 796 | static int get_gate_page(struct mm_struct *mm, unsigned long address, |
| 797 | unsigned int gup_flags, struct vm_area_struct **vma, |
| 798 | struct page **page) |
| 799 | { |
| 800 | pgd_t *pgd; |
Kirill A. Shutemov | c2febaf | 2017-03-09 17:24:07 +0300 | [diff] [blame] | 801 | p4d_t *p4d; |
Kirill A. Shutemov | f2b495c | 2014-06-04 16:08:11 -0700 | [diff] [blame] | 802 | pud_t *pud; |
| 803 | pmd_t *pmd; |
| 804 | pte_t *pte; |
| 805 | int ret = -EFAULT; |
| 806 | |
| 807 | /* user gate pages are read-only */ |
| 808 | if (gup_flags & FOLL_WRITE) |
| 809 | return -EFAULT; |
| 810 | if (address > TASK_SIZE) |
| 811 | pgd = pgd_offset_k(address); |
| 812 | else |
| 813 | pgd = pgd_offset_gate(mm, address); |
Andy Lutomirski | b5d1c39 | 2019-07-11 20:57:43 -0700 | [diff] [blame] | 814 | if (pgd_none(*pgd)) |
| 815 | return -EFAULT; |
Kirill A. Shutemov | c2febaf | 2017-03-09 17:24:07 +0300 | [diff] [blame] | 816 | p4d = p4d_offset(pgd, address); |
Andy Lutomirski | b5d1c39 | 2019-07-11 20:57:43 -0700 | [diff] [blame] | 817 | if (p4d_none(*p4d)) |
| 818 | return -EFAULT; |
Kirill A. Shutemov | c2febaf | 2017-03-09 17:24:07 +0300 | [diff] [blame] | 819 | pud = pud_offset(p4d, address); |
Andy Lutomirski | b5d1c39 | 2019-07-11 20:57:43 -0700 | [diff] [blame] | 820 | if (pud_none(*pud)) |
| 821 | return -EFAULT; |
Kirill A. Shutemov | f2b495c | 2014-06-04 16:08:11 -0700 | [diff] [blame] | 822 | pmd = pmd_offset(pud, address); |
Zi Yan | 84c3fc4 | 2017-09-08 16:11:01 -0700 | [diff] [blame] | 823 | if (!pmd_present(*pmd)) |
Kirill A. Shutemov | f2b495c | 2014-06-04 16:08:11 -0700 | [diff] [blame] | 824 | return -EFAULT; |
| 825 | VM_BUG_ON(pmd_trans_huge(*pmd)); |
| 826 | pte = pte_offset_map(pmd, address); |
| 827 | if (pte_none(*pte)) |
| 828 | goto unmap; |
| 829 | *vma = get_gate_vma(mm); |
| 830 | if (!page) |
| 831 | goto out; |
| 832 | *page = vm_normal_page(*vma, address, *pte); |
| 833 | if (!*page) { |
| 834 | if ((gup_flags & FOLL_DUMP) || !is_zero_pfn(pte_pfn(*pte))) |
| 835 | goto unmap; |
| 836 | *page = pte_page(*pte); |
| 837 | } |
Dave Hansen | 9fa2dd9 | 2020-09-03 13:40:28 -0700 | [diff] [blame] | 838 | if (unlikely(!try_grab_page(*page, gup_flags))) { |
Linus Torvalds | 8fde12c | 2019-04-11 10:49:19 -0700 | [diff] [blame] | 839 | ret = -ENOMEM; |
| 840 | goto unmap; |
| 841 | } |
Kirill A. Shutemov | f2b495c | 2014-06-04 16:08:11 -0700 | [diff] [blame] | 842 | out: |
| 843 | ret = 0; |
| 844 | unmap: |
| 845 | pte_unmap(pte); |
| 846 | return ret; |
| 847 | } |
| 848 | |
Paul Cassella | 9a95f3c | 2014-08-06 16:07:24 -0700 | [diff] [blame] | 849 | /* |
Michel Lespinasse | c1e8d7c | 2020-06-08 21:33:54 -0700 | [diff] [blame] | 850 | * mmap_lock must be held on entry. If @locked != NULL and *@flags |
| 851 | * does not include FOLL_NOWAIT, the mmap_lock may be released. If it |
Peter Xu | 4f6da93 | 2020-04-01 21:07:58 -0700 | [diff] [blame] | 852 | * is, *@locked will be set to 0 and -EBUSY returned. |
Paul Cassella | 9a95f3c | 2014-08-06 16:07:24 -0700 | [diff] [blame] | 853 | */ |
Peter Xu | 64019a2 | 2020-08-11 18:39:01 -0700 | [diff] [blame] | 854 | static int faultin_page(struct vm_area_struct *vma, |
Peter Xu | 4f6da93 | 2020-04-01 21:07:58 -0700 | [diff] [blame] | 855 | unsigned long address, unsigned int *flags, int *locked) |
Kirill A. Shutemov | 1674448 | 2014-06-04 16:08:12 -0700 | [diff] [blame] | 856 | { |
Kirill A. Shutemov | 1674448 | 2014-06-04 16:08:12 -0700 | [diff] [blame] | 857 | unsigned int fault_flags = 0; |
Souptick Joarder | 2b74030 | 2018-08-23 17:01:36 -0700 | [diff] [blame] | 858 | vm_fault_t ret; |
Kirill A. Shutemov | 1674448 | 2014-06-04 16:08:12 -0700 | [diff] [blame] | 859 | |
Andreas Gruenbacher | 55b8fe7 | 2021-08-17 22:52:08 +0200 | [diff] [blame] | 860 | if (*flags & FOLL_NOFAULT) |
| 861 | return -EFAULT; |
Kirill A. Shutemov | 1674448 | 2014-06-04 16:08:12 -0700 | [diff] [blame] | 862 | if (*flags & FOLL_WRITE) |
| 863 | fault_flags |= FAULT_FLAG_WRITE; |
Dave Hansen | 1b2ee12 | 2016-02-12 13:02:21 -0800 | [diff] [blame] | 864 | if (*flags & FOLL_REMOTE) |
| 865 | fault_flags |= FAULT_FLAG_REMOTE; |
Peter Xu | 4f6da93 | 2020-04-01 21:07:58 -0700 | [diff] [blame] | 866 | if (locked) |
Peter Xu | 71335f3 | 2020-04-01 21:08:53 -0700 | [diff] [blame] | 867 | fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; |
Kirill A. Shutemov | 1674448 | 2014-06-04 16:08:12 -0700 | [diff] [blame] | 868 | if (*flags & FOLL_NOWAIT) |
| 869 | fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT; |
Andres Lagar-Cavilla | 234b239 | 2014-09-17 10:51:48 -0700 | [diff] [blame] | 870 | if (*flags & FOLL_TRIED) { |
Peter Xu | 4426e94 | 2020-04-01 21:08:49 -0700 | [diff] [blame] | 871 | /* |
| 872 | * Note: FAULT_FLAG_ALLOW_RETRY and FAULT_FLAG_TRIED |
| 873 | * can co-exist |
| 874 | */ |
Andres Lagar-Cavilla | 234b239 | 2014-09-17 10:51:48 -0700 | [diff] [blame] | 875 | fault_flags |= FAULT_FLAG_TRIED; |
| 876 | } |
Kirill A. Shutemov | 1674448 | 2014-06-04 16:08:12 -0700 | [diff] [blame] | 877 | |
Peter Xu | bce617e | 2020-08-11 18:37:44 -0700 | [diff] [blame] | 878 | ret = handle_mm_fault(vma, address, fault_flags, NULL); |
Kirill A. Shutemov | 1674448 | 2014-06-04 16:08:12 -0700 | [diff] [blame] | 879 | if (ret & VM_FAULT_ERROR) { |
James Morse | 9a291a7 | 2017-06-02 14:46:46 -0700 | [diff] [blame] | 880 | int err = vm_fault_to_errno(ret, *flags); |
| 881 | |
| 882 | if (err) |
| 883 | return err; |
Kirill A. Shutemov | 1674448 | 2014-06-04 16:08:12 -0700 | [diff] [blame] | 884 | BUG(); |
| 885 | } |
| 886 | |
Kirill A. Shutemov | 1674448 | 2014-06-04 16:08:12 -0700 | [diff] [blame] | 887 | if (ret & VM_FAULT_RETRY) { |
Peter Xu | 4f6da93 | 2020-04-01 21:07:58 -0700 | [diff] [blame] | 888 | if (locked && !(fault_flags & FAULT_FLAG_RETRY_NOWAIT)) |
| 889 | *locked = 0; |
Kirill A. Shutemov | 1674448 | 2014-06-04 16:08:12 -0700 | [diff] [blame] | 890 | return -EBUSY; |
| 891 | } |
| 892 | |
| 893 | /* |
| 894 | * The VM_FAULT_WRITE bit tells us that do_wp_page has broken COW when |
| 895 | * necessary, even if maybe_mkwrite decided not to set pte_write. We |
| 896 | * can thus safely do subsequent page lookups as if they were reads. |
| 897 | * But only do so when looping for pte_write is futile: in some cases |
 | 898 | * userspace may also want to write to the page we just got, which a
 | 899 | * read fault here might prevent (a read-only page might get re-COWed
 | 900 | * by a userspace write).
| 901 | */ |
| 902 | if ((ret & VM_FAULT_WRITE) && !(vma->vm_flags & VM_WRITE)) |
Mario Leinweber | 2923117 | 2018-04-05 16:24:18 -0700 | [diff] [blame] | 903 | *flags |= FOLL_COW; |
Kirill A. Shutemov | 1674448 | 2014-06-04 16:08:12 -0700 | [diff] [blame] | 904 | return 0; |
| 905 | } |
| 906 | |
Kirill A. Shutemov | fa5bb20 | 2014-06-04 16:08:13 -0700 | [diff] [blame] | 907 | static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags) |
| 908 | { |
| 909 | vm_flags_t vm_flags = vma->vm_flags; |
Dave Hansen | 1b2ee12 | 2016-02-12 13:02:21 -0800 | [diff] [blame] | 910 | int write = (gup_flags & FOLL_WRITE); |
| 911 | int foreign = (gup_flags & FOLL_REMOTE); |
Kirill A. Shutemov | fa5bb20 | 2014-06-04 16:08:13 -0700 | [diff] [blame] | 912 | |
| 913 | if (vm_flags & (VM_IO | VM_PFNMAP)) |
| 914 | return -EFAULT; |
| 915 | |
Willy Tarreau | 7f7ccc2 | 2018-05-11 08:11:44 +0200 | [diff] [blame] | 916 | if (gup_flags & FOLL_ANON && !vma_is_anonymous(vma)) |
| 917 | return -EFAULT; |
| 918 | |
Jason Gunthorpe | 52650c8 | 2020-12-14 19:05:48 -0800 | [diff] [blame] | 919 | if ((gup_flags & FOLL_LONGTERM) && vma_is_fsdax(vma)) |
| 920 | return -EOPNOTSUPP; |
| 921 | |
Mike Rapoport | 1507f51 | 2021-07-07 18:08:03 -0700 | [diff] [blame] | 922 | if (vma_is_secretmem(vma)) |
| 923 | return -EFAULT; |
| 924 | |
Dave Hansen | 1b2ee12 | 2016-02-12 13:02:21 -0800 | [diff] [blame] | 925 | if (write) { |
Kirill A. Shutemov | fa5bb20 | 2014-06-04 16:08:13 -0700 | [diff] [blame] | 926 | if (!(vm_flags & VM_WRITE)) { |
| 927 | if (!(gup_flags & FOLL_FORCE)) |
| 928 | return -EFAULT; |
| 929 | /* |
| 930 | * We used to let the write,force case do COW in a |
| 931 | * VM_MAYWRITE VM_SHARED !VM_WRITE vma, so ptrace could |
| 932 | * set a breakpoint in a read-only mapping of an |
| 933 | * executable, without corrupting the file (yet only |
| 934 | * when that file had been opened for writing!). |
 | 935 | * Anon pages in shared mappings are surprising: we now
 | 936 | * just reject them.
| 937 | */ |
Hugh Dickins | 4643536 | 2016-01-30 18:03:16 -0800 | [diff] [blame] | 938 | if (!is_cow_mapping(vm_flags)) |
Kirill A. Shutemov | fa5bb20 | 2014-06-04 16:08:13 -0700 | [diff] [blame] | 939 | return -EFAULT; |
Kirill A. Shutemov | fa5bb20 | 2014-06-04 16:08:13 -0700 | [diff] [blame] | 940 | } |
| 941 | } else if (!(vm_flags & VM_READ)) { |
| 942 | if (!(gup_flags & FOLL_FORCE)) |
| 943 | return -EFAULT; |
| 944 | /* |
| 945 | * Is there actually any vma we can reach here which does not |
| 946 | * have VM_MAYREAD set? |
| 947 | */ |
| 948 | if (!(vm_flags & VM_MAYREAD)) |
| 949 | return -EFAULT; |
| 950 | } |
Dave Hansen | d61172b | 2016-02-12 13:02:24 -0800 | [diff] [blame] | 951 | /* |
| 952 | * gups are always data accesses, not instruction |
| 953 | * fetches, so execute=false here |
| 954 | */ |
| 955 | if (!arch_vma_access_permitted(vma, write, false, foreign)) |
Dave Hansen | 33a709b | 2016-02-12 13:02:19 -0800 | [diff] [blame] | 956 | return -EFAULT; |
Kirill A. Shutemov | fa5bb20 | 2014-06-04 16:08:13 -0700 | [diff] [blame] | 957 | return 0; |
| 958 | } |
| 959 | |
Kirill A. Shutemov | 4bbd4c7 | 2014-06-04 16:08:10 -0700 | [diff] [blame] | 960 | /** |
| 961 | * __get_user_pages() - pin user pages in memory |
Kirill A. Shutemov | 4bbd4c7 | 2014-06-04 16:08:10 -0700 | [diff] [blame] | 962 | * @mm: mm_struct of target mm |
| 963 | * @start: starting user address |
| 964 | * @nr_pages: number of pages from start to pin |
| 965 | * @gup_flags: flags modifying pin behaviour |
| 966 | * @pages: array that receives pointers to the pages pinned. |
| 967 | * Should be at least nr_pages long. Or NULL, if caller |
| 968 | * only intends to ensure the pages are faulted in. |
| 969 | * @vmas: array of pointers to vmas corresponding to each page. |
| 970 | * Or NULL if the caller does not require them. |
Michel Lespinasse | c1e8d7c | 2020-06-08 21:33:54 -0700 | [diff] [blame] | 971 | * @locked: whether we're still with the mmap_lock held |
Kirill A. Shutemov | 4bbd4c7 | 2014-06-04 16:08:10 -0700 | [diff] [blame] | 972 | * |
Liu Xiang | d2dfbe4 | 2019-11-30 17:49:53 -0800 | [diff] [blame] | 973 | * Returns either the number of pages pinned (which may be less than the
| 974 | * number requested), or an error. Details about the return value: |
| 975 | * |
| 976 | * -- If nr_pages is 0, returns 0. |
| 977 | * -- If nr_pages is >0, but no pages were pinned, returns -errno. |
| 978 | * -- If nr_pages is >0, and some pages were pinned, returns the number of |
| 979 | * pages pinned. Again, this may be less than nr_pages. |
Michal Hocko | 2d3a36a | 2020-06-03 16:03:25 -0700 | [diff] [blame] | 980 | * -- 0 return value is possible when the fault would need to be retried. |
Liu Xiang | d2dfbe4 | 2019-11-30 17:49:53 -0800 | [diff] [blame] | 981 | * |
| 982 | * The caller is responsible for releasing returned @pages, via put_page(). |
| 983 | * |
Michel Lespinasse | c1e8d7c | 2020-06-08 21:33:54 -0700 | [diff] [blame] | 984 | * @vmas are valid only as long as mmap_lock is held. |
Kirill A. Shutemov | 4bbd4c7 | 2014-06-04 16:08:10 -0700 | [diff] [blame] | 985 | * |
Michel Lespinasse | c1e8d7c | 2020-06-08 21:33:54 -0700 | [diff] [blame] | 986 | * Must be called with mmap_lock held. It may be released. See below. |
Kirill A. Shutemov | 4bbd4c7 | 2014-06-04 16:08:10 -0700 | [diff] [blame] | 987 | * |
| 988 | * __get_user_pages walks a process's page tables and takes a reference to |
| 989 | * each struct page that each user address corresponds to at a given |
| 990 | * instant. That is, it takes the page that would be accessed if a user |
| 991 | * thread accesses the given user virtual address at that instant. |
| 992 | * |
| 993 | * This does not guarantee that the page exists in the user mappings when |
| 994 | * __get_user_pages returns, and there may even be a completely different |
| 995 | * page there in some cases (eg. if mmapped pagecache has been invalidated |
 | 996 | * and subsequently re-faulted). However, it does guarantee that the page
| 997 | * won't be freed completely. And mostly callers simply care that the page |
| 998 | * contains data that was valid *at some point in time*. Typically, an IO |
| 999 | * or similar operation cannot guarantee anything stronger anyway because |
| 1000 | * locks can't be held over the syscall boundary. |
| 1001 | * |
| 1002 | * If @gup_flags & FOLL_WRITE == 0, the page must not be written to. If |
| 1003 | * the page is written to, set_page_dirty (or set_page_dirty_lock, as |
| 1004 | * appropriate) must be called after the page is finished with, and |
| 1005 | * before put_page is called. |
| 1006 | * |
Michel Lespinasse | c1e8d7c | 2020-06-08 21:33:54 -0700 | [diff] [blame] | 1007 | * If @locked != NULL, *@locked will be set to 0 when mmap_lock is |
Peter Xu | 4f6da93 | 2020-04-01 21:07:58 -0700 | [diff] [blame] | 1008 | * released by an up_read(). That can happen if @gup_flags does not |
| 1009 | * have FOLL_NOWAIT. |
Paul Cassella | 9a95f3c | 2014-08-06 16:07:24 -0700 | [diff] [blame] | 1010 | * |
Peter Xu | 4f6da93 | 2020-04-01 21:07:58 -0700 | [diff] [blame] | 1011 | * A caller using such a combination of @locked and @gup_flags |
Michel Lespinasse | c1e8d7c | 2020-06-08 21:33:54 -0700 | [diff] [blame] | 1012 | * must therefore hold the mmap_lock for reading only, and recognize |
Paul Cassella | 9a95f3c | 2014-08-06 16:07:24 -0700 | [diff] [blame] | 1013 | * when it's been released. Otherwise, it must be held for either |
| 1014 | * reading or writing and will not be released. |
Kirill A. Shutemov | 4bbd4c7 | 2014-06-04 16:08:10 -0700 | [diff] [blame] | 1015 | * |
| 1016 | * In most cases, get_user_pages or get_user_pages_fast should be used |
| 1017 | * instead of __get_user_pages. __get_user_pages should be used only if |
| 1018 | * you need some special @gup_flags. |
| 1019 | */ |
Peter Xu | 64019a2 | 2020-08-11 18:39:01 -0700 | [diff] [blame] | 1020 | static long __get_user_pages(struct mm_struct *mm, |
Kirill A. Shutemov | 4bbd4c7 | 2014-06-04 16:08:10 -0700 | [diff] [blame] | 1021 | unsigned long start, unsigned long nr_pages, |
| 1022 | unsigned int gup_flags, struct page **pages, |
Peter Xu | 4f6da93 | 2020-04-01 21:07:58 -0700 | [diff] [blame] | 1023 | struct vm_area_struct **vmas, int *locked) |
Kirill A. Shutemov | 4bbd4c7 | 2014-06-04 16:08:10 -0700 | [diff] [blame] | 1024 | { |
Keith Busch | df06b37 | 2018-10-26 15:10:28 -0700 | [diff] [blame] | 1025 | long ret = 0, i = 0; |
Kirill A. Shutemov | fa5bb20 | 2014-06-04 16:08:13 -0700 | [diff] [blame] | 1026 | struct vm_area_struct *vma = NULL; |
Keith Busch | df06b37 | 2018-10-26 15:10:28 -0700 | [diff] [blame] | 1027 | struct follow_page_context ctx = { NULL }; |
Kirill A. Shutemov | 4bbd4c7 | 2014-06-04 16:08:10 -0700 | [diff] [blame] | 1028 | |
| 1029 | if (!nr_pages) |
| 1030 | return 0; |
| 1031 | |
Andrey Konovalov | f965259 | 2019-09-25 16:48:34 -0700 | [diff] [blame] | 1032 | start = untagged_addr(start); |
| 1033 | |
John Hubbard | eddb1c2 | 2020-01-30 22:12:54 -0800 | [diff] [blame] | 1034 | VM_BUG_ON(!!pages != !!(gup_flags & (FOLL_GET | FOLL_PIN))); |
Kirill A. Shutemov | 4bbd4c7 | 2014-06-04 16:08:10 -0700 | [diff] [blame] | 1035 | |
| 1036 | /* |
| 1037 | * If FOLL_FORCE is set then do not force a full fault as the hinting |
| 1038 | * fault information is unrelated to the reference behaviour of a task |
 | 1039 | * using the address space.
| 1040 | */ |
| 1041 | if (!(gup_flags & FOLL_FORCE)) |
| 1042 | gup_flags |= FOLL_NUMA; |
| 1043 | |
Kirill A. Shutemov | 4bbd4c7 | 2014-06-04 16:08:10 -0700 | [diff] [blame] | 1044 | do { |
Kirill A. Shutemov | fa5bb20 | 2014-06-04 16:08:13 -0700 | [diff] [blame] | 1045 | struct page *page; |
| 1046 | unsigned int foll_flags = gup_flags; |
| 1047 | unsigned int page_increm; |
Kirill A. Shutemov | 4bbd4c7 | 2014-06-04 16:08:10 -0700 | [diff] [blame] | 1048 | |
Kirill A. Shutemov | fa5bb20 | 2014-06-04 16:08:13 -0700 | [diff] [blame] | 1049 | /* first iteration or cross vma bound */ |
| 1050 | if (!vma || start >= vma->vm_end) { |
| 1051 | vma = find_extend_vma(mm, start); |
| 1052 | if (!vma && in_gate_area(mm, start)) { |
Kirill A. Shutemov | fa5bb20 | 2014-06-04 16:08:13 -0700 | [diff] [blame] | 1053 | ret = get_gate_page(mm, start & PAGE_MASK, |
| 1054 | gup_flags, &vma, |
| 1055 | pages ? &pages[i] : NULL); |
| 1056 | if (ret) |
John Hubbard | 08be37b | 2018-11-30 14:08:53 -0800 | [diff] [blame] | 1057 | goto out; |
Keith Busch | df06b37 | 2018-10-26 15:10:28 -0700 | [diff] [blame] | 1058 | ctx.page_mask = 0; |
Kirill A. Shutemov | fa5bb20 | 2014-06-04 16:08:13 -0700 | [diff] [blame] | 1059 | goto next_page; |
Kirill A. Shutemov | 4bbd4c7 | 2014-06-04 16:08:10 -0700 | [diff] [blame] | 1060 | } |
Kirill A. Shutemov | 4bbd4c7 | 2014-06-04 16:08:10 -0700 | [diff] [blame] | 1061 | |
Jason Gunthorpe | 52650c8 | 2020-12-14 19:05:48 -0800 | [diff] [blame] | 1062 | if (!vma) { |
Keith Busch | df06b37 | 2018-10-26 15:10:28 -0700 | [diff] [blame] | 1063 | ret = -EFAULT; |
| 1064 | goto out; |
| 1065 | } |
Jason Gunthorpe | 52650c8 | 2020-12-14 19:05:48 -0800 | [diff] [blame] | 1066 | ret = check_vma_flags(vma, gup_flags); |
| 1067 | if (ret) |
| 1068 | goto out; |
| 1069 | |
Kirill A. Shutemov | fa5bb20 | 2014-06-04 16:08:13 -0700 | [diff] [blame] | 1070 | if (is_vm_hugetlb_page(vma)) { |
| 1071 | i = follow_hugetlb_page(mm, vma, pages, vmas, |
| 1072 | &start, &nr_pages, i, |
Peter Xu | a308c71 | 2020-08-21 19:49:57 -0400 | [diff] [blame] | 1073 | gup_flags, locked); |
Peter Xu | ad415db | 2020-04-01 21:08:02 -0700 | [diff] [blame] | 1074 | if (locked && *locked == 0) { |
| 1075 | /* |
| 1076 | * We've got a VM_FAULT_RETRY |
Michel Lespinasse | c1e8d7c | 2020-06-08 21:33:54 -0700 | [diff] [blame] | 1077 | * and we've lost mmap_lock. |
Peter Xu | ad415db | 2020-04-01 21:08:02 -0700 | [diff] [blame] | 1078 | * We must stop here. |
| 1079 | */ |
| 1080 | BUG_ON(gup_flags & FOLL_NOWAIT); |
Peter Xu | ad415db | 2020-04-01 21:08:02 -0700 | [diff] [blame] | 1081 | goto out; |
| 1082 | } |
Kirill A. Shutemov | fa5bb20 | 2014-06-04 16:08:13 -0700 | [diff] [blame] | 1083 | continue; |
Kirill A. Shutemov | 4bbd4c7 | 2014-06-04 16:08:10 -0700 | [diff] [blame] | 1084 | } |
Kirill A. Shutemov | fa5bb20 | 2014-06-04 16:08:13 -0700 | [diff] [blame] | 1085 | } |
| 1086 | retry: |
| 1087 | /* |
| 1088 | * If we have a pending SIGKILL, don't keep faulting pages and |
| 1089 | * potentially allocating memory. |
| 1090 | */ |
Davidlohr Bueso | fa45f11 | 2019-01-03 15:28:55 -0800 | [diff] [blame] | 1091 | if (fatal_signal_pending(current)) { |
Michal Hocko | d180870d | 2020-04-20 18:13:55 -0700 | [diff] [blame] | 1092 | ret = -EINTR; |
Keith Busch | df06b37 | 2018-10-26 15:10:28 -0700 | [diff] [blame] | 1093 | goto out; |
| 1094 | } |
Kirill A. Shutemov | fa5bb20 | 2014-06-04 16:08:13 -0700 | [diff] [blame] | 1095 | cond_resched(); |
Keith Busch | df06b37 | 2018-10-26 15:10:28 -0700 | [diff] [blame] | 1096 | |
| 1097 | page = follow_page_mask(vma, start, foll_flags, &ctx); |
Kirill A. Shutemov | fa5bb20 | 2014-06-04 16:08:13 -0700 | [diff] [blame] | 1098 | if (!page) { |
Peter Xu | 64019a2 | 2020-08-11 18:39:01 -0700 | [diff] [blame] | 1099 | ret = faultin_page(vma, start, &foll_flags, locked); |
Kirill A. Shutemov | fa5bb20 | 2014-06-04 16:08:13 -0700 | [diff] [blame] | 1100 | switch (ret) { |
| 1101 | case 0: |
| 1102 | goto retry; |
Keith Busch | df06b37 | 2018-10-26 15:10:28 -0700 | [diff] [blame] | 1103 | case -EBUSY: |
| 1104 | ret = 0; |
Joe Perches | e4a9bc5 | 2020-04-06 20:08:39 -0700 | [diff] [blame] | 1105 | fallthrough; |
Kirill A. Shutemov | fa5bb20 | 2014-06-04 16:08:13 -0700 | [diff] [blame] | 1106 | case -EFAULT: |
| 1107 | case -ENOMEM: |
| 1108 | case -EHWPOISON: |
Keith Busch | df06b37 | 2018-10-26 15:10:28 -0700 | [diff] [blame] | 1109 | goto out; |
Kirill A. Shutemov | fa5bb20 | 2014-06-04 16:08:13 -0700 | [diff] [blame] | 1110 | } |
| 1111 | BUG(); |
Kirill A. Shutemov | 1027e44 | 2015-09-04 15:47:55 -0700 | [diff] [blame] | 1112 | } else if (PTR_ERR(page) == -EEXIST) { |
| 1113 | /* |
| 1114 | * Proper page table entry exists, but no corresponding |
John Hubbard | 6546246 | 2022-03-22 14:39:40 -0700 | [diff] [blame] | 1115 | * struct page. If the caller expects **pages to be |
| 1116 | * filled in, bail out now, because that can't be done |
| 1117 | * for this page. |
Kirill A. Shutemov | 1027e44 | 2015-09-04 15:47:55 -0700 | [diff] [blame] | 1118 | */ |
John Hubbard | 6546246 | 2022-03-22 14:39:40 -0700 | [diff] [blame] | 1119 | if (pages) { |
| 1120 | ret = PTR_ERR(page); |
| 1121 | goto out; |
| 1122 | } |
| 1123 | |
Kirill A. Shutemov | 1027e44 | 2015-09-04 15:47:55 -0700 | [diff] [blame] | 1124 | goto next_page; |
| 1125 | } else if (IS_ERR(page)) { |
Keith Busch | df06b37 | 2018-10-26 15:10:28 -0700 | [diff] [blame] | 1126 | ret = PTR_ERR(page); |
| 1127 | goto out; |
Kirill A. Shutemov | 1027e44 | 2015-09-04 15:47:55 -0700 | [diff] [blame] | 1128 | } |
Kirill A. Shutemov | fa5bb20 | 2014-06-04 16:08:13 -0700 | [diff] [blame] | 1129 | if (pages) { |
| 1130 | pages[i] = page; |
| 1131 | flush_anon_page(vma, page, start); |
| 1132 | flush_dcache_page(page); |
Keith Busch | df06b37 | 2018-10-26 15:10:28 -0700 | [diff] [blame] | 1133 | ctx.page_mask = 0; |
Kirill A. Shutemov | fa5bb20 | 2014-06-04 16:08:13 -0700 | [diff] [blame] | 1134 | } |
Kirill A. Shutemov | 4bbd4c7 | 2014-06-04 16:08:10 -0700 | [diff] [blame] | 1135 | next_page: |
Kirill A. Shutemov | fa5bb20 | 2014-06-04 16:08:13 -0700 | [diff] [blame] | 1136 | if (vmas) { |
| 1137 | vmas[i] = vma; |
Keith Busch | df06b37 | 2018-10-26 15:10:28 -0700 | [diff] [blame] | 1138 | ctx.page_mask = 0; |
Kirill A. Shutemov | fa5bb20 | 2014-06-04 16:08:13 -0700 | [diff] [blame] | 1139 | } |
Keith Busch | df06b37 | 2018-10-26 15:10:28 -0700 | [diff] [blame] | 1140 | page_increm = 1 + (~(start >> PAGE_SHIFT) & ctx.page_mask); |
Kirill A. Shutemov | fa5bb20 | 2014-06-04 16:08:13 -0700 | [diff] [blame] | 1141 | if (page_increm > nr_pages) |
| 1142 | page_increm = nr_pages; |
| 1143 | i += page_increm; |
| 1144 | start += page_increm * PAGE_SIZE; |
| 1145 | nr_pages -= page_increm; |
Kirill A. Shutemov | 4bbd4c7 | 2014-06-04 16:08:10 -0700 | [diff] [blame] | 1146 | } while (nr_pages); |
Keith Busch | df06b37 | 2018-10-26 15:10:28 -0700 | [diff] [blame] | 1147 | out: |
| 1148 | if (ctx.pgmap) |
| 1149 | put_dev_pagemap(ctx.pgmap); |
| 1150 | return i ? i : ret; |
Kirill A. Shutemov | 4bbd4c7 | 2014-06-04 16:08:10 -0700 | [diff] [blame] | 1151 | } |
Kirill A. Shutemov | 4bbd4c7 | 2014-06-04 16:08:10 -0700 | [diff] [blame] | 1152 | |
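/*
 * Illustrative sketch (not part of this file): a caller honoring the rules
 * documented above: pin with FOLL_WRITE, dirty each page after writing it,
 * then drop the references. The function name is hypothetical, and a real
 * caller would normally use get_user_pages_fast() or pin_user_pages()
 * rather than __get_user_pages() itself.
 */
static long example_fill_user_range(unsigned long start, int nr_pages)
{
	struct page **pages;
	long ret;
	int i;

	pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	ret = get_user_pages_fast(start, nr_pages, FOLL_WRITE, pages);
	if (ret < 0)
		goto out;

	for (i = 0; i < ret; i++) {
		/* ... write into pages[i] (e.g. via kmap_local_page()) ... */
		set_page_dirty_lock(pages[i]);
		put_page(pages[i]);
	}
out:
	kfree(pages);
	return ret;
}
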
Tobias Klauser | 771ab43 | 2016-12-12 16:41:53 -0800 | [diff] [blame] | 1153 | static bool vma_permits_fault(struct vm_area_struct *vma, |
| 1154 | unsigned int fault_flags) |
Dave Hansen | d4925e0 | 2016-02-12 13:02:16 -0800 | [diff] [blame] | 1155 | { |
Dave Hansen | 1b2ee12 | 2016-02-12 13:02:21 -0800 | [diff] [blame] | 1156 | bool write = !!(fault_flags & FAULT_FLAG_WRITE); |
| 1157 | bool foreign = !!(fault_flags & FAULT_FLAG_REMOTE); |
Dave Hansen | 33a709b | 2016-02-12 13:02:19 -0800 | [diff] [blame] | 1158 | vm_flags_t vm_flags = write ? VM_WRITE : VM_READ; |
Dave Hansen | d4925e0 | 2016-02-12 13:02:16 -0800 | [diff] [blame] | 1159 | |
| 1160 | if (!(vm_flags & vma->vm_flags)) |
| 1161 | return false; |
| 1162 | |
Dave Hansen | 33a709b | 2016-02-12 13:02:19 -0800 | [diff] [blame] | 1163 | /* |
| 1164 | * The architecture might have a hardware protection |
Dave Hansen | 1b2ee12 | 2016-02-12 13:02:21 -0800 | [diff] [blame] | 1165 | * mechanism other than read/write that can deny access. |
Dave Hansen | d61172b | 2016-02-12 13:02:24 -0800 | [diff] [blame] | 1166 | * |
| 1167 | * gup always represents data access, not instruction |
| 1168 | * fetches, so execute=false here: |
Dave Hansen | 33a709b | 2016-02-12 13:02:19 -0800 | [diff] [blame] | 1169 | */ |
Dave Hansen | d61172b | 2016-02-12 13:02:24 -0800 | [diff] [blame] | 1170 | if (!arch_vma_access_permitted(vma, write, false, foreign)) |
Dave Hansen | 33a709b | 2016-02-12 13:02:19 -0800 | [diff] [blame] | 1171 | return false; |
| 1172 | |
Dave Hansen | d4925e0 | 2016-02-12 13:02:16 -0800 | [diff] [blame] | 1173 | return true; |
| 1174 | } |
| 1175 | |
Souptick Joarder | adc8cb4 | 2020-06-01 21:48:24 -0700 | [diff] [blame] | 1176 | /** |
Kirill A. Shutemov | 4bbd4c7 | 2014-06-04 16:08:10 -0700 | [diff] [blame] | 1177 | * fixup_user_fault() - manually resolve a user page fault |
Kirill A. Shutemov | 4bbd4c7 | 2014-06-04 16:08:10 -0700 | [diff] [blame] | 1178 | * @mm: mm_struct of target mm |
| 1179 | * @address: user address |
| 1180 | * @fault_flags:flags to pass down to handle_mm_fault() |
Michel Lespinasse | c1e8d7c | 2020-06-08 21:33:54 -0700 | [diff] [blame] | 1181 | * @unlocked: did we unlock the mmap_lock while retrying, may be NULL if the
Miles Chen | 548b6a1 | 2020-06-01 21:48:33 -0700 | [diff] [blame] | 1182 | * caller does not allow retry. If NULL, the caller must guarantee
| 1183 | * that fault_flags does not contain FAULT_FLAG_ALLOW_RETRY. |
Kirill A. Shutemov | 4bbd4c7 | 2014-06-04 16:08:10 -0700 | [diff] [blame] | 1184 | * |
| 1185 | * This is meant to be called in the specific scenario where for locking reasons |
| 1186 | * we try to access user memory in atomic context (within a pagefault_disable() |
| 1187 | * section), this returns -EFAULT, and we want to resolve the user fault before |
| 1188 | * trying again. |
| 1189 | * |
| 1190 | * Typically this is meant to be used by the futex code. |
| 1191 | * |
| 1192 | * The main difference with get_user_pages() is that this function will |
 | 1193 | * unconditionally call handle_mm_fault(), which will in turn perform all the
| 1194 | * necessary SW fixup of the dirty and young bits in the PTE, while |
Dominik Dingel | 4a9e1cd | 2016-01-15 16:57:04 -0800 | [diff] [blame] | 1195 | * get_user_pages() only guarantees to update these in the struct page. |
Kirill A. Shutemov | 4bbd4c7 | 2014-06-04 16:08:10 -0700 | [diff] [blame] | 1196 | * |
| 1197 | * This is important for some architectures where those bits also gate the |
| 1198 | * access permission to the page because they are maintained in software. On |
| 1199 | * such architectures, gup() will not be enough to make a subsequent access |
| 1200 | * succeed. |
| 1201 | * |
Michel Lespinasse | c1e8d7c | 2020-06-08 21:33:54 -0700 | [diff] [blame] | 1202 | * This function will not return with an unlocked mmap_lock. So it does not
 | 1203 | * have the same semantics w.r.t. the @mm->mmap_lock as filemap_fault() does.
Kirill A. Shutemov | 4bbd4c7 | 2014-06-04 16:08:10 -0700 | [diff] [blame] | 1204 | */ |
Peter Xu | 64019a2 | 2020-08-11 18:39:01 -0700 | [diff] [blame] | 1205 | int fixup_user_fault(struct mm_struct *mm, |
Dominik Dingel | 4a9e1cd | 2016-01-15 16:57:04 -0800 | [diff] [blame] | 1206 | unsigned long address, unsigned int fault_flags, |
| 1207 | bool *unlocked) |
Kirill A. Shutemov | 4bbd4c7 | 2014-06-04 16:08:10 -0700 | [diff] [blame] | 1208 | { |
| 1209 | struct vm_area_struct *vma; |
Miaohe Lin | 8fed2f3 | 2021-09-02 14:53:33 -0700 | [diff] [blame] | 1210 | vm_fault_t ret; |
Kirill A. Shutemov | 4bbd4c7 | 2014-06-04 16:08:10 -0700 | [diff] [blame] | 1211 | |
Andrey Konovalov | f965259 | 2019-09-25 16:48:34 -0700 | [diff] [blame] | 1212 | address = untagged_addr(address); |
| 1213 | |
Dominik Dingel | 4a9e1cd | 2016-01-15 16:57:04 -0800 | [diff] [blame] | 1214 | if (unlocked) |
Peter Xu | 71335f3 | 2020-04-01 21:08:53 -0700 | [diff] [blame] | 1215 | fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; |
Dominik Dingel | 4a9e1cd | 2016-01-15 16:57:04 -0800 | [diff] [blame] | 1216 | |
| 1217 | retry: |
Kirill A. Shutemov | 4bbd4c7 | 2014-06-04 16:08:10 -0700 | [diff] [blame] | 1218 | vma = find_extend_vma(mm, address); |
| 1219 | if (!vma || address < vma->vm_start) |
| 1220 | return -EFAULT; |
| 1221 | |
Dave Hansen | d4925e0 | 2016-02-12 13:02:16 -0800 | [diff] [blame] | 1222 | if (!vma_permits_fault(vma, fault_flags)) |
Kirill A. Shutemov | 4bbd4c7 | 2014-06-04 16:08:10 -0700 | [diff] [blame] | 1223 | return -EFAULT; |
| 1224 | |
Peter Xu | 475f4dfc | 2020-05-13 17:50:41 -0700 | [diff] [blame] | 1225 | if ((fault_flags & FAULT_FLAG_KILLABLE) && |
| 1226 | fatal_signal_pending(current)) |
| 1227 | return -EINTR; |
| 1228 | |
Peter Xu | bce617e | 2020-08-11 18:37:44 -0700 | [diff] [blame] | 1229 | ret = handle_mm_fault(vma, address, fault_flags, NULL); |
Kirill A. Shutemov | 4bbd4c7 | 2014-06-04 16:08:10 -0700 | [diff] [blame] | 1230 | if (ret & VM_FAULT_ERROR) { |
James Morse | 9a291a7 | 2017-06-02 14:46:46 -0700 | [diff] [blame] | 1231 | int err = vm_fault_to_errno(ret, 0); |
| 1232 | |
| 1233 | if (err) |
| 1234 | return err; |
Kirill A. Shutemov | 4bbd4c7 | 2014-06-04 16:08:10 -0700 | [diff] [blame] | 1235 | BUG(); |
| 1236 | } |
Dominik Dingel | 4a9e1cd | 2016-01-15 16:57:04 -0800 | [diff] [blame] | 1237 | |
| 1238 | if (ret & VM_FAULT_RETRY) { |
Michel Lespinasse | d8ed45c | 2020-06-08 21:33:25 -0700 | [diff] [blame] | 1239 | mmap_read_lock(mm); |
Peter Xu | 475f4dfc | 2020-05-13 17:50:41 -0700 | [diff] [blame] | 1240 | *unlocked = true; |
| 1241 | fault_flags |= FAULT_FLAG_TRIED; |
| 1242 | goto retry; |
Dominik Dingel | 4a9e1cd | 2016-01-15 16:57:04 -0800 | [diff] [blame] | 1243 | } |
| 1244 | |
Kirill A. Shutemov | 4bbd4c7 | 2014-06-04 16:08:10 -0700 | [diff] [blame] | 1245 | return 0; |
| 1246 | } |
Paolo Bonzini | add6a0c | 2016-06-07 17:51:18 +0200 | [diff] [blame] | 1247 | EXPORT_SYMBOL_GPL(fixup_user_fault); |
Kirill A. Shutemov | 4bbd4c7 | 2014-06-04 16:08:10 -0700 | [diff] [blame] | 1248 | |
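/*
 * Illustrative sketch (not part of this file): the futex-style pattern the
 * comment above describes. An access under pagefault_disable() fails with
 * -EFAULT, the fault is resolved here, and the caller then retries the
 * access. Loosely modeled on the futex code; the name is hypothetical.
 */
static int example_fault_in_user_writeable(u32 __user *uaddr)
{
	struct mm_struct *mm = current->mm;
	bool unlocked = false;
	int ret;

	mmap_read_lock(mm);
	ret = fixup_user_fault(mm, (unsigned long)uaddr,
			       FAULT_FLAG_WRITE, &unlocked);
	mmap_read_unlock(mm);

	return ret;
}
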
Michal Hocko | 2d3a36a | 2020-06-03 16:03:25 -0700 | [diff] [blame] | 1249 | /* |
 | 1250 | * Please note that this function, unlike __get_user_pages(), will not
 | 1251 | * return 0 for nr_pages > 0 without FOLL_NOWAIT.
| 1252 | */ |
Peter Xu | 64019a2 | 2020-08-11 18:39:01 -0700 | [diff] [blame] | 1253 | static __always_inline long __get_user_pages_locked(struct mm_struct *mm, |
Andrea Arcangeli | f0818f4 | 2015-02-11 15:27:17 -0800 | [diff] [blame] | 1254 | unsigned long start, |
| 1255 | unsigned long nr_pages, |
Andrea Arcangeli | f0818f4 | 2015-02-11 15:27:17 -0800 | [diff] [blame] | 1256 | struct page **pages, |
| 1257 | struct vm_area_struct **vmas, |
Al Viro | e716712 | 2017-11-19 11:32:05 -0500 | [diff] [blame] | 1258 | int *locked, |
Andrea Arcangeli | 0fd71a5 | 2015-02-11 15:27:20 -0800 | [diff] [blame] | 1259 | unsigned int flags) |
Andrea Arcangeli | f0818f4 | 2015-02-11 15:27:17 -0800 | [diff] [blame] | 1260 | { |
Andrea Arcangeli | f0818f4 | 2015-02-11 15:27:17 -0800 | [diff] [blame] | 1261 | long ret, pages_done; |
| 1262 | bool lock_dropped; |
| 1263 | |
| 1264 | if (locked) { |
| 1265 | /* if VM_FAULT_RETRY can be returned, vmas become invalid */ |
| 1266 | BUG_ON(vmas); |
| 1267 | /* check caller initialized locked */ |
| 1268 | BUG_ON(*locked != 1); |
| 1269 | } |
| 1270 | |
Andrea Arcangeli | a458b76 | 2021-06-28 19:36:40 -0700 | [diff] [blame] | 1271 | if (flags & FOLL_PIN) |
| 1272 | mm_set_has_pinned_flag(&mm->flags); |
Peter Xu | 008cfe4 | 2020-09-25 18:25:57 -0400 | [diff] [blame] | 1273 | |
John Hubbard | eddb1c2 | 2020-01-30 22:12:54 -0800 | [diff] [blame] | 1274 | /* |
| 1275 | * FOLL_PIN and FOLL_GET are mutually exclusive. Traditional behavior |
| 1276 | * is to set FOLL_GET if the caller wants pages[] filled in (but has |
| 1277 | * carelessly failed to specify FOLL_GET), so keep doing that, but only |
| 1278 | * for FOLL_GET, not for the newer FOLL_PIN. |
| 1279 | * |
| 1280 | * FOLL_PIN always expects pages to be non-null, but no need to assert |
| 1281 | * that here, as any failures will be obvious enough. |
| 1282 | */ |
| 1283 | if (pages && !(flags & FOLL_PIN)) |
Andrea Arcangeli | f0818f4 | 2015-02-11 15:27:17 -0800 | [diff] [blame] | 1284 | flags |= FOLL_GET; |
Andrea Arcangeli | f0818f4 | 2015-02-11 15:27:17 -0800 | [diff] [blame] | 1285 | |
| 1286 | pages_done = 0; |
| 1287 | lock_dropped = false; |
| 1288 | for (;;) { |
Peter Xu | 64019a2 | 2020-08-11 18:39:01 -0700 | [diff] [blame] | 1289 | ret = __get_user_pages(mm, start, nr_pages, flags, pages, |
Andrea Arcangeli | f0818f4 | 2015-02-11 15:27:17 -0800 | [diff] [blame] | 1290 | vmas, locked); |
| 1291 | if (!locked) |
| 1292 | /* VM_FAULT_RETRY couldn't trigger, bypass */ |
| 1293 | return ret; |
| 1294 | |
| 1295 | /* VM_FAULT_RETRY cannot return errors */ |
| 1296 | if (!*locked) { |
| 1297 | BUG_ON(ret < 0); |
| 1298 | BUG_ON(ret >= nr_pages); |
| 1299 | } |
| 1300 | |
Andrea Arcangeli | f0818f4 | 2015-02-11 15:27:17 -0800 | [diff] [blame] | 1301 | if (ret > 0) { |
| 1302 | nr_pages -= ret; |
| 1303 | pages_done += ret; |
| 1304 | if (!nr_pages) |
| 1305 | break; |
| 1306 | } |
| 1307 | if (*locked) { |
Andrea Arcangeli | 96312e6 | 2018-03-09 15:51:06 -0800 | [diff] [blame] | 1308 | /* |
| 1309 | * VM_FAULT_RETRY didn't trigger or it was a |
| 1310 | * FOLL_NOWAIT. |
| 1311 | */ |
Andrea Arcangeli | f0818f4 | 2015-02-11 15:27:17 -0800 | [diff] [blame] | 1312 | if (!pages_done) |
| 1313 | pages_done = ret; |
| 1314 | break; |
| 1315 | } |
Mike Rapoport | df17277 | 2019-05-31 22:30:33 -0700 | [diff] [blame] | 1316 | /* |
| 1317 | * VM_FAULT_RETRY triggered, so seek to the faulting offset. |
| 1318 | * For the prefault case (!pages) we only update counts. |
| 1319 | */ |
| 1320 | if (likely(pages)) |
| 1321 | pages += ret; |
Andrea Arcangeli | f0818f4 | 2015-02-11 15:27:17 -0800 | [diff] [blame] | 1322 | start += ret << PAGE_SHIFT; |
Peter Xu | 4426e94 | 2020-04-01 21:08:49 -0700 | [diff] [blame] | 1323 | lock_dropped = true; |
Andrea Arcangeli | f0818f4 | 2015-02-11 15:27:17 -0800 | [diff] [blame] | 1324 | |
Peter Xu | 4426e94 | 2020-04-01 21:08:49 -0700 | [diff] [blame] | 1325 | retry: |
Andrea Arcangeli | f0818f4 | 2015-02-11 15:27:17 -0800 | [diff] [blame] | 1326 | /* |
| 1327 | * Repeat on the address that fired VM_FAULT_RETRY |
Peter Xu | 4426e94 | 2020-04-01 21:08:49 -0700 | [diff] [blame] | 1328 | * with both FAULT_FLAG_ALLOW_RETRY and |
| 1329 | * FAULT_FLAG_TRIED. Note that GUP can be interrupted |
 | 1330 | * by fatal signals, so we need to check for them before we
 | 1331 | * start trying again, otherwise it can loop forever.
Andrea Arcangeli | f0818f4 | 2015-02-11 15:27:17 -0800 | [diff] [blame] | 1332 | */ |
Peter Xu | 4426e94 | 2020-04-01 21:08:49 -0700 | [diff] [blame] | 1333 | |
Hillf Danton | ae46d2a | 2020-04-08 11:59:24 -0400 | [diff] [blame] | 1334 | if (fatal_signal_pending(current)) { |
| 1335 | if (!pages_done) |
| 1336 | pages_done = -EINTR; |
Peter Xu | 4426e94 | 2020-04-01 21:08:49 -0700 | [diff] [blame] | 1337 | break; |
Hillf Danton | ae46d2a | 2020-04-08 11:59:24 -0400 | [diff] [blame] | 1338 | } |
Peter Xu | 4426e94 | 2020-04-01 21:08:49 -0700 | [diff] [blame] | 1339 | |
Michel Lespinasse | d8ed45c | 2020-06-08 21:33:25 -0700 | [diff] [blame] | 1340 | ret = mmap_read_lock_killable(mm); |
Peter Xu | 71335f3 | 2020-04-01 21:08:53 -0700 | [diff] [blame] | 1341 | if (ret) { |
| 1342 | BUG_ON(ret > 0); |
| 1343 | if (!pages_done) |
| 1344 | pages_done = ret; |
| 1345 | break; |
| 1346 | } |
Peter Xu | 4426e94 | 2020-04-01 21:08:49 -0700 | [diff] [blame] | 1347 | |
Peter Xu | c7b6a56 | 2020-04-07 21:40:10 -0400 | [diff] [blame] | 1348 | *locked = 1; |
Peter Xu | 64019a2 | 2020-08-11 18:39:01 -0700 | [diff] [blame] | 1349 | ret = __get_user_pages(mm, start, 1, flags | FOLL_TRIED, |
Peter Xu | 4426e94 | 2020-04-01 21:08:49 -0700 | [diff] [blame] | 1350 | pages, NULL, locked); |
| 1351 | if (!*locked) { |
| 1352 | /* Continue to retry until we succeeded */ |
| 1353 | BUG_ON(ret != 0); |
| 1354 | goto retry; |
| 1355 | } |
Andrea Arcangeli | f0818f4 | 2015-02-11 15:27:17 -0800 | [diff] [blame] | 1356 | if (ret != 1) { |
| 1357 | BUG_ON(ret > 1); |
| 1358 | if (!pages_done) |
| 1359 | pages_done = ret; |
| 1360 | break; |
| 1361 | } |
| 1362 | nr_pages--; |
| 1363 | pages_done++; |
| 1364 | if (!nr_pages) |
| 1365 | break; |
Mike Rapoport | df17277 | 2019-05-31 22:30:33 -0700 | [diff] [blame] | 1366 | if (likely(pages)) |
| 1367 | pages++; |
Andrea Arcangeli | f0818f4 | 2015-02-11 15:27:17 -0800 | [diff] [blame] | 1368 | start += PAGE_SIZE; |
| 1369 | } |
Al Viro | e716712 | 2017-11-19 11:32:05 -0500 | [diff] [blame] | 1370 | if (lock_dropped && *locked) { |
Andrea Arcangeli | f0818f4 | 2015-02-11 15:27:17 -0800 | [diff] [blame] | 1371 | /* |
| 1372 | * We must let the caller know we temporarily dropped the lock |
| 1373 | * and so the critical section protected by it was lost. |
| 1374 | */ |
Michel Lespinasse | d8ed45c | 2020-06-08 21:33:25 -0700 | [diff] [blame] | 1375 | mmap_read_unlock(mm); |
Andrea Arcangeli | f0818f4 | 2015-02-11 15:27:17 -0800 | [diff] [blame] | 1376 | *locked = 0; |
| 1377 | } |
| 1378 | return pages_done; |
| 1379 | } |
| 1380 | |
Christoph Hellwig | d3649f6 | 2019-07-11 20:57:18 -0700 | [diff] [blame] | 1381 | /** |
| 1382 | * populate_vma_page_range() - populate a range of pages in the vma. |
| 1383 | * @vma: target vma |
| 1384 | * @start: start address |
| 1385 | * @end: end address |
Michel Lespinasse | c1e8d7c | 2020-06-08 21:33:54 -0700 | [diff] [blame] | 1386 | * @locked: whether the mmap_lock is still held |
Christoph Hellwig | d3649f6 | 2019-07-11 20:57:18 -0700 | [diff] [blame] | 1387 | * |
| 1388 | * This takes care of mlocking the pages too if VM_LOCKED is set. |
| 1389 | * |
Tang Yizhou | 0a36f7f | 2020-08-06 23:20:01 -0700 | [diff] [blame] | 1390 | * Return either the number of pages pinned in the vma, or a negative error
| 1391 | * code on error. |
Christoph Hellwig | d3649f6 | 2019-07-11 20:57:18 -0700 | [diff] [blame] | 1392 | * |
Michel Lespinasse | c1e8d7c | 2020-06-08 21:33:54 -0700 | [diff] [blame] | 1393 | * vma->vm_mm->mmap_lock must be held. |
Christoph Hellwig | d3649f6 | 2019-07-11 20:57:18 -0700 | [diff] [blame] | 1394 | * |
Peter Xu | 4f6da93 | 2020-04-01 21:07:58 -0700 | [diff] [blame] | 1395 | * If @locked is NULL, it may be held for read or write and will |
Christoph Hellwig | d3649f6 | 2019-07-11 20:57:18 -0700 | [diff] [blame] | 1396 | * be unperturbed. |
| 1397 | * |
Peter Xu | 4f6da93 | 2020-04-01 21:07:58 -0700 | [diff] [blame] | 1398 | * If @locked is non-NULL, it must be held for read only and may be
| 1399 | * released. If it's released, *@locked will be set to 0. |
Christoph Hellwig | d3649f6 | 2019-07-11 20:57:18 -0700 | [diff] [blame] | 1400 | */ |
| 1401 | long populate_vma_page_range(struct vm_area_struct *vma, |
Peter Xu | 4f6da93 | 2020-04-01 21:07:58 -0700 | [diff] [blame] | 1402 | unsigned long start, unsigned long end, int *locked) |
Christoph Hellwig | d3649f6 | 2019-07-11 20:57:18 -0700 | [diff] [blame] | 1403 | { |
| 1404 | struct mm_struct *mm = vma->vm_mm; |
| 1405 | unsigned long nr_pages = (end - start) / PAGE_SIZE; |
| 1406 | int gup_flags; |
Hugh Dickins | ece369c | 2022-04-01 11:28:27 -0700 | [diff] [blame] | 1407 | long ret; |
Christoph Hellwig | d3649f6 | 2019-07-11 20:57:18 -0700 | [diff] [blame] | 1408 | |
Miaohe Lin | be51eb1 | 2021-09-02 14:53:45 -0700 | [diff] [blame] | 1409 | VM_BUG_ON(!PAGE_ALIGNED(start)); |
| 1410 | VM_BUG_ON(!PAGE_ALIGNED(end)); |
Christoph Hellwig | d3649f6 | 2019-07-11 20:57:18 -0700 | [diff] [blame] | 1411 | VM_BUG_ON_VMA(start < vma->vm_start, vma); |
| 1412 | VM_BUG_ON_VMA(end > vma->vm_end, vma); |
Michel Lespinasse | 42fc541 | 2020-06-08 21:33:44 -0700 | [diff] [blame] | 1413 | mmap_assert_locked(mm); |
Christoph Hellwig | d3649f6 | 2019-07-11 20:57:18 -0700 | [diff] [blame] | 1414 | |
Hugh Dickins | b67bf49 | 2022-02-14 18:21:52 -0800 | [diff] [blame] | 1415 | /* |
| 1416 | * Rightly or wrongly, the VM_LOCKONFAULT case has never used |
| 1417 | * faultin_page() to break COW, so it has no work to do here. |
| 1418 | */ |
Christoph Hellwig | d3649f6 | 2019-07-11 20:57:18 -0700 | [diff] [blame] | 1419 | if (vma->vm_flags & VM_LOCKONFAULT) |
Hugh Dickins | b67bf49 | 2022-02-14 18:21:52 -0800 | [diff] [blame] | 1420 | return nr_pages; |
| 1421 | |
| 1422 | gup_flags = FOLL_TOUCH; |
Christoph Hellwig | d3649f6 | 2019-07-11 20:57:18 -0700 | [diff] [blame] | 1423 | /* |
| 1424 | * We want to touch writable mappings with a write fault in order |
| 1425 | * to break COW, except for shared mappings because these don't COW |
| 1426 | * and we would not want to dirty them for nothing. |
| 1427 | */ |
| 1428 | if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE) |
| 1429 | gup_flags |= FOLL_WRITE; |
| 1430 | |
| 1431 | /* |
| 1432 | * We want mlock to succeed for regions that have any permissions |
| 1433 | * other than PROT_NONE. |
| 1434 | */ |
Anshuman Khandual | 3122e80e | 2020-04-06 20:03:47 -0700 | [diff] [blame] | 1435 | if (vma_is_accessible(vma)) |
Christoph Hellwig | d3649f6 | 2019-07-11 20:57:18 -0700 | [diff] [blame] | 1436 | gup_flags |= FOLL_FORCE; |
| 1437 | |
| 1438 | /* |
| 1439 | * We made sure addr is within a VMA, so the following will |
| 1440 | * not result in a stack expansion that recurses back here. |
| 1441 | */ |
Hugh Dickins | ece369c | 2022-04-01 11:28:27 -0700 | [diff] [blame] | 1442 | ret = __get_user_pages(mm, start, nr_pages, gup_flags, |
Peter Xu | 4f6da93 | 2020-04-01 21:07:58 -0700 | [diff] [blame] | 1443 | NULL, NULL, locked); |
Hugh Dickins | ece369c | 2022-04-01 11:28:27 -0700 | [diff] [blame] | 1444 | lru_add_drain(); |
| 1445 | return ret; |
Christoph Hellwig | d3649f6 | 2019-07-11 20:57:18 -0700 | [diff] [blame] | 1446 | } |
| 1447 | |
| 1448 | /* |
David Hildenbrand | 4ca9b385 | 2021-06-30 18:52:28 -0700 | [diff] [blame] | 1449 | * faultin_vma_page_range() - populate (prefault) page tables inside the |
| 1450 | * given VMA range readable/writable |
| 1451 | * |
| 1452 | * This takes care of mlocking the pages, too, if VM_LOCKED is set. |
| 1453 | * |
| 1454 | * @vma: target vma |
| 1455 | * @start: start address |
| 1456 | * @end: end address |
| 1457 | * @write: whether to prefault readable or writable |
| 1458 | * @locked: whether the mmap_lock is still held |
| 1459 | * |
| 1460 | * Returns either number of processed pages in the vma, or a negative error |
| 1461 | * code on error (see __get_user_pages()). |
| 1462 | * |
| 1463 | * vma->vm_mm->mmap_lock must be held. The range must be page-aligned and |
| 1464 | * covered by the VMA. |
| 1465 | * |
| 1466 | * If @locked is NULL, it may be held for read or write and will be unperturbed. |
| 1467 | * |
 | 1468 | * If @locked is non-NULL, it must be held for read only and may be released. If
| 1469 | * it's released, *@locked will be set to 0. |
| 1470 | */ |
| 1471 | long faultin_vma_page_range(struct vm_area_struct *vma, unsigned long start, |
| 1472 | unsigned long end, bool write, int *locked) |
| 1473 | { |
| 1474 | struct mm_struct *mm = vma->vm_mm; |
| 1475 | unsigned long nr_pages = (end - start) / PAGE_SIZE; |
| 1476 | int gup_flags; |
Hugh Dickins | ece369c | 2022-04-01 11:28:27 -0700 | [diff] [blame] | 1477 | long ret; |
David Hildenbrand | 4ca9b385 | 2021-06-30 18:52:28 -0700 | [diff] [blame] | 1478 | |
| 1479 | VM_BUG_ON(!PAGE_ALIGNED(start)); |
| 1480 | VM_BUG_ON(!PAGE_ALIGNED(end)); |
| 1481 | VM_BUG_ON_VMA(start < vma->vm_start, vma); |
| 1482 | VM_BUG_ON_VMA(end > vma->vm_end, vma); |
| 1483 | mmap_assert_locked(mm); |
| 1484 | |
| 1485 | /* |
| 1486 | * FOLL_TOUCH: Mark page accessed and thereby young; will also mark |
| 1487 | * the page dirty with FOLL_WRITE -- which doesn't make a |
| 1488 | * difference with !FOLL_FORCE, because the page is writable |
| 1489 | * in the page table. |
| 1490 | * FOLL_HWPOISON: Return -EHWPOISON instead of -EFAULT when we hit |
| 1491 | * a poisoned page. |
David Hildenbrand | 4ca9b385 | 2021-06-30 18:52:28 -0700 | [diff] [blame] | 1492 | * !FOLL_FORCE: Require proper access permissions. |
| 1493 | */ |
Hugh Dickins | b67bf49 | 2022-02-14 18:21:52 -0800 | [diff] [blame] | 1494 | gup_flags = FOLL_TOUCH | FOLL_HWPOISON; |
David Hildenbrand | 4ca9b385 | 2021-06-30 18:52:28 -0700 | [diff] [blame] | 1495 | if (write) |
| 1496 | gup_flags |= FOLL_WRITE; |
| 1497 | |
| 1498 | /* |
David Hildenbrand | eb2faa5 | 2021-08-13 16:54:37 -0700 | [diff] [blame] | 1499 | * We want to report -EINVAL instead of -EFAULT for any permission |
| 1500 | * problems or incompatible mappings. |
David Hildenbrand | 4ca9b385 | 2021-06-30 18:52:28 -0700 | [diff] [blame] | 1501 | */ |
David Hildenbrand | eb2faa5 | 2021-08-13 16:54:37 -0700 | [diff] [blame] | 1502 | if (check_vma_flags(vma, gup_flags)) |
| 1503 | return -EINVAL; |
| 1504 | |
Hugh Dickins | ece369c | 2022-04-01 11:28:27 -0700 | [diff] [blame] | 1505 | ret = __get_user_pages(mm, start, nr_pages, gup_flags, |
David Hildenbrand | 4ca9b385 | 2021-06-30 18:52:28 -0700 | [diff] [blame] | 1506 | NULL, NULL, locked); |
Hugh Dickins | ece369c | 2022-04-01 11:28:27 -0700 | [diff] [blame] | 1507 | lru_add_drain(); |
| 1508 | return ret; |
David Hildenbrand | 4ca9b385 | 2021-06-30 18:52:28 -0700 | [diff] [blame] | 1509 | } |
| 1510 | |
| 1511 | /* |
Christoph Hellwig | d3649f6 | 2019-07-11 20:57:18 -0700 | [diff] [blame] | 1512 | * __mm_populate - populate and/or mlock pages within a range of address space. |
| 1513 | * |
| 1514 | * This is used to implement mlock() and the MAP_POPULATE / MAP_LOCKED mmap |
| 1515 | * flags. VMAs must be already marked with the desired vm_flags, and |
Michel Lespinasse | c1e8d7c | 2020-06-08 21:33:54 -0700 | [diff] [blame] | 1516 | * mmap_lock must not be held. |
Christoph Hellwig | d3649f6 | 2019-07-11 20:57:18 -0700 | [diff] [blame] | 1517 | */ |
| 1518 | int __mm_populate(unsigned long start, unsigned long len, int ignore_errors) |
| 1519 | { |
| 1520 | struct mm_struct *mm = current->mm; |
| 1521 | unsigned long end, nstart, nend; |
| 1522 | struct vm_area_struct *vma = NULL; |
| 1523 | int locked = 0; |
| 1524 | long ret = 0; |
| 1525 | |
| 1526 | end = start + len; |
| 1527 | |
| 1528 | for (nstart = start; nstart < end; nstart = nend) { |
| 1529 | /* |
| 1530 | * We want to fault in pages for [nstart; end) address range. |
| 1531 | * Find first corresponding VMA. |
| 1532 | */ |
| 1533 | if (!locked) { |
| 1534 | locked = 1; |
Michel Lespinasse | d8ed45c | 2020-06-08 21:33:25 -0700 | [diff] [blame] | 1535 | mmap_read_lock(mm); |
Christoph Hellwig | d3649f6 | 2019-07-11 20:57:18 -0700 | [diff] [blame] | 1536 | vma = find_vma(mm, nstart); |
| 1537 | } else if (nstart >= vma->vm_end) |
| 1538 | vma = vma->vm_next; |
| 1539 | if (!vma || vma->vm_start >= end) |
| 1540 | break; |
| 1541 | /* |
| 1542 | * Set [nstart; nend) to intersection of desired address |
| 1543 | * range with the first VMA. Also, skip undesirable VMA types. |
| 1544 | */ |
| 1545 | nend = min(end, vma->vm_end); |
| 1546 | if (vma->vm_flags & (VM_IO | VM_PFNMAP)) |
| 1547 | continue; |
| 1548 | if (nstart < vma->vm_start) |
| 1549 | nstart = vma->vm_start; |
| 1550 | /* |
| 1551 | * Now fault in a range of pages. populate_vma_page_range() |
| 1552 | * double checks the vma flags, so that it won't mlock pages |
| 1553 | * if the vma was already munlocked. |
| 1554 | */ |
| 1555 | ret = populate_vma_page_range(vma, nstart, nend, &locked); |
| 1556 | if (ret < 0) { |
| 1557 | if (ignore_errors) { |
| 1558 | ret = 0; |
| 1559 | continue; /* continue at next VMA */ |
| 1560 | } |
| 1561 | break; |
| 1562 | } |
| 1563 | nend = nstart + ret * PAGE_SIZE; |
| 1564 | ret = 0; |
| 1565 | } |
| 1566 | if (locked) |
Michel Lespinasse | d8ed45c | 2020-06-08 21:33:25 -0700 | [diff] [blame] | 1567 | mmap_read_unlock(mm); |
Christoph Hellwig | d3649f6 | 2019-07-11 20:57:18 -0700 | [diff] [blame] | 1568 | return ret; /* 0 or negative error code */ |
| 1569 | } |
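
/*
 * For reference: callers such as mlock() and mmap() with MAP_POPULATE reach
 * __mm_populate() through the mm_populate() wrapper in <linux/mm.h>, which
 * is roughly the sketch below. Errors are ignored there because the mapping
 * itself has already succeeded; population is best-effort.
 */
static inline void example_mm_populate(unsigned long addr, unsigned long len)
{
	/* Ignore errors: partial population is not fatal to the caller. */
	(void)__mm_populate(addr, len, 1);
}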
Christoph Hellwig | 050a9ad | 2019-07-11 20:57:21 -0700 | [diff] [blame] | 1570 | #else /* CONFIG_MMU */ |
Peter Xu | 64019a2 | 2020-08-11 18:39:01 -0700 | [diff] [blame] | 1571 | static long __get_user_pages_locked(struct mm_struct *mm, unsigned long start, |
Christoph Hellwig | 050a9ad | 2019-07-11 20:57:21 -0700 | [diff] [blame] | 1572 | unsigned long nr_pages, struct page **pages, |
| 1573 | struct vm_area_struct **vmas, int *locked, |
| 1574 | unsigned int foll_flags) |
| 1575 | { |
| 1576 | struct vm_area_struct *vma; |
| 1577 | unsigned long vm_flags; |
Pavel Tatashin | 24dc20c | 2021-05-04 18:39:15 -0700 | [diff] [blame] | 1578 | long i; |
Christoph Hellwig | 050a9ad | 2019-07-11 20:57:21 -0700 | [diff] [blame] | 1579 | |
| 1580 | /* calculate required read or write permissions. |
| 1581 | * If FOLL_FORCE is set, we only require the "MAY" flags. |
| 1582 | */ |
| 1583 | vm_flags = (foll_flags & FOLL_WRITE) ? |
| 1584 | (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD); |
| 1585 | vm_flags &= (foll_flags & FOLL_FORCE) ? |
| 1586 | (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE); |
| 1587 | |
| 1588 | for (i = 0; i < nr_pages; i++) { |
| 1589 | vma = find_vma(mm, start); |
| 1590 | if (!vma) |
| 1591 | goto finish_or_fault; |
| 1592 | |
| 1593 | /* protect what we can, including chardevs */ |
| 1594 | if ((vma->vm_flags & (VM_IO | VM_PFNMAP)) || |
| 1595 | !(vm_flags & vma->vm_flags)) |
| 1596 | goto finish_or_fault; |
| 1597 | |
| 1598 | if (pages) { |
| 1599 | pages[i] = virt_to_page(start); |
| 1600 | if (pages[i]) |
| 1601 | get_page(pages[i]); |
| 1602 | } |
| 1603 | if (vmas) |
| 1604 | vmas[i] = vma; |
| 1605 | start = (start + PAGE_SIZE) & PAGE_MASK; |
| 1606 | } |
| 1607 | |
| 1608 | return i; |
| 1609 | |
| 1610 | finish_or_fault: |
| 1611 | return i ? : -EFAULT; |
| 1612 | } |
| 1613 | #endif /* !CONFIG_MMU */ |
Christoph Hellwig | d3649f6 | 2019-07-11 20:57:18 -0700 | [diff] [blame] | 1614 | |
Jann Horn | 8f942ee | 2020-10-15 20:12:40 -0700 | [diff] [blame] | 1615 | /** |
Andreas Gruenbacher | bb523b4 | 2021-08-02 13:44:20 +0200 | [diff] [blame] | 1616 | * fault_in_writeable - fault in userspace address range for writing |
| 1617 | * @uaddr: start of address range |
| 1618 | * @size: size of address range |
| 1619 | * |
| 1620 | * Returns the number of bytes not faulted in (like copy_to_user() and |
| 1621 | * copy_from_user()). |
| 1622 | */ |
| 1623 | size_t fault_in_writeable(char __user *uaddr, size_t size) |
| 1624 | { |
| 1625 | char __user *start = uaddr, *end; |
| 1626 | |
| 1627 | if (unlikely(size == 0)) |
| 1628 | return 0; |
Christophe Leroy | 677b2a8 | 2022-01-14 14:05:13 -0800 | [diff] [blame] | 1629 | if (!user_write_access_begin(uaddr, size)) |
| 1630 | return size; |
Andreas Gruenbacher | bb523b4 | 2021-08-02 13:44:20 +0200 | [diff] [blame] | 1631 | if (!PAGE_ALIGNED(uaddr)) { |
Christophe Leroy | 677b2a8 | 2022-01-14 14:05:13 -0800 | [diff] [blame] | 1632 | unsafe_put_user(0, uaddr, out); |
Andreas Gruenbacher | bb523b4 | 2021-08-02 13:44:20 +0200 | [diff] [blame] | 1633 | uaddr = (char __user *)PAGE_ALIGN((unsigned long)uaddr); |
| 1634 | } |
| 1635 | end = (char __user *)PAGE_ALIGN((unsigned long)start + size); |
| 1636 | if (unlikely(end < start)) |
| 1637 | end = NULL; |
| 1638 | while (uaddr != end) { |
Christophe Leroy | 677b2a8 | 2022-01-14 14:05:13 -0800 | [diff] [blame] | 1639 | unsafe_put_user(0, uaddr, out); |
Andreas Gruenbacher | bb523b4 | 2021-08-02 13:44:20 +0200 | [diff] [blame] | 1640 | uaddr += PAGE_SIZE; |
| 1641 | } |
| 1642 | |
| 1643 | out: |
Christophe Leroy | 677b2a8 | 2022-01-14 14:05:13 -0800 | [diff] [blame] | 1644 | user_write_access_end(); |
Andreas Gruenbacher | bb523b4 | 2021-08-02 13:44:20 +0200 | [diff] [blame] | 1645 | if (size > uaddr - start) |
| 1646 | return size - (uaddr - start); |
| 1647 | return 0; |
| 1648 | } |
| 1649 | EXPORT_SYMBOL(fault_in_writeable); |
| 1650 | |
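/*
 * Illustrative sketch (not part of this file) of the retry pattern this
 * helper supports: a nofault copy cannot fault pages in by itself, so on
 * failure the caller faults the destination in and tries again. The name
 * is hypothetical, and a real caller would bound the number of retries.
 */
static int example_copy_to_user_retry(void __user *dst, const void *src,
				      size_t len)
{
	do {
		if (!copy_to_user_nofault(dst, src, len))
			return 0;
	} while (fault_in_writeable((char __user *)dst, len) == 0);

	return -EFAULT;
}
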
Andreas Gruenbacher | cdd591f | 2021-07-05 17:26:28 +0200 | [diff] [blame] | 1651 | /* |
| 1652 | * fault_in_safe_writeable - fault in an address range for writing |
| 1653 | * @uaddr: start of address range |
| 1654 | * @size: length of address range |
| 1655 | * |
Linus Torvalds | fe673d3 | 2022-03-08 11:55:48 -0800 | [diff] [blame] | 1656 | * Faults in an address range for writing. This is primarily useful when we |
| 1657 | * already know that some or all of the pages in the address range aren't in |
| 1658 | * memory. |
Andreas Gruenbacher | cdd591f | 2021-07-05 17:26:28 +0200 | [diff] [blame] | 1659 | * |
Linus Torvalds | fe673d3 | 2022-03-08 11:55:48 -0800 | [diff] [blame] | 1660 | * Unlike fault_in_writeable(), this function is non-destructive. |
Andreas Gruenbacher | cdd591f | 2021-07-05 17:26:28 +0200 | [diff] [blame] | 1661 | * |
 | 1662 | * Note that we don't pin or otherwise hold a reference on the pages we fault
| 1663 | * in. There's no guarantee that they'll stay in memory for any duration of |
| 1664 | * time. |
| 1665 | * |
| 1666 | * Returns the number of bytes not faulted in, like copy_to_user() and |
| 1667 | * copy_from_user(). |
| 1668 | */ |
| 1669 | size_t fault_in_safe_writeable(const char __user *uaddr, size_t size) |
| 1670 | { |
Linus Torvalds | fe673d3 | 2022-03-08 11:55:48 -0800 | [diff] [blame] | 1671 | unsigned long start = (unsigned long)uaddr, end; |
Andreas Gruenbacher | cdd591f | 2021-07-05 17:26:28 +0200 | [diff] [blame] | 1672 | struct mm_struct *mm = current->mm; |
Linus Torvalds | fe673d3 | 2022-03-08 11:55:48 -0800 | [diff] [blame] | 1673 | bool unlocked = false; |
Andreas Gruenbacher | cdd591f | 2021-07-05 17:26:28 +0200 | [diff] [blame] | 1674 | |
Linus Torvalds | fe673d3 | 2022-03-08 11:55:48 -0800 | [diff] [blame] | 1675 | if (unlikely(size == 0)) |
Andreas Gruenbacher | cdd591f | 2021-07-05 17:26:28 +0200 | [diff] [blame] | 1676 | return 0; |
Linus Torvalds | fe673d3 | 2022-03-08 11:55:48 -0800 | [diff] [blame] | 1677 | end = PAGE_ALIGN(start + size); |
| 1678 | if (end < start) |
| 1679 | end = 0; |
| 1680 | |
| 1681 | mmap_read_lock(mm); |
| 1682 | do { |
| 1683 | if (fixup_user_fault(mm, start, FAULT_FLAG_WRITE, &unlocked)) |
| 1684 | break; |
| 1685 | start = (start + PAGE_SIZE) & PAGE_MASK; |
| 1686 | } while (start != end); |
| 1687 | mmap_read_unlock(mm); |
| 1688 | |
| 1689 | if (size > (unsigned long)uaddr - start) |
| 1690 | return size - ((unsigned long)uaddr - start); |
| 1691 | return 0; |
Andreas Gruenbacher | cdd591f | 2021-07-05 17:26:28 +0200 | [diff] [blame] | 1692 | } |
| 1693 | EXPORT_SYMBOL(fault_in_safe_writeable); |
| 1694 | |
Andreas Gruenbacher | bb523b4 | 2021-08-02 13:44:20 +0200 | [diff] [blame] | 1695 | /** |
| 1696 | * fault_in_readable - fault in userspace address range for reading |
| 1697 | * @uaddr: start of user address range |
| 1698 | * @size: size of user address range |
| 1699 | * |
| 1700 | * Returns the number of bytes not faulted in (like copy_to_user() and |
| 1701 | * copy_from_user()). |
| 1702 | */ |
| 1703 | size_t fault_in_readable(const char __user *uaddr, size_t size) |
| 1704 | { |
| 1705 | const char __user *start = uaddr, *end; |
| 1706 | volatile char c; |
| 1707 | |
| 1708 | if (unlikely(size == 0)) |
| 1709 | return 0; |
Christophe Leroy | 677b2a8 | 2022-01-14 14:05:13 -0800 | [diff] [blame] | 1710 | if (!user_read_access_begin(uaddr, size)) |
| 1711 | return size; |
Andreas Gruenbacher | bb523b4 | 2021-08-02 13:44:20 +0200 | [diff] [blame] | 1712 | if (!PAGE_ALIGNED(uaddr)) { |
Christophe Leroy | 677b2a8 | 2022-01-14 14:05:13 -0800 | [diff] [blame] | 1713 | unsafe_get_user(c, uaddr, out); |
Andreas Gruenbacher | bb523b4 | 2021-08-02 13:44:20 +0200 | [diff] [blame] | 1714 | uaddr = (const char __user *)PAGE_ALIGN((unsigned long)uaddr); |
| 1715 | } |
| 1716 | end = (const char __user *)PAGE_ALIGN((unsigned long)start + size); |
| 1717 | if (unlikely(end < start)) |
| 1718 | end = NULL; |
| 1719 | while (uaddr != end) { |
Christophe Leroy | 677b2a8 | 2022-01-14 14:05:13 -0800 | [diff] [blame] | 1720 | unsafe_get_user(c, uaddr, out); |
Andreas Gruenbacher | bb523b4 | 2021-08-02 13:44:20 +0200 | [diff] [blame] | 1721 | uaddr += PAGE_SIZE; |
| 1722 | } |
| 1723 | |
| 1724 | out: |
Christophe Leroy | 677b2a8 | 2022-01-14 14:05:13 -0800 | [diff] [blame] | 1725 | user_read_access_end(); |
Andreas Gruenbacher | bb523b4 | 2021-08-02 13:44:20 +0200 | [diff] [blame] | 1726 | (void)c; |
| 1727 | if (size > uaddr - start) |
| 1728 | return size - (uaddr - start); |
| 1729 | return 0; |
| 1730 | } |
| 1731 | EXPORT_SYMBOL(fault_in_readable); |
| 1732 | |
| 1733 | /** |
Jann Horn | 8f942ee | 2020-10-15 20:12:40 -0700 | [diff] [blame] | 1734 | * get_dump_page() - pin user page in memory while writing it to core dump |
| 1735 | * @addr: user address |
| 1736 | * |
| 1737 | * Returns struct page pointer of user page pinned for dump, |
| 1738 | * to be freed afterwards by put_page(). |
| 1739 | * |
| 1740 | * Returns NULL on any kind of failure - a hole must then be inserted into |
| 1741 | * the corefile, to preserve alignment with its headers; and also returns |
| 1742 | * NULL wherever the ZERO_PAGE, or an anonymous pte_none, has been found - |
Ingo Molnar | f0953a1 | 2021-05-06 18:06:47 -0700 | [diff] [blame] | 1743 | * allowing a hole to be left in the corefile to save disk space. |
Jann Horn | 8f942ee | 2020-10-15 20:12:40 -0700 | [diff] [blame] | 1744 | * |
Jann Horn | 7f3bfab | 2020-10-15 20:12:57 -0700 | [diff] [blame] | 1745 | * Called without mmap_lock (takes and releases the mmap_lock by itself). |
Jann Horn | 8f942ee | 2020-10-15 20:12:40 -0700 | [diff] [blame] | 1746 | */ |
| 1747 | #ifdef CONFIG_ELF_CORE |
| 1748 | struct page *get_dump_page(unsigned long addr) |
| 1749 | { |
Jann Horn | 7f3bfab | 2020-10-15 20:12:57 -0700 | [diff] [blame] | 1750 | struct mm_struct *mm = current->mm; |
Jann Horn | 8f942ee | 2020-10-15 20:12:40 -0700 | [diff] [blame] | 1751 | struct page *page; |
Jann Horn | 7f3bfab | 2020-10-15 20:12:57 -0700 | [diff] [blame] | 1752 | int locked = 1; |
| 1753 | int ret; |
Jann Horn | 8f942ee | 2020-10-15 20:12:40 -0700 | [diff] [blame] | 1754 | |
Jann Horn | 7f3bfab | 2020-10-15 20:12:57 -0700 | [diff] [blame] | 1755 | if (mmap_read_lock_killable(mm)) |
Jann Horn | 8f942ee | 2020-10-15 20:12:40 -0700 | [diff] [blame] | 1756 | return NULL; |
Jann Horn | 7f3bfab | 2020-10-15 20:12:57 -0700 | [diff] [blame] | 1757 | ret = __get_user_pages_locked(mm, addr, 1, &page, NULL, &locked, |
| 1758 | FOLL_FORCE | FOLL_DUMP | FOLL_GET); |
| 1759 | if (locked) |
| 1760 | mmap_read_unlock(mm); |
| 1761 | return (ret == 1) ? page : NULL; |
Jann Horn | 8f942ee | 2020-10-15 20:12:40 -0700 | [diff] [blame] | 1762 | } |
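
/*
 * Illustrative sketch (not part of this file) of the coredump-style loop
 * get_dump_page() serves: each NULL return becomes a hole in the core file.
 * example_emit() and example_skip() are hypothetical stand-ins for the
 * dump writer.
 */
extern int example_emit(const void *buf, size_t len);
extern int example_skip(size_t len);

static int example_dump_user_range(unsigned long start, unsigned long end)
{
	unsigned long addr;

	for (addr = start; addr < end; addr += PAGE_SIZE) {
		struct page *page = get_dump_page(addr);

		if (page) {
			void *kaddr = kmap_local_page(page);
			int ok = example_emit(kaddr, PAGE_SIZE);

			kunmap_local(kaddr);
			put_page(page);
			if (!ok)
				return -EIO;
		} else if (!example_skip(PAGE_SIZE)) {
			return -EIO;
		}
	}
	return 0;
}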
| 1763 | #endif /* CONFIG_ELF_CORE */ |
| 1764 | |
Pavel Tatashin | d1e153f | 2021-05-04 18:39:08 -0700 | [diff] [blame] | 1765 | #ifdef CONFIG_MIGRATION |
Pavel Tatashin | f68749e | 2021-05-04 18:39:19 -0700 | [diff] [blame] | 1766 | /* |
 | 1767 | * Check whether all pages are pinnable; if so, return the number of pages. If
 | 1768 | * some pages are not pinnable, migrate them and unpin all pages. Return zero
 | 1769 | * if pages were migrated, or if some pages could not be isolated. Return a
 | 1770 | * negative error if migration fails.
| 1771 | */ |
| 1772 | static long check_and_migrate_movable_pages(unsigned long nr_pages, |
Pavel Tatashin | d1e153f | 2021-05-04 18:39:08 -0700 | [diff] [blame] | 1773 | struct page **pages, |
Pavel Tatashin | d1e153f | 2021-05-04 18:39:08 -0700 | [diff] [blame] | 1774 | unsigned int gup_flags) |
Aneesh Kumar K.V | 9a4e9f3 | 2019-03-05 15:47:44 -0800 | [diff] [blame] | 1775 | { |
Christoph Hellwig | f9f38f7 | 2022-02-16 15:31:37 +1100 | [diff] [blame] | 1776 | unsigned long isolation_error_count = 0, i; |
Matthew Wilcox (Oracle) | 1b7f7e5 | 2022-02-17 12:46:35 -0500 | [diff] [blame] | 1777 | struct folio *prev_folio = NULL; |
Pavel Tatashin | d1e153f | 2021-05-04 18:39:08 -0700 | [diff] [blame] | 1778 | LIST_HEAD(movable_page_list); |
Christoph Hellwig | f9f38f7 | 2022-02-16 15:31:37 +1100 | [diff] [blame] | 1779 | bool drain_allow = true; |
| 1780 | int ret = 0; |
Aneesh Kumar K.V | 9a4e9f3 | 2019-03-05 15:47:44 -0800 | [diff] [blame] | 1781 | |
Pavel Tatashin | 83c02c2 | 2021-05-04 18:38:42 -0700 | [diff] [blame] | 1782 | for (i = 0; i < nr_pages; i++) { |
Matthew Wilcox (Oracle) | 1b7f7e5 | 2022-02-17 12:46:35 -0500 | [diff] [blame] | 1783 | struct folio *folio = page_folio(pages[i]); |
Aneesh Kumar K.V | 9a4e9f3 | 2019-03-05 15:47:44 -0800 | [diff] [blame] | 1784 | |
Matthew Wilcox (Oracle) | 1b7f7e5 | 2022-02-17 12:46:35 -0500 | [diff] [blame] | 1785 | if (folio == prev_folio) |
Aneesh Kumar K.V | 9a4e9f3 | 2019-03-05 15:47:44 -0800 | [diff] [blame] | 1786 | continue; |
Matthew Wilcox (Oracle) | 1b7f7e5 | 2022-02-17 12:46:35 -0500 | [diff] [blame] | 1787 | prev_folio = folio; |
Aneesh Kumar K.V | 9a4e9f3 | 2019-03-05 15:47:44 -0800 | [diff] [blame] | 1788 | |
Matthew Wilcox (Oracle) | 1b7f7e5 | 2022-02-17 12:46:35 -0500 | [diff] [blame] | 1789 | if (folio_is_pinnable(folio)) |
Christoph Hellwig | f9f38f7 | 2022-02-16 15:31:37 +1100 | [diff] [blame] | 1790 | continue; |
| 1791 | |
| 1792 | /* |
| 1793 | * Try to move out any movable page before pinning the range. |
| 1794 | */ |
Matthew Wilcox (Oracle) | 1b7f7e5 | 2022-02-17 12:46:35 -0500 | [diff] [blame] | 1795 | if (folio_test_hugetlb(folio)) { |
| 1796 | if (!isolate_huge_page(&folio->page, |
| 1797 | &movable_page_list)) |
Christoph Hellwig | f9f38f7 | 2022-02-16 15:31:37 +1100 | [diff] [blame] | 1798 | isolation_error_count++; |
| 1799 | continue; |
Aneesh Kumar K.V | 9a4e9f3 | 2019-03-05 15:47:44 -0800 | [diff] [blame] | 1800 | } |
Christoph Hellwig | f9f38f7 | 2022-02-16 15:31:37 +1100 | [diff] [blame] | 1801 | |
Matthew Wilcox (Oracle) | 1b7f7e5 | 2022-02-17 12:46:35 -0500 | [diff] [blame] | 1802 | if (!folio_test_lru(folio) && drain_allow) { |
Christoph Hellwig | f9f38f7 | 2022-02-16 15:31:37 +1100 | [diff] [blame] | 1803 | lru_add_drain_all(); |
| 1804 | drain_allow = false; |
| 1805 | } |
| 1806 | |
Matthew Wilcox (Oracle) | 1b7f7e5 | 2022-02-17 12:46:35 -0500 | [diff] [blame] | 1807 | if (folio_isolate_lru(folio)) { |
Christoph Hellwig | f9f38f7 | 2022-02-16 15:31:37 +1100 | [diff] [blame] | 1808 | isolation_error_count++; |
| 1809 | continue; |
| 1810 | } |
Matthew Wilcox (Oracle) | 1b7f7e5 | 2022-02-17 12:46:35 -0500 | [diff] [blame] | 1811 | list_add_tail(&folio->lru, &movable_page_list); |
| 1812 | node_stat_mod_folio(folio, |
| 1813 | NR_ISOLATED_ANON + folio_is_file_lru(folio), |
| 1814 | folio_nr_pages(folio)); |
Aneesh Kumar K.V | 9a4e9f3 | 2019-03-05 15:47:44 -0800 | [diff] [blame] | 1815 | } |
| 1816 | |
Christoph Hellwig | f9f38f7 | 2022-02-16 15:31:37 +1100 | [diff] [blame] | 1817 | if (!list_empty(&movable_page_list) || isolation_error_count) |
| 1818 | goto unpin_pages; |
| 1819 | |
Pavel Tatashin | 6e7f34e | 2021-05-04 18:38:49 -0700 | [diff] [blame] | 1820 | /* |
| 1821 | * If the list is empty and there were no isolation errors, all pages are
| 1822 | * in the correct zone, so report success.
| 1823 | */ |
Christoph Hellwig | f9f38f7 | 2022-02-16 15:31:37 +1100 | [diff] [blame] | 1824 | return nr_pages; |
Pavel Tatashin | 6e7f34e | 2021-05-04 18:38:49 -0700 | [diff] [blame] | 1825 | |
Christoph Hellwig | f9f38f7 | 2022-02-16 15:31:37 +1100 | [diff] [blame] | 1826 | unpin_pages: |
Pavel Tatashin | f68749e | 2021-05-04 18:39:19 -0700 | [diff] [blame] | 1827 | if (gup_flags & FOLL_PIN) { |
| 1828 | unpin_user_pages(pages, nr_pages); |
| 1829 | } else { |
| 1830 | for (i = 0; i < nr_pages; i++) |
| 1831 | put_page(pages[i]); |
| 1832 | } |
Christoph Hellwig | f9f38f7 | 2022-02-16 15:31:37 +1100 | [diff] [blame] | 1833 | |
Pavel Tatashin | d1e153f | 2021-05-04 18:39:08 -0700 | [diff] [blame] | 1834 | if (!list_empty(&movable_page_list)) { |
Christoph Hellwig | f9f38f7 | 2022-02-16 15:31:37 +1100 | [diff] [blame] | 1835 | struct migration_target_control mtc = { |
| 1836 | .nid = NUMA_NO_NODE, |
| 1837 | .gfp_mask = GFP_USER | __GFP_NOWARN, |
| 1838 | }; |
| 1839 | |
Pavel Tatashin | d1e153f | 2021-05-04 18:39:08 -0700 | [diff] [blame] | 1840 | ret = migrate_pages(&movable_page_list, alloc_migration_target, |
Pavel Tatashin | f0f4463 | 2021-05-04 18:38:46 -0700 | [diff] [blame] | 1841 | NULL, (unsigned long)&mtc, MIGRATE_SYNC, |
Yang Shi | 5ac9588 | 2021-09-02 14:59:13 -0700 | [diff] [blame] | 1842 | MR_LONGTERM_PIN, NULL); |
Christoph Hellwig | f9f38f7 | 2022-02-16 15:31:37 +1100 | [diff] [blame] | 1843 | if (ret > 0) /* number of pages not migrated */ |
| 1844 | ret = -ENOMEM; |
Aneesh Kumar K.V | 9a4e9f3 | 2019-03-05 15:47:44 -0800 | [diff] [blame] | 1845 | } |
| 1846 | |
Christoph Hellwig | f9f38f7 | 2022-02-16 15:31:37 +1100 | [diff] [blame] | 1847 | if (ret && !list_empty(&movable_page_list)) |
| 1848 | putback_movable_pages(&movable_page_list); |
| 1849 | return ret; |
Aneesh Kumar K.V | 9a4e9f3 | 2019-03-05 15:47:44 -0800 | [diff] [blame] | 1850 | } |
| 1851 | #else |
Pavel Tatashin | f68749e | 2021-05-04 18:39:19 -0700 | [diff] [blame] | 1852 | static long check_and_migrate_movable_pages(unsigned long nr_pages, |
Pavel Tatashin | d1e153f | 2021-05-04 18:39:08 -0700 | [diff] [blame] | 1853 | struct page **pages, |
Pavel Tatashin | d1e153f | 2021-05-04 18:39:08 -0700 | [diff] [blame] | 1854 | unsigned int gup_flags) |
Aneesh Kumar K.V | 9a4e9f3 | 2019-03-05 15:47:44 -0800 | [diff] [blame] | 1855 | { |
| 1856 | return nr_pages; |
| 1857 | } |
Pavel Tatashin | d1e153f | 2021-05-04 18:39:08 -0700 | [diff] [blame] | 1858 | #endif /* CONFIG_MIGRATION */ |
Aneesh Kumar K.V | 9a4e9f3 | 2019-03-05 15:47:44 -0800 | [diff] [blame] | 1859 | |
Dan Williams | 2bb6d28 | 2017-11-29 16:10:35 -0800 | [diff] [blame] | 1860 | /* |
Ira Weiny | 932f4a6 | 2019-05-13 17:17:03 -0700 | [diff] [blame] | 1861 | * __gup_longterm_locked() is a wrapper for __get_user_pages_locked which |
| 1862 | * allows us to process the FOLL_LONGTERM flag. |
Dan Williams | 2bb6d28 | 2017-11-29 16:10:35 -0800 | [diff] [blame] | 1863 | */ |
Peter Xu | 64019a2 | 2020-08-11 18:39:01 -0700 | [diff] [blame] | 1864 | static long __gup_longterm_locked(struct mm_struct *mm, |
Ira Weiny | 932f4a6 | 2019-05-13 17:17:03 -0700 | [diff] [blame] | 1865 | unsigned long start, |
| 1866 | unsigned long nr_pages, |
| 1867 | struct page **pages, |
| 1868 | struct vm_area_struct **vmas, |
| 1869 | unsigned int gup_flags) |
Dan Williams | 2bb6d28 | 2017-11-29 16:10:35 -0800 | [diff] [blame] | 1870 | { |
Pavel Tatashin | f68749e | 2021-05-04 18:39:19 -0700 | [diff] [blame] | 1871 | unsigned int flags; |
Jason Gunthorpe | 52650c8 | 2020-12-14 19:05:48 -0800 | [diff] [blame] | 1872 | long rc; |
Dan Williams | 2bb6d28 | 2017-11-29 16:10:35 -0800 | [diff] [blame] | 1873 | |
Pavel Tatashin | f68749e | 2021-05-04 18:39:19 -0700 | [diff] [blame] | 1874 | if (!(gup_flags & FOLL_LONGTERM)) |
| 1875 | return __get_user_pages_locked(mm, start, nr_pages, pages, vmas, |
| 1876 | NULL, gup_flags); |
| 1877 | flags = memalloc_pin_save(); |
| 1878 | do { |
| 1879 | rc = __get_user_pages_locked(mm, start, nr_pages, pages, vmas, |
| 1880 | NULL, gup_flags); |
| 1881 | if (rc <= 0) |
| 1882 | break; |
| 1883 | rc = check_and_migrate_movable_pages(rc, pages, gup_flags); |
| 1884 | } while (!rc); |
| 1885 | memalloc_pin_restore(flags); |
Dan Williams | 2bb6d28 | 2017-11-29 16:10:35 -0800 | [diff] [blame] | 1886 | |
Dan Williams | 2bb6d28 | 2017-11-29 16:10:35 -0800 | [diff] [blame] | 1887 | return rc; |
| 1888 | } |
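/*
 * Illustrative sketch (not part of this file): the caller-side view of the
 * migrate-and-retry loop above. A driver pins with FOLL_LONGTERM via
 * pin_user_pages(), which sets FOLL_PIN internally and lands in
 * __gup_longterm_locked(); the loop keeps retrying until every page is
 * pinnable (unmovable pages having been migrated) or an error occurs.
 */
static long pin_buffer_for_dma(unsigned long uaddr, unsigned long nr_pages,
			       struct page **pages)
{
	long pinned;

	pinned = pin_user_pages(uaddr, nr_pages,
				FOLL_WRITE | FOLL_LONGTERM, pages, NULL);
	if (pinned < 0)
		return pinned;		/* -errno, e.g. -ENOMEM from migration */

	/* ... program the DMA engine ... and much later: */
	unpin_user_pages(pages, pinned);
	return 0;
}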
Ira Weiny | 932f4a6 | 2019-05-13 17:17:03 -0700 | [diff] [blame] | 1889 | |
Barry Song | 447f3e4 | 2020-10-13 16:51:58 -0700 | [diff] [blame] | 1890 | static bool is_valid_gup_flags(unsigned int gup_flags) |
| 1891 | { |
| 1892 | /* |
| 1893 | * FOLL_PIN must only be set internally by the pin_user_pages*() APIs, |
| 1894 | * never directly by the caller, so enforce that with an assertion: |
| 1895 | */ |
| 1896 | if (WARN_ON_ONCE(gup_flags & FOLL_PIN)) |
| 1897 | return false; |
| 1898 | /* |
| 1899 | * FOLL_PIN is a prerequisite to FOLL_LONGTERM. Another way of saying |
| 1900 | * that is, FOLL_LONGTERM is a more specific, and more restrictive, case
| 1901 | * of FOLL_PIN.
| 1902 | */ |
| 1903 | if (WARN_ON_ONCE(gup_flags & FOLL_LONGTERM)) |
| 1904 | return false; |
| 1905 | |
| 1906 | return true; |
| 1907 | } |
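/*
 * Illustrative only: calls like these trip the WARN_ON_ONCE()s above and
 * fail with -EINVAL, because FOLL_PIN (and therefore FOLL_LONGTERM) may
 * only be set internally by the pin_user_pages*() entry points:
 *
 *	get_user_pages(addr, 1, FOLL_PIN, pages, NULL);
 *	get_user_pages(addr, 1, FOLL_WRITE | FOLL_LONGTERM, pages, NULL);
 *
 * Callers needing those semantics should use pin_user_pages() instead.
 */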
| 1908 | |
John Hubbard | 22bf29b | 2020-04-01 21:05:10 -0700 | [diff] [blame] | 1909 | #ifdef CONFIG_MMU |
Peter Xu | 64019a2 | 2020-08-11 18:39:01 -0700 | [diff] [blame] | 1910 | static long __get_user_pages_remote(struct mm_struct *mm, |
John Hubbard | 22bf29b | 2020-04-01 21:05:10 -0700 | [diff] [blame] | 1911 | unsigned long start, unsigned long nr_pages, |
| 1912 | unsigned int gup_flags, struct page **pages, |
| 1913 | struct vm_area_struct **vmas, int *locked) |
| 1914 | { |
| 1915 | /* |
| 1916 | * Parts of FOLL_LONGTERM behavior are incompatible with |
| 1917 | * FAULT_FLAG_ALLOW_RETRY because of the FS DAX check requirement on |
| 1918 | * vmas. However, this only comes up if locked is set, and there are |
| 1919 | * callers that do request FOLL_LONGTERM, but do not set locked. So, |
| 1920 | * allow what we can. |
| 1921 | */ |
| 1922 | if (gup_flags & FOLL_LONGTERM) { |
| 1923 | if (WARN_ON_ONCE(locked)) |
| 1924 | return -EINVAL; |
| 1925 | /* |
| 1926 | * This will check the vmas (even if our vmas arg is NULL) |
| 1927 | * and return -ENOTSUPP if DAX isn't allowed in this case: |
| 1928 | */ |
Peter Xu | 64019a2 | 2020-08-11 18:39:01 -0700 | [diff] [blame] | 1929 | return __gup_longterm_locked(mm, start, nr_pages, pages, |
John Hubbard | 22bf29b | 2020-04-01 21:05:10 -0700 | [diff] [blame] | 1930 | vmas, gup_flags | FOLL_TOUCH | |
| 1931 | FOLL_REMOTE); |
| 1932 | } |
| 1933 | |
Peter Xu | 64019a2 | 2020-08-11 18:39:01 -0700 | [diff] [blame] | 1934 | return __get_user_pages_locked(mm, start, nr_pages, pages, vmas, |
John Hubbard | 22bf29b | 2020-04-01 21:05:10 -0700 | [diff] [blame] | 1935 | locked, |
| 1936 | gup_flags | FOLL_TOUCH | FOLL_REMOTE); |
| 1937 | } |
| 1938 | |
Souptick Joarder | adc8cb4 | 2020-06-01 21:48:24 -0700 | [diff] [blame] | 1939 | /** |
John Hubbard | c4237f8 | 2020-01-30 22:12:36 -0800 | [diff] [blame] | 1940 | * get_user_pages_remote() - pin user pages in memory |
John Hubbard | c4237f8 | 2020-01-30 22:12:36 -0800 | [diff] [blame] | 1941 | * @mm: mm_struct of target mm |
| 1942 | * @start: starting user address |
| 1943 | * @nr_pages: number of pages from start to pin |
| 1944 | * @gup_flags: flags modifying lookup behaviour |
| 1945 | * @pages: array that receives pointers to the pages pinned. |
| 1946 | * Should be at least nr_pages long. Or NULL, if caller |
| 1947 | * only intends to ensure the pages are faulted in. |
| 1948 | * @vmas: array of pointers to vmas corresponding to each page. |
| 1949 | * Or NULL if the caller does not require them. |
| 1950 | * @locked: pointer to lock flag indicating whether lock is held and |
| 1951 | * subsequently whether VM_FAULT_RETRY functionality can be |
| 1952 | * utilised. Lock must initially be held. |
| 1953 | * |
| 1954 | * Returns either number of pages pinned (which may be less than the |
| 1955 | * number requested), or an error. Details about the return value: |
| 1956 | * |
| 1957 | * -- If nr_pages is 0, returns 0. |
| 1958 | * -- If nr_pages is >0, but no pages were pinned, returns -errno. |
| 1959 | * -- If nr_pages is >0, and some pages were pinned, returns the number of |
| 1960 | * pages pinned. Again, this may be less than nr_pages. |
| 1961 | * |
| 1962 | * The caller is responsible for releasing returned @pages, via put_page(). |
| 1963 | * |
Michel Lespinasse | c1e8d7c | 2020-06-08 21:33:54 -0700 | [diff] [blame] | 1964 | * @vmas are valid only as long as mmap_lock is held. |
John Hubbard | c4237f8 | 2020-01-30 22:12:36 -0800 | [diff] [blame] | 1965 | * |
Michel Lespinasse | c1e8d7c | 2020-06-08 21:33:54 -0700 | [diff] [blame] | 1966 | * Must be called with mmap_lock held for read or write. |
John Hubbard | c4237f8 | 2020-01-30 22:12:36 -0800 | [diff] [blame] | 1967 | * |
Souptick Joarder | adc8cb4 | 2020-06-01 21:48:24 -0700 | [diff] [blame] | 1968 | * get_user_pages_remote walks a process's page tables and takes a reference |
| 1969 | * to each struct page that each user address corresponds to at a given |
John Hubbard | c4237f8 | 2020-01-30 22:12:36 -0800 | [diff] [blame] | 1970 | * instant. That is, it takes the page that would be accessed if a user |
| 1971 | * thread accesses the given user virtual address at that instant. |
| 1972 | * |
| 1973 | * This does not guarantee that the page exists in the user mappings when |
Souptick Joarder | adc8cb4 | 2020-06-01 21:48:24 -0700 | [diff] [blame] | 1974 | * get_user_pages_remote returns, and there may even be a completely different |
John Hubbard | c4237f8 | 2020-01-30 22:12:36 -0800 | [diff] [blame] | 1975 | * page there in some cases (e.g. if mmapped pagecache has been invalidated
| 1976 | * and subsequently re-faulted). However, it does guarantee that the page
| 1977 | * won't be freed completely. Mostly, callers simply care that the page
| 1978 | * contains data that was valid *at some point in time*. Typically, an IO |
| 1979 | * or similar operation cannot guarantee anything stronger anyway because |
| 1980 | * locks can't be held over the syscall boundary. |
| 1981 | * |
| 1982 | * If gup_flags & FOLL_WRITE == 0, the page must not be written to. If the page |
| 1983 | * is written to, set_page_dirty (or set_page_dirty_lock, as appropriate) must |
| 1984 | * be called after the page is finished with, and before put_page is called. |
| 1985 | * |
Souptick Joarder | adc8cb4 | 2020-06-01 21:48:24 -0700 | [diff] [blame] | 1986 | * get_user_pages_remote is typically used for fewer-copy IO operations, |
| 1987 | * to get a handle on the memory by some means other than accesses |
| 1988 | * via the user virtual addresses. The pages may be submitted for |
| 1989 | * DMA to devices or accessed via their kernel linear mapping (via the |
| 1990 | * kmap APIs). Care should be taken to use the correct cache flushing APIs. |
John Hubbard | c4237f8 | 2020-01-30 22:12:36 -0800 | [diff] [blame] | 1991 | * |
| 1992 | * See also get_user_pages_fast, for performance critical applications. |
| 1993 | * |
Souptick Joarder | adc8cb4 | 2020-06-01 21:48:24 -0700 | [diff] [blame] | 1994 | * get_user_pages_remote should be phased out in favor of |
John Hubbard | c4237f8 | 2020-01-30 22:12:36 -0800 | [diff] [blame] | 1995 | * get_user_pages_locked|unlocked or get_user_pages_fast. Nothing |
Souptick Joarder | adc8cb4 | 2020-06-01 21:48:24 -0700 | [diff] [blame] | 1996 | * should use get_user_pages_remote because it cannot pass |
John Hubbard | c4237f8 | 2020-01-30 22:12:36 -0800 | [diff] [blame] | 1997 | * FAULT_FLAG_ALLOW_RETRY to handle_mm_fault. |
| 1998 | */ |
Peter Xu | 64019a2 | 2020-08-11 18:39:01 -0700 | [diff] [blame] | 1999 | long get_user_pages_remote(struct mm_struct *mm, |
John Hubbard | c4237f8 | 2020-01-30 22:12:36 -0800 | [diff] [blame] | 2000 | unsigned long start, unsigned long nr_pages, |
| 2001 | unsigned int gup_flags, struct page **pages, |
| 2002 | struct vm_area_struct **vmas, int *locked) |
| 2003 | { |
Barry Song | 447f3e4 | 2020-10-13 16:51:58 -0700 | [diff] [blame] | 2004 | if (!is_valid_gup_flags(gup_flags)) |
John Hubbard | eddb1c2 | 2020-01-30 22:12:54 -0800 | [diff] [blame] | 2005 | return -EINVAL; |
| 2006 | |
Peter Xu | 64019a2 | 2020-08-11 18:39:01 -0700 | [diff] [blame] | 2007 | return __get_user_pages_remote(mm, start, nr_pages, gup_flags, |
John Hubbard | 22bf29b | 2020-04-01 21:05:10 -0700 | [diff] [blame] | 2008 | pages, vmas, locked); |
John Hubbard | c4237f8 | 2020-01-30 22:12:36 -0800 | [diff] [blame] | 2009 | } |
| 2010 | EXPORT_SYMBOL(get_user_pages_remote); |
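/*
 * Illustrative sketch (not part of this file): the lock/dirty/release
 * protocol the kernel-doc above describes, assuming the caller already
 * holds a reference on @mm (e.g. from get_task_mm()).
 */
static long write_remote_page(struct mm_struct *mm, unsigned long addr)
{
	struct page *page;
	int locked = 1;
	long ret;

	if (mmap_read_lock_killable(mm))
		return -EINTR;
	ret = get_user_pages_remote(mm, addr, 1, FOLL_WRITE, &page,
				    NULL, &locked);
	if (locked)
		mmap_read_unlock(mm);
	if (ret != 1)
		return ret < 0 ? ret : -EFAULT;

	/* ... modify the page contents via kmap_local_page() ... */
	set_page_dirty_lock(page);	/* dirty after writing, before release */
	put_page(page);			/* caller must release the page */
	return 0;
}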
| 2011 | |
John Hubbard | eddb1c2 | 2020-01-30 22:12:54 -0800 | [diff] [blame] | 2012 | #else /* CONFIG_MMU */ |
Peter Xu | 64019a2 | 2020-08-11 18:39:01 -0700 | [diff] [blame] | 2013 | long get_user_pages_remote(struct mm_struct *mm, |
John Hubbard | eddb1c2 | 2020-01-30 22:12:54 -0800 | [diff] [blame] | 2014 | unsigned long start, unsigned long nr_pages, |
| 2015 | unsigned int gup_flags, struct page **pages, |
| 2016 | struct vm_area_struct **vmas, int *locked) |
| 2017 | { |
| 2018 | return 0; |
| 2019 | } |
John Hubbard | 3faa52c | 2020-04-01 21:05:29 -0700 | [diff] [blame] | 2020 | |
Peter Xu | 64019a2 | 2020-08-11 18:39:01 -0700 | [diff] [blame] | 2021 | static long __get_user_pages_remote(struct mm_struct *mm, |
John Hubbard | 3faa52c | 2020-04-01 21:05:29 -0700 | [diff] [blame] | 2022 | unsigned long start, unsigned long nr_pages, |
| 2023 | unsigned int gup_flags, struct page **pages, |
| 2024 | struct vm_area_struct **vmas, int *locked) |
| 2025 | { |
| 2026 | return 0; |
| 2027 | } |
John Hubbard | eddb1c2 | 2020-01-30 22:12:54 -0800 | [diff] [blame] | 2028 | #endif /* !CONFIG_MMU */ |
| 2029 | |
Souptick Joarder | adc8cb4 | 2020-06-01 21:48:24 -0700 | [diff] [blame] | 2030 | /** |
| 2031 | * get_user_pages() - pin user pages in memory |
| 2032 | * @start: starting user address |
| 2033 | * @nr_pages: number of pages from start to pin |
| 2034 | * @gup_flags: flags modifying lookup behaviour |
| 2035 | * @pages: array that receives pointers to the pages pinned. |
| 2036 | * Should be at least nr_pages long. Or NULL, if caller |
| 2037 | * only intends to ensure the pages are faulted in. |
| 2038 | * @vmas: array of pointers to vmas corresponding to each page. |
| 2039 | * Or NULL if the caller does not require them. |
| 2040 | * |
Peter Xu | 64019a2 | 2020-08-11 18:39:01 -0700 | [diff] [blame] | 2041 | * This is the same as get_user_pages_remote(), just with a less-flexible |
| 2042 | * calling convention where we assume that the mm being operated on belongs to |
| 2043 | * the current task, and doesn't allow passing of a locked parameter. We also |
| 2044 | * obviously don't pass FOLL_REMOTE in here. |
Ira Weiny | 932f4a6 | 2019-05-13 17:17:03 -0700 | [diff] [blame] | 2045 | */ |
| 2046 | long get_user_pages(unsigned long start, unsigned long nr_pages, |
| 2047 | unsigned int gup_flags, struct page **pages, |
| 2048 | struct vm_area_struct **vmas) |
| 2049 | { |
Barry Song | 447f3e4 | 2020-10-13 16:51:58 -0700 | [diff] [blame] | 2050 | if (!is_valid_gup_flags(gup_flags)) |
John Hubbard | eddb1c2 | 2020-01-30 22:12:54 -0800 | [diff] [blame] | 2051 | return -EINVAL; |
| 2052 | |
Peter Xu | 64019a2 | 2020-08-11 18:39:01 -0700 | [diff] [blame] | 2053 | return __gup_longterm_locked(current->mm, start, nr_pages, |
Ira Weiny | 932f4a6 | 2019-05-13 17:17:03 -0700 | [diff] [blame] | 2054 | pages, vmas, gup_flags | FOLL_TOUCH); |
| 2055 | } |
| 2056 | EXPORT_SYMBOL(get_user_pages); |
Dan Williams | 2bb6d28 | 2017-11-29 16:10:35 -0800 | [diff] [blame] | 2057 | |
Kirill A. Shutemov | acc3c8d | 2015-04-14 15:44:45 -0700 | [diff] [blame] | 2058 | /* |
Christoph Hellwig | d3649f6 | 2019-07-11 20:57:18 -0700 | [diff] [blame] | 2059 | * get_user_pages_unlocked() is suitable to replace the form: |
Kirill A. Shutemov | acc3c8d | 2015-04-14 15:44:45 -0700 | [diff] [blame] | 2060 | * |
Michel Lespinasse | 3e4e28c | 2020-06-08 21:33:51 -0700 | [diff] [blame] | 2061 | * mmap_read_lock(mm); |
Peter Xu | 64019a2 | 2020-08-11 18:39:01 -0700 | [diff] [blame] | 2062 | * get_user_pages(mm, ..., pages, NULL); |
Michel Lespinasse | 3e4e28c | 2020-06-08 21:33:51 -0700 | [diff] [blame] | 2063 | * mmap_read_unlock(mm); |
Christoph Hellwig | d3649f6 | 2019-07-11 20:57:18 -0700 | [diff] [blame] | 2064 | * |
| 2065 | * with: |
| 2066 | * |
Peter Xu | 64019a2 | 2020-08-11 18:39:01 -0700 | [diff] [blame] | 2067 | * get_user_pages_unlocked(mm, ..., pages); |
Christoph Hellwig | d3649f6 | 2019-07-11 20:57:18 -0700 | [diff] [blame] | 2068 | * |
| 2069 | * It is functionally equivalent to get_user_pages_fast, so
| 2070 | * get_user_pages_fast should be used instead if specific gup_flags |
| 2071 | * (e.g. FOLL_FORCE) are not required. |
Kirill A. Shutemov | acc3c8d | 2015-04-14 15:44:45 -0700 | [diff] [blame] | 2072 | */ |
Christoph Hellwig | d3649f6 | 2019-07-11 20:57:18 -0700 | [diff] [blame] | 2073 | long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages, |
| 2074 | struct page **pages, unsigned int gup_flags) |
Kirill A. Shutemov | acc3c8d | 2015-04-14 15:44:45 -0700 | [diff] [blame] | 2075 | { |
| 2076 | struct mm_struct *mm = current->mm; |
Christoph Hellwig | d3649f6 | 2019-07-11 20:57:18 -0700 | [diff] [blame] | 2077 | int locked = 1; |
| 2078 | long ret; |
Kirill A. Shutemov | acc3c8d | 2015-04-14 15:44:45 -0700 | [diff] [blame] | 2079 | |
Christoph Hellwig | d3649f6 | 2019-07-11 20:57:18 -0700 | [diff] [blame] | 2080 | /* |
| 2081 | * FIXME: Current FOLL_LONGTERM behavior is incompatible with |
| 2082 | * FAULT_FLAG_ALLOW_RETRY because of the FS DAX check requirement on |
| 2083 | * vmas. As there are no users of this flag in this call we simply |
| 2084 | * disallow this option for now. |
| 2085 | */ |
| 2086 | if (WARN_ON_ONCE(gup_flags & FOLL_LONGTERM)) |
| 2087 | return -EINVAL; |
Kirill A. Shutemov | acc3c8d | 2015-04-14 15:44:45 -0700 | [diff] [blame] | 2088 | |
Michel Lespinasse | d8ed45c | 2020-06-08 21:33:25 -0700 | [diff] [blame] | 2089 | mmap_read_lock(mm); |
Peter Xu | 64019a2 | 2020-08-11 18:39:01 -0700 | [diff] [blame] | 2090 | ret = __get_user_pages_locked(mm, start, nr_pages, pages, NULL, |
Christoph Hellwig | d3649f6 | 2019-07-11 20:57:18 -0700 | [diff] [blame] | 2091 | &locked, gup_flags | FOLL_TOUCH); |
Kirill A. Shutemov | acc3c8d | 2015-04-14 15:44:45 -0700 | [diff] [blame] | 2092 | if (locked) |
Michel Lespinasse | d8ed45c | 2020-06-08 21:33:25 -0700 | [diff] [blame] | 2093 | mmap_read_unlock(mm); |
Christoph Hellwig | d3649f6 | 2019-07-11 20:57:18 -0700 | [diff] [blame] | 2094 | return ret; |
Kirill A. Shutemov | acc3c8d | 2015-04-14 15:44:45 -0700 | [diff] [blame] | 2095 | } |
Christoph Hellwig | d3649f6 | 2019-07-11 20:57:18 -0700 | [diff] [blame] | 2096 | EXPORT_SYMBOL(get_user_pages_unlocked); |
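/*
 * Illustrative only: a concrete instance of the replacement described
 * above, for a caller that needs FOLL_FORCE (otherwise
 * get_user_pages_fast() would be the better choice):
 *
 *	ret = get_user_pages_unlocked(start, nr, pages,
 *				      FOLL_FORCE | FOLL_WRITE);
 *
 * instead of the open-coded form:
 *
 *	mmap_read_lock(current->mm);
 *	ret = get_user_pages(start, nr, FOLL_FORCE | FOLL_WRITE, pages, NULL);
 *	mmap_read_unlock(current->mm);
 */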
Steve Capper | 2667f50 | 2014-10-09 15:29:14 -0700 | [diff] [blame] | 2097 | |
| 2098 | /* |
Christoph Hellwig | 67a929e | 2019-07-11 20:57:14 -0700 | [diff] [blame] | 2099 | * Fast GUP |
Steve Capper | 2667f50 | 2014-10-09 15:29:14 -0700 | [diff] [blame] | 2100 | * |
| 2101 | * get_user_pages_fast attempts to pin user pages by walking the page |
| 2102 | * tables directly and avoids taking locks. Thus the walker needs to be |
| 2103 | * protected from page table pages being freed from under it, and should |
| 2104 | * block any THP splits. |
| 2105 | * |
| 2106 | * One way to achieve this is to have the walker disable interrupts, and |
| 2107 | * rely on IPIs from the TLB flushing code blocking before the page table |
| 2108 | * pages are freed. This is unsuitable for architectures that do not need |
| 2109 | * to broadcast an IPI when invalidating TLBs. |
| 2110 | * |
| 2111 | * Another way to achieve this is to batch up the page-table-containing pages
| 2112 | * belonging to more than one mm_user, then rcu_sched a callback to free those
| 2113 | * pages. Disabling interrupts will allow the fast_gup walker to block both
| 2114 | * the rcu_sched callback and an IPI that we broadcast for splitting THPs
| 2115 | * (which is a relatively rare event). The code below adopts this strategy. |
| 2116 | * |
| 2117 | * Before activating this code, please be aware that the following assumptions |
| 2118 | * are currently made: |
| 2119 | * |
Peter Zijlstra | ff2e6d72 | 2020-02-03 17:37:02 -0800 | [diff] [blame] | 2120 | * *) Either MMU_GATHER_RCU_TABLE_FREE is enabled, and tlb_remove_table() is used to |
Kirill A. Shutemov | e585513 | 2017-06-06 14:31:20 +0300 | [diff] [blame] | 2121 | * free pages containing page tables, or TLB flushing requires IPI broadcast.
Steve Capper | 2667f50 | 2014-10-09 15:29:14 -0700 | [diff] [blame] | 2122 | * |
Steve Capper | 2667f50 | 2014-10-09 15:29:14 -0700 | [diff] [blame] | 2123 | * *) ptes can be read atomically by the architecture. |
| 2124 | * |
| 2125 | * *) access_ok is sufficient to validate userspace address ranges. |
| 2126 | * |
| 2127 | * The last two assumptions can be relaxed by the addition of helper functions. |
| 2128 | * |
| 2129 | * This code is based heavily on the PowerPC implementation by Nick Piggin. |
| 2130 | */ |
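/*
 * A minimal sketch of the strategy just described, as applied by
 * lockless_pages_from_mm() further down in this file: interrupts are
 * disabled around the walk, which blocks both the RCU callback that would
 * free page-table pages and the THP-split IPI.
 */
static int fast_gup_critical_section(unsigned long start, unsigned long end,
				     unsigned int gup_flags,
				     struct page **pages)
{
	unsigned long flags;
	int nr_pinned = 0;

	local_irq_save(flags);
	gup_pgd_range(start, end, gup_flags, pages, &nr_pinned);
	local_irq_restore(flags);

	return nr_pinned;
}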
Christoph Hellwig | 67a929e | 2019-07-11 20:57:14 -0700 | [diff] [blame] | 2131 | #ifdef CONFIG_HAVE_FAST_GUP |
John Hubbard | 3faa52c | 2020-04-01 21:05:29 -0700 | [diff] [blame] | 2132 | |
Guenter Roeck | 790c736 | 2019-07-11 20:57:46 -0700 | [diff] [blame] | 2133 | static void __maybe_unused undo_dev_pagemap(int *nr, int nr_start, |
John Hubbard | 3b78d83 | 2020-04-01 21:05:22 -0700 | [diff] [blame] | 2134 | unsigned int flags, |
Guenter Roeck | 790c736 | 2019-07-11 20:57:46 -0700 | [diff] [blame] | 2135 | struct page **pages) |
Kirill A. Shutemov | b59f65f | 2017-03-16 18:26:53 +0300 | [diff] [blame] | 2136 | { |
| 2137 | while ((*nr) - nr_start) { |
| 2138 | struct page *page = pages[--(*nr)]; |
| 2139 | |
| 2140 | ClearPageReferenced(page); |
John Hubbard | 3faa52c | 2020-04-01 21:05:29 -0700 | [diff] [blame] | 2141 | if (flags & FOLL_PIN) |
| 2142 | unpin_user_page(page); |
| 2143 | else |
| 2144 | put_page(page); |
Kirill A. Shutemov | b59f65f | 2017-03-16 18:26:53 +0300 | [diff] [blame] | 2145 | } |
| 2146 | } |
| 2147 | |
Laurent Dufour | 3010a5e | 2018-06-07 17:06:08 -0700 | [diff] [blame] | 2148 | #ifdef CONFIG_ARCH_HAS_PTE_SPECIAL |
Steve Capper | 2667f50 | 2014-10-09 15:29:14 -0700 | [diff] [blame] | 2149 | static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end, |
Ira Weiny | b798bec | 2019-05-13 17:17:07 -0700 | [diff] [blame] | 2150 | unsigned int flags, struct page **pages, int *nr) |
Steve Capper | 2667f50 | 2014-10-09 15:29:14 -0700 | [diff] [blame] | 2151 | { |
Kirill A. Shutemov | b59f65f | 2017-03-16 18:26:53 +0300 | [diff] [blame] | 2152 | struct dev_pagemap *pgmap = NULL; |
| 2153 | int nr_start = *nr, ret = 0; |
Steve Capper | 2667f50 | 2014-10-09 15:29:14 -0700 | [diff] [blame] | 2154 | pte_t *ptep, *ptem; |
Steve Capper | 2667f50 | 2014-10-09 15:29:14 -0700 | [diff] [blame] | 2155 | |
| 2156 | ptem = ptep = pte_offset_map(&pmd, addr); |
| 2157 | do { |
Peter Zijlstra | 2a4a06d | 2020-11-13 11:41:40 +0100 | [diff] [blame] | 2158 | pte_t pte = ptep_get_lockless(ptep); |
Matthew Wilcox (Oracle) | b0496fe | 2021-12-10 15:54:11 -0500 | [diff] [blame] | 2159 | struct page *page; |
| 2160 | struct folio *folio; |
Steve Capper | 2667f50 | 2014-10-09 15:29:14 -0700 | [diff] [blame] | 2161 | |
| 2162 | /* |
| 2163 | * Similar to the PMD case below, NUMA hinting must take slow |
Mel Gorman | 8a0516e | 2015-02-12 14:58:22 -0800 | [diff] [blame] | 2164 | * path using the pte_protnone check. |
Steve Capper | 2667f50 | 2014-10-09 15:29:14 -0700 | [diff] [blame] | 2165 | */ |
Kirill A. Shutemov | e7884f8 | 2017-03-16 18:26:50 +0300 | [diff] [blame] | 2166 | if (pte_protnone(pte)) |
| 2167 | goto pte_unmap; |
| 2168 | |
Ira Weiny | b798bec | 2019-05-13 17:17:07 -0700 | [diff] [blame] | 2169 | if (!pte_access_permitted(pte, flags & FOLL_WRITE)) |
Kirill A. Shutemov | e7884f8 | 2017-03-16 18:26:50 +0300 | [diff] [blame] | 2170 | goto pte_unmap; |
| 2171 | |
Kirill A. Shutemov | b59f65f | 2017-03-16 18:26:53 +0300 | [diff] [blame] | 2172 | if (pte_devmap(pte)) { |
Ira Weiny | 7af7556 | 2019-05-13 17:17:14 -0700 | [diff] [blame] | 2173 | if (unlikely(flags & FOLL_LONGTERM)) |
| 2174 | goto pte_unmap; |
| 2175 | |
Kirill A. Shutemov | b59f65f | 2017-03-16 18:26:53 +0300 | [diff] [blame] | 2176 | pgmap = get_dev_pagemap(pte_pfn(pte), pgmap); |
| 2177 | if (unlikely(!pgmap)) { |
John Hubbard | 3b78d83 | 2020-04-01 21:05:22 -0700 | [diff] [blame] | 2178 | undo_dev_pagemap(nr, nr_start, flags, pages); |
Kirill A. Shutemov | b59f65f | 2017-03-16 18:26:53 +0300 | [diff] [blame] | 2179 | goto pte_unmap; |
| 2180 | } |
| 2181 | } else if (pte_special(pte)) |
Steve Capper | 2667f50 | 2014-10-09 15:29:14 -0700 | [diff] [blame] | 2182 | goto pte_unmap; |
| 2183 | |
| 2184 | VM_BUG_ON(!pfn_valid(pte_pfn(pte))); |
| 2185 | page = pte_page(pte); |
| 2186 | |
Matthew Wilcox (Oracle) | b0496fe | 2021-12-10 15:54:11 -0500 | [diff] [blame] | 2187 | folio = try_grab_folio(page, 1, flags); |
| 2188 | if (!folio) |
Steve Capper | 2667f50 | 2014-10-09 15:29:14 -0700 | [diff] [blame] | 2189 | goto pte_unmap; |
| 2190 | |
Mike Rapoport | 1507f51 | 2021-07-07 18:08:03 -0700 | [diff] [blame] | 2191 | if (unlikely(page_is_secretmem(page))) { |
Matthew Wilcox (Oracle) | b0496fe | 2021-12-10 15:54:11 -0500 | [diff] [blame] | 2192 | gup_put_folio(folio, 1, flags); |
Mike Rapoport | 1507f51 | 2021-07-07 18:08:03 -0700 | [diff] [blame] | 2193 | goto pte_unmap; |
| 2194 | } |
| 2195 | |
Steve Capper | 2667f50 | 2014-10-09 15:29:14 -0700 | [diff] [blame] | 2196 | if (unlikely(pte_val(pte) != pte_val(*ptep))) { |
Matthew Wilcox (Oracle) | b0496fe | 2021-12-10 15:54:11 -0500 | [diff] [blame] | 2197 | gup_put_folio(folio, 1, flags); |
Steve Capper | 2667f50 | 2014-10-09 15:29:14 -0700 | [diff] [blame] | 2198 | goto pte_unmap; |
| 2199 | } |
| 2200 | |
Claudio Imbrenda | f28d436 | 2020-04-01 21:05:56 -0700 | [diff] [blame] | 2201 | /* |
| 2202 | * We need to make the page accessible if and only if we are |
| 2203 | * going to access its content (the FOLL_PIN case). Please |
| 2204 | * see Documentation/core-api/pin_user_pages.rst for |
| 2205 | * details. |
| 2206 | */ |
| 2207 | if (flags & FOLL_PIN) { |
| 2208 | ret = arch_make_page_accessible(page); |
| 2209 | if (ret) { |
Matthew Wilcox (Oracle) | b0496fe | 2021-12-10 15:54:11 -0500 | [diff] [blame] | 2210 | gup_put_folio(folio, 1, flags); |
Claudio Imbrenda | f28d436 | 2020-04-01 21:05:56 -0700 | [diff] [blame] | 2211 | goto pte_unmap; |
| 2212 | } |
| 2213 | } |
Matthew Wilcox (Oracle) | b0496fe | 2021-12-10 15:54:11 -0500 | [diff] [blame] | 2214 | folio_set_referenced(folio); |
Steve Capper | 2667f50 | 2014-10-09 15:29:14 -0700 | [diff] [blame] | 2215 | pages[*nr] = page; |
| 2216 | (*nr)++; |
Steve Capper | 2667f50 | 2014-10-09 15:29:14 -0700 | [diff] [blame] | 2217 | } while (ptep++, addr += PAGE_SIZE, addr != end); |
| 2218 | |
| 2219 | ret = 1; |
| 2220 | |
| 2221 | pte_unmap: |
Christoph Hellwig | 832d7aa | 2017-12-29 08:54:01 +0100 | [diff] [blame] | 2222 | if (pgmap) |
| 2223 | put_dev_pagemap(pgmap); |
Steve Capper | 2667f50 | 2014-10-09 15:29:14 -0700 | [diff] [blame] | 2224 | pte_unmap(ptem); |
| 2225 | return ret; |
| 2226 | } |
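/*
 * The function above uses a snapshot-and-recheck idiom that recurs all
 * through fast GUP: read the pte locklessly, take a folio reference, then
 * re-read the pte and back out if it changed underneath us (a concurrent
 * unmap, COW or migration). In outline:
 *
 *	pte = ptep_get_lockless(ptep);
 *	folio = try_grab_folio(page, 1, flags);
 *	if (unlikely(pte_val(pte) != pte_val(*ptep))) {
 *		gup_put_folio(folio, 1, flags);	/* raced: undo, use slow path */
 *		goto pte_unmap;
 *	}
 */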
| 2227 | #else |
| 2228 | |
| 2229 | /* |
| 2230 | * If we can't determine whether or not a pte is special, then fail immediately |
| 2231 | * for ptes. Note that we can still pin HugeTLB and THP as these are guaranteed not
| 2232 | * to be special. |
| 2233 | * |
| 2234 | * For a futex to be placed on a THP tail page, get_futex_key requires a |
Souptick Joarder | dadbb61 | 2020-06-07 21:40:55 -0700 | [diff] [blame] | 2235 | * get_user_pages_fast_only implementation that can pin pages. Thus it's still |
Steve Capper | 2667f50 | 2014-10-09 15:29:14 -0700 | [diff] [blame] | 2236 | * useful to have gup_huge_pmd even if we can't operate on ptes. |
| 2237 | */ |
| 2238 | static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end, |
Ira Weiny | b798bec | 2019-05-13 17:17:07 -0700 | [diff] [blame] | 2239 | unsigned int flags, struct page **pages, int *nr) |
Steve Capper | 2667f50 | 2014-10-09 15:29:14 -0700 | [diff] [blame] | 2240 | { |
| 2241 | return 0; |
| 2242 | } |
Laurent Dufour | 3010a5e | 2018-06-07 17:06:08 -0700 | [diff] [blame] | 2243 | #endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */ |
Steve Capper | 2667f50 | 2014-10-09 15:29:14 -0700 | [diff] [blame] | 2244 | |
Robin Murphy | 1759673 | 2019-07-16 16:30:47 -0700 | [diff] [blame] | 2245 | #if defined(CONFIG_ARCH_HAS_PTE_DEVMAP) && defined(CONFIG_TRANSPARENT_HUGEPAGE) |
Kirill A. Shutemov | b59f65f | 2017-03-16 18:26:53 +0300 | [diff] [blame] | 2246 | static int __gup_device_huge(unsigned long pfn, unsigned long addr, |
John Hubbard | 86dfbed | 2020-04-01 21:05:14 -0700 | [diff] [blame] | 2247 | unsigned long end, unsigned int flags, |
| 2248 | struct page **pages, int *nr) |
Kirill A. Shutemov | b59f65f | 2017-03-16 18:26:53 +0300 | [diff] [blame] | 2249 | { |
| 2250 | int nr_start = *nr; |
| 2251 | struct dev_pagemap *pgmap = NULL; |
| 2252 | |
| 2253 | do { |
| 2254 | struct page *page = pfn_to_page(pfn); |
| 2255 | |
| 2256 | pgmap = get_dev_pagemap(pfn, pgmap); |
| 2257 | if (unlikely(!pgmap)) { |
John Hubbard | 3b78d83 | 2020-04-01 21:05:22 -0700 | [diff] [blame] | 2258 | undo_dev_pagemap(nr, nr_start, flags, pages); |
Miaohe Lin | 6401c4e | 2021-09-02 14:53:42 -0700 | [diff] [blame] | 2259 | break; |
Kirill A. Shutemov | b59f65f | 2017-03-16 18:26:53 +0300 | [diff] [blame] | 2260 | } |
| 2261 | SetPageReferenced(page); |
| 2262 | pages[*nr] = page; |
John Hubbard | 3faa52c | 2020-04-01 21:05:29 -0700 | [diff] [blame] | 2263 | if (unlikely(!try_grab_page(page, flags))) { |
| 2264 | undo_dev_pagemap(nr, nr_start, flags, pages); |
Miaohe Lin | 6401c4e | 2021-09-02 14:53:42 -0700 | [diff] [blame] | 2265 | break; |
John Hubbard | 3faa52c | 2020-04-01 21:05:29 -0700 | [diff] [blame] | 2266 | } |
Kirill A. Shutemov | b59f65f | 2017-03-16 18:26:53 +0300 | [diff] [blame] | 2267 | (*nr)++; |
| 2268 | pfn++; |
| 2269 | } while (addr += PAGE_SIZE, addr != end); |
Christoph Hellwig | 832d7aa | 2017-12-29 08:54:01 +0100 | [diff] [blame] | 2270 | |
Miaohe Lin | 6401c4e | 2021-09-02 14:53:42 -0700 | [diff] [blame] | 2271 | put_dev_pagemap(pgmap); |
John Hubbard | 20b7fee | 2021-11-05 13:37:16 -0700 | [diff] [blame] | 2272 | return addr == end; |
Kirill A. Shutemov | b59f65f | 2017-03-16 18:26:53 +0300 | [diff] [blame] | 2273 | } |
| 2274 | |
Dan Williams | a9b6de7 | 2018-04-19 21:32:19 -0700 | [diff] [blame] | 2275 | static int __gup_device_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr, |
John Hubbard | 86dfbed | 2020-04-01 21:05:14 -0700 | [diff] [blame] | 2276 | unsigned long end, unsigned int flags, |
| 2277 | struct page **pages, int *nr) |
Kirill A. Shutemov | b59f65f | 2017-03-16 18:26:53 +0300 | [diff] [blame] | 2278 | { |
| 2279 | unsigned long fault_pfn; |
Dan Williams | a9b6de7 | 2018-04-19 21:32:19 -0700 | [diff] [blame] | 2280 | int nr_start = *nr; |
Kirill A. Shutemov | b59f65f | 2017-03-16 18:26:53 +0300 | [diff] [blame] | 2281 | |
Dan Williams | a9b6de7 | 2018-04-19 21:32:19 -0700 | [diff] [blame] | 2282 | fault_pfn = pmd_pfn(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT); |
John Hubbard | 86dfbed | 2020-04-01 21:05:14 -0700 | [diff] [blame] | 2283 | if (!__gup_device_huge(fault_pfn, addr, end, flags, pages, nr)) |
Dan Williams | a9b6de7 | 2018-04-19 21:32:19 -0700 | [diff] [blame] | 2284 | return 0; |
| 2285 | |
| 2286 | if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) { |
John Hubbard | 3b78d83 | 2020-04-01 21:05:22 -0700 | [diff] [blame] | 2287 | undo_dev_pagemap(nr, nr_start, flags, pages); |
Dan Williams | a9b6de7 | 2018-04-19 21:32:19 -0700 | [diff] [blame] | 2288 | return 0; |
| 2289 | } |
| 2290 | return 1; |
Kirill A. Shutemov | b59f65f | 2017-03-16 18:26:53 +0300 | [diff] [blame] | 2291 | } |
| 2292 | |
Dan Williams | a9b6de7 | 2018-04-19 21:32:19 -0700 | [diff] [blame] | 2293 | static int __gup_device_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr, |
John Hubbard | 86dfbed | 2020-04-01 21:05:14 -0700 | [diff] [blame] | 2294 | unsigned long end, unsigned int flags, |
| 2295 | struct page **pages, int *nr) |
Kirill A. Shutemov | b59f65f | 2017-03-16 18:26:53 +0300 | [diff] [blame] | 2296 | { |
| 2297 | unsigned long fault_pfn; |
Dan Williams | a9b6de7 | 2018-04-19 21:32:19 -0700 | [diff] [blame] | 2298 | int nr_start = *nr; |
Kirill A. Shutemov | b59f65f | 2017-03-16 18:26:53 +0300 | [diff] [blame] | 2299 | |
Dan Williams | a9b6de7 | 2018-04-19 21:32:19 -0700 | [diff] [blame] | 2300 | fault_pfn = pud_pfn(orig) + ((addr & ~PUD_MASK) >> PAGE_SHIFT); |
John Hubbard | 86dfbed | 2020-04-01 21:05:14 -0700 | [diff] [blame] | 2301 | if (!__gup_device_huge(fault_pfn, addr, end, flags, pages, nr)) |
Dan Williams | a9b6de7 | 2018-04-19 21:32:19 -0700 | [diff] [blame] | 2302 | return 0; |
| 2303 | |
| 2304 | if (unlikely(pud_val(orig) != pud_val(*pudp))) { |
John Hubbard | 3b78d83 | 2020-04-01 21:05:22 -0700 | [diff] [blame] | 2305 | undo_dev_pagemap(nr, nr_start, flags, pages); |
Dan Williams | a9b6de7 | 2018-04-19 21:32:19 -0700 | [diff] [blame] | 2306 | return 0; |
| 2307 | } |
| 2308 | return 1; |
Kirill A. Shutemov | b59f65f | 2017-03-16 18:26:53 +0300 | [diff] [blame] | 2309 | } |
| 2310 | #else |
Dan Williams | a9b6de7 | 2018-04-19 21:32:19 -0700 | [diff] [blame] | 2311 | static int __gup_device_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr, |
John Hubbard | 86dfbed | 2020-04-01 21:05:14 -0700 | [diff] [blame] | 2312 | unsigned long end, unsigned int flags, |
| 2313 | struct page **pages, int *nr) |
Kirill A. Shutemov | b59f65f | 2017-03-16 18:26:53 +0300 | [diff] [blame] | 2314 | { |
| 2315 | BUILD_BUG(); |
| 2316 | return 0; |
| 2317 | } |
| 2318 | |
Dan Williams | a9b6de7 | 2018-04-19 21:32:19 -0700 | [diff] [blame] | 2319 | static int __gup_device_huge_pud(pud_t pud, pud_t *pudp, unsigned long addr, |
John Hubbard | 86dfbed | 2020-04-01 21:05:14 -0700 | [diff] [blame] | 2320 | unsigned long end, unsigned int flags, |
| 2321 | struct page **pages, int *nr) |
Kirill A. Shutemov | b59f65f | 2017-03-16 18:26:53 +0300 | [diff] [blame] | 2322 | { |
| 2323 | BUILD_BUG(); |
| 2324 | return 0; |
| 2325 | } |
| 2326 | #endif |
| 2327 | |
John Hubbard | a43e982 | 2020-01-30 22:12:17 -0800 | [diff] [blame] | 2328 | static int record_subpages(struct page *page, unsigned long addr, |
| 2329 | unsigned long end, struct page **pages) |
| 2330 | { |
| 2331 | int nr; |
| 2332 | |
Matthew Wilcox (Oracle) | c228afb1 | 2022-01-07 13:25:55 -0500 | [diff] [blame] | 2333 | for (nr = 0; addr != end; nr++, addr += PAGE_SIZE) |
| 2334 | pages[nr] = nth_page(page, nr); |
John Hubbard | a43e982 | 2020-01-30 22:12:17 -0800 | [diff] [blame] | 2335 | |
| 2336 | return nr; |
| 2337 | } |
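/*
 * Worked example (illustrative): for a PMD-mapped huge page where the
 * request covers addr .. addr + 3 * PAGE_SIZE, record_subpages() stores
 * nth_page(page, 0), nth_page(page, 1) and nth_page(page, 2) into pages[]
 * and returns 3; the caller then takes that many references at once with
 * try_grab_folio(page, refs, flags).
 */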
| 2338 | |
Christoph Hellwig | cbd34da | 2019-07-11 20:57:28 -0700 | [diff] [blame] | 2339 | #ifdef CONFIG_ARCH_HAS_HUGEPD |
| 2340 | static unsigned long hugepte_addr_end(unsigned long addr, unsigned long end, |
| 2341 | unsigned long sz) |
| 2342 | { |
| 2343 | unsigned long __boundary = (addr + sz) & ~(sz-1); |
| 2344 | return (__boundary - 1 < end - 1) ? __boundary : end; |
| 2345 | } |
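/*
 * Worked example (illustrative): with sz = 16MB (0x01000000),
 * addr = 0x01234000 and end = 0x03000000, __boundary becomes
 * (addr + sz) & ~(sz - 1) = 0x02000000, which is below end, so the
 * walk in gup_huge_pd() advances one 16MB hugepte at a time.
 */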
| 2346 | |
| 2347 | static int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr, |
John Hubbard | 0cd22af | 2019-10-18 20:19:53 -0700 | [diff] [blame] | 2348 | unsigned long end, unsigned int flags, |
| 2349 | struct page **pages, int *nr) |
Christoph Hellwig | cbd34da | 2019-07-11 20:57:28 -0700 | [diff] [blame] | 2350 | { |
| 2351 | unsigned long pte_end; |
Matthew Wilcox (Oracle) | 09a1626e | 2021-12-22 16:38:30 -0500 | [diff] [blame] | 2352 | struct page *page; |
| 2353 | struct folio *folio; |
Christoph Hellwig | cbd34da | 2019-07-11 20:57:28 -0700 | [diff] [blame] | 2354 | pte_t pte; |
| 2355 | int refs; |
| 2356 | |
| 2357 | pte_end = (addr + sz) & ~(sz-1); |
| 2358 | if (pte_end < end) |
| 2359 | end = pte_end; |
| 2360 | |
Christophe Leroy | 55ca226 | 2020-06-15 12:57:57 +0000 | [diff] [blame] | 2361 | pte = huge_ptep_get(ptep); |
Christoph Hellwig | cbd34da | 2019-07-11 20:57:28 -0700 | [diff] [blame] | 2362 | |
John Hubbard | 0cd22af | 2019-10-18 20:19:53 -0700 | [diff] [blame] | 2363 | if (!pte_access_permitted(pte, flags & FOLL_WRITE)) |
Christoph Hellwig | cbd34da | 2019-07-11 20:57:28 -0700 | [diff] [blame] | 2364 | return 0; |
| 2365 | |
| 2366 | /* hugepages are never "special" */ |
| 2367 | VM_BUG_ON(!pfn_valid(pte_pfn(pte))); |
| 2368 | |
Matthew Wilcox (Oracle) | 09a1626e | 2021-12-22 16:38:30 -0500 | [diff] [blame] | 2369 | page = nth_page(pte_page(pte), (addr & (sz - 1)) >> PAGE_SHIFT); |
John Hubbard | a43e982 | 2020-01-30 22:12:17 -0800 | [diff] [blame] | 2370 | refs = record_subpages(page, addr, end, pages + *nr); |
Christoph Hellwig | cbd34da | 2019-07-11 20:57:28 -0700 | [diff] [blame] | 2371 | |
Matthew Wilcox (Oracle) | 09a1626e | 2021-12-22 16:38:30 -0500 | [diff] [blame] | 2372 | folio = try_grab_folio(page, refs, flags); |
| 2373 | if (!folio) |
Christoph Hellwig | cbd34da | 2019-07-11 20:57:28 -0700 | [diff] [blame] | 2374 | return 0; |
Christoph Hellwig | cbd34da | 2019-07-11 20:57:28 -0700 | [diff] [blame] | 2375 | |
| 2376 | if (unlikely(pte_val(pte) != pte_val(*ptep))) { |
Matthew Wilcox (Oracle) | 09a1626e | 2021-12-22 16:38:30 -0500 | [diff] [blame] | 2377 | gup_put_folio(folio, refs, flags); |
Christoph Hellwig | cbd34da | 2019-07-11 20:57:28 -0700 | [diff] [blame] | 2378 | return 0; |
| 2379 | } |
| 2380 | |
John Hubbard | a43e982 | 2020-01-30 22:12:17 -0800 | [diff] [blame] | 2381 | *nr += refs; |
Matthew Wilcox (Oracle) | 09a1626e | 2021-12-22 16:38:30 -0500 | [diff] [blame] | 2382 | folio_set_referenced(folio); |
Christoph Hellwig | cbd34da | 2019-07-11 20:57:28 -0700 | [diff] [blame] | 2383 | return 1; |
| 2384 | } |
| 2385 | |
| 2386 | static int gup_huge_pd(hugepd_t hugepd, unsigned long addr, |
John Hubbard | 0cd22af | 2019-10-18 20:19:53 -0700 | [diff] [blame] | 2387 | unsigned int pdshift, unsigned long end, unsigned int flags, |
Christoph Hellwig | cbd34da | 2019-07-11 20:57:28 -0700 | [diff] [blame] | 2388 | struct page **pages, int *nr) |
| 2389 | { |
| 2390 | pte_t *ptep; |
| 2391 | unsigned long sz = 1UL << hugepd_shift(hugepd); |
| 2392 | unsigned long next; |
| 2393 | |
| 2394 | ptep = hugepte_offset(hugepd, addr, pdshift); |
| 2395 | do { |
| 2396 | next = hugepte_addr_end(addr, end, sz); |
John Hubbard | 0cd22af | 2019-10-18 20:19:53 -0700 | [diff] [blame] | 2397 | if (!gup_hugepte(ptep, sz, addr, end, flags, pages, nr)) |
Christoph Hellwig | cbd34da | 2019-07-11 20:57:28 -0700 | [diff] [blame] | 2398 | return 0; |
| 2399 | } while (ptep++, addr = next, addr != end); |
| 2400 | |
| 2401 | return 1; |
| 2402 | } |
| 2403 | #else |
| 2404 | static inline int gup_huge_pd(hugepd_t hugepd, unsigned long addr, |
John Hubbard | 0cd22af | 2019-10-18 20:19:53 -0700 | [diff] [blame] | 2405 | unsigned int pdshift, unsigned long end, unsigned int flags, |
Christoph Hellwig | cbd34da | 2019-07-11 20:57:28 -0700 | [diff] [blame] | 2406 | struct page **pages, int *nr) |
| 2407 | { |
| 2408 | return 0; |
| 2409 | } |
| 2410 | #endif /* CONFIG_ARCH_HAS_HUGEPD */ |
| 2411 | |
Steve Capper | 2667f50 | 2014-10-09 15:29:14 -0700 | [diff] [blame] | 2412 | static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr, |
John Hubbard | 0cd22af | 2019-10-18 20:19:53 -0700 | [diff] [blame] | 2413 | unsigned long end, unsigned int flags, |
| 2414 | struct page **pages, int *nr) |
Steve Capper | 2667f50 | 2014-10-09 15:29:14 -0700 | [diff] [blame] | 2415 | { |
Matthew Wilcox (Oracle) | 667ed1f | 2021-12-22 16:57:23 -0500 | [diff] [blame] | 2416 | struct page *page; |
| 2417 | struct folio *folio; |
Steve Capper | 2667f50 | 2014-10-09 15:29:14 -0700 | [diff] [blame] | 2418 | int refs; |
| 2419 | |
Ira Weiny | b798bec | 2019-05-13 17:17:07 -0700 | [diff] [blame] | 2420 | if (!pmd_access_permitted(orig, flags & FOLL_WRITE)) |
Steve Capper | 2667f50 | 2014-10-09 15:29:14 -0700 | [diff] [blame] | 2421 | return 0; |
| 2422 | |
Ira Weiny | 7af7556 | 2019-05-13 17:17:14 -0700 | [diff] [blame] | 2423 | if (pmd_devmap(orig)) { |
| 2424 | if (unlikely(flags & FOLL_LONGTERM)) |
| 2425 | return 0; |
John Hubbard | 86dfbed | 2020-04-01 21:05:14 -0700 | [diff] [blame] | 2426 | return __gup_device_huge_pmd(orig, pmdp, addr, end, flags, |
| 2427 | pages, nr); |
Ira Weiny | 7af7556 | 2019-05-13 17:17:14 -0700 | [diff] [blame] | 2428 | } |
Kirill A. Shutemov | b59f65f | 2017-03-16 18:26:53 +0300 | [diff] [blame] | 2429 | |
Matthew Wilcox (Oracle) | c228afb1 | 2022-01-07 13:25:55 -0500 | [diff] [blame] | 2430 | page = nth_page(pmd_page(orig), (addr & ~PMD_MASK) >> PAGE_SHIFT); |
John Hubbard | a43e982 | 2020-01-30 22:12:17 -0800 | [diff] [blame] | 2431 | refs = record_subpages(page, addr, end, pages + *nr); |
Steve Capper | 2667f50 | 2014-10-09 15:29:14 -0700 | [diff] [blame] | 2432 | |
Matthew Wilcox (Oracle) | 667ed1f | 2021-12-22 16:57:23 -0500 | [diff] [blame] | 2433 | folio = try_grab_folio(page, refs, flags); |
| 2434 | if (!folio) |
Steve Capper | 2667f50 | 2014-10-09 15:29:14 -0700 | [diff] [blame] | 2435 | return 0; |
Steve Capper | 2667f50 | 2014-10-09 15:29:14 -0700 | [diff] [blame] | 2436 | |
| 2437 | if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) { |
Matthew Wilcox (Oracle) | 667ed1f | 2021-12-22 16:57:23 -0500 | [diff] [blame] | 2438 | gup_put_folio(folio, refs, flags); |
Steve Capper | 2667f50 | 2014-10-09 15:29:14 -0700 | [diff] [blame] | 2439 | return 0; |
| 2440 | } |
| 2441 | |
John Hubbard | a43e982 | 2020-01-30 22:12:17 -0800 | [diff] [blame] | 2442 | *nr += refs; |
Matthew Wilcox (Oracle) | 667ed1f | 2021-12-22 16:57:23 -0500 | [diff] [blame] | 2443 | folio_set_referenced(folio); |
Steve Capper | 2667f50 | 2014-10-09 15:29:14 -0700 | [diff] [blame] | 2444 | return 1; |
| 2445 | } |
| 2446 | |
| 2447 | static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr, |
John Hubbard | 86dfbed | 2020-04-01 21:05:14 -0700 | [diff] [blame] | 2448 | unsigned long end, unsigned int flags, |
| 2449 | struct page **pages, int *nr) |
Steve Capper | 2667f50 | 2014-10-09 15:29:14 -0700 | [diff] [blame] | 2450 | { |
Matthew Wilcox (Oracle) | 83afb52 | 2021-12-22 18:07:47 -0500 | [diff] [blame] | 2451 | struct page *page; |
| 2452 | struct folio *folio; |
Steve Capper | 2667f50 | 2014-10-09 15:29:14 -0700 | [diff] [blame] | 2453 | int refs; |
| 2454 | |
Ira Weiny | b798bec | 2019-05-13 17:17:07 -0700 | [diff] [blame] | 2455 | if (!pud_access_permitted(orig, flags & FOLL_WRITE)) |
Steve Capper | 2667f50 | 2014-10-09 15:29:14 -0700 | [diff] [blame] | 2456 | return 0; |
| 2457 | |
Ira Weiny | 7af7556 | 2019-05-13 17:17:14 -0700 | [diff] [blame] | 2458 | if (pud_devmap(orig)) { |
| 2459 | if (unlikely(flags & FOLL_LONGTERM)) |
| 2460 | return 0; |
John Hubbard | 86dfbed | 2020-04-01 21:05:14 -0700 | [diff] [blame] | 2461 | return __gup_device_huge_pud(orig, pudp, addr, end, flags, |
| 2462 | pages, nr); |
Ira Weiny | 7af7556 | 2019-05-13 17:17:14 -0700 | [diff] [blame] | 2463 | } |
Kirill A. Shutemov | b59f65f | 2017-03-16 18:26:53 +0300 | [diff] [blame] | 2464 | |
Matthew Wilcox (Oracle) | c228afb1 | 2022-01-07 13:25:55 -0500 | [diff] [blame] | 2465 | page = nth_page(pud_page(orig), (addr & ~PUD_MASK) >> PAGE_SHIFT); |
John Hubbard | a43e982 | 2020-01-30 22:12:17 -0800 | [diff] [blame] | 2466 | refs = record_subpages(page, addr, end, pages + *nr); |
Steve Capper | 2667f50 | 2014-10-09 15:29:14 -0700 | [diff] [blame] | 2467 | |
Matthew Wilcox (Oracle) | 83afb52 | 2021-12-22 18:07:47 -0500 | [diff] [blame] | 2468 | folio = try_grab_folio(page, refs, flags); |
| 2469 | if (!folio) |
Steve Capper | 2667f50 | 2014-10-09 15:29:14 -0700 | [diff] [blame] | 2470 | return 0; |
Steve Capper | 2667f50 | 2014-10-09 15:29:14 -0700 | [diff] [blame] | 2471 | |
| 2472 | if (unlikely(pud_val(orig) != pud_val(*pudp))) { |
Matthew Wilcox (Oracle) | 83afb52 | 2021-12-22 18:07:47 -0500 | [diff] [blame] | 2473 | gup_put_folio(folio, refs, flags); |
Steve Capper | 2667f50 | 2014-10-09 15:29:14 -0700 | [diff] [blame] | 2474 | return 0; |
| 2475 | } |
| 2476 | |
John Hubbard | a43e982 | 2020-01-30 22:12:17 -0800 | [diff] [blame] | 2477 | *nr += refs; |
Matthew Wilcox (Oracle) | 83afb52 | 2021-12-22 18:07:47 -0500 | [diff] [blame] | 2478 | folio_set_referenced(folio); |
Steve Capper | 2667f50 | 2014-10-09 15:29:14 -0700 | [diff] [blame] | 2479 | return 1; |
| 2480 | } |
| 2481 | |
Aneesh Kumar K.V | f30c59e | 2014-11-05 21:57:40 +0530 | [diff] [blame] | 2482 | static int gup_huge_pgd(pgd_t orig, pgd_t *pgdp, unsigned long addr, |
Ira Weiny | b798bec | 2019-05-13 17:17:07 -0700 | [diff] [blame] | 2483 | unsigned long end, unsigned int flags, |
Aneesh Kumar K.V | f30c59e | 2014-11-05 21:57:40 +0530 | [diff] [blame] | 2484 | struct page **pages, int *nr) |
| 2485 | { |
| 2486 | int refs; |
Matthew Wilcox (Oracle) | 2d7919a | 2021-12-22 22:30:29 -0500 | [diff] [blame] | 2487 | struct page *page; |
| 2488 | struct folio *folio; |
Aneesh Kumar K.V | f30c59e | 2014-11-05 21:57:40 +0530 | [diff] [blame] | 2489 | |
Ira Weiny | b798bec | 2019-05-13 17:17:07 -0700 | [diff] [blame] | 2490 | if (!pgd_access_permitted(orig, flags & FOLL_WRITE)) |
Aneesh Kumar K.V | f30c59e | 2014-11-05 21:57:40 +0530 | [diff] [blame] | 2491 | return 0; |
| 2492 | |
Kirill A. Shutemov | b59f65f | 2017-03-16 18:26:53 +0300 | [diff] [blame] | 2493 | BUILD_BUG_ON(pgd_devmap(orig)); |
John Hubbard | a43e982 | 2020-01-30 22:12:17 -0800 | [diff] [blame] | 2494 | |
Matthew Wilcox (Oracle) | c228afb1 | 2022-01-07 13:25:55 -0500 | [diff] [blame] | 2495 | page = nth_page(pgd_page(orig), (addr & ~PGDIR_MASK) >> PAGE_SHIFT); |
John Hubbard | a43e982 | 2020-01-30 22:12:17 -0800 | [diff] [blame] | 2496 | refs = record_subpages(page, addr, end, pages + *nr); |
Aneesh Kumar K.V | f30c59e | 2014-11-05 21:57:40 +0530 | [diff] [blame] | 2497 | |
Matthew Wilcox (Oracle) | 2d7919a | 2021-12-22 22:30:29 -0500 | [diff] [blame] | 2498 | folio = try_grab_folio(page, refs, flags); |
| 2499 | if (!folio) |
Aneesh Kumar K.V | f30c59e | 2014-11-05 21:57:40 +0530 | [diff] [blame] | 2500 | return 0; |
Aneesh Kumar K.V | f30c59e | 2014-11-05 21:57:40 +0530 | [diff] [blame] | 2501 | |
| 2502 | if (unlikely(pgd_val(orig) != pgd_val(*pgdp))) { |
Matthew Wilcox (Oracle) | 2d7919a | 2021-12-22 22:30:29 -0500 | [diff] [blame] | 2503 | gup_put_folio(folio, refs, flags); |
Aneesh Kumar K.V | f30c59e | 2014-11-05 21:57:40 +0530 | [diff] [blame] | 2504 | return 0; |
| 2505 | } |
| 2506 | |
John Hubbard | a43e982 | 2020-01-30 22:12:17 -0800 | [diff] [blame] | 2507 | *nr += refs; |
Matthew Wilcox (Oracle) | 2d7919a | 2021-12-22 22:30:29 -0500 | [diff] [blame] | 2508 | folio_set_referenced(folio); |
Aneesh Kumar K.V | f30c59e | 2014-11-05 21:57:40 +0530 | [diff] [blame] | 2509 | return 1; |
| 2510 | } |
| 2511 | |
Vasily Gorbik | d3f7b1b | 2020-09-25 21:19:10 -0700 | [diff] [blame] | 2512 | static int gup_pmd_range(pud_t *pudp, pud_t pud, unsigned long addr, unsigned long end, |
Ira Weiny | b798bec | 2019-05-13 17:17:07 -0700 | [diff] [blame] | 2513 | unsigned int flags, struct page **pages, int *nr) |
Steve Capper | 2667f50 | 2014-10-09 15:29:14 -0700 | [diff] [blame] | 2514 | { |
| 2515 | unsigned long next; |
| 2516 | pmd_t *pmdp; |
| 2517 | |
Vasily Gorbik | d3f7b1b | 2020-09-25 21:19:10 -0700 | [diff] [blame] | 2518 | pmdp = pmd_offset_lockless(pudp, pud, addr); |
Steve Capper | 2667f50 | 2014-10-09 15:29:14 -0700 | [diff] [blame] | 2519 | do { |
Christian Borntraeger | 38c5ce9 | 2015-01-06 22:54:46 +0100 | [diff] [blame] | 2520 | pmd_t pmd = READ_ONCE(*pmdp); |
Steve Capper | 2667f50 | 2014-10-09 15:29:14 -0700 | [diff] [blame] | 2521 | |
| 2522 | next = pmd_addr_end(addr, end); |
Zi Yan | 84c3fc4 | 2017-09-08 16:11:01 -0700 | [diff] [blame] | 2523 | if (!pmd_present(pmd)) |
Steve Capper | 2667f50 | 2014-10-09 15:29:14 -0700 | [diff] [blame] | 2524 | return 0; |
| 2525 | |
Yu Zhao | 414fd08 | 2019-02-12 15:35:58 -0800 | [diff] [blame] | 2526 | if (unlikely(pmd_trans_huge(pmd) || pmd_huge(pmd) || |
| 2527 | pmd_devmap(pmd))) { |
Steve Capper | 2667f50 | 2014-10-09 15:29:14 -0700 | [diff] [blame] | 2528 | /* |
| 2529 | * NUMA hinting faults need to be handled in the GUP |
| 2530 | * slowpath for accounting purposes and so that they |
| 2531 | * can be serialised against THP migration. |
| 2532 | */ |
Mel Gorman | 8a0516e | 2015-02-12 14:58:22 -0800 | [diff] [blame] | 2533 | if (pmd_protnone(pmd)) |
Steve Capper | 2667f50 | 2014-10-09 15:29:14 -0700 | [diff] [blame] | 2534 | return 0; |
| 2535 | |
Ira Weiny | b798bec | 2019-05-13 17:17:07 -0700 | [diff] [blame] | 2536 | if (!gup_huge_pmd(pmd, pmdp, addr, next, flags, |
Steve Capper | 2667f50 | 2014-10-09 15:29:14 -0700 | [diff] [blame] | 2537 | pages, nr)) |
| 2538 | return 0; |
| 2539 | |
Aneesh Kumar K.V | f30c59e | 2014-11-05 21:57:40 +0530 | [diff] [blame] | 2540 | } else if (unlikely(is_hugepd(__hugepd(pmd_val(pmd))))) { |
| 2541 | /* |
| 2542 | * architectures can have a different pmd format for
| 2543 | * hugetlbfs than for THP
| 2544 | */ |
| 2545 | if (!gup_huge_pd(__hugepd(pmd_val(pmd)), addr, |
Ira Weiny | b798bec | 2019-05-13 17:17:07 -0700 | [diff] [blame] | 2546 | PMD_SHIFT, next, flags, pages, nr)) |
Aneesh Kumar K.V | f30c59e | 2014-11-05 21:57:40 +0530 | [diff] [blame] | 2547 | return 0; |
Ira Weiny | b798bec | 2019-05-13 17:17:07 -0700 | [diff] [blame] | 2548 | } else if (!gup_pte_range(pmd, addr, next, flags, pages, nr)) |
Mario Leinweber | 2923117 | 2018-04-05 16:24:18 -0700 | [diff] [blame] | 2549 | return 0; |
Steve Capper | 2667f50 | 2014-10-09 15:29:14 -0700 | [diff] [blame] | 2550 | } while (pmdp++, addr = next, addr != end); |
| 2551 | |
| 2552 | return 1; |
| 2553 | } |
| 2554 | |
Vasily Gorbik | d3f7b1b | 2020-09-25 21:19:10 -0700 | [diff] [blame] | 2555 | static int gup_pud_range(p4d_t *p4dp, p4d_t p4d, unsigned long addr, unsigned long end, |
Ira Weiny | b798bec | 2019-05-13 17:17:07 -0700 | [diff] [blame] | 2556 | unsigned int flags, struct page **pages, int *nr) |
Steve Capper | 2667f50 | 2014-10-09 15:29:14 -0700 | [diff] [blame] | 2557 | { |
| 2558 | unsigned long next; |
| 2559 | pud_t *pudp; |
| 2560 | |
Vasily Gorbik | d3f7b1b | 2020-09-25 21:19:10 -0700 | [diff] [blame] | 2561 | pudp = pud_offset_lockless(p4dp, p4d, addr); |
	do {
		pud_t pud = READ_ONCE(*pudp);

		next = pud_addr_end(addr, end);
		if (unlikely(!pud_present(pud)))
			return 0;
		if (unlikely(pud_huge(pud))) {
			if (!gup_huge_pud(pud, pudp, addr, next, flags,
					  pages, nr))
				return 0;
		} else if (unlikely(is_hugepd(__hugepd(pud_val(pud))))) {
			if (!gup_huge_pd(__hugepd(pud_val(pud)), addr,
					 PUD_SHIFT, next, flags, pages, nr))
				return 0;
		} else if (!gup_pmd_range(pudp, pud, addr, next, flags, pages, nr))
			return 0;
	} while (pudp++, addr = next, addr != end);

	return 1;
}

static int gup_p4d_range(pgd_t *pgdp, pgd_t pgd, unsigned long addr, unsigned long end,
			 unsigned int flags, struct page **pages, int *nr)
{
	unsigned long next;
	p4d_t *p4dp;

	p4dp = p4d_offset_lockless(pgdp, pgd, addr);
	do {
		p4d_t p4d = READ_ONCE(*p4dp);

		next = p4d_addr_end(addr, end);
		if (p4d_none(p4d))
			return 0;
		BUILD_BUG_ON(p4d_huge(p4d));
		if (unlikely(is_hugepd(__hugepd(p4d_val(p4d))))) {
			if (!gup_huge_pd(__hugepd(p4d_val(p4d)), addr,
					 P4D_SHIFT, next, flags, pages, nr))
				return 0;
		} else if (!gup_pud_range(p4dp, p4d, addr, next, flags, pages, nr))
			return 0;
	} while (p4dp++, addr = next, addr != end);

	return 1;
}

static void gup_pgd_range(unsigned long addr, unsigned long end,
		unsigned int flags, struct page **pages, int *nr)
{
	unsigned long next;
	pgd_t *pgdp;

	pgdp = pgd_offset(current->mm, addr);
	do {
		pgd_t pgd = READ_ONCE(*pgdp);

		next = pgd_addr_end(addr, end);
		if (pgd_none(pgd))
			return;
		if (unlikely(pgd_huge(pgd))) {
			if (!gup_huge_pgd(pgd, pgdp, addr, next, flags,
					  pages, nr))
				return;
		} else if (unlikely(is_hugepd(__hugepd(pgd_val(pgd))))) {
			if (!gup_huge_pd(__hugepd(pgd_val(pgd)), addr,
					 PGDIR_SHIFT, next, flags, pages, nr))
				return;
		} else if (!gup_p4d_range(pgdp, pgd, addr, next, flags, pages, nr))
			return;
	} while (pgdp++, addr = next, addr != end);
}
#else
static inline void gup_pgd_range(unsigned long addr, unsigned long end,
		unsigned int flags, struct page **pages, int *nr)
{
}
#endif /* CONFIG_HAVE_FAST_GUP */

#ifndef gup_fast_permitted
/*
 * Check if it's allowed to use get_user_pages_fast_only() for the range, or
 * whether we need to fall back to the slow version:
 */
static bool gup_fast_permitted(unsigned long start, unsigned long end)
{
	return true;
}
#endif
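
/*
 * For illustration only (not part of this file): an architecture that cannot
 * walk some user ranges locklessly can override the helper above. A minimal
 * sketch, assuming a hypothetical arch header where TASK_SIZE_MAX bounds the
 * user address space:
 *
 *	#define gup_fast_permitted gup_fast_permitted
 *	static inline bool gup_fast_permitted(unsigned long start,
 *					      unsigned long end)
 *	{
 *		return end <= TASK_SIZE_MAX;
 *	}
 *
 * Returning false is not an error: it merely steers
 * internal_get_user_pages_fast() onto the slow-path fallback.
 */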

static int __gup_longterm_unlocked(unsigned long start, int nr_pages,
				   unsigned int gup_flags, struct page **pages)
{
	int ret;

	/*
	 * FIXME: FOLL_LONGTERM does not work with
	 * get_user_pages_unlocked() (see comments in that function)
	 */
	if (gup_flags & FOLL_LONGTERM) {
		mmap_read_lock(current->mm);
		ret = __gup_longterm_locked(current->mm,
					    start, nr_pages,
					    pages, NULL, gup_flags);
		mmap_read_unlock(current->mm);
	} else {
		ret = get_user_pages_unlocked(start, nr_pages,
					      pages, gup_flags);
	}

	return ret;
}

static unsigned long lockless_pages_from_mm(unsigned long start,
					    unsigned long end,
					    unsigned int gup_flags,
					    struct page **pages)
{
	unsigned long flags;
	int nr_pinned = 0;
	unsigned seq;

	if (!IS_ENABLED(CONFIG_HAVE_FAST_GUP) ||
	    !gup_fast_permitted(start, end))
		return 0;

	if (gup_flags & FOLL_PIN) {
		seq = raw_read_seqcount(&current->mm->write_protect_seq);
		if (seq & 1)
			return 0;
	}

	/*
	 * Disable interrupts. The nested form (local_irq_save()) is used so
	 * that this routine remains safe to call from contexts where
	 * interrupts are already disabled.
	 *
	 * With interrupts disabled, we block page table pages from being
	 * freed from under us. See struct mmu_table_batch comments in
	 * include/asm-generic/tlb.h for more details.
	 *
	 * We do not adopt an rcu_read_lock() here, because we also want to
	 * block the IPIs that come from THPs splitting.
	 */
	local_irq_save(flags);
	gup_pgd_range(start, end, gup_flags, pages, &nr_pinned);
	local_irq_restore(flags);

	/*
	 * When pinning pages for DMA, there could be a concurrent write
	 * protect from fork() via copy_page_range(); in that case, always
	 * fail fast GUP.
	 */
	if (gup_flags & FOLL_PIN) {
		if (read_seqcount_retry(&current->mm->write_protect_seq, seq)) {
			unpin_user_pages(pages, nr_pinned);
			return 0;
		}
	}
	return nr_pinned;
}
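
/*
 * For reference, the writer side of write_protect_seq is fork's COW
 * write-protect pass in copy_page_range() (mm/memory.c). A simplified
 * sketch of that pairing, not the exact upstream code:
 *
 *	raw_write_seqcount_begin(&src_mm->write_protect_seq);
 *	... write-protect the parent's PTEs for copy-on-write ...
 *	raw_write_seqcount_end(&src_mm->write_protect_seq);
 *
 * An odd count sampled above means such a pass is in flight; a count that
 * changed by the time we re-check means one completed while we were
 * pinning. Either way the pins taken here cannot be trusted for DMA, so
 * they are dropped and 0 is returned.
 */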

static int internal_get_user_pages_fast(unsigned long start,
					unsigned long nr_pages,
					unsigned int gup_flags,
					struct page **pages)
{
	unsigned long len, end;
	unsigned long nr_pinned;
	int ret;

	if (WARN_ON_ONCE(gup_flags & ~(FOLL_WRITE | FOLL_LONGTERM |
				       FOLL_FORCE | FOLL_PIN | FOLL_GET |
				       FOLL_FAST_ONLY | FOLL_NOFAULT)))
		return -EINVAL;

	if (gup_flags & FOLL_PIN)
		mm_set_has_pinned_flag(&current->mm->flags);

	if (!(gup_flags & FOLL_FAST_ONLY))
		might_lock_read(&current->mm->mmap_lock);

	start = untagged_addr(start) & PAGE_MASK;
	len = nr_pages << PAGE_SHIFT;
	if (check_add_overflow(start, len, &end))
		return 0;
	if (unlikely(!access_ok((void __user *)start, len)))
		return -EFAULT;

	nr_pinned = lockless_pages_from_mm(start, end, gup_flags, pages);
	if (nr_pinned == nr_pages || gup_flags & FOLL_FAST_ONLY)
		return nr_pinned;

	/* Slow path: try to get the remaining pages with get_user_pages */
	start += nr_pinned << PAGE_SHIFT;
	pages += nr_pinned;
	ret = __gup_longterm_unlocked(start, nr_pages - nr_pinned, gup_flags,
				      pages);
	if (ret < 0) {
		/*
		 * The caller has to unpin the pages we already pinned, so
		 * returning -errno is not an option.
		 */
		if (nr_pinned)
			return nr_pinned;
		return ret;
	}
	return ret + nr_pinned;
}
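
/*
 * Worked example of the return-value contract above (numbers are
 * hypothetical): for nr_pages == 8, if the lockless walk pins 3 pages and
 * the slow path then gets the remaining 5, the caller sees 8. If the slow
 * path instead fails with -EFAULT, the caller sees 3 and must still
 * release those 3 pages; only when nothing at all was pinned is the
 * -errno itself returned.
 */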

/**
 * get_user_pages_fast_only() - pin user pages in memory
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @gup_flags:	flags modifying pin behaviour
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long.
 *
 * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall
 * back to the regular GUP.
 * Note a difference from get_user_pages_fast(): this always returns the
 * number of pages pinned, 0 if no pages were pinned.
 *
 * If the architecture does not support this function, simply return with no
 * pages pinned.
 *
 * Careful, careful! COW breaking can go either way, so a non-write
 * access can get ambiguous page results. If you call this function without
 * 'write' set, you'd better be sure that you're ok with that ambiguity.
 */
int get_user_pages_fast_only(unsigned long start, int nr_pages,
			     unsigned int gup_flags, struct page **pages)
{
	int nr_pinned;
	/*
	 * Internally (within mm/gup.c), gup fast variants must set FOLL_GET,
	 * because gup fast is always a "pin with a +1 page refcount" request.
	 *
	 * FOLL_FAST_ONLY is required in order to match the API description of
	 * this routine: no fall back to regular ("slow") GUP.
	 */
	gup_flags |= FOLL_GET | FOLL_FAST_ONLY;

	nr_pinned = internal_get_user_pages_fast(start, nr_pages, gup_flags,
						 pages);

	/*
	 * As specified in the API description above, this routine is not
	 * allowed to return negative values. However, the common core
	 * routine internal_get_user_pages_fast() *can* return -errno.
	 * Therefore, correct for that here:
	 */
	if (nr_pinned < 0)
		nr_pinned = 0;

	return nr_pinned;
}
EXPORT_SYMBOL_GPL(get_user_pages_fast_only);
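
/*
 * Example usage, an illustrative sketch rather than code from this file:
 * opportunistically grab one page from a context that must not sleep, and
 * punt to a sleepable fallback (hypothetical, here signalled by -EAGAIN)
 * when the fast path cannot pin it:
 *
 *	struct page *page;
 *
 *	if (get_user_pages_fast_only(addr, 1, FOLL_WRITE, &page) != 1)
 *		return -EAGAIN;
 *	... access the page ...
 *	put_page(page);
 */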

/**
 * get_user_pages_fast() - pin user pages in memory
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @gup_flags:	flags modifying pin behaviour
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long.
 *
 * Attempt to pin user pages in memory without taking mm->mmap_lock.
 * If not successful, it will fall back to taking the lock and
 * calling get_user_pages().
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages were
 * pinned, returns -errno.
 */
int get_user_pages_fast(unsigned long start, int nr_pages,
			unsigned int gup_flags, struct page **pages)
{
	if (!is_valid_gup_flags(gup_flags))
		return -EINVAL;

	/*
	 * The caller may or may not have explicitly set FOLL_GET; either way
	 * is OK. However, internally (within mm/gup.c), gup fast variants
	 * must set FOLL_GET, because gup fast is always a "pin with a +1 page
	 * refcount" request.
	 */
	gup_flags |= FOLL_GET;
	return internal_get_user_pages_fast(start, nr_pages, gup_flags, pages);
}
EXPORT_SYMBOL_GPL(get_user_pages_fast);
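
/*
 * Example usage (an illustrative sketch; the buffer size is hypothetical).
 * Note the handling of a partial result, which the return contract above
 * explicitly allows:
 *
 *	struct page *pages[4];
 *	int i, nr;
 *
 *	nr = get_user_pages_fast(start, 4, FOLL_WRITE, pages);
 *	if (nr < 0)
 *		return nr;
 *	... use the first nr pages ...
 *	for (i = 0; i < nr; i++)
 *		put_page(pages[i]);
 */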

/**
 * pin_user_pages_fast() - pin user pages in memory without taking locks
 *
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @gup_flags:	flags modifying pin behaviour
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long.
 *
 * Nearly the same as get_user_pages_fast(), except that FOLL_PIN is set. See
 * get_user_pages_fast() for documentation on the function arguments, because
 * the arguments here are identical.
 *
 * FOLL_PIN means that the pages must be released via unpin_user_page(). Please
 * see Documentation/core-api/pin_user_pages.rst for further details.
 */
int pin_user_pages_fast(unsigned long start, int nr_pages,
			unsigned int gup_flags, struct page **pages)
{
	/* FOLL_GET and FOLL_PIN are mutually exclusive. */
	if (WARN_ON_ONCE(gup_flags & FOLL_GET))
		return -EINVAL;

	gup_flags |= FOLL_PIN;
	return internal_get_user_pages_fast(start, nr_pages, gup_flags, pages);
}
EXPORT_SYMBOL_GPL(pin_user_pages_fast);
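
/*
 * Example usage (an illustrative sketch for a hypothetical driver doing
 * device DMA into a user buffer; NPAGES and the DMA setup are assumed):
 *
 *	struct page *pages[NPAGES];
 *	int nr;
 *
 *	nr = pin_user_pages_fast(user_addr, NPAGES,
 *				 FOLL_WRITE | FOLL_LONGTERM, pages);
 *	if (nr < 0)
 *		return nr;
 *	... program the device to DMA into the pinned pages ...
 *	unpin_user_pages_dirty_lock(pages, nr, true);
 *
 * unpin_user_pages_dirty_lock() both marks the pages dirty and drops the
 * FOLL_PIN references, which is the usual teardown for writable DMA.
 */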

/*
 * This is the FOLL_PIN equivalent of get_user_pages_fast_only(). Behavior
 * is the same, except that this one sets FOLL_PIN instead of FOLL_GET.
 *
 * The API rules are the same, too: no negative values may be returned.
 */
int pin_user_pages_fast_only(unsigned long start, int nr_pages,
			     unsigned int gup_flags, struct page **pages)
{
	int nr_pinned;

	/*
	 * FOLL_GET and FOLL_PIN are mutually exclusive. Note that the API
	 * rules require returning 0, rather than -errno:
	 */
	if (WARN_ON_ONCE(gup_flags & FOLL_GET))
		return 0;
	/*
	 * FOLL_FAST_ONLY is required in order to match the API description of
	 * this routine: no fall back to regular ("slow") GUP.
	 */
	gup_flags |= (FOLL_PIN | FOLL_FAST_ONLY);
	nr_pinned = internal_get_user_pages_fast(start, nr_pages, gup_flags,
						 pages);
	/*
	 * This routine is not allowed to return negative values. However,
	 * internal_get_user_pages_fast() *can* return -errno. Therefore,
	 * correct for that here:
	 */
	if (nr_pinned < 0)
		nr_pinned = 0;

	return nr_pinned;
}
EXPORT_SYMBOL_GPL(pin_user_pages_fast_only);
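
/*
 * Example (an illustrative sketch): because this variant never returns
 * -errno, a partial pin and an outright failure look alike, so callers
 * typically treat anything short of the full request as "retry elsewhere"
 * (the -EAGAIN fallback below is hypothetical):
 *
 *	nr = pin_user_pages_fast_only(addr, want, FOLL_WRITE, pages);
 *	if (nr < want) {
 *		unpin_user_pages(pages, nr);	// safe even when nr == 0
 *		return -EAGAIN;
 *	}
 */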

/**
 * pin_user_pages_remote() - pin pages of a remote process
 *
 * @mm:		mm_struct of target mm
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @gup_flags:	flags modifying lookup behaviour
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long. Or NULL, if caller
 *		only intends to ensure the pages are faulted in.
 * @vmas:	array of pointers to vmas corresponding to each page.
 *		Or NULL if the caller does not require them.
 * @locked:	pointer to lock flag indicating whether lock is held and
 *		subsequently whether VM_FAULT_RETRY functionality can be
 *		utilised. Lock must initially be held.
 *
 * Nearly the same as get_user_pages_remote(), except that FOLL_PIN is set. See
 * get_user_pages_remote() for documentation on the function arguments, because
 * the arguments here are identical.
 *
 * FOLL_PIN means that the pages must be released via unpin_user_page(). Please
 * see Documentation/core-api/pin_user_pages.rst for details.
 */
long pin_user_pages_remote(struct mm_struct *mm,
			   unsigned long start, unsigned long nr_pages,
			   unsigned int gup_flags, struct page **pages,
			   struct vm_area_struct **vmas, int *locked)
{
	/* FOLL_GET and FOLL_PIN are mutually exclusive. */
	if (WARN_ON_ONCE(gup_flags & FOLL_GET))
		return -EINVAL;

	gup_flags |= FOLL_PIN;
	return __get_user_pages_remote(mm, start, nr_pages, gup_flags,
				       pages, vmas, locked);
}
EXPORT_SYMBOL(pin_user_pages_remote);
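
/*
 * Example usage (an illustrative sketch in the style of callers that access
 * another process's memory; obtaining and releasing @mm is elided):
 *
 *	int locked = 1;
 *	long nr;
 *
 *	mmap_read_lock(mm);
 *	nr = pin_user_pages_remote(mm, start, npages, FOLL_WRITE, pages,
 *				   NULL, &locked);
 *	if (locked)
 *		mmap_read_unlock(mm);
 *	if (nr > 0) {
 *		... access the pages ...
 *		unpin_user_pages(pages, nr);
 *	}
 *
 * The @locked protocol matters: the callee may drop mmap_lock on
 * VM_FAULT_RETRY, clearing *locked, in which case the caller must not
 * unlock again.
 */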

/**
 * pin_user_pages() - pin user pages in memory for use by other devices
 *
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @gup_flags:	flags modifying lookup behaviour
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long. Or NULL, if caller
 *		only intends to ensure the pages are faulted in.
 * @vmas:	array of pointers to vmas corresponding to each page.
 *		Or NULL if the caller does not require them.
 *
 * Nearly the same as get_user_pages(), except that FOLL_TOUCH is not set, and
 * FOLL_PIN is set.
 *
 * FOLL_PIN means that the pages must be released via unpin_user_page(). Please
 * see Documentation/core-api/pin_user_pages.rst for details.
 */
long pin_user_pages(unsigned long start, unsigned long nr_pages,
		    unsigned int gup_flags, struct page **pages,
		    struct vm_area_struct **vmas)
{
	/* FOLL_GET and FOLL_PIN are mutually exclusive. */
	if (WARN_ON_ONCE(gup_flags & FOLL_GET))
		return -EINVAL;

	gup_flags |= FOLL_PIN;
	return __gup_longterm_locked(current->mm, start, nr_pages,
				     pages, vmas, gup_flags);
}
EXPORT_SYMBOL(pin_user_pages);
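
/*
 * Example usage (an illustrative sketch; like get_user_pages(), this is
 * assumed to be called with current->mm's mmap_lock held for read):
 *
 *	mmap_read_lock(current->mm);
 *	nr = pin_user_pages(start, npages, FOLL_WRITE | FOLL_LONGTERM,
 *			    pages, NULL);
 *	mmap_read_unlock(current->mm);
 *	if (nr > 0) {
 *		... use the pages, e.g. as a long-lived I/O buffer ...
 *		unpin_user_pages(pages, nr);
 *	}
 */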

/*
 * pin_user_pages_unlocked() is the FOLL_PIN variant of
 * get_user_pages_unlocked(). Behavior is the same, except that this one sets
 * FOLL_PIN and rejects FOLL_GET.
 */
long pin_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
			     struct page **pages, unsigned int gup_flags)
{
	/* FOLL_GET and FOLL_PIN are mutually exclusive. */
	if (WARN_ON_ONCE(gup_flags & FOLL_GET))
		return -EINVAL;

	gup_flags |= FOLL_PIN;
	return get_user_pages_unlocked(start, nr_pages, pages, gup_flags);
}
EXPORT_SYMBOL(pin_user_pages_unlocked);
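
/*
 * Example usage (an illustrative sketch): in contrast to pin_user_pages(),
 * the caller holds no mmap_lock here; the helper takes and drops it as
 * needed:
 *
 *	nr = pin_user_pages_unlocked(start, npages, pages, FOLL_WRITE);
 *	if (nr > 0) {
 *		... access the pages ...
 *		unpin_user_pages(pages, nr);
 *	}
 */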