// SPDX-License-Identifier: GPL-2.0
/*
 * linux/mm/mlock.c
 *
 * (C) Copyright 1995 Linus Torvalds
 * (C) Copyright 2002 Christoph Hellwig
 */

#include <linux/capability.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/sched/user.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/pagewalk.h>
#include <linux/mempolicy.h>
#include <linux/syscalls.h>
#include <linux/sched.h>
#include <linux/export.h>
#include <linux/rmap.h>
#include <linux/mmzone.h>
#include <linux/hugetlb.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/secretmem.h>

#include "internal.h"

struct mlock_pvec {
	local_lock_t lock;
	struct pagevec vec;
};

static DEFINE_PER_CPU(struct mlock_pvec, mlock_pvec) = {
	.lock = INIT_LOCAL_LOCK(lock),
};

bool can_do_mlock(void)
{
	if (rlimit(RLIMIT_MEMLOCK) != 0)
		return true;
	if (capable(CAP_IPC_LOCK))
		return true;
	return false;
}
EXPORT_SYMBOL(can_do_mlock);
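
/*
 * Userspace view of the check above (illustrative sketch, not part of
 * the kernel build): with a zero RLIMIT_MEMLOCK and no CAP_IPC_LOCK,
 * mlock() fails with EPERM before the range is even examined:
 *
 *	struct rlimit rl = { .rlim_cur = 0, .rlim_max = 0 };
 *	setrlimit(RLIMIT_MEMLOCK, &rl);
 *	if (mlock(buf, len) == -1 && errno == EPERM)
 *		;	// can_do_mlock() returned false
 */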

/*
 * Mlocked pages are marked with PageMlocked() flag for efficient testing
 * in vmscan and, possibly, the fault path; and to support semi-accurate
 * statistics.
 *
 * An mlocked page [PageMlocked(page)] is unevictable.  As such, it will
 * be placed on the LRU "unevictable" list, rather than the [in]active lists.
 * The unevictable list is an LRU sibling list to the [in]active lists.
 * PageUnevictable is set to indicate the unevictable state.
 */

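/*
 * A sketch of the lifecycle implemented below (ignoring races with
 * reclaim and migration):
 *
 *	mlock_folio()/mlock_new_page()	set PageMlocked and batch the
 *		page on the per-cpu mlock_pvec;
 *	__mlock_page()/__mlock_new_page()	move it to the unevictable
 *		LRU and initialize page->mlock_count;
 *	munlock_page()			batches the page again;
 *	__munlock_page()		decrements mlock_count and, once it
 *		reaches zero, clears PageMlocked and rescues the page to
 *		an evictable LRU.
 */
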
static struct lruvec *__mlock_page(struct page *page, struct lruvec *lruvec)
{
	/* There is nothing more we can do while it's off LRU */
	if (!TestClearPageLRU(page))
		return lruvec;

	lruvec = folio_lruvec_relock_irq(page_folio(page), lruvec);

	if (unlikely(page_evictable(page))) {
		/*
		 * This is a little surprising, but quite possible:
		 * PageMlocked must have got cleared already by another CPU.
		 * Could this page be on the Unevictable LRU?  I'm not sure,
		 * but move it now if so.
		 */
		if (PageUnevictable(page)) {
			del_page_from_lru_list(page, lruvec);
			ClearPageUnevictable(page);
			add_page_to_lru_list(page, lruvec);
			__count_vm_events(UNEVICTABLE_PGRESCUED,
					  thp_nr_pages(page));
		}
		goto out;
	}

	if (PageUnevictable(page)) {
		if (PageMlocked(page))
			page->mlock_count++;
		goto out;
	}

	del_page_from_lru_list(page, lruvec);
	ClearPageActive(page);
	SetPageUnevictable(page);
	page->mlock_count = !!PageMlocked(page);
	add_page_to_lru_list(page, lruvec);
	__count_vm_events(UNEVICTABLE_PGCULLED, thp_nr_pages(page));
out:
	SetPageLRU(page);
	return lruvec;
}

static struct lruvec *__mlock_new_page(struct page *page, struct lruvec *lruvec)
{
	VM_BUG_ON_PAGE(PageLRU(page), page);

	lruvec = folio_lruvec_relock_irq(page_folio(page), lruvec);

	/* As above, this is a little surprising, but possible */
	if (unlikely(page_evictable(page)))
		goto out;

	SetPageUnevictable(page);
	page->mlock_count = !!PageMlocked(page);
	__count_vm_events(UNEVICTABLE_PGCULLED, thp_nr_pages(page));
out:
	add_page_to_lru_list(page, lruvec);
	SetPageLRU(page);
	return lruvec;
}

static struct lruvec *__munlock_page(struct page *page, struct lruvec *lruvec)
{
	int nr_pages = thp_nr_pages(page);
	bool isolated = false;

	if (!TestClearPageLRU(page))
		goto munlock;

	isolated = true;
	lruvec = folio_lruvec_relock_irq(page_folio(page), lruvec);

	if (PageUnevictable(page)) {
		/* Then mlock_count is maintained, but might undercount */
		if (page->mlock_count)
			page->mlock_count--;
		if (page->mlock_count)
			goto out;
	}
	/* else assume that was the last mlock: reclaim will fix it if not */

munlock:
	if (TestClearPageMlocked(page)) {
		__mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);
		if (isolated || !PageUnevictable(page))
			__count_vm_events(UNEVICTABLE_PGMUNLOCKED, nr_pages);
		else
			__count_vm_events(UNEVICTABLE_PGSTRANDED, nr_pages);
	}

	/* page_evictable() has to be checked *after* clearing Mlocked */
	if (isolated && PageUnevictable(page) && page_evictable(page)) {
		del_page_from_lru_list(page, lruvec);
		ClearPageUnevictable(page);
		add_page_to_lru_list(page, lruvec);
		__count_vm_events(UNEVICTABLE_PGRESCUED, nr_pages);
	}
out:
	if (isolated)
		SetPageLRU(page);
	return lruvec;
}

/*
 * Flags held in the low bits of a struct page pointer on the mlock_pvec.
 */
#define LRU_PAGE 0x1
#define NEW_PAGE 0x2
static inline struct page *mlock_lru(struct page *page)
{
	return (struct page *)((unsigned long)page + LRU_PAGE);
}

static inline struct page *mlock_new(struct page *page)
{
	return (struct page *)((unsigned long)page + NEW_PAGE);
}
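
/*
 * Tag arithmetic, worked through (a sketch): struct page pointers are
 * aligned far beyond 4 bytes, so the two low bits are free to carry
 * flags:
 *
 *	page            = 0x...f00	(low bits 00: plain munlock entry)
 *	mlock_lru(page) = 0x...f01	(LRU_PAGE)
 *	mlock_new(page) = 0x...f02	(NEW_PAGE)
 *
 * mlock_pagevec() recovers the tag with
 *	mlock = (unsigned long)page & (LRU_PAGE | NEW_PAGE);
 * and subtracts it off again to get the real pointer back.
 */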

/*
 * mlock_pagevec() is derived from pagevec_lru_move_fn():
 * perhaps that can make use of such page pointer flags in future,
 * but for now just keep it for mlock.  We could use three separate
 * pagevecs instead, but one feels better (munlocking a full pagevec
 * does not need to drain mlocking pagevecs first).
 */
static void mlock_pagevec(struct pagevec *pvec)
{
	struct lruvec *lruvec = NULL;
	unsigned long mlock;
	struct page *page;
	int i;

	for (i = 0; i < pagevec_count(pvec); i++) {
		page = pvec->pages[i];
		mlock = (unsigned long)page & (LRU_PAGE | NEW_PAGE);
		page = (struct page *)((unsigned long)page - mlock);
		pvec->pages[i] = page;

		if (mlock & LRU_PAGE)
			lruvec = __mlock_page(page, lruvec);
		else if (mlock & NEW_PAGE)
			lruvec = __mlock_new_page(page, lruvec);
		else
			lruvec = __munlock_page(page, lruvec);
	}

	if (lruvec)
		unlock_page_lruvec_irq(lruvec);
	release_pages(pvec->pages, pvec->nr);
	pagevec_reinit(pvec);
}

void mlock_page_drain_local(void)
{
	struct pagevec *pvec;

	local_lock(&mlock_pvec.lock);
	pvec = this_cpu_ptr(&mlock_pvec.vec);
	if (pagevec_count(pvec))
		mlock_pagevec(pvec);
	local_unlock(&mlock_pvec.lock);
}

void mlock_page_drain_remote(int cpu)
{
	struct pagevec *pvec;

	WARN_ON_ONCE(cpu_online(cpu));
	pvec = &per_cpu(mlock_pvec.vec, cpu);
	if (pagevec_count(pvec))
		mlock_pagevec(pvec);
}

bool need_mlock_page_drain(int cpu)
{
	return pagevec_count(&per_cpu(mlock_pvec.vec, cpu));
}

/**
 * mlock_folio - mlock a folio already on (or temporarily off) LRU
 * @folio: folio to be mlocked.
 */
void mlock_folio(struct folio *folio)
{
	struct pagevec *pvec;

	local_lock(&mlock_pvec.lock);
	pvec = this_cpu_ptr(&mlock_pvec.vec);

	if (!folio_test_set_mlocked(folio)) {
		int nr_pages = folio_nr_pages(folio);

		zone_stat_mod_folio(folio, NR_MLOCK, nr_pages);
		__count_vm_events(UNEVICTABLE_PGMLOCKED, nr_pages);
	}

	folio_get(folio);
	if (!pagevec_add(pvec, mlock_lru(&folio->page)) ||
	    folio_test_large(folio) || lru_cache_disabled())
		mlock_pagevec(pvec);
	local_unlock(&mlock_pvec.lock);
}

/**
 * mlock_new_page - mlock a newly allocated page not yet on LRU
 * @page: page to be mlocked, either a normal page or a THP head.
 */
void mlock_new_page(struct page *page)
{
	struct pagevec *pvec;
	int nr_pages = thp_nr_pages(page);

	local_lock(&mlock_pvec.lock);
	pvec = this_cpu_ptr(&mlock_pvec.vec);
	SetPageMlocked(page);
	mod_zone_page_state(page_zone(page), NR_MLOCK, nr_pages);
	__count_vm_events(UNEVICTABLE_PGMLOCKED, nr_pages);

	get_page(page);
	if (!pagevec_add(pvec, mlock_new(page)) ||
	    PageHead(page) || lru_cache_disabled())
		mlock_pagevec(pvec);
	local_unlock(&mlock_pvec.lock);
}

/**
 * munlock_page - munlock a page
 * @page: page to be munlocked, either a normal page or a THP head.
 */
void munlock_page(struct page *page)
{
	struct pagevec *pvec;

	local_lock(&mlock_pvec.lock);
	pvec = this_cpu_ptr(&mlock_pvec.vec);
	/*
	 * TestClearPageMlocked(page) must be left to __munlock_page(),
	 * which will check whether the page is multiply mlocked.
	 */

	get_page(page);
	if (!pagevec_add(pvec, page) ||
	    PageHead(page) || lru_cache_disabled())
		mlock_pagevec(pvec);
	local_unlock(&mlock_pvec.lock);
}

static int mlock_pte_range(pmd_t *pmd, unsigned long addr,
			   unsigned long end, struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	spinlock_t *ptl;
	pte_t *start_pte, *pte;
	struct page *page;

	ptl = pmd_trans_huge_lock(pmd, vma);
	if (ptl) {
		if (!pmd_present(*pmd))
			goto out;
		if (is_huge_zero_pmd(*pmd))
			goto out;
		page = pmd_page(*pmd);
		if (vma->vm_flags & VM_LOCKED)
			mlock_folio(page_folio(page));
		else
			munlock_page(page);
		goto out;
	}

	start_pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (pte = start_pte; addr != end; pte++, addr += PAGE_SIZE) {
		if (!pte_present(*pte))
			continue;
		page = vm_normal_page(vma, addr, *pte);
		if (!page || is_zone_device_page(page))
			continue;
		if (PageTransCompound(page))
			continue;
		if (vma->vm_flags & VM_LOCKED)
			mlock_folio(page_folio(page));
		else
			munlock_page(page);
	}
	pte_unmap(start_pte);
out:
	spin_unlock(ptl);
	cond_resched();
	return 0;
}

/*
 * mlock_vma_pages_range() - mlock any pages already in the range,
 *                           or munlock all pages in the range.
 * @vma - vma containing range to be mlock()ed or munlock()ed
 * @start - start address in @vma of the range
 * @end - end of range in @vma
 * @newflags - the new set of flags for @vma.
 *
 * Called for mlock(), mlock2() and mlockall(), to set @vma VM_LOCKED;
 * called for munlock() and munlockall(), to clear VM_LOCKED from @vma.
 */
static void mlock_vma_pages_range(struct vm_area_struct *vma,
	unsigned long start, unsigned long end, vm_flags_t newflags)
{
	static const struct mm_walk_ops mlock_walk_ops = {
		.pmd_entry = mlock_pte_range,
	};

	/*
	 * There is a slight chance that concurrent page migration,
	 * or page reclaim finding a page of this now-VM_LOCKED vma,
	 * will call mlock_vma_page() and raise page's mlock_count:
	 * double counting, leaving the page unevictable indefinitely.
	 * Communicate this danger to mlock_vma_page() with VM_IO,
	 * which is a VM_SPECIAL flag not allowed on VM_LOCKED vmas.
	 * mmap_lock is held in write mode here, so this weird
	 * combination should not be visible to other mmap_lock users;
	 * but WRITE_ONCE so rmap walkers must see VM_IO if VM_LOCKED.
	 */
	if (newflags & VM_LOCKED)
		newflags |= VM_IO;
	WRITE_ONCE(vma->vm_flags, newflags);

	lru_add_drain();
	walk_page_range(vma->vm_mm, start, end, &mlock_walk_ops, NULL);
	lru_add_drain();

	if (newflags & VM_IO) {
		newflags &= ~VM_IO;
		WRITE_ONCE(vma->vm_flags, newflags);
	}
}
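
/*
 * A sketch of the other side of the VM_IO trick above, assuming the
 * mlock_vma_page() helper in internal.h at this point in the tree:
 * rmap and fault paths only call into mlock when
 *
 *	(vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) == VM_LOCKED
 *
 * and VM_IO is one of the VM_SPECIAL bits, so while the walk above is
 * in progress those paths see VM_LOCKED | VM_IO and decline to raise
 * mlock_count a second time.
 */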

/*
 * mlock_fixup - handle mlock[all]/munlock[all] requests.
 *
 * Filters out "special" vmas -- VM_LOCKED never gets set for these, and
 * munlock is a no-op.  However, for some special vmas, we go ahead and
 * populate the ptes.
 *
 * For vmas that pass the filters, merge/split as appropriate.
 */
static int mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev,
	unsigned long start, unsigned long end, vm_flags_t newflags)
{
	struct mm_struct *mm = vma->vm_mm;
	pgoff_t pgoff;
	int nr_pages;
	int ret = 0;
	vm_flags_t oldflags = vma->vm_flags;

	if (newflags == oldflags || (oldflags & VM_SPECIAL) ||
	    is_vm_hugetlb_page(vma) || vma == get_gate_vma(current->mm) ||
	    vma_is_dax(vma) || vma_is_secretmem(vma))
		/* don't set VM_LOCKED or VM_LOCKONFAULT and don't count */
		goto out;

	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*prev = vma_merge(mm, *prev, start, end, newflags, vma->anon_vma,
			  vma->vm_file, pgoff, vma_policy(vma),
			  vma->vm_userfaultfd_ctx, anon_vma_name(vma));
	if (*prev) {
		vma = *prev;
		goto success;
	}

	if (start != vma->vm_start) {
		ret = split_vma(mm, vma, start, 1);
		if (ret)
			goto out;
	}

	if (end != vma->vm_end) {
		ret = split_vma(mm, vma, end, 0);
		if (ret)
			goto out;
	}

success:
	/*
	 * Keep track of amount of locked VM.
	 */
	nr_pages = (end - start) >> PAGE_SHIFT;
	if (!(newflags & VM_LOCKED))
		nr_pages = -nr_pages;
	else if (oldflags & VM_LOCKED)
		nr_pages = 0;
	mm->locked_vm += nr_pages;

	/*
	 * vm_flags is protected by the mmap_lock held in write mode.
	 * It's okay if try_to_unmap_one unmaps a page just after we
	 * set VM_LOCKED, populate_vma_page_range will bring it back.
	 */

	if ((newflags & VM_LOCKED) && (oldflags & VM_LOCKED)) {
		/* No work to do, and mlocking twice would be wrong */
		vma->vm_flags = newflags;
	} else {
		mlock_vma_pages_range(vma, start, end, newflags);
	}
out:
	*prev = vma;
	return ret;
}

static int apply_vma_lock_flags(unsigned long start, size_t len,
				vm_flags_t flags)
{
	unsigned long nstart, end, tmp;
	struct vm_area_struct *vma, *prev;
	int error;
	MA_STATE(mas, &current->mm->mm_mt, start, start);

	VM_BUG_ON(offset_in_page(start));
	VM_BUG_ON(len != PAGE_ALIGN(len));
	end = start + len;
	if (end < start)
		return -EINVAL;
	if (end == start)
		return 0;
	vma = mas_walk(&mas);
	if (!vma)
		return -ENOMEM;

	if (start > vma->vm_start)
		prev = vma;
	else
		prev = mas_prev(&mas, 0);

	for (nstart = start ; ; ) {
		vm_flags_t newflags = vma->vm_flags & VM_LOCKED_CLEAR_MASK;

		newflags |= flags;

		/* Here we know that vma->vm_start <= nstart < vma->vm_end. */
		tmp = vma->vm_end;
		if (tmp > end)
			tmp = end;
		error = mlock_fixup(vma, &prev, nstart, tmp, newflags);
		if (error)
			break;
		nstart = tmp;
		if (nstart < prev->vm_end)
			nstart = prev->vm_end;
		if (nstart >= end)
			break;

		vma = find_vma(prev->vm_mm, prev->vm_end);
		if (!vma || vma->vm_start != nstart) {
			error = -ENOMEM;
			break;
		}
	}
	return error;
}

/*
 * Go through the vmas of @mm and sum up the amount of already mlocked
 * memory overlapping [start, start + len).  Note that the deferred
 * memory locking case (mlock2() with MLOCK_ONFAULT) is also counted.
 * Return value: the number of previously mlocked pages in the range.
 * (A worked example follows the function.)
 */
static unsigned long count_mm_mlocked_page_nr(struct mm_struct *mm,
		unsigned long start, size_t len)
{
	struct vm_area_struct *vma;
	unsigned long count = 0;
	unsigned long end;
	VMA_ITERATOR(vmi, mm, start);

	/* Don't overflow past ULONG_MAX */
	if (unlikely(ULONG_MAX - len < start))
		end = ULONG_MAX;
	else
		end = start + len;

	for_each_vma_range(vmi, vma, end) {
		if (vma->vm_flags & VM_LOCKED) {
			if (start > vma->vm_start)
				count -= (start - vma->vm_start);
			if (end < vma->vm_end) {
				count += end - vma->vm_start;
				break;
			}
			count += vma->vm_end - vma->vm_start;
		}
	}

	return count >> PAGE_SHIFT;
}
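
/*
 * Worked example for count_mm_mlocked_page_nr() above (a sketch): with
 * one VM_LOCKED vma covering [0x1000, 0x5000) and a request of
 * start = 0x2000, len = 0x6000 (so end = 0x8000), the loop does:
 *
 *	count -= 0x2000 - 0x1000;	// part of the vma below start
 *	count += 0x5000 - 0x1000;	// the whole vma
 *
 * leaving count = 0x3000: three already-mlocked pages in the range,
 * assuming 4K pages.
 */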

/*
 * convert get_user_pages() return value to posix mlock() error
 */
static int __mlock_posix_error_return(long retval)
{
	if (retval == -EFAULT)
		retval = -ENOMEM;
	else if (retval == -ENOMEM)
		retval = -EAGAIN;
	return retval;
}
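
/*
 * For example: a hole in the range makes get_user_pages() return
 * -EFAULT, but POSIX specifies ENOMEM for mlock() when part of the
 * range is not mapped; and -ENOMEM while faulting the pages in becomes
 * EAGAIN (the memory could not be locked at this time).
 */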

static __must_check int do_mlock(unsigned long start, size_t len, vm_flags_t flags)
{
	unsigned long locked;
	unsigned long lock_limit;
	int error = -ENOMEM;

	start = untagged_addr(start);

	if (!can_do_mlock())
		return -EPERM;

	len = PAGE_ALIGN(len + (offset_in_page(start)));
	start &= PAGE_MASK;

	lock_limit = rlimit(RLIMIT_MEMLOCK);
	lock_limit >>= PAGE_SHIFT;
	locked = len >> PAGE_SHIFT;

	if (mmap_write_lock_killable(current->mm))
		return -EINTR;

	locked += current->mm->locked_vm;
	if ((locked > lock_limit) && (!capable(CAP_IPC_LOCK))) {
		/*
		 * The requested region may intersect with previously
		 * mlocked areas; that part is already counted in
		 * "mm->locked_vm" and should not be counted again toward
		 * this new mlock, so check and adjust the locked count
		 * if necessary.
		 */
		locked -= count_mm_mlocked_page_nr(current->mm,
				start, len);
	}

	/* check against resource limits */
	if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
		error = apply_vma_lock_flags(start, len, flags);

	mmap_write_unlock(current->mm);
	if (error)
		return error;

	error = __mm_populate(start, len, 0);
	if (error)
		return __mlock_posix_error_return(error);
	return 0;
}

SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
{
	return do_mlock(start, len, VM_LOCKED);
}

SYSCALL_DEFINE3(mlock2, unsigned long, start, size_t, len, int, flags)
{
	vm_flags_t vm_flags = VM_LOCKED;

	if (flags & ~MLOCK_ONFAULT)
		return -EINVAL;

	if (flags & MLOCK_ONFAULT)
		vm_flags |= VM_LOCKONFAULT;

	return do_mlock(start, len, vm_flags);
}
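
/*
 * Userspace usage sketch (illustrative, not part of the kernel build;
 * assumes a libc that exposes mlock2(), available since Linux 4.4):
 *
 *	#include <sys/mman.h>
 *
 *	if (mlock2(addr, length, MLOCK_ONFAULT) != 0)
 *		perror("mlock2");
 *	// The range is accounted as locked immediately, but each page is
 *	// faulted in and mlocked only on first touch (VM_LOCKONFAULT).
 */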

SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
{
	int ret;

	start = untagged_addr(start);

	len = PAGE_ALIGN(len + (offset_in_page(start)));
	start &= PAGE_MASK;

	if (mmap_write_lock_killable(current->mm))
		return -EINTR;
	ret = apply_vma_lock_flags(start, len, 0);
	mmap_write_unlock(current->mm);

	return ret;
}

/*
 * Take the MCL_* flags passed into mlockall (or 0 if called from munlockall)
 * and translate them into the appropriate modifications to mm->def_flags
 * and/or the flags for all current VMAs.
 *
 * There are a couple of subtleties with this.  If mlockall() is called
 * multiple times with different flags, the values do not necessarily
 * stack.  If mlockall() is called once including the MCL_FUTURE flag and
 * then a second time without it, VM_LOCKED and VM_LOCKONFAULT will be
 * cleared from mm->def_flags.  (See the sketch following the function.)
 */
static int apply_mlockall_flags(int flags)
{
	MA_STATE(mas, &current->mm->mm_mt, 0, 0);
	struct vm_area_struct *vma, *prev = NULL;
	vm_flags_t to_add = 0;

	current->mm->def_flags &= VM_LOCKED_CLEAR_MASK;
	if (flags & MCL_FUTURE) {
		current->mm->def_flags |= VM_LOCKED;

		if (flags & MCL_ONFAULT)
			current->mm->def_flags |= VM_LOCKONFAULT;

		if (!(flags & MCL_CURRENT))
			goto out;
	}

	if (flags & MCL_CURRENT) {
		to_add |= VM_LOCKED;
		if (flags & MCL_ONFAULT)
			to_add |= VM_LOCKONFAULT;
	}

	mas_for_each(&mas, vma, ULONG_MAX) {
		vm_flags_t newflags;

		newflags = vma->vm_flags & VM_LOCKED_CLEAR_MASK;
		newflags |= to_add;

		/* Ignore errors */
		mlock_fixup(vma, &prev, vma->vm_start, vma->vm_end, newflags);
		mas_pause(&mas);
		cond_resched();
	}
out:
	return 0;
}
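
/*
 * Sketch of the non-stacking behaviour described above:
 *
 *	mlockall(MCL_CURRENT | MCL_FUTURE);
 *		// mm->def_flags gains VM_LOCKED; all current vmas locked
 *	mlockall(MCL_CURRENT);
 *		// mm->def_flags loses VM_LOCKED again: future mappings
 *		// will not be locked, though current vmas stay VM_LOCKED
 */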

SYSCALL_DEFINE1(mlockall, int, flags)
{
	unsigned long lock_limit;
	int ret;

	if (!flags || (flags & ~(MCL_CURRENT | MCL_FUTURE | MCL_ONFAULT)) ||
	    flags == MCL_ONFAULT)
		return -EINVAL;

	if (!can_do_mlock())
		return -EPERM;

	lock_limit = rlimit(RLIMIT_MEMLOCK);
	lock_limit >>= PAGE_SHIFT;

	if (mmap_write_lock_killable(current->mm))
		return -EINTR;

	ret = -ENOMEM;
	if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
	    capable(CAP_IPC_LOCK))
		ret = apply_mlockall_flags(flags);
	mmap_write_unlock(current->mm);
	if (!ret && (flags & MCL_CURRENT))
		mm_populate(0, TASK_SIZE);

	return ret;
}

SYSCALL_DEFINE0(munlockall)
{
	int ret;

	if (mmap_write_lock_killable(current->mm))
		return -EINTR;
	ret = apply_mlockall_flags(0);
	mmap_write_unlock(current->mm);
	return ret;
}

/*
 * Objects with different lifetime than processes (SHM_LOCK and SHM_HUGETLB
 * shm segments) get accounted against the user's ucounts instead.
 */
static DEFINE_SPINLOCK(shmlock_user_lock);
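
/*
 * Illustrative trigger from userspace (not part of the kernel build):
 *
 *	int id = shmget(IPC_PRIVATE, size, IPC_CREAT | 0600);
 *	shmctl(id, SHM_LOCK, NULL);	// ends up in user_shm_lock()
 *	shmctl(id, SHM_UNLOCK, NULL);	// ends up in user_shm_unlock()
 */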

int user_shm_lock(size_t size, struct ucounts *ucounts)
{
	unsigned long lock_limit, locked;
	long memlock;
	int allowed = 0;

	locked = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	lock_limit = rlimit(RLIMIT_MEMLOCK);
	if (lock_limit != RLIM_INFINITY)
		lock_limit >>= PAGE_SHIFT;
	spin_lock(&shmlock_user_lock);
	memlock = inc_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_MEMLOCK, locked);

	if ((memlock == LONG_MAX || memlock > lock_limit) && !capable(CAP_IPC_LOCK)) {
		dec_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_MEMLOCK, locked);
		goto out;
	}
	if (!get_ucounts(ucounts)) {
		dec_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_MEMLOCK, locked);
		allowed = 0;
		goto out;
	}
	allowed = 1;
out:
	spin_unlock(&shmlock_user_lock);
	return allowed;
}

void user_shm_unlock(size_t size, struct ucounts *ucounts)
{
	spin_lock(&shmlock_user_lock);
	dec_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_MEMLOCK, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
	spin_unlock(&shmlock_user_lock);
	put_ucounts(ucounts);
}