// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/mmu_notifier.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/mm_inline.h>
#include <linux/kthread.h>
#include <linux/khugepaged.h>
#include <linux/freezer.h>
#include <linux/mman.h>
#include <linux/hashtable.h>
#include <linux/userfaultfd_k.h>
#include <linux/page_idle.h>
#include <linux/page_table_check.h>
#include <linux/swapops.h>
#include <linux/shmem_fs.h>

#include <asm/tlb.h>
#include <asm/pgalloc.h>
#include "internal.h"
#include "mm_slot.h"

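/*
 * Result codes for a scan/collapse attempt; reported via the
 * huge_memory tracepoints below.
 */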
enum scan_result {
	SCAN_FAIL,
	SCAN_SUCCEED,
	SCAN_PMD_NULL,
	SCAN_PMD_NONE,
	SCAN_PMD_MAPPED,
	SCAN_EXCEED_NONE_PTE,
	SCAN_EXCEED_SWAP_PTE,
	SCAN_EXCEED_SHARED_PTE,
	SCAN_PTE_NON_PRESENT,
	SCAN_PTE_UFFD_WP,
	SCAN_PTE_MAPPED_HUGEPAGE,
	SCAN_PAGE_RO,
	SCAN_LACK_REFERENCED_PAGE,
	SCAN_PAGE_NULL,
	SCAN_SCAN_ABORT,
	SCAN_PAGE_COUNT,
	SCAN_PAGE_LRU,
	SCAN_PAGE_LOCK,
	SCAN_PAGE_ANON,
	SCAN_PAGE_COMPOUND,
	SCAN_ANY_PROCESS,
	SCAN_VMA_NULL,
	SCAN_VMA_CHECK,
	SCAN_ADDRESS_RANGE,
	SCAN_DEL_PAGE_LRU,
	SCAN_ALLOC_HUGE_PAGE_FAIL,
	SCAN_CGROUP_CHARGE_FAIL,
	SCAN_TRUNCATED,
	SCAN_PAGE_HAS_PRIVATE,
};

#define CREATE_TRACE_POINTS
#include <trace/events/huge_memory.h>

static struct task_struct *khugepaged_thread __read_mostly;
static DEFINE_MUTEX(khugepaged_mutex);

/* default: scan 8*512 ptes (or vmas) every 10 seconds */
static unsigned int khugepaged_pages_to_scan __read_mostly;
static unsigned int khugepaged_pages_collapsed;
static unsigned int khugepaged_full_scans;
static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
/* during fragmentation, poll the hugepage allocator once every minute */
static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
static unsigned long khugepaged_sleep_expire;
static DEFINE_SPINLOCK(khugepaged_mm_lock);
static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
/*
 * By default, collapse hugepages if there is at least one pte mapped,
 * just as it would have been mapped if the vma had been large enough
 * at page-fault time.
 *
 * Note that these are only respected if collapse was initiated by
 * khugepaged.
 */
static unsigned int khugepaged_max_ptes_none __read_mostly;
static unsigned int khugepaged_max_ptes_swap __read_mostly;
static unsigned int khugepaged_max_ptes_shared __read_mostly;
#define MM_SLOTS_HASH_BITS 10
static __read_mostly DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);

static struct kmem_cache *mm_slot_cache __read_mostly;

#define MAX_PTE_MAPPED_THP 8

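/*
 * Per-attempt collapse context. @is_khugepaged distinguishes collapse
 * initiated by khugepaged from MADV_COLLAPSE and gates the max_ptes_*
 * heuristics below.
 */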
struct collapse_control {
	bool is_khugepaged;

	/* Num pages scanned per node */
	u32 node_load[MAX_NUMNODES];

	/* nodemask for allocation fallback */
	nodemask_t alloc_nmask;
};

/**
 * struct khugepaged_mm_slot - khugepaged information per mm that is being scanned
 * @slot: hash lookup from mm to mm_slot
 * @nr_pte_mapped_thp: number of pte-mapped THPs
 * @pte_mapped_thp: array of addresses of pte-mapped THPs
 */
struct khugepaged_mm_slot {
	struct mm_slot slot;

	/* pte-mapped THP in this mm */
	int nr_pte_mapped_thp;
	unsigned long pte_mapped_thp[MAX_PTE_MAPPED_THP];
};

/**
 * struct khugepaged_scan - cursor for scanning
 * @mm_head: the head of the mm list to scan
 * @mm_slot: the current mm_slot we are scanning
 * @address: the next address inside that mm to be scanned
 *
 * There is only the one khugepaged_scan instance of this cursor structure.
 */
struct khugepaged_scan {
	struct list_head mm_head;
	struct khugepaged_mm_slot *mm_slot;
	unsigned long address;
};

static struct khugepaged_scan khugepaged_scan = {
	.mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
};

#ifdef CONFIG_SYSFS
static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
					 struct kobj_attribute *attr,
					 char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_scan_sleep_millisecs);
}

static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
					  struct kobj_attribute *attr,
					  const char *buf, size_t count)
{
	unsigned int msecs;
	int err;

	err = kstrtouint(buf, 10, &msecs);
	if (err)
		return -EINVAL;

	khugepaged_scan_sleep_millisecs = msecs;
	khugepaged_sleep_expire = 0;
	wake_up_interruptible(&khugepaged_wait);

	return count;
}
static struct kobj_attribute scan_sleep_millisecs_attr =
	__ATTR_RW(scan_sleep_millisecs);

static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
					  struct kobj_attribute *attr,
					  char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
}

static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
					   struct kobj_attribute *attr,
					   const char *buf, size_t count)
{
	unsigned int msecs;
	int err;

	err = kstrtouint(buf, 10, &msecs);
	if (err)
		return -EINVAL;

	khugepaged_alloc_sleep_millisecs = msecs;
	khugepaged_sleep_expire = 0;
	wake_up_interruptible(&khugepaged_wait);

	return count;
}
static struct kobj_attribute alloc_sleep_millisecs_attr =
	__ATTR_RW(alloc_sleep_millisecs);

static ssize_t pages_to_scan_show(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_pages_to_scan);
}
static ssize_t pages_to_scan_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count)
{
	unsigned int pages;
	int err;

	err = kstrtouint(buf, 10, &pages);
	if (err || !pages)
		return -EINVAL;

	khugepaged_pages_to_scan = pages;

	return count;
}
static struct kobj_attribute pages_to_scan_attr =
	__ATTR_RW(pages_to_scan);

static ssize_t pages_collapsed_show(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_pages_collapsed);
}
static struct kobj_attribute pages_collapsed_attr =
	__ATTR_RO(pages_collapsed);

static ssize_t full_scans_show(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_full_scans);
}
static struct kobj_attribute full_scans_attr =
	__ATTR_RO(full_scans);

static ssize_t defrag_show(struct kobject *kobj,
			   struct kobj_attribute *attr, char *buf)
{
	return single_hugepage_flag_show(kobj, attr, buf,
					 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
}
static ssize_t defrag_store(struct kobject *kobj,
			    struct kobj_attribute *attr,
			    const char *buf, size_t count)
{
	return single_hugepage_flag_store(kobj, attr, buf, count,
					  TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
}
static struct kobj_attribute khugepaged_defrag_attr =
	__ATTR_RW(defrag);

/*
 * max_ptes_none controls whether khugepaged should collapse hugepages
 * over any unmapped ptes, potentially increasing the memory footprint
 * of the vmas. When max_ptes_none is 0, khugepaged will not reduce the
 * available free memory in the system as it runs. Increasing
 * max_ptes_none will instead potentially reduce the free memory in the
 * system during the khugepaged scan.
 */
static ssize_t max_ptes_none_show(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_none);
}
static ssize_t max_ptes_none_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count)
{
	int err;
	unsigned long max_ptes_none;

	err = kstrtoul(buf, 10, &max_ptes_none);
	if (err || max_ptes_none > HPAGE_PMD_NR - 1)
		return -EINVAL;

	khugepaged_max_ptes_none = max_ptes_none;

	return count;
}
static struct kobj_attribute khugepaged_max_ptes_none_attr =
	__ATTR_RW(max_ptes_none);

static ssize_t max_ptes_swap_show(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_swap);
}

static ssize_t max_ptes_swap_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count)
{
	int err;
	unsigned long max_ptes_swap;

	err = kstrtoul(buf, 10, &max_ptes_swap);
	if (err || max_ptes_swap > HPAGE_PMD_NR - 1)
		return -EINVAL;

	khugepaged_max_ptes_swap = max_ptes_swap;

	return count;
}

static struct kobj_attribute khugepaged_max_ptes_swap_attr =
	__ATTR_RW(max_ptes_swap);

static ssize_t max_ptes_shared_show(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_shared);
}

static ssize_t max_ptes_shared_store(struct kobject *kobj,
				     struct kobj_attribute *attr,
				     const char *buf, size_t count)
{
	int err;
	unsigned long max_ptes_shared;

	err = kstrtoul(buf, 10, &max_ptes_shared);
	if (err || max_ptes_shared > HPAGE_PMD_NR - 1)
		return -EINVAL;

	khugepaged_max_ptes_shared = max_ptes_shared;

	return count;
}

static struct kobj_attribute khugepaged_max_ptes_shared_attr =
	__ATTR_RW(max_ptes_shared);

static struct attribute *khugepaged_attr[] = {
	&khugepaged_defrag_attr.attr,
	&khugepaged_max_ptes_none_attr.attr,
	&khugepaged_max_ptes_swap_attr.attr,
	&khugepaged_max_ptes_shared_attr.attr,
	&pages_to_scan_attr.attr,
	&pages_collapsed_attr.attr,
	&full_scans_attr.attr,
	&scan_sleep_millisecs_attr.attr,
	&alloc_sleep_millisecs_attr.attr,
	NULL,
};

struct attribute_group khugepaged_attr_group = {
	.attrs = khugepaged_attr,
	.name = "khugepaged",
};
#endif /* CONFIG_SYSFS */

int hugepage_madvise(struct vm_area_struct *vma,
		     unsigned long *vm_flags, int advice)
{
	switch (advice) {
	case MADV_HUGEPAGE:
#ifdef CONFIG_S390
		/*
		 * qemu blindly sets MADV_HUGEPAGE on all allocations, but s390
		 * can't handle this properly after s390_enable_sie, so we simply
		 * ignore the madvise to prevent qemu from causing a SIGSEGV.
		 */
		if (mm_has_pgste(vma->vm_mm))
			return 0;
#endif
		*vm_flags &= ~VM_NOHUGEPAGE;
		*vm_flags |= VM_HUGEPAGE;
		/*
		 * If the vma becomes good for khugepaged to scan,
		 * register it here without waiting for a page fault
		 * that may not happen any time soon.
		 */
		khugepaged_enter_vma(vma, *vm_flags);
		break;
	case MADV_NOHUGEPAGE:
		*vm_flags &= ~VM_HUGEPAGE;
		*vm_flags |= VM_NOHUGEPAGE;
		/*
		 * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning
		 * this vma even if we leave the mm registered in khugepaged if
		 * it got registered before VM_NOHUGEPAGE was set.
		 */
		break;
	}

	return 0;
}

int __init khugepaged_init(void)
{
	mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
					  sizeof(struct khugepaged_mm_slot),
					  __alignof__(struct khugepaged_mm_slot),
					  0, NULL);
	if (!mm_slot_cache)
		return -ENOMEM;

	khugepaged_pages_to_scan = HPAGE_PMD_NR * 8;
	khugepaged_max_ptes_none = HPAGE_PMD_NR - 1;
	khugepaged_max_ptes_swap = HPAGE_PMD_NR / 8;
	khugepaged_max_ptes_shared = HPAGE_PMD_NR / 2;

	return 0;
}

void __init khugepaged_destroy(void)
{
	kmem_cache_destroy(mm_slot_cache);
}

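/* True once the mm is being torn down: all mm_users references are gone. */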
static inline int hpage_collapse_test_exit(struct mm_struct *mm)
{
	return atomic_read(&mm->mm_users) == 0;
}

void __khugepaged_enter(struct mm_struct *mm)
{
	struct khugepaged_mm_slot *mm_slot;
	struct mm_slot *slot;
	int wakeup;

	mm_slot = mm_slot_alloc(mm_slot_cache);
	if (!mm_slot)
		return;

	slot = &mm_slot->slot;

	/* __khugepaged_exit() must not run from under us */
	VM_BUG_ON_MM(hpage_collapse_test_exit(mm), mm);
	if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
		mm_slot_free(mm_slot_cache, mm_slot);
		return;
	}

	spin_lock(&khugepaged_mm_lock);
	mm_slot_insert(mm_slots_hash, mm, slot);
	/*
	 * Insert just behind the scanning cursor, to let the area settle
	 * down a little.
	 */
	wakeup = list_empty(&khugepaged_scan.mm_head);
	list_add_tail(&slot->mm_node, &khugepaged_scan.mm_head);
	spin_unlock(&khugepaged_mm_lock);

	mmgrab(mm);
	if (wakeup)
		wake_up_interruptible(&khugepaged_wait);
}

void khugepaged_enter_vma(struct vm_area_struct *vma,
			  unsigned long vm_flags)
{
	if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags) &&
	    hugepage_flags_enabled()) {
		if (hugepage_vma_check(vma, vm_flags, false, false, true))
			__khugepaged_enter(vma->vm_mm);
	}
}

void __khugepaged_exit(struct mm_struct *mm)
{
	struct khugepaged_mm_slot *mm_slot;
	struct mm_slot *slot;
	int free = 0;

	spin_lock(&khugepaged_mm_lock);
	slot = mm_slot_lookup(mm_slots_hash, mm);
	mm_slot = mm_slot_entry(slot, struct khugepaged_mm_slot, slot);
	if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
		hash_del(&slot->hash);
		list_del(&slot->mm_node);
		free = 1;
	}
	spin_unlock(&khugepaged_mm_lock);

	if (free) {
		clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
		mm_slot_free(mm_slot_cache, mm_slot);
		mmdrop(mm);
	} else if (mm_slot) {
		/*
		 * This is required to serialize against
		 * hpage_collapse_test_exit() (which is guaranteed to run
		 * under mmap_lock read mode). Stop here (after we return,
		 * all pagetables will be destroyed) until khugepaged has
		 * finished working on the pagetables under the mmap_lock.
		 */
		mmap_write_lock(mm);
		mmap_write_unlock(mm);
	}
}

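/*
 * Undo the isolation done in __collapse_huge_page_isolate(): fix up the
 * NR_ISOLATED counters, unlock the folio and put it back on the LRU.
 */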
static void release_pte_folio(struct folio *folio)
{
	node_stat_mod_folio(folio,
			NR_ISOLATED_ANON + folio_is_file_lru(folio),
			-folio_nr_pages(folio));
	folio_unlock(folio);
	folio_putback_lru(folio);
}

static void release_pte_page(struct page *page)
{
	release_pte_folio(page_folio(page));
}

static void release_pte_pages(pte_t *pte, pte_t *_pte,
		struct list_head *compound_pagelist)
{
	struct folio *folio, *tmp;

	while (--_pte >= pte) {
		pte_t pteval = *_pte;
		unsigned long pfn;

		if (pte_none(pteval))
			continue;
		pfn = pte_pfn(pteval);
		if (is_zero_pfn(pfn))
			continue;
		folio = pfn_folio(pfn);
		if (folio_test_large(folio))
			continue;
		release_pte_folio(folio);
	}

	list_for_each_entry_safe(folio, tmp, compound_pagelist, lru) {
		list_del(&folio->lru);
		release_pte_folio(folio);
	}
}

534{
535 int expected_refcount;
536
537 expected_refcount = total_mapcount(page);
538 if (PageSwapCache(page))
539 expected_refcount += compound_nr(page);
540
541 return page_count(page) == expected_refcount;
542}
543
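/*
 * Lock each small page backing the range, isolate it from the LRU and
 * check that it is not pinned; on failure, release everything isolated
 * so far via release_pte_pages().
 */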
static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
					unsigned long address,
					pte_t *pte,
					struct collapse_control *cc,
					struct list_head *compound_pagelist)
{
	struct page *page = NULL;
	pte_t *_pte;
	int none_or_zero = 0, shared = 0, result = SCAN_FAIL, referenced = 0;
	bool writable = false;

	for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
	     _pte++, address += PAGE_SIZE) {
		pte_t pteval = *_pte;
		if (pte_none(pteval) || (pte_present(pteval) &&
				is_zero_pfn(pte_pfn(pteval)))) {
			++none_or_zero;
			if (!userfaultfd_armed(vma) &&
			    (!cc->is_khugepaged ||
			     none_or_zero <= khugepaged_max_ptes_none)) {
				continue;
			} else {
				result = SCAN_EXCEED_NONE_PTE;
				count_vm_event(THP_SCAN_EXCEED_NONE_PTE);
				goto out;
			}
		}
		if (!pte_present(pteval)) {
			result = SCAN_PTE_NON_PRESENT;
			goto out;
		}
		page = vm_normal_page(vma, address, pteval);
		if (unlikely(!page) || unlikely(is_zone_device_page(page))) {
			result = SCAN_PAGE_NULL;
			goto out;
		}

		VM_BUG_ON_PAGE(!PageAnon(page), page);

		if (page_mapcount(page) > 1) {
			++shared;
			if (cc->is_khugepaged &&
			    shared > khugepaged_max_ptes_shared) {
				result = SCAN_EXCEED_SHARED_PTE;
				count_vm_event(THP_SCAN_EXCEED_SHARED_PTE);
				goto out;
			}
		}

		if (PageCompound(page)) {
			struct page *p;
			page = compound_head(page);

			/*
			 * Check if we have dealt with the compound page
			 * already
			 */
			list_for_each_entry(p, compound_pagelist, lru) {
				if (page == p)
					goto next;
			}
		}

		/*
		 * We can do it before isolate_lru_page because the
		 * page can't be freed from under us. NOTE: PG_lock
		 * is needed to serialize against split_huge_page
		 * when invoked from the VM.
		 */
		if (!trylock_page(page)) {
			result = SCAN_PAGE_LOCK;
			goto out;
		}

		/*
		 * Check if the page has any GUP (or other external) pins.
		 *
		 * The page table that maps the page has been already unlinked
		 * from the page table tree and this process cannot get
		 * an additional pin on the page.
		 *
		 * New pins can come later if the page is shared across fork,
		 * but not from this process. The other process cannot write to
		 * the page, only trigger CoW.
		 */
		if (!is_refcount_suitable(page)) {
			unlock_page(page);
			result = SCAN_PAGE_COUNT;
			goto out;
		}

		/*
		 * Isolate the page to avoid collapsing a hugepage
		 * currently in use by the VM.
		 */
		if (!isolate_lru_page(page)) {
			unlock_page(page);
			result = SCAN_DEL_PAGE_LRU;
			goto out;
		}
		mod_node_page_state(page_pgdat(page),
				NR_ISOLATED_ANON + page_is_file_lru(page),
				compound_nr(page));
		VM_BUG_ON_PAGE(!PageLocked(page), page);
		VM_BUG_ON_PAGE(PageLRU(page), page);

		if (PageCompound(page))
			list_add_tail(&page->lru, compound_pagelist);
next:
		/*
		 * If collapse was initiated by khugepaged, check that there
		 * are enough young ptes to justify collapsing the page.
		 */
		if (cc->is_khugepaged &&
		    (pte_young(pteval) || page_is_young(page) ||
		     PageReferenced(page) || mmu_notifier_test_young(vma->vm_mm,
								     address)))
			referenced++;

		if (pte_write(pteval))
			writable = true;
	}

	if (unlikely(!writable)) {
		result = SCAN_PAGE_RO;
	} else if (unlikely(cc->is_khugepaged && !referenced)) {
		result = SCAN_LACK_REFERENCED_PAGE;
	} else {
		result = SCAN_SUCCEED;
		trace_mm_collapse_huge_page_isolate(page, none_or_zero,
						    referenced, writable, result);
		return result;
	}
out:
	release_pte_pages(pte, _pte, compound_pagelist);
	trace_mm_collapse_huge_page_isolate(page, none_or_zero,
					    referenced, writable, result);
	return result;
}

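/*
 * Copy the contents of each isolated small page into the new hugepage,
 * clear the old ptes and rmap entries, and release the small pages.
 */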
static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
				      struct vm_area_struct *vma,
				      unsigned long address,
				      spinlock_t *ptl,
				      struct list_head *compound_pagelist)
{
	struct page *src_page, *tmp;
	pte_t *_pte;
	for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
	     _pte++, page++, address += PAGE_SIZE) {
		pte_t pteval = *_pte;

		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
			clear_user_highpage(page, address);
			add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
			if (is_zero_pfn(pte_pfn(pteval))) {
				/*
				 * ptl mostly unnecessary.
				 */
				spin_lock(ptl);
				ptep_clear(vma->vm_mm, address, _pte);
				spin_unlock(ptl);
			}
		} else {
			src_page = pte_page(pteval);
			copy_user_highpage(page, src_page, address, vma);
			if (!PageCompound(src_page))
				release_pte_page(src_page);
			/*
			 * ptl mostly unnecessary, but preempt has to
			 * be disabled to update the per-cpu stats
			 * inside page_remove_rmap().
			 */
			spin_lock(ptl);
			ptep_clear(vma->vm_mm, address, _pte);
			page_remove_rmap(src_page, vma, false);
			spin_unlock(ptl);
			free_page_and_swap_cache(src_page);
		}
	}

	list_for_each_entry_safe(src_page, tmp, compound_pagelist, lru) {
		list_del(&src_page->lru);
		mod_node_page_state(page_pgdat(src_page),
				    NR_ISOLATED_ANON + page_is_file_lru(src_page),
				    -compound_nr(src_page));
		unlock_page(src_page);
		free_swap_cache(src_page);
		putback_lru_page(src_page);
	}
}

static void khugepaged_alloc_sleep(void)
{
	DEFINE_WAIT(wait);

	add_wait_queue(&khugepaged_wait, &wait);
	__set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE);
	schedule_timeout(msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
	remove_wait_queue(&khugepaged_wait, &wait);
}

struct collapse_control khugepaged_collapse_control = {
	.is_khugepaged = true,
};

static bool hpage_collapse_scan_abort(int nid, struct collapse_control *cc)
{
	int i;

	/*
	 * If node_reclaim_mode is disabled, then no extra effort is made to
	 * allocate memory locally.
	 */
	if (!node_reclaim_enabled())
		return false;

	/* If there is a count for this node already, it must be acceptable */
	if (cc->node_load[nid])
		return false;

	for (i = 0; i < MAX_NUMNODES; i++) {
		if (!cc->node_load[i])
			continue;
		if (node_distance(nid, i) > node_reclaim_distance)
			return true;
	}
	return false;
}

#define khugepaged_defrag()					\
	(transparent_hugepage_flags &				\
	 (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG))

/* Defrag for khugepaged will enter direct reclaim/compaction if necessary */
static inline gfp_t alloc_hugepage_khugepaged_gfpmask(void)
{
	return khugepaged_defrag() ? GFP_TRANSHUGE : GFP_TRANSHUGE_LIGHT;
}

#ifdef CONFIG_NUMA
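/*
 * Pick the node that most of the scanned ptes map to; any other node
 * with the same hit count is kept as an allocation fallback.
 */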
static int hpage_collapse_find_target_node(struct collapse_control *cc)
{
	int nid, target_node = 0, max_value = 0;

	/* find first node with max normal pages hit */
	for (nid = 0; nid < MAX_NUMNODES; nid++)
		if (cc->node_load[nid] > max_value) {
			max_value = cc->node_load[nid];
			target_node = nid;
		}

	for_each_online_node(nid) {
		if (max_value == cc->node_load[nid])
			node_set(nid, cc->alloc_nmask);
	}

	return target_node;
}
#else
static int hpage_collapse_find_target_node(struct collapse_control *cc)
{
	return 0;
}
#endif

static bool hpage_collapse_alloc_page(struct page **hpage, gfp_t gfp, int node,
				      nodemask_t *nmask)
{
	*hpage = __alloc_pages(gfp, HPAGE_PMD_ORDER, node, nmask);
	if (unlikely(!*hpage)) {
		count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
		return false;
	}

	prep_transhuge_page(*hpage);
	count_vm_event(THP_COLLAPSE_ALLOC);
	return true;
}

/*
 * If the mmap_lock was temporarily dropped, revalidate the vma
 * before taking the mmap_lock again.
 * Returns an enum scan_result value.
 */
static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
				   bool expect_anon,
				   struct vm_area_struct **vmap,
				   struct collapse_control *cc)
{
	struct vm_area_struct *vma;

	if (unlikely(hpage_collapse_test_exit(mm)))
		return SCAN_ANY_PROCESS;

	*vmap = vma = find_vma(mm, address);
	if (!vma)
		return SCAN_VMA_NULL;

	if (!transhuge_vma_suitable(vma, address))
		return SCAN_ADDRESS_RANGE;
	if (!hugepage_vma_check(vma, vma->vm_flags, false, false,
				cc->is_khugepaged))
		return SCAN_VMA_CHECK;
	/*
	 * Anon VMA expected: the address may have been unmapped and then
	 * remapped to file after khugepaged reacquired the mmap_lock.
	 *
	 * hugepage_vma_check may return true for qualified file
	 * vmas.
	 */
	if (expect_anon && (!(*vmap)->anon_vma || !vma_is_anonymous(*vmap)))
		return SCAN_PAGE_ANON;
	return SCAN_SUCCEED;
}

/*
 * See pmd_trans_unstable() for how the result may change out from
 * underneath us, even if we hold mmap_lock in read.
 */
static int find_pmd_or_thp_or_none(struct mm_struct *mm,
				   unsigned long address,
				   pmd_t **pmd)
{
	pmd_t pmde;

	*pmd = mm_find_pmd(mm, address);
	if (!*pmd)
		return SCAN_PMD_NULL;

	pmde = pmdp_get_lockless(*pmd);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	/* See comments in pmd_none_or_trans_huge_or_clear_bad() */
	barrier();
#endif
	if (pmd_none(pmde))
		return SCAN_PMD_NONE;
	if (!pmd_present(pmde))
		return SCAN_PMD_NULL;
	if (pmd_trans_huge(pmde))
		return SCAN_PMD_MAPPED;
	if (pmd_devmap(pmde))
		return SCAN_PMD_NULL;
	if (pmd_bad(pmde))
		return SCAN_PMD_NULL;
	return SCAN_SUCCEED;
}

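/* Re-walk the page tables and check that the pmd for @address is unchanged. */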
static int check_pmd_still_valid(struct mm_struct *mm,
				 unsigned long address,
				 pmd_t *pmd)
{
	pmd_t *new_pmd;
	int result = find_pmd_or_thp_or_none(mm, address, &new_pmd);

	if (result != SCAN_SUCCEED)
		return result;
	if (new_pmd != pmd)
		return SCAN_FAIL;
	return SCAN_SUCCEED;
}

/*
 * Bring missing pages in from swap, to complete THP collapse.
 * Only done if hpage_collapse_scan_pmd believes it is worthwhile.
 *
 * Called and returns without pte mapped or spinlocks held.
 * Note that if this function fails (returns anything other than
 * SCAN_SUCCEED), mmap_lock will have been released.
 */
static int __collapse_huge_page_swapin(struct mm_struct *mm,
				       struct vm_area_struct *vma,
				       unsigned long haddr, pmd_t *pmd,
				       int referenced)
{
	int swapped_in = 0;
	vm_fault_t ret = 0;
	unsigned long address, end = haddr + (HPAGE_PMD_NR * PAGE_SIZE);

	for (address = haddr; address < end; address += PAGE_SIZE) {
		struct vm_fault vmf = {
			.vma = vma,
			.address = address,
			.pgoff = linear_page_index(vma, haddr),
			.flags = FAULT_FLAG_ALLOW_RETRY,
			.pmd = pmd,
		};

		vmf.pte = pte_offset_map(pmd, address);
		vmf.orig_pte = *vmf.pte;
		if (!is_swap_pte(vmf.orig_pte)) {
			pte_unmap(vmf.pte);
			continue;
		}
		ret = do_swap_page(&vmf);

		/*
		 * do_swap_page returns VM_FAULT_RETRY with released mmap_lock.
		 * Note we treat VM_FAULT_RETRY as VM_FAULT_ERROR here because
		 * we do not retry here and swap entry will remain in pagetable
		 * resulting in later failure.
		 */
		if (ret & VM_FAULT_RETRY) {
			trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
			/* Likely, but not guaranteed, that page lock failed */
			return SCAN_PAGE_LOCK;
		}
		if (ret & VM_FAULT_ERROR) {
			mmap_read_unlock(mm);
			trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
			return SCAN_FAIL;
		}
		swapped_in++;
	}

	/* Drain LRU add pagevec to remove extra pin on the swapped in pages */
	if (swapped_in)
		lru_add_drain();

	trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 1);
	return SCAN_SUCCEED;
}

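/* Allocate a hugepage on the preferred node and charge it to @mm's memcg. */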
static int alloc_charge_hpage(struct page **hpage, struct mm_struct *mm,
			      struct collapse_control *cc)
{
	gfp_t gfp = (cc->is_khugepaged ? alloc_hugepage_khugepaged_gfpmask() :
		     GFP_TRANSHUGE);
	int node = hpage_collapse_find_target_node(cc);

	if (!hpage_collapse_alloc_page(hpage, gfp, node, &cc->alloc_nmask))
		return SCAN_ALLOC_HUGE_PAGE_FAIL;
	if (unlikely(mem_cgroup_charge(page_folio(*hpage), mm, gfp)))
		return SCAN_CGROUP_CHARGE_FAIL;
	count_memcg_page_event(*hpage, THP_COLLAPSE_ALLOC);
	return SCAN_SUCCEED;
}

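/*
 * Collapse the pmd-sized range at @address into a hugepage: allocate and
 * charge the hugepage, swap in any missing pages, revalidate the vma and
 * pmd under mmap_lock held for write, flush the old pmd, isolate and copy
 * the small pages, then install the new huge pmd.
 *
 * Called with mmap_lock held for read; returns with it released.
 */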
static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
			      int referenced, int unmapped,
			      struct collapse_control *cc)
{
	LIST_HEAD(compound_pagelist);
	pmd_t *pmd, _pmd;
	pte_t *pte;
	pgtable_t pgtable;
	struct page *hpage;
	spinlock_t *pmd_ptl, *pte_ptl;
	int result = SCAN_FAIL;
	struct vm_area_struct *vma;
	struct mmu_notifier_range range;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	/*
	 * Before allocating the hugepage, release the mmap_lock read lock.
	 * The allocation can take potentially a long time if it involves
	 * sync compaction, and we do not need to hold the mmap_lock during
	 * that. We will recheck the vma after taking it again in write mode.
	 */
	mmap_read_unlock(mm);

	result = alloc_charge_hpage(&hpage, mm, cc);
	if (result != SCAN_SUCCEED)
		goto out_nolock;

	mmap_read_lock(mm);
	result = hugepage_vma_revalidate(mm, address, true, &vma, cc);
	if (result != SCAN_SUCCEED) {
		mmap_read_unlock(mm);
		goto out_nolock;
	}

	result = find_pmd_or_thp_or_none(mm, address, &pmd);
	if (result != SCAN_SUCCEED) {
		mmap_read_unlock(mm);
		goto out_nolock;
	}

	if (unmapped) {
		/*
		 * __collapse_huge_page_swapin will return with mmap_lock
		 * released when it fails. So we jump out_nolock directly in
		 * that case. Continuing to collapse causes inconsistency.
		 */
		result = __collapse_huge_page_swapin(mm, vma, address, pmd,
						     referenced);
		if (result != SCAN_SUCCEED)
			goto out_nolock;
	}

	mmap_read_unlock(mm);
	/*
	 * Prevent all access to pagetables with the exception of
	 * gup_fast later handled by the ptep_clear_flush and the VM
	 * handled by the anon_vma lock + PG_lock.
	 */
	mmap_write_lock(mm);
	result = hugepage_vma_revalidate(mm, address, true, &vma, cc);
	if (result != SCAN_SUCCEED)
		goto out_up_write;
	/* check if the pmd is still valid */
	result = check_pmd_still_valid(mm, address, pmd);
	if (result != SCAN_SUCCEED)
		goto out_up_write;

	anon_vma_lock_write(vma->anon_vma);

	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, address,
				address + HPAGE_PMD_SIZE);
	mmu_notifier_invalidate_range_start(&range);

	pte = pte_offset_map(pmd, address);
	pte_ptl = pte_lockptr(mm, pmd);

	pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */
	/*
	 * This removes any huge TLB entry from the CPU so we won't allow
	 * huge and small TLB entries for the same virtual address to
	 * avoid the risk of CPU bugs in that area.
	 *
	 * Parallel fast GUP is fine since fast GUP will back off when
	 * it detects PMD is changed.
	 */
	_pmd = pmdp_collapse_flush(vma, address, pmd);
	spin_unlock(pmd_ptl);
	mmu_notifier_invalidate_range_end(&range);
	tlb_remove_table_sync_one();

	spin_lock(pte_ptl);
	result = __collapse_huge_page_isolate(vma, address, pte, cc,
					      &compound_pagelist);
	spin_unlock(pte_ptl);

	if (unlikely(result != SCAN_SUCCEED)) {
		pte_unmap(pte);
		spin_lock(pmd_ptl);
		BUG_ON(!pmd_none(*pmd));
		/*
		 * We can only use set_pmd_at when establishing
		 * hugepmds and never for establishing regular pmds that
		 * points to regular pagetables. Use pmd_populate for that
		 */
		pmd_populate(mm, pmd, pmd_pgtable(_pmd));
		spin_unlock(pmd_ptl);
		anon_vma_unlock_write(vma->anon_vma);
		goto out_up_write;
	}

	/*
	 * All pages are isolated and locked so anon_vma rmap
	 * can't run anymore.
	 */
	anon_vma_unlock_write(vma->anon_vma);

	__collapse_huge_page_copy(pte, hpage, vma, address, pte_ptl,
				  &compound_pagelist);
	pte_unmap(pte);
	/*
	 * spin_lock() below is not the equivalent of smp_wmb(), but
	 * the smp_wmb() inside __SetPageUptodate() can be reused to
	 * avoid the copy_huge_page writes to become visible after
	 * the set_pmd_at() write.
	 */
	__SetPageUptodate(hpage);
	pgtable = pmd_pgtable(_pmd);

	_pmd = mk_huge_pmd(hpage, vma->vm_page_prot);
	_pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);

	spin_lock(pmd_ptl);
	BUG_ON(!pmd_none(*pmd));
	page_add_new_anon_rmap(hpage, vma, address);
	lru_cache_add_inactive_or_unevictable(hpage, vma);
	pgtable_trans_huge_deposit(mm, pmd, pgtable);
	set_pmd_at(mm, address, pmd, _pmd);
	update_mmu_cache_pmd(vma, address, pmd);
	spin_unlock(pmd_ptl);

	hpage = NULL;

	result = SCAN_SUCCEED;
out_up_write:
	mmap_write_unlock(mm);
out_nolock:
	if (hpage) {
		mem_cgroup_uncharge(page_folio(hpage));
		put_page(hpage);
	}
	trace_mm_collapse_huge_page(mm, result == SCAN_SUCCEED, result);
	return result;
}

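/*
 * Scan the HPAGE_PMD_NR ptes mapped at @address and apply the collapse
 * heuristics; the returned scan_result says whether (and if not, why not)
 * a collapse should be attempted.
 */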
Zach O'Keefe7d2c4382022-07-06 16:59:28 -07001139static int hpage_collapse_scan_pmd(struct mm_struct *mm,
1140 struct vm_area_struct *vma,
1141 unsigned long address, bool *mmap_locked,
1142 struct collapse_control *cc)
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001143{
1144 pmd_t *pmd;
1145 pte_t *pte, *_pte;
Zach O'Keefe50ad2f22022-07-06 16:59:23 -07001146 int result = SCAN_FAIL, referenced = 0;
Kirill A. Shutemov71a2c112020-06-03 16:00:30 -07001147 int none_or_zero = 0, shared = 0;
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001148 struct page *page = NULL;
1149 unsigned long _address;
1150 spinlock_t *ptl;
1151 int node = NUMA_NO_NODE, unmapped = 0;
Ebru Akagunduz0db501f2016-07-26 15:26:46 -07001152 bool writable = false;
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001153
1154 VM_BUG_ON(address & ~HPAGE_PMD_MASK);
1155
Zach O'Keefe50722802022-07-06 16:59:26 -07001156 result = find_pmd_or_thp_or_none(mm, address, &pmd);
1157 if (result != SCAN_SUCCEED)
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001158 goto out;
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001159
Zach O'Keefe34d6b472022-07-06 16:59:21 -07001160 memset(cc->node_load, 0, sizeof(cc->node_load));
Yang Shie031ff92022-11-08 10:43:56 -08001161 nodes_clear(cc->alloc_nmask);
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001162 pte = pte_offset_map_lock(mm, pmd, address, &ptl);
Miaohe Lin36ee2c72022-06-25 17:28:12 +08001163 for (_address = address, _pte = pte; _pte < pte + HPAGE_PMD_NR;
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001164 _pte++, _address += PAGE_SIZE) {
1165 pte_t pteval = *_pte;
1166 if (is_swap_pte(pteval)) {
Zach O'Keefed8ea7cc2022-07-06 16:59:24 -07001167 ++unmapped;
1168 if (!cc->is_khugepaged ||
1169 unmapped <= khugepaged_max_ptes_swap) {
Peter Xue1e267c2020-04-06 20:06:04 -07001170 /*
1171 * Always be strict with uffd-wp
1172 * enabled swap entries. Please see
1173 * comment below for pte_uffd_wp().
1174 */
1175 if (pte_swp_uffd_wp(pteval)) {
1176 result = SCAN_PTE_UFFD_WP;
1177 goto out_unmap;
1178 }
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001179 continue;
1180 } else {
1181 result = SCAN_EXCEED_SWAP_PTE;
Yang Yange9ea8742022-01-14 14:07:55 -08001182 count_vm_event(THP_SCAN_EXCEED_SWAP_PTE);
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001183 goto out_unmap;
1184 }
1185 }
1186 if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
Zach O'Keefed8ea7cc2022-07-06 16:59:24 -07001187 ++none_or_zero;
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001188 if (!userfaultfd_armed(vma) &&
Zach O'Keefed8ea7cc2022-07-06 16:59:24 -07001189 (!cc->is_khugepaged ||
1190 none_or_zero <= khugepaged_max_ptes_none)) {
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001191 continue;
1192 } else {
1193 result = SCAN_EXCEED_NONE_PTE;
Yang Yange9ea8742022-01-14 14:07:55 -08001194 count_vm_event(THP_SCAN_EXCEED_NONE_PTE);
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001195 goto out_unmap;
1196 }
1197 }
Peter Xue1e267c2020-04-06 20:06:04 -07001198 if (pte_uffd_wp(pteval)) {
1199 /*
1200 * Don't collapse the page if any of the small
1201 * PTEs are armed with uffd write protection.
1202 * Here we can also mark the new huge pmd as
1203 * write protected if any of the small ones is
Haitao Shi8958b242020-12-15 20:47:26 -08001204 * marked but that could bring unknown
Peter Xue1e267c2020-04-06 20:06:04 -07001205 * userfault messages that falls outside of
1206 * the registered range. So, just be simple.
1207 */
1208 result = SCAN_PTE_UFFD_WP;
1209 goto out_unmap;
1210 }
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001211 if (pte_write(pteval))
1212 writable = true;
1213
1214 page = vm_normal_page(vma, _address, pteval);
Alex Sierra3218f872022-07-15 10:05:11 -05001215 if (unlikely(!page) || unlikely(is_zone_device_page(page))) {
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001216 result = SCAN_PAGE_NULL;
1217 goto out_unmap;
1218 }
1219
Zach O'Keefed8ea7cc2022-07-06 16:59:24 -07001220 if (page_mapcount(page) > 1) {
1221 ++shared;
1222 if (cc->is_khugepaged &&
1223 shared > khugepaged_max_ptes_shared) {
1224 result = SCAN_EXCEED_SHARED_PTE;
1225 count_vm_event(THP_SCAN_EXCEED_SHARED_PTE);
1226 goto out_unmap;
1227 }
Kirill A. Shutemov71a2c112020-06-03 16:00:30 -07001228 }
1229
Kirill A. Shutemov5503fbf2020-06-03 16:00:23 -07001230 page = compound_head(page);
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001231
1232 /*
1233 * Record which node the original page is from and save this
Zach O'Keefe34d6b472022-07-06 16:59:21 -07001234 * information to cc->node_load[].
Quanfa Fu0b8f0d82022-01-14 14:09:25 -08001235		 * Khugepaged will allocate the hugepage from the node that has
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001236		 * the max hit record.
1237 */
1238 node = page_to_nid(page);
Zach O'Keefe7d2c4382022-07-06 16:59:28 -07001239 if (hpage_collapse_scan_abort(node, cc)) {
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001240 result = SCAN_SCAN_ABORT;
1241 goto out_unmap;
1242 }
Zach O'Keefe34d6b472022-07-06 16:59:21 -07001243 cc->node_load[node]++;
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001244 if (!PageLRU(page)) {
1245 result = SCAN_PAGE_LRU;
1246 goto out_unmap;
1247 }
1248 if (PageLocked(page)) {
1249 result = SCAN_PAGE_LOCK;
1250 goto out_unmap;
1251 }
1252 if (!PageAnon(page)) {
1253 result = SCAN_PAGE_ANON;
1254 goto out_unmap;
1255 }
1256
1257 /*
Kirill A. Shutemov94456892020-06-03 16:00:20 -07001258 * Check if the page has any GUP (or other external) pins.
1259 *
Hugh Dickinscb67f422022-11-02 18:51:38 -07001260 * Here the check may be racy:
1261		 * it may see total_mapcount > refcount in some cases.
Kirill A. Shutemov94456892020-06-03 16:00:20 -07001262		 * But such a case is ephemeral, and we could always retry the
1263		 * collapse later. However, it may report a false positive if the
1264		 * page has excessive GUP pins (e.g. 512). Anyway, the same check
1265		 * will be done again later, so the risk seems low.
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001266 */
Kirill A. Shutemov94456892020-06-03 16:00:20 -07001267 if (!is_refcount_suitable(page)) {
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001268 result = SCAN_PAGE_COUNT;
1269 goto out_unmap;
1270 }
Zach O'Keefed8ea7cc2022-07-06 16:59:24 -07001271
1272 /*
1273 * If collapse was initiated by khugepaged, check that there is
1274		 * enough young PTEs to justify collapsing the page
1275 */
1276 if (cc->is_khugepaged &&
1277 (pte_young(pteval) || page_is_young(page) ||
1278 PageReferenced(page) || mmu_notifier_test_young(vma->vm_mm,
1279 address)))
Ebru Akagunduz0db501f2016-07-26 15:26:46 -07001280 referenced++;
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001281 }
Kirill A. Shutemovffe945e2020-06-03 16:00:09 -07001282 if (!writable) {
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001283 result = SCAN_PAGE_RO;
Zach O'Keefed8ea7cc2022-07-06 16:59:24 -07001284 } else if (cc->is_khugepaged &&
1285 (!referenced ||
1286 (unmapped && referenced < HPAGE_PMD_NR / 2))) {
Kirill A. Shutemovffe945e2020-06-03 16:00:09 -07001287 result = SCAN_LACK_REFERENCED_PAGE;
1288 } else {
1289 result = SCAN_SUCCEED;
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001290 }
1291out_unmap:
1292 pte_unmap_unlock(pte, ptl);
Zach O'Keefe50ad2f22022-07-06 16:59:23 -07001293 if (result == SCAN_SUCCEED) {
1294 result = collapse_huge_page(mm, address, referenced,
1295 unmapped, cc);
Michel Lespinassec1e8d7c2020-06-08 21:33:54 -07001296 /* collapse_huge_page will return with the mmap_lock released */
Zach O'Keefe50ad2f22022-07-06 16:59:23 -07001297 *mmap_locked = false;
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001298 }
1299out:
1300 trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced,
1301 none_or_zero, result, unmapped);
Zach O'Keefe50ad2f22022-07-06 16:59:23 -07001302 return result;
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001303}
1304
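/*
 * A worked example of the limits checked above (a sketch, assuming the
 * defaults assigned earlier in this file: max_ptes_none = HPAGE_PMD_NR - 1,
 * max_ptes_swap = HPAGE_PMD_NR / 8, max_ptes_shared = HPAGE_PMD_NR / 2):
 * with 4K pages and a 2M PMD, HPAGE_PMD_NR is 512, so khugepaged tolerates
 * up to 511 none/zero PTEs, 64 swapped-out PTEs and 256 shared PTEs per PMD
 * range before giving up on it. MADV_COLLAPSE (!cc->is_khugepaged) bypasses
 * these three limits entirely.
 */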
Qi Zhengb26e2702022-08-31 11:19:46 +08001305static void collect_mm_slot(struct khugepaged_mm_slot *mm_slot)
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001306{
Qi Zhengb26e2702022-08-31 11:19:46 +08001307 struct mm_slot *slot = &mm_slot->slot;
1308 struct mm_struct *mm = slot->mm;
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001309
Lance Roy35f3aa32018-10-04 23:45:47 -07001310 lockdep_assert_held(&khugepaged_mm_lock);
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001311
Zach O'Keefe7d2c4382022-07-06 16:59:28 -07001312 if (hpage_collapse_test_exit(mm)) {
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001313 /* free mm_slot */
Qi Zhengb26e2702022-08-31 11:19:46 +08001314 hash_del(&slot->hash);
1315 list_del(&slot->mm_node);
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001316
1317 /*
1318 * Not strictly needed because the mm exited already.
1319 *
1320 * clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
1321 */
1322
1323		/* khugepaged_mm_lock is actually not necessary for the below */
Qi Zhengb26e2702022-08-31 11:19:46 +08001324 mm_slot_free(mm_slot_cache, mm_slot);
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001325 mmdrop(mm);
1326 }
1327}
1328
Matthew Wilcox (Oracle)396bcc52020-04-06 20:04:35 -07001329#ifdef CONFIG_SHMEM
Song Liu27e1f822019-09-23 15:38:30 -07001330/*
1331 * Notify khugepaged that the given addr of the mm maps a pte-mapped THP. Then
1332 * khugepaged should try to collapse the page table.
Zach O'Keefe34488392022-09-22 15:40:39 -07001333 *
1334 * Note that following race exists:
1335 * (1) khugepaged calls khugepaged_collapse_pte_mapped_thps() for mm_struct A,
1336 * emptying the A's ->pte_mapped_thp[] array.
1337 * (2) MADV_COLLAPSE collapses some file extent with target mm_struct B, and
1338 * retract_page_tables() finds a VMA in mm_struct A mapping the same extent
1339 * (at virtual address X) and adds an entry (for X) into mm_struct A's
1340 * ->pte-mapped_thp[] array.
1341 * (3) khugepaged calls khugepaged_collapse_scan_file() for mm_struct A at X,
1342 * sees a pte-mapped THP (SCAN_PTE_MAPPED_HUGEPAGE) and adds an entry
1343 * (for X) into mm_struct A's ->pte-mapped_thp[] array.
1344 * Thus, it's possible the same address is added multiple times for the same
1345 * mm_struct. Should this happen, we'll simply attempt
1346 * collapse_pte_mapped_thp() multiple times for the same address, under the same
1347 * exclusive mmap_lock, and assuming the first call is successful, subsequent
1348 * attempts will return quickly (without grabbing any additional locks) when
1349 * a huge pmd is found in find_pmd_or_thp_or_none(). Since this is a cheap
1350 * check, and since this is a rare occurrence, the cost of preventing this
1351 * "multiple-add" is thought to be more expensive than just handling it, should
1352 * it occur.
Song Liu27e1f822019-09-23 15:38:30 -07001353 */
Zach O'Keefe58ac9a82022-09-22 15:40:38 -07001354static bool khugepaged_add_pte_mapped_thp(struct mm_struct *mm,
Miaohe Lin081c3252022-06-25 17:28:15 +08001355 unsigned long addr)
Song Liu27e1f822019-09-23 15:38:30 -07001356{
Qi Zhengb26e2702022-08-31 11:19:46 +08001357 struct khugepaged_mm_slot *mm_slot;
1358 struct mm_slot *slot;
Zach O'Keefe58ac9a82022-09-22 15:40:38 -07001359 bool ret = false;
Song Liu27e1f822019-09-23 15:38:30 -07001360
1361 VM_BUG_ON(addr & ~HPAGE_PMD_MASK);
1362
1363 spin_lock(&khugepaged_mm_lock);
Qi Zhengb26e2702022-08-31 11:19:46 +08001364 slot = mm_slot_lookup(mm_slots_hash, mm);
1365 mm_slot = mm_slot_entry(slot, struct khugepaged_mm_slot, slot);
Zach O'Keefe58ac9a82022-09-22 15:40:38 -07001366 if (likely(mm_slot && mm_slot->nr_pte_mapped_thp < MAX_PTE_MAPPED_THP)) {
Song Liu27e1f822019-09-23 15:38:30 -07001367 mm_slot->pte_mapped_thp[mm_slot->nr_pte_mapped_thp++] = addr;
Zach O'Keefe58ac9a82022-09-22 15:40:38 -07001368 ret = true;
1369 }
Song Liu27e1f822019-09-23 15:38:30 -07001370 spin_unlock(&khugepaged_mm_lock);
Zach O'Keefe58ac9a82022-09-22 15:40:38 -07001371 return ret;
Song Liu27e1f822019-09-23 15:38:30 -07001372}
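/*
 * Note that the deferred addresses live in a fixed-size per-mm array of
 * MAX_PTE_MAPPED_THP entries; when it is full the address is dropped, and
 * the pte-mapped THP should simply be rediscovered by a later scan.
 */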
1373
Zach O'Keefe34488392022-09-22 15:40:39 -07001374/* hpage must be locked, and mmap_lock must be held in write */
1375static int set_huge_pmd(struct vm_area_struct *vma, unsigned long addr,
1376 pmd_t *pmdp, struct page *hpage)
1377{
1378 struct vm_fault vmf = {
1379 .vma = vma,
1380 .address = addr,
1381 .flags = 0,
1382 .pmd = pmdp,
1383 };
1384
1385 VM_BUG_ON(!PageTransHuge(hpage));
1386 mmap_assert_write_locked(vma->vm_mm);
1387
1388 if (do_set_pmd(&vmf, hpage))
1389 return SCAN_FAIL;
1390
1391 get_page(hpage);
1392 return SCAN_SUCCEED;
Song Liu27e1f822019-09-23 15:38:30 -07001393}
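/*
 * Example use (a sketch, not an extra call site): with @hpage locked and
 * the mmap_lock held for write,
 *
 *	result = set_huge_pmd(vma, haddr, pmdp, hpage);
 *
 * returns SCAN_SUCCEED with an additional reference taken on @hpage for the
 * new PMD mapping, or SCAN_FAIL if do_set_pmd() refuses.
 */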
1394
Jann Horn8d3c1062022-11-25 22:37:12 +01001395/*
1396 * A note about locking:
1397 * Trying to take the page table spinlocks would be useless here because those
1398 * are only used to synchronize:
1399 *
1400 * - modifying terminal entries (ones that point to a data page, not to another
1401 * page table)
1402 * - installing *new* non-terminal entries
1403 *
1404 * Instead, we need roughly the same kind of protection as free_pgtables() or
1405 * mm_take_all_locks() (but only for a single VMA):
1406 * The mmap lock together with this VMA's rmap locks covers all paths towards
1407 * the page table entries we're messing with here, except for hardware page
1408 * table walks and lockless_pages_from_mm().
1409 */
Pasha Tatashine59a47b82022-02-03 20:49:20 -08001410static void collapse_and_free_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
1411 unsigned long addr, pmd_t *pmdp)
1412{
Pasha Tatashine59a47b82022-02-03 20:49:20 -08001413 pmd_t pmd;
Jann Hornf268f6c2022-11-25 22:37:14 +01001414 struct mmu_notifier_range range;
Pasha Tatashine59a47b82022-02-03 20:49:20 -08001415
Pasha Tatashin80110bb2022-02-03 20:49:24 -08001416 mmap_assert_write_locked(mm);
Jann Horn8d3c1062022-11-25 22:37:12 +01001417 if (vma->vm_file)
1418 lockdep_assert_held_write(&vma->vm_file->f_mapping->i_mmap_rwsem);
1419 /*
1420 * All anon_vmas attached to the VMA have the same root and are
1421 * therefore locked by the same lock.
1422 */
1423 if (vma->anon_vma)
1424 lockdep_assert_held_write(&vma->anon_vma->root->rwsem);
1425
Alistair Popple7d4a8be2023-01-10 13:57:22 +11001426 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, addr,
Jann Hornf268f6c2022-11-25 22:37:14 +01001427 addr + HPAGE_PMD_SIZE);
1428 mmu_notifier_invalidate_range_start(&range);
Pasha Tatashine59a47b82022-02-03 20:49:20 -08001429 pmd = pmdp_collapse_flush(vma, addr, pmdp);
Jann Horn2ba99c52022-11-25 22:37:13 +01001430 tlb_remove_table_sync_one();
Jann Hornf268f6c2022-11-25 22:37:14 +01001431 mmu_notifier_invalidate_range_end(&range);
Pasha Tatashine59a47b82022-02-03 20:49:20 -08001432 mm_dec_nr_ptes(mm);
Pasha Tatashin80110bb2022-02-03 20:49:24 -08001433 page_table_check_pte_clear_range(mm, addr, pmd);
Pasha Tatashine59a47b82022-02-03 20:49:20 -08001434 pte_free(mm, pmd_pgtable(pmd));
1435}
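/*
 * Sketch of the locking state expected on entry, matching the assertions
 * above (for a file-backed VMA that also has an anon_vma):
 *
 *	mmap_write_lock(mm);
 *	i_mmap_lock_write(vma->vm_file->f_mapping);
 *	anon_vma_lock_write(vma->anon_vma);
 *	collapse_and_free_pmd(mm, vma, haddr, pmd);
 */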
1436
Song Liu27e1f822019-09-23 15:38:30 -07001437/**
Alex Shi336e6b52020-12-14 19:12:01 -08001438 * collapse_pte_mapped_thp - Try to collapse a pte-mapped THP for mm at
1439 * address haddr.
1440 *
1441 * @mm: process address space where collapse happens
1442 * @addr: THP collapse address
Zach O'Keefe34488392022-09-22 15:40:39 -07001443 * @install_pmd: If a huge PMD should be installed
Song Liu27e1f822019-09-23 15:38:30 -07001444 *
1445 * This function checks whether all the PTEs in the PMD are pointing to the
1446 * right THP. If so, retract the page table so the THP can refault in
Zach O'Keefe34488392022-09-22 15:40:39 -07001447 * as pmd-mapped. Possibly install a huge PMD mapping the THP.
Song Liu27e1f822019-09-23 15:38:30 -07001448 */
Zach O'Keefe34488392022-09-22 15:40:39 -07001449int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
1450 bool install_pmd)
Song Liu27e1f822019-09-23 15:38:30 -07001451{
1452 unsigned long haddr = addr & HPAGE_PMD_MASK;
Liam R. Howlett94d815b2022-09-06 19:48:50 +00001453 struct vm_area_struct *vma = vma_lookup(mm, haddr);
Hugh Dickins119a5fc2020-08-06 23:26:18 -07001454 struct page *hpage;
Song Liu27e1f822019-09-23 15:38:30 -07001455 pte_t *start_pte, *pte;
Pasha Tatashine59a47b82022-02-03 20:49:20 -08001456 pmd_t *pmd;
Song Liu27e1f822019-09-23 15:38:30 -07001457 spinlock_t *ptl;
Zach O'Keefe58ac9a82022-09-22 15:40:38 -07001458 int count = 0, result = SCAN_FAIL;
Song Liu27e1f822019-09-23 15:38:30 -07001459 int i;
1460
Zach O'Keefe58ac9a82022-09-22 15:40:38 -07001461 mmap_assert_write_locked(mm);
1462
Zach O'Keefe34488392022-09-22 15:40:39 -07001463 /* Fast check before locking page if already PMD-mapped */
Zach O'Keefe58ac9a82022-09-22 15:40:38 -07001464 result = find_pmd_or_thp_or_none(mm, haddr, &pmd);
Zach O'Keefe34488392022-09-22 15:40:39 -07001465 if (result == SCAN_PMD_MAPPED)
1466 return result;
Zach O'Keefe58ac9a82022-09-22 15:40:38 -07001467
Song Liu27e1f822019-09-23 15:38:30 -07001468 if (!vma || !vma->vm_file ||
Miaohe Linfef792a2021-05-04 18:34:15 -07001469 !range_in_vma(vma, haddr, haddr + HPAGE_PMD_SIZE))
Zach O'Keefe34488392022-09-22 15:40:39 -07001470 return SCAN_VMA_CHECK;
Song Liu27e1f822019-09-23 15:38:30 -07001471
1472 /*
Zach O'Keefea7f4e6e2022-07-06 16:59:25 -07001473 * If we are here, we've succeeded in replacing all the native pages
1474 * in the page cache with a single hugepage. If a mm were to fault-in
1475 * this memory (mapped by a suitably aligned VMA), we'd get the hugepage
1476 * and map it by a PMD, regardless of sysfs THP settings. As such, let's
1477 * analogously elide sysfs THP settings here.
Song Liu27e1f822019-09-23 15:38:30 -07001478 */
Zach O'Keefea7f4e6e2022-07-06 16:59:25 -07001479 if (!hugepage_vma_check(vma, vma->vm_flags, false, false, false))
Zach O'Keefe34488392022-09-22 15:40:39 -07001480 return SCAN_VMA_CHECK;
Song Liu27e1f822019-09-23 15:38:30 -07001481
Peter Xudeb4c932022-05-12 20:22:55 -07001482 /* Keep pmd pgtable for uffd-wp; see comment in retract_page_tables() */
1483 if (userfaultfd_wp(vma))
Zach O'Keefe34488392022-09-22 15:40:39 -07001484 return SCAN_PTE_UFFD_WP;
Peter Xudeb4c932022-05-12 20:22:55 -07001485
Hugh Dickins119a5fc2020-08-06 23:26:18 -07001486 hpage = find_lock_page(vma->vm_file->f_mapping,
1487 linear_page_index(vma, haddr));
1488 if (!hpage)
Zach O'Keefe34488392022-09-22 15:40:39 -07001489 return SCAN_PAGE_NULL;
Hugh Dickins119a5fc2020-08-06 23:26:18 -07001490
Zach O'Keefe34488392022-09-22 15:40:39 -07001491 if (!PageHead(hpage)) {
1492 result = SCAN_FAIL;
Hugh Dickins119a5fc2020-08-06 23:26:18 -07001493 goto drop_hpage;
Zach O'Keefe34488392022-09-22 15:40:39 -07001494 }
Hugh Dickins119a5fc2020-08-06 23:26:18 -07001495
Zach O'Keefe34488392022-09-22 15:40:39 -07001496 if (compound_order(hpage) != HPAGE_PMD_ORDER) {
1497 result = SCAN_PAGE_COMPOUND;
Hugh Dickins119a5fc2020-08-06 23:26:18 -07001498 goto drop_hpage;
Zach O'Keefe34488392022-09-22 15:40:39 -07001499 }
Zach O'Keefe780a4b62022-09-22 15:27:31 -07001500
Zach O'Keefe34488392022-09-22 15:40:39 -07001501 switch (result) {
1502 case SCAN_SUCCEED:
1503 break;
1504 case SCAN_PMD_NONE:
1505 /*
1506 * In MADV_COLLAPSE path, possible race with khugepaged where
1507 * all pte entries have been removed and pmd cleared. If so,
1508 * skip all the pte checks and just update the pmd mapping.
1509 */
1510 goto maybe_install_pmd;
1511 default:
Song Liu27e1f822019-09-23 15:38:30 -07001512 goto drop_hpage;
Zach O'Keefe34488392022-09-22 15:40:39 -07001513 }
Song Liu27e1f822019-09-23 15:38:30 -07001514
Jann Horn8d3c1062022-11-25 22:37:12 +01001515 /*
1516 * We need to lock the mapping so that from here on, only GUP-fast and
1517 * hardware page walks can access the parts of the page tables that
1518 * we're operating on.
1519 * See collapse_and_free_pmd().
1520 */
1521 i_mmap_lock_write(vma->vm_file->f_mapping);
1522
1523 /*
1524 * This spinlock should be unnecessary: Nobody else should be accessing
1525 * the page tables under spinlock protection here, only
1526 * lockless_pages_from_mm() and the hardware page walker can access page
1527 * tables while all the high-level locks are held in write mode.
1528 */
Song Liu27e1f822019-09-23 15:38:30 -07001529 start_pte = pte_offset_map_lock(mm, pmd, haddr, &ptl);
Zach O'Keefe34488392022-09-22 15:40:39 -07001530 result = SCAN_FAIL;
Song Liu27e1f822019-09-23 15:38:30 -07001531
1532 /* step 1: check all mapped PTEs are to the right huge page */
1533 for (i = 0, addr = haddr, pte = start_pte;
1534 i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
1535 struct page *page;
1536
1537 /* empty pte, skip */
1538 if (pte_none(*pte))
1539 continue;
1540
1541 /* page swapped out, abort */
Zach O'Keefe34488392022-09-22 15:40:39 -07001542 if (!pte_present(*pte)) {
1543 result = SCAN_PTE_NON_PRESENT;
Song Liu27e1f822019-09-23 15:38:30 -07001544 goto abort;
Zach O'Keefe34488392022-09-22 15:40:39 -07001545 }
Song Liu27e1f822019-09-23 15:38:30 -07001546
1547 page = vm_normal_page(vma, addr, *pte);
Alex Sierra3218f872022-07-15 10:05:11 -05001548 if (WARN_ON_ONCE(page && is_zone_device_page(page)))
1549 page = NULL;
Song Liu27e1f822019-09-23 15:38:30 -07001550 /*
Hugh Dickins119a5fc2020-08-06 23:26:18 -07001551 * Note that uprobe, debugger, or MAP_PRIVATE may change the
1552 * page table, but the new page will not be a subpage of hpage.
Song Liu27e1f822019-09-23 15:38:30 -07001553 */
Hugh Dickins119a5fc2020-08-06 23:26:18 -07001554 if (hpage + i != page)
Song Liu27e1f822019-09-23 15:38:30 -07001555 goto abort;
1556 count++;
1557 }
1558
1559 /* step 2: adjust rmap */
1560 for (i = 0, addr = haddr, pte = start_pte;
1561 i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
1562 struct page *page;
1563
1564 if (pte_none(*pte))
1565 continue;
1566 page = vm_normal_page(vma, addr, *pte);
Alex Sierra3218f872022-07-15 10:05:11 -05001567 if (WARN_ON_ONCE(page && is_zone_device_page(page)))
1568 goto abort;
Hugh Dickinscea86fe2022-02-14 18:26:39 -08001569 page_remove_rmap(page, vma, false);
Song Liu27e1f822019-09-23 15:38:30 -07001570 }
1571
1572 pte_unmap_unlock(start_pte, ptl);
1573
1574 /* step 3: set proper refcount and mm_counters. */
Hugh Dickins119a5fc2020-08-06 23:26:18 -07001575 if (count) {
Song Liu27e1f822019-09-23 15:38:30 -07001576 page_ref_sub(hpage, count);
1577 add_mm_counter(vma->vm_mm, mm_counter_file(hpage), -count);
1578 }
1579
Zach O'Keefe34488392022-09-22 15:40:39 -07001580 /* step 4: remove pte entries */
Hugh Dickinsab0c3f12022-12-22 12:41:50 -08001581 /* we make no change to anon, but protect concurrent anon page lookup */
1582 if (vma->anon_vma)
1583 anon_vma_lock_write(vma->anon_vma);
1584
Pasha Tatashine59a47b82022-02-03 20:49:20 -08001585 collapse_and_free_pmd(mm, vma, haddr, pmd);
Zach O'Keefe34488392022-09-22 15:40:39 -07001586
Hugh Dickinsab0c3f12022-12-22 12:41:50 -08001587 if (vma->anon_vma)
1588 anon_vma_unlock_write(vma->anon_vma);
Jann Horn8d3c1062022-11-25 22:37:12 +01001589 i_mmap_unlock_write(vma->vm_file->f_mapping);
1590
Zach O'Keefe34488392022-09-22 15:40:39 -07001591maybe_install_pmd:
1592 /* step 5: install pmd entry */
1593 result = install_pmd
1594 ? set_huge_pmd(vma, haddr, pmd, hpage)
1595 : SCAN_SUCCEED;
1596
Hugh Dickins119a5fc2020-08-06 23:26:18 -07001597drop_hpage:
1598 unlock_page(hpage);
1599 put_page(hpage);
Zach O'Keefe34488392022-09-22 15:40:39 -07001600 return result;
Song Liu27e1f822019-09-23 15:38:30 -07001601
1602abort:
1603 pte_unmap_unlock(start_pte, ptl);
Jann Horn8d3c1062022-11-25 22:37:12 +01001604 i_mmap_unlock_write(vma->vm_file->f_mapping);
Hugh Dickins119a5fc2020-08-06 23:26:18 -07001605 goto drop_hpage;
Song Liu27e1f822019-09-23 15:38:30 -07001606}
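/*
 * Example use (a sketch; the MADV_COLLAPSE path later in this file is the
 * real caller):
 *
 *	mmap_write_lock(mm);
 *	result = collapse_pte_mapped_thp(mm, haddr, true);
 *	mmap_write_unlock(mm);
 *
 * retracts the page table at @haddr and, with @install_pmd true, maps the
 * THP with a huge PMD immediately instead of waiting for a refault.
 */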
1607
Qi Zhengb26e2702022-08-31 11:19:46 +08001608static void khugepaged_collapse_pte_mapped_thps(struct khugepaged_mm_slot *mm_slot)
Song Liu27e1f822019-09-23 15:38:30 -07001609{
Qi Zhengb26e2702022-08-31 11:19:46 +08001610 struct mm_slot *slot = &mm_slot->slot;
1611 struct mm_struct *mm = slot->mm;
Song Liu27e1f822019-09-23 15:38:30 -07001612 int i;
1613
1614 if (likely(mm_slot->nr_pte_mapped_thp == 0))
Miaohe Lin0edf61e2021-05-04 18:33:37 -07001615 return;
Song Liu27e1f822019-09-23 15:38:30 -07001616
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07001617 if (!mmap_write_trylock(mm))
Miaohe Lin0edf61e2021-05-04 18:33:37 -07001618 return;
Song Liu27e1f822019-09-23 15:38:30 -07001619
Zach O'Keefe7d2c4382022-07-06 16:59:28 -07001620 if (unlikely(hpage_collapse_test_exit(mm)))
Song Liu27e1f822019-09-23 15:38:30 -07001621 goto out;
1622
1623 for (i = 0; i < mm_slot->nr_pte_mapped_thp; i++)
Zach O'Keefe34488392022-09-22 15:40:39 -07001624 collapse_pte_mapped_thp(mm, mm_slot->pte_mapped_thp[i], false);
Song Liu27e1f822019-09-23 15:38:30 -07001625
1626out:
1627 mm_slot->nr_pte_mapped_thp = 0;
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07001628 mmap_write_unlock(mm);
Song Liu27e1f822019-09-23 15:38:30 -07001629}
1630
Zach O'Keefe34488392022-09-22 15:40:39 -07001631static int retract_page_tables(struct address_space *mapping, pgoff_t pgoff,
1632 struct mm_struct *target_mm,
1633 unsigned long target_addr, struct page *hpage,
1634 struct collapse_control *cc)
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001635{
1636 struct vm_area_struct *vma;
Zach O'Keefe34488392022-09-22 15:40:39 -07001637 int target_result = SCAN_FAIL;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001638
1639 i_mmap_lock_write(mapping);
1640 vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
Zach O'Keefe34488392022-09-22 15:40:39 -07001641 int result = SCAN_FAIL;
1642 struct mm_struct *mm = NULL;
1643 unsigned long addr = 0;
1644 pmd_t *pmd;
1645 bool is_target = false;
1646
Song Liu27e1f822019-09-23 15:38:30 -07001647 /*
1648 * Check vma->anon_vma to exclude MAP_PRIVATE mappings that
1649		 * got written to. These VMAs are likely not worth the cost of
Michel Lespinasse3e4e28c2020-06-08 21:33:51 -07001650		 * mmap_write_lock(mm), as the PMD-mapping is likely to be split
Song Liu27e1f822019-09-23 15:38:30 -07001651 * later.
1652 *
Miaohe Lin36ee2c72022-06-25 17:28:12 +08001653 * Note that vma->anon_vma check is racy: it can be set up after
Michel Lespinassec1e8d7c2020-06-08 21:33:54 -07001654 * the check but before we took mmap_lock by the fault path.
Song Liu27e1f822019-09-23 15:38:30 -07001655 * But page lock would prevent establishing any new ptes of the
1656 * page, so we are safe.
1657 *
1658		 * An alternative would be to drop the check, but to check that
1659		 * the page table is clear before calling pmdp_collapse_flush()
1660		 * under ptl. It has a higher chance to recover the THP for the
Jann Horn8d3c1062022-11-25 22:37:12 +01001661		 * VMA, but has a higher cost too. It would also probably require
1662 * the anon_vma.
Song Liu27e1f822019-09-23 15:38:30 -07001663 */
Jann Horn023f47a2023-01-11 14:33:51 +01001664 if (READ_ONCE(vma->anon_vma)) {
Zach O'Keefe34488392022-09-22 15:40:39 -07001665 result = SCAN_PAGE_ANON;
1666 goto next;
1667 }
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001668 addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
Zach O'Keefe34488392022-09-22 15:40:39 -07001669 if (addr & ~HPAGE_PMD_MASK ||
1670 vma->vm_end < addr + HPAGE_PMD_SIZE) {
1671 result = SCAN_VMA_CHECK;
1672 goto next;
1673 }
Hugh Dickins18e77602020-08-06 23:26:22 -07001674 mm = vma->vm_mm;
Zach O'Keefe34488392022-09-22 15:40:39 -07001675 is_target = mm == target_mm && addr == target_addr;
1676 result = find_pmd_or_thp_or_none(mm, addr, &pmd);
1677 if (result != SCAN_SUCCEED)
1678 goto next;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001679 /*
Michel Lespinassec1e8d7c2020-06-08 21:33:54 -07001680 * We need exclusive mmap_lock to retract page table.
Song Liu27e1f822019-09-23 15:38:30 -07001681 *
1682 * We use trylock due to lock inversion: we need to acquire
Michel Lespinassec1e8d7c2020-06-08 21:33:54 -07001683 * mmap_lock while holding page lock. Fault path does it in
Song Liu27e1f822019-09-23 15:38:30 -07001684 * reverse order. Trylock is a way to avoid deadlock.
Zach O'Keefe34488392022-09-22 15:40:39 -07001685 *
1686 * Also, it's not MADV_COLLAPSE's job to collapse other
1687 * mappings - let khugepaged take care of them later.
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001688 */
Zach O'Keefe34488392022-09-22 15:40:39 -07001689 result = SCAN_PTE_MAPPED_HUGEPAGE;
1690 if ((cc->is_khugepaged || is_target) &&
1691 mmap_write_trylock(mm)) {
Peter Xudeb4c932022-05-12 20:22:55 -07001692 /*
Jann Horn023f47a2023-01-11 14:33:51 +01001693 * Re-check whether we have an ->anon_vma, because
1694 * collapse_and_free_pmd() requires that either no
1695 * ->anon_vma exists or the anon_vma is locked.
1696 * We already checked ->anon_vma above, but that check
1697 * is racy because ->anon_vma can be populated under the
1698 * mmap lock in read mode.
1699 */
1700 if (vma->anon_vma) {
1701 result = SCAN_PAGE_ANON;
1702 goto unlock_next;
1703 }
1704 /*
Peter Xudeb4c932022-05-12 20:22:55 -07001705 * When a vma is registered with uffd-wp, we can't
1706 * recycle the pmd pgtable because there can be pte
1707 * markers installed. Skip it only, so the rest mm/vma
1708 * can still have the same file mapped hugely, however
1709 * it'll always mapped in small page size for uffd-wp
1710 * registered ranges.
1711 */
Zach O'Keefe34488392022-09-22 15:40:39 -07001712 if (hpage_collapse_test_exit(mm)) {
1713 result = SCAN_ANY_PROCESS;
1714 goto unlock_next;
1715 }
1716 if (userfaultfd_wp(vma)) {
1717 result = SCAN_PTE_UFFD_WP;
1718 goto unlock_next;
1719 }
1720 collapse_and_free_pmd(mm, vma, addr, pmd);
1721 if (!cc->is_khugepaged && is_target)
1722 result = set_huge_pmd(vma, addr, pmd, hpage);
1723 else
1724 result = SCAN_SUCCEED;
1725
1726unlock_next:
Hugh Dickins18e77602020-08-06 23:26:22 -07001727 mmap_write_unlock(mm);
Zach O'Keefe34488392022-09-22 15:40:39 -07001728 goto next;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001729 }
Zach O'Keefe34488392022-09-22 15:40:39 -07001730 /*
1731 * Calling context will handle target mm/addr. Otherwise, let
1732 * khugepaged try again later.
1733 */
1734 if (!is_target) {
1735 khugepaged_add_pte_mapped_thp(mm, addr);
1736 continue;
1737 }
1738next:
1739 if (is_target)
1740 target_result = result;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001741 }
1742 i_mmap_unlock_write(mapping);
Zach O'Keefe34488392022-09-22 15:40:39 -07001743 return target_result;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001744}
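/*
 * In short (a summary of the loop above): khugepaged retracts the page
 * table in any mm it manages to write-trylock and lets the THP refault in
 * later, while MADV_COLLAPSE only write-locks its own target mm/addr, where
 * it may install the huge PMD directly; mms that cannot be locked (or that
 * MADV_COLLAPSE declines to touch) are deferred to khugepaged via
 * khugepaged_add_pte_mapped_thp().
 */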
1745
1746/**
Song Liu99cb0db2019-09-23 15:38:00 -07001747 * collapse_file - collapse filemap/tmpfs/shmem pages into a huge one.
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001748 *
Alex Shi336e6b52020-12-14 19:12:01 -08001749 * @mm: process address space where collapse happens
Zach O'Keefe34488392022-09-22 15:40:39 -07001750 * @addr: virtual collapse start address
Alex Shi336e6b52020-12-14 19:12:01 -08001751 * @file: file that the collapse operates on
1752 * @start: collapse start page offset within @file
Zach O'Keefe9710a78a2022-07-06 16:59:22 -07001753 * @cc: collapse context and scratchpad
Alex Shi336e6b52020-12-14 19:12:01 -08001754 *
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001755 * Basic scheme is simple, details are more complex:
Hugh Dickins87c460a2018-11-30 14:10:43 -08001756 * - allocate and lock a new huge page;
Matthew Wilcox77da9382017-12-04 14:56:08 -05001757 * - scan page cache replacing old pages with the new one
Song Liu99cb0db2019-09-23 15:38:00 -07001758 * + swap/gup in pages if necessary;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001759 * + fill in gaps;
Matthew Wilcox77da9382017-12-04 14:56:08 -05001760 * + keep old pages around in case rollback is required;
1761 * - if replacing succeeds:
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001762 * + copy data over;
1763 * + free old pages;
Hugh Dickins87c460a2018-11-30 14:10:43 -08001764 * + unlock huge page;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001765 *    - if replacing failed:
1766 * + put all pages back and unfreeze them;
Matthew Wilcox77da9382017-12-04 14:56:08 -05001767 * + restore gaps in the page cache;
Hugh Dickins87c460a2018-11-30 14:10:43 -08001768 * + unlock and free huge page;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001769 */
Zach O'Keefe34488392022-09-22 15:40:39 -07001770static int collapse_file(struct mm_struct *mm, unsigned long addr,
1771 struct file *file, pgoff_t start,
1772 struct collapse_control *cc)
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001773{
Song Liu579c5712019-09-23 15:37:57 -07001774 struct address_space *mapping = file->f_mapping;
Zach O'Keefe50ad2f22022-07-06 16:59:23 -07001775 struct page *hpage;
Gautam Menghani4c9473e2022-10-26 10:52:18 +05301776 pgoff_t index = 0, end = start + HPAGE_PMD_NR;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001777 LIST_HEAD(pagelist);
Matthew Wilcox77da9382017-12-04 14:56:08 -05001778 XA_STATE_ORDER(xas, &mapping->i_pages, start, HPAGE_PMD_ORDER);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001779 int nr_none = 0, result = SCAN_SUCCEED;
Song Liu99cb0db2019-09-23 15:38:00 -07001780 bool is_shmem = shmem_file(file);
Gautam Menghani4c9473e2022-10-26 10:52:18 +05301781 int nr = 0;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001782
Song Liu99cb0db2019-09-23 15:38:00 -07001783 VM_BUG_ON(!IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && !is_shmem);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001784 VM_BUG_ON(start & (HPAGE_PMD_NR - 1));
1785
Zach O'Keefe50ad2f22022-07-06 16:59:23 -07001786 result = alloc_charge_hpage(&hpage, mm, cc);
Zach O'Keefe9710a78a2022-07-06 16:59:22 -07001787 if (result != SCAN_SUCCEED)
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001788 goto out;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001789
Matthew Wilcox (Oracle)6b24ca42020-06-27 22:19:08 -04001790 /*
1791 * Ensure we have slots for all the pages in the range. This is
1792	 * almost certainly a no-op because most of the pages must be present.
1793 */
Hugh Dickins95feeab2018-11-30 14:10:50 -08001794 do {
1795 xas_lock_irq(&xas);
1796 xas_create_range(&xas);
1797 if (!xas_error(&xas))
1798 break;
1799 xas_unlock_irq(&xas);
1800 if (!xas_nomem(&xas, GFP_KERNEL)) {
Hugh Dickins95feeab2018-11-30 14:10:50 -08001801 result = SCAN_FAIL;
1802 goto out;
1803 }
1804 } while (1);
1805
Zach O'Keefe50ad2f22022-07-06 16:59:23 -07001806 __SetPageLocked(hpage);
Song Liu99cb0db2019-09-23 15:38:00 -07001807 if (is_shmem)
Zach O'Keefe50ad2f22022-07-06 16:59:23 -07001808 __SetPageSwapBacked(hpage);
1809 hpage->index = start;
1810 hpage->mapping = mapping;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001811
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001812 /*
Zach O'Keefe50ad2f22022-07-06 16:59:23 -07001813 * At this point the hpage is locked and not up-to-date.
Hugh Dickins87c460a2018-11-30 14:10:43 -08001814 * It's safe to insert it into the page cache, because nobody would
1815 * be able to map it or use it in another way until we unlock it.
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001816 */
1817
Matthew Wilcox77da9382017-12-04 14:56:08 -05001818 xas_set(&xas, start);
1819 for (index = start; index < end; index++) {
1820 struct page *page = xas_next(&xas);
Vishal Moola (Oracle)64ab3192022-11-17 23:30:53 -08001821 struct folio *folio;
Matthew Wilcox77da9382017-12-04 14:56:08 -05001822
1823 VM_BUG_ON(index != xas.xa_index);
Song Liu99cb0db2019-09-23 15:38:00 -07001824 if (is_shmem) {
1825 if (!page) {
1826 /*
1827 * Stop if extent has been truncated or
1828 * hole-punched, and is now completely
1829 * empty.
1830 */
1831 if (index == start) {
1832 if (!xas_next_entry(&xas, end - 1)) {
1833 result = SCAN_TRUNCATED;
1834 goto xa_locked;
1835 }
1836 xas_set(&xas, index);
1837 }
1838 if (!shmem_charge(mapping->host, 1)) {
1839 result = SCAN_FAIL;
Hugh Dickins042a3082018-11-30 14:10:39 -08001840 goto xa_locked;
Hugh Dickins701270f2018-11-30 14:10:25 -08001841 }
Zach O'Keefe50ad2f22022-07-06 16:59:23 -07001842 xas_store(&xas, hpage);
Song Liu99cb0db2019-09-23 15:38:00 -07001843 nr_none++;
1844 continue;
Hugh Dickins701270f2018-11-30 14:10:25 -08001845 }
Song Liu99cb0db2019-09-23 15:38:00 -07001846
1847 if (xa_is_value(page) || !PageUptodate(page)) {
1848 xas_unlock_irq(&xas);
1849 /* swap in or instantiate fallocated page */
Matthew Wilcox (Oracle)7459c142022-09-02 20:46:27 +01001850 if (shmem_get_folio(mapping->host, index,
1851 &folio, SGP_NOALLOC)) {
Song Liu99cb0db2019-09-23 15:38:00 -07001852 result = SCAN_FAIL;
1853 goto xa_unlocked;
1854 }
Matthew Wilcox (Oracle)7459c142022-09-02 20:46:27 +01001855 page = folio_file_page(folio, index);
Song Liu99cb0db2019-09-23 15:38:00 -07001856 } else if (trylock_page(page)) {
1857 get_page(page);
1858 xas_unlock_irq(&xas);
1859 } else {
1860 result = SCAN_PAGE_LOCK;
Hugh Dickins042a3082018-11-30 14:10:39 -08001861 goto xa_locked;
Matthew Wilcox77da9382017-12-04 14:56:08 -05001862 }
Song Liu99cb0db2019-09-23 15:38:00 -07001863 } else { /* !is_shmem */
1864 if (!page || xa_is_value(page)) {
1865 xas_unlock_irq(&xas);
1866 page_cache_sync_readahead(mapping, &file->f_ra,
1867 file, index,
David Howellse5a59d32020-09-04 16:36:16 -07001868 end - index);
Song Liu99cb0db2019-09-23 15:38:00 -07001869 /* drain pagevecs to help isolate_lru_page() */
1870 lru_add_drain();
1871 page = find_lock_page(mapping, index);
1872 if (unlikely(page == NULL)) {
1873 result = SCAN_FAIL;
1874 goto xa_unlocked;
1875 }
Song Liu75f36062019-11-30 17:57:19 -08001876 } else if (PageDirty(page)) {
1877 /*
1878 * khugepaged only works on read-only fd,
1879 * so this page is dirty because it hasn't
1880 * been flushed since first write. There
1881 * won't be new dirty pages.
1882 *
1883 * Trigger async flush here and hope the
1884 * writeback is done when khugepaged
1885 * revisits this page.
1886 *
1887 * This is a one-off situation. We are not
1888 * forcing writeback in loop.
1889 */
1890 xas_unlock_irq(&xas);
1891 filemap_flush(mapping);
1892 result = SCAN_FAIL;
1893 goto xa_unlocked;
Rongwei Wang74c42e12021-10-28 14:36:27 -07001894 } else if (PageWriteback(page)) {
1895 xas_unlock_irq(&xas);
1896 result = SCAN_FAIL;
1897 goto xa_unlocked;
Song Liu99cb0db2019-09-23 15:38:00 -07001898 } else if (trylock_page(page)) {
1899 get_page(page);
1900 xas_unlock_irq(&xas);
1901 } else {
1902 result = SCAN_PAGE_LOCK;
1903 goto xa_locked;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001904 }
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001905 }
1906
1907 /*
Matthew Wilcoxb93b0162018-04-10 16:36:56 -07001908 * The page must be locked, so we can drop the i_pages lock
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001909 * without racing with truncate.
1910 */
1911 VM_BUG_ON_PAGE(!PageLocked(page), page);
Song Liu4655e5e2019-11-15 17:34:53 -08001912
1913 /* make sure the page is up to date */
1914 if (unlikely(!PageUptodate(page))) {
1915 result = SCAN_FAIL;
1916 goto out_unlock;
1917 }
Hugh Dickins06a5e122018-11-30 14:10:47 -08001918
1919 /*
1920 * If file was truncated then extended, or hole-punched, before
1921 * we locked the first page, then a THP might be there already.
Zach O'Keefe58ac9a82022-09-22 15:40:38 -07001922 * This will be discovered on the first iteration.
Hugh Dickins06a5e122018-11-30 14:10:47 -08001923 */
1924 if (PageTransCompound(page)) {
Zach O'Keefe58ac9a82022-09-22 15:40:38 -07001925 struct page *head = compound_head(page);
1926
1927 result = compound_order(head) == HPAGE_PMD_ORDER &&
1928 head->index == start
1929 /* Maybe PMD-mapped */
1930 ? SCAN_PTE_MAPPED_HUGEPAGE
1931 : SCAN_PAGE_COMPOUND;
Hugh Dickins06a5e122018-11-30 14:10:47 -08001932 goto out_unlock;
1933 }
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001934
Vishal Moola (Oracle)64ab3192022-11-17 23:30:53 -08001935 folio = page_folio(page);
1936
1937 if (folio_mapping(folio) != mapping) {
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001938 result = SCAN_TRUNCATED;
1939 goto out_unlock;
1940 }
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001941
Vishal Moola (Oracle)64ab3192022-11-17 23:30:53 -08001942 if (!is_shmem && (folio_test_dirty(folio) ||
1943 folio_test_writeback(folio))) {
Song Liu4655e5e2019-11-15 17:34:53 -08001944 /*
1945 * khugepaged only works on read-only fd, so this
1946 * page is dirty because it hasn't been flushed
1947 * since first write.
1948 */
1949 result = SCAN_FAIL;
1950 goto out_unlock;
1951 }
1952
Baolin Wangbe2d5752023-02-15 18:39:34 +08001953 if (!folio_isolate_lru(folio)) {
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001954 result = SCAN_DEL_PAGE_LRU;
Hugh Dickins042a3082018-11-30 14:10:39 -08001955 goto out_unlock;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001956 }
1957
Vishal Moola (Oracle)64ab3192022-11-17 23:30:53 -08001958 if (folio_has_private(folio) &&
1959 !filemap_release_folio(folio, GFP_KERNEL)) {
Song Liu99cb0db2019-09-23 15:38:00 -07001960 result = SCAN_PAGE_HAS_PRIVATE;
Vishal Moola (Oracle)64ab3192022-11-17 23:30:53 -08001961 folio_putback_lru(folio);
Song Liu99cb0db2019-09-23 15:38:00 -07001962 goto out_unlock;
1963 }
1964
Vishal Moola (Oracle)64ab3192022-11-17 23:30:53 -08001965 if (folio_mapped(folio))
1966 try_to_unmap(folio,
Matthew Wilcox (Oracle)869f7ee2022-02-15 09:28:49 -05001967 TTU_IGNORE_MLOCK | TTU_BATCH_FLUSH);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001968
Matthew Wilcox77da9382017-12-04 14:56:08 -05001969 xas_lock_irq(&xas);
1970 xas_set(&xas, index);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001971
Matthew Wilcox77da9382017-12-04 14:56:08 -05001972 VM_BUG_ON_PAGE(page != xas_load(&xas), page);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001973
1974 /*
1975 * The page is expected to have page_count() == 3:
1976 * - we hold a pin on it;
Matthew Wilcox77da9382017-12-04 14:56:08 -05001977 * - one reference from page cache;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001978 * - one from isolate_lru_page;
1979 */
1980 if (!page_ref_freeze(page, 3)) {
1981 result = SCAN_PAGE_COUNT;
Hugh Dickins042a3082018-11-30 14:10:39 -08001982 xas_unlock_irq(&xas);
1983 putback_lru_page(page);
1984 goto out_unlock;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001985 }
1986
1987 /*
1988 * Add the page to the list to be able to undo the collapse if
1989 * something go wrong.
1990 */
1991 list_add_tail(&page->lru, &pagelist);
1992
1993 /* Finally, replace with the new page. */
Zach O'Keefe50ad2f22022-07-06 16:59:23 -07001994 xas_store(&xas, hpage);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001995 continue;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001996out_unlock:
1997 unlock_page(page);
1998 put_page(page);
Hugh Dickins042a3082018-11-30 14:10:39 -08001999 goto xa_unlocked;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002000 }
Zach O'Keefe50ad2f22022-07-06 16:59:23 -07002001 nr = thp_nr_pages(hpage);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002002
Song Liu99cb0db2019-09-23 15:38:00 -07002003 if (is_shmem)
Zach O'Keefe50ad2f22022-07-06 16:59:23 -07002004 __mod_lruvec_page_state(hpage, NR_SHMEM_THPS, nr);
Song Liu09d91cd2019-09-23 15:38:03 -07002005 else {
Zach O'Keefe50ad2f22022-07-06 16:59:23 -07002006 __mod_lruvec_page_state(hpage, NR_FILE_THPS, nr);
Song Liu09d91cd2019-09-23 15:38:03 -07002007 filemap_nr_thps_inc(mapping);
Collin Fijalkovicheb6ecbe2021-06-30 18:51:32 -07002008 /*
2009 * Paired with smp_mb() in do_dentry_open() to ensure
2010 * i_writecount is up to date and the update to nr_thps is
2011 * visible. Ensures the page cache will be truncated if the
2012 * file is opened writable.
2013 */
2014 smp_mb();
2015 if (inode_is_open_for_write(mapping->host)) {
2016 result = SCAN_FAIL;
Zach O'Keefe50ad2f22022-07-06 16:59:23 -07002017 __mod_lruvec_page_state(hpage, NR_FILE_THPS, -nr);
Collin Fijalkovicheb6ecbe2021-06-30 18:51:32 -07002018 filemap_nr_thps_dec(mapping);
2019 goto xa_locked;
2020 }
Song Liu09d91cd2019-09-23 15:38:03 -07002021 }
Song Liu99cb0db2019-09-23 15:38:00 -07002022
Hugh Dickins042a3082018-11-30 14:10:39 -08002023 if (nr_none) {
Zach O'Keefe50ad2f22022-07-06 16:59:23 -07002024 __mod_lruvec_page_state(hpage, NR_FILE_PAGES, nr_none);
Miaohe Lin2f55f072022-06-25 17:28:13 +08002025 /* nr_none is always 0 for non-shmem. */
Zach O'Keefe50ad2f22022-07-06 16:59:23 -07002026 __mod_lruvec_page_state(hpage, NR_SHMEM, nr_none);
Hugh Dickins042a3082018-11-30 14:10:39 -08002027 }
2028
Matthew Wilcox (Oracle)6b24ca42020-06-27 22:19:08 -04002029 /* Join all the small entries into a single multi-index entry */
2030 xas_set_order(&xas, start, HPAGE_PMD_ORDER);
Zach O'Keefe50ad2f22022-07-06 16:59:23 -07002031 xas_store(&xas, hpage);
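	/*
	 * After this store the xarray holds one order-HPAGE_PMD_ORDER entry
	 * spanning all HPAGE_PMD_NR slots of the range, so a lookup of any
	 * subpage index now returns the huge page.
	 */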
Hugh Dickins042a3082018-11-30 14:10:39 -08002032xa_locked:
2033 xas_unlock_irq(&xas);
Matthew Wilcox77da9382017-12-04 14:56:08 -05002034xa_unlocked:
Hugh Dickins042a3082018-11-30 14:10:39 -08002035
Hugh Dickins6d9df8a2022-02-14 18:40:55 -08002036 /*
2037 * If collapse is successful, flush must be done now before copying.
2038 * If collapse is unsuccessful, does flush actually need to be done?
2039 * Do it anyway, to clear the state.
2040 */
2041 try_to_unmap_flush();
2042
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002043 if (result == SCAN_SUCCEED) {
Matthew Wilcox77da9382017-12-04 14:56:08 -05002044 struct page *page, *tmp;
Vishal Moola (Oracle)284a3442022-11-01 10:53:25 -07002045 struct folio *folio;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002046
2047 /*
Matthew Wilcox77da9382017-12-04 14:56:08 -05002048		 * Replacing the old pages with the new one has succeeded; now we
2049 * need to copy the content and free the old pages.
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002050 */
Hugh Dickins2af8ff22018-11-30 14:10:35 -08002051 index = start;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002052 list_for_each_entry_safe(page, tmp, &pagelist, lru) {
Hugh Dickins2af8ff22018-11-30 14:10:35 -08002053 while (index < page->index) {
Zach O'Keefe50ad2f22022-07-06 16:59:23 -07002054 clear_highpage(hpage + (index % HPAGE_PMD_NR));
Hugh Dickins2af8ff22018-11-30 14:10:35 -08002055 index++;
2056 }
Zach O'Keefe50ad2f22022-07-06 16:59:23 -07002057 copy_highpage(hpage + (page->index % HPAGE_PMD_NR),
2058 page);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002059 list_del(&page->lru);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002060 page->mapping = NULL;
Hugh Dickins042a3082018-11-30 14:10:39 -08002061 page_ref_unfreeze(page, 1);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002062 ClearPageActive(page);
2063 ClearPageUnevictable(page);
Hugh Dickins042a3082018-11-30 14:10:39 -08002064 unlock_page(page);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002065 put_page(page);
Hugh Dickins2af8ff22018-11-30 14:10:35 -08002066 index++;
2067 }
2068 while (index < end) {
Zach O'Keefe50ad2f22022-07-06 16:59:23 -07002069 clear_highpage(hpage + (index % HPAGE_PMD_NR));
Hugh Dickins2af8ff22018-11-30 14:10:35 -08002070 index++;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002071 }
2072
Vishal Moola (Oracle)284a3442022-11-01 10:53:25 -07002073 folio = page_folio(hpage);
2074 folio_mark_uptodate(folio);
2075 folio_ref_add(folio, HPAGE_PMD_NR - 1);
2076
Johannes Weiner6058eae2020-06-03 16:02:40 -07002077 if (is_shmem)
Vishal Moola (Oracle)284a3442022-11-01 10:53:25 -07002078 folio_mark_dirty(folio);
2079 folio_add_lru(folio);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002080
Hugh Dickins042a3082018-11-30 14:10:39 -08002081 /*
2082 * Remove pte page tables, so we can re-fault the page as huge.
2083 */
Zach O'Keefe34488392022-09-22 15:40:39 -07002084 result = retract_page_tables(mapping, start, mm, addr, hpage,
2085 cc);
Zach O'Keefe50ad2f22022-07-06 16:59:23 -07002086 unlock_page(hpage);
2087 hpage = NULL;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002088 } else {
Matthew Wilcox77da9382017-12-04 14:56:08 -05002089 struct page *page;
Hugh Dickinsaaa52e32018-11-30 14:10:29 -08002090
Matthew Wilcox77da9382017-12-04 14:56:08 -05002091 /* Something went wrong: roll back page cache changes */
Matthew Wilcox77da9382017-12-04 14:56:08 -05002092 xas_lock_irq(&xas);
Miaohe Lin2f55f072022-06-25 17:28:13 +08002093 if (nr_none) {
2094 mapping->nrpages -= nr_none;
Song Liu99cb0db2019-09-23 15:38:00 -07002095 shmem_uncharge(mapping->host, nr_none);
Miaohe Lin2f55f072022-06-25 17:28:13 +08002096 }
Hugh Dickinsaaa52e32018-11-30 14:10:29 -08002097
Matthew Wilcox77da9382017-12-04 14:56:08 -05002098 xas_set(&xas, start);
2099 xas_for_each(&xas, page, end - 1) {
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002100 page = list_first_entry_or_null(&pagelist,
2101 struct page, lru);
Matthew Wilcox77da9382017-12-04 14:56:08 -05002102 if (!page || xas.xa_index < page->index) {
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002103 if (!nr_none)
2104 break;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002105 nr_none--;
Johannes Weiner59749e62016-12-12 16:43:35 -08002106 /* Put holes back where they were */
Matthew Wilcox77da9382017-12-04 14:56:08 -05002107 xas_store(&xas, NULL);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002108 continue;
2109 }
2110
Matthew Wilcox77da9382017-12-04 14:56:08 -05002111 VM_BUG_ON_PAGE(page->index != xas.xa_index, page);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002112
2113 /* Unfreeze the page. */
2114 list_del(&page->lru);
2115 page_ref_unfreeze(page, 2);
Matthew Wilcox77da9382017-12-04 14:56:08 -05002116 xas_store(&xas, page);
2117 xas_pause(&xas);
2118 xas_unlock_irq(&xas);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002119 unlock_page(page);
Hugh Dickins042a3082018-11-30 14:10:39 -08002120 putback_lru_page(page);
Matthew Wilcox77da9382017-12-04 14:56:08 -05002121 xas_lock_irq(&xas);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002122 }
2123 VM_BUG_ON(nr_none);
Matthew Wilcox77da9382017-12-04 14:56:08 -05002124 xas_unlock_irq(&xas);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002125
Zach O'Keefe50ad2f22022-07-06 16:59:23 -07002126 hpage->mapping = NULL;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002127 }
Hugh Dickins042a3082018-11-30 14:10:39 -08002128
Zach O'Keefe50ad2f22022-07-06 16:59:23 -07002129 if (hpage)
2130 unlock_page(hpage);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002131out:
2132 VM_BUG_ON(!list_empty(&pagelist));
Zach O'Keefe50ad2f22022-07-06 16:59:23 -07002133 if (hpage) {
2134 mem_cgroup_uncharge(page_folio(hpage));
2135 put_page(hpage);
Yang Shic6a7f442022-07-06 16:59:20 -07002136 }
Gautam Menghani4c9473e2022-10-26 10:52:18 +05302137
2138 trace_mm_khugepaged_collapse_file(mm, hpage, index, is_shmem, addr, file, nr, result);
Zach O'Keefe50ad2f22022-07-06 16:59:23 -07002139 return result;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002140}
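/*
 * Ownership summary: on SCAN_SUCCEED the huge page has been published in
 * the page cache and unlocked, and @hpage is NULL by the time the "out"
 * label runs; on failure the rollback path (when one is needed) reinstates
 * the small pages, and the huge page is uncharged and freed at "out".
 */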
2141
Zach O'Keefe34488392022-09-22 15:40:39 -07002142static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
2143 struct file *file, pgoff_t start,
2144 struct collapse_control *cc)
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002145{
2146 struct page *page = NULL;
Song Liu579c5712019-09-23 15:37:57 -07002147 struct address_space *mapping = file->f_mapping;
Matthew Wilcox85b392d2017-12-04 15:06:23 -05002148 XA_STATE(xas, &mapping->i_pages, start);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002149 int present, swap;
2150 int node = NUMA_NO_NODE;
2151 int result = SCAN_SUCCEED;
2152
2153 present = 0;
2154 swap = 0;
Zach O'Keefe34d6b472022-07-06 16:59:21 -07002155 memset(cc->node_load, 0, sizeof(cc->node_load));
Yang Shie031ff92022-11-08 10:43:56 -08002156 nodes_clear(cc->alloc_nmask);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002157 rcu_read_lock();
Matthew Wilcox85b392d2017-12-04 15:06:23 -05002158 xas_for_each(&xas, page, start + HPAGE_PMD_NR - 1) {
2159 if (xas_retry(&xas, page))
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002160 continue;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002161
Matthew Wilcox85b392d2017-12-04 15:06:23 -05002162 if (xa_is_value(page)) {
Zach O'Keefed8ea7cc2022-07-06 16:59:24 -07002163 ++swap;
2164 if (cc->is_khugepaged &&
2165 swap > khugepaged_max_ptes_swap) {
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002166 result = SCAN_EXCEED_SWAP_PTE;
Yang Yange9ea8742022-01-14 14:07:55 -08002167 count_vm_event(THP_SCAN_EXCEED_SWAP_PTE);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002168 break;
2169 }
2170 continue;
2171 }
2172
Matthew Wilcox (Oracle)6b24ca42020-06-27 22:19:08 -04002173 /*
Zach O'Keefe58ac9a82022-09-22 15:40:38 -07002174 * TODO: khugepaged should compact smaller compound pages
Matthew Wilcox (Oracle)6b24ca42020-06-27 22:19:08 -04002175		 * into a PMD-sized page
2176 */
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002177 if (PageTransCompound(page)) {
Zach O'Keefe58ac9a82022-09-22 15:40:38 -07002178 struct page *head = compound_head(page);
2179
2180 result = compound_order(head) == HPAGE_PMD_ORDER &&
2181 head->index == start
2182 /* Maybe PMD-mapped */
2183 ? SCAN_PTE_MAPPED_HUGEPAGE
2184 : SCAN_PAGE_COMPOUND;
2185 /*
2186 * For SCAN_PTE_MAPPED_HUGEPAGE, further processing
2187 * by the caller won't touch the page cache, and so
2188 * it's safe to skip LRU and refcount checks before
2189 * returning.
2190 */
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002191 break;
2192 }
2193
2194 node = page_to_nid(page);
Zach O'Keefe7d2c4382022-07-06 16:59:28 -07002195 if (hpage_collapse_scan_abort(node, cc)) {
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002196 result = SCAN_SCAN_ABORT;
2197 break;
2198 }
Zach O'Keefe34d6b472022-07-06 16:59:21 -07002199 cc->node_load[node]++;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002200
2201 if (!PageLRU(page)) {
2202 result = SCAN_PAGE_LRU;
2203 break;
2204 }
2205
Song Liu99cb0db2019-09-23 15:38:00 -07002206 if (page_count(page) !=
2207 1 + page_mapcount(page) + page_has_private(page)) {
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002208 result = SCAN_PAGE_COUNT;
2209 break;
2210 }
2211
2212 /*
2213 * We probably should check if the page is referenced here, but
2214 * nobody would transfer pte_young() to PageReferenced() for us.
2215 * And rmap walk here is just too costly...
2216 */
2217
2218 present++;
2219
2220 if (need_resched()) {
Matthew Wilcox85b392d2017-12-04 15:06:23 -05002221 xas_pause(&xas);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002222 cond_resched_rcu();
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002223 }
2224 }
2225 rcu_read_unlock();
2226
2227 if (result == SCAN_SUCCEED) {
Zach O'Keefed8ea7cc2022-07-06 16:59:24 -07002228 if (cc->is_khugepaged &&
2229 present < HPAGE_PMD_NR - khugepaged_max_ptes_none) {
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002230 result = SCAN_EXCEED_NONE_PTE;
Yang Yange9ea8742022-01-14 14:07:55 -08002231 count_vm_event(THP_SCAN_EXCEED_NONE_PTE);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002232 } else {
Zach O'Keefe34488392022-09-22 15:40:39 -07002233 result = collapse_file(mm, addr, file, start, cc);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002234 }
2235 }
2236
Gautam Menghani045634f2022-10-26 10:15:24 +05302237 trace_mm_khugepaged_scan_file(mm, page, file, present, swap, result);
Zach O'Keefe50ad2f22022-07-06 16:59:23 -07002238 return result;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002239}
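/*
 * A quick arithmetic check of the heuristic above (assuming the default
 * max_ptes_none of HPAGE_PMD_NR - 1): khugepaged calls collapse_file()
 * whenever present >= HPAGE_PMD_NR - khugepaged_max_ptes_none, i.e. as soon
 * as a single page of the extent is resident; tuning max_ptes_none down to
 * 0 would instead demand that all HPAGE_PMD_NR pages be present.
 * MADV_COLLAPSE skips the check altogether.
 */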
2240#else
Zach O'Keefe34488392022-09-22 15:40:39 -07002241static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
2242 struct file *file, pgoff_t start,
2243 struct collapse_control *cc)
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002244{
2245 BUILD_BUG();
2246}
Song Liu27e1f822019-09-23 15:38:30 -07002247
Qi Zhengb26e2702022-08-31 11:19:46 +08002248static void khugepaged_collapse_pte_mapped_thps(struct khugepaged_mm_slot *mm_slot)
Song Liu27e1f822019-09-23 15:38:30 -07002249{
Song Liu27e1f822019-09-23 15:38:30 -07002250}
Zach O'Keefe58ac9a82022-09-22 15:40:38 -07002251
2252static bool khugepaged_add_pte_mapped_thp(struct mm_struct *mm,
2253 unsigned long addr)
2254{
2255 return false;
2256}
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002257#endif
2258
Zach O'Keefe50ad2f22022-07-06 16:59:23 -07002259static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
Zach O'Keefe34d6b472022-07-06 16:59:21 -07002260 struct collapse_control *cc)
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07002261 __releases(&khugepaged_mm_lock)
2262 __acquires(&khugepaged_mm_lock)
2263{
Matthew Wilcox (Oracle)68540502022-09-06 19:49:00 +00002264 struct vma_iterator vmi;
Qi Zhengb26e2702022-08-31 11:19:46 +08002265 struct khugepaged_mm_slot *mm_slot;
2266 struct mm_slot *slot;
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07002267 struct mm_struct *mm;
2268 struct vm_area_struct *vma;
2269 int progress = 0;
2270
2271 VM_BUG_ON(!pages);
Lance Roy35f3aa32018-10-04 23:45:47 -07002272 lockdep_assert_held(&khugepaged_mm_lock);
Zach O'Keefe50ad2f22022-07-06 16:59:23 -07002273 *result = SCAN_FAIL;
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07002274
Qi Zhengb26e2702022-08-31 11:19:46 +08002275 if (khugepaged_scan.mm_slot) {
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07002276 mm_slot = khugepaged_scan.mm_slot;
Qi Zhengb26e2702022-08-31 11:19:46 +08002277 slot = &mm_slot->slot;
2278 } else {
2279 slot = list_entry(khugepaged_scan.mm_head.next,
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07002280 struct mm_slot, mm_node);
Qi Zhengb26e2702022-08-31 11:19:46 +08002281 mm_slot = mm_slot_entry(slot, struct khugepaged_mm_slot, slot);
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07002282 khugepaged_scan.address = 0;
2283 khugepaged_scan.mm_slot = mm_slot;
2284 }
2285 spin_unlock(&khugepaged_mm_lock);
Song Liu27e1f822019-09-23 15:38:30 -07002286 khugepaged_collapse_pte_mapped_thps(mm_slot);
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07002287
Qi Zhengb26e2702022-08-31 11:19:46 +08002288 mm = slot->mm;
Yang Shi3b454ad2018-01-31 16:18:28 -08002289 /*
2290	 * Don't wait for the semaphore (to avoid long wait times). Just move to
2291 * the next mm on the list.
2292 */
2293 vma = NULL;
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07002294 if (unlikely(!mmap_read_trylock(mm)))
Michel Lespinassec1e8d7c2020-06-08 21:33:54 -07002295 goto breakouterloop_mmap_lock;
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07002296
2297 progress++;
Matthew Wilcox (Oracle)68540502022-09-06 19:49:00 +00002298 if (unlikely(hpage_collapse_test_exit(mm)))
2299 goto breakouterloop;
2300
2301 vma_iter_init(&vmi, mm, khugepaged_scan.address);
2302 for_each_vma(vmi, vma) {
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07002303 unsigned long hstart, hend;
2304
2305 cond_resched();
Zach O'Keefe7d2c4382022-07-06 16:59:28 -07002306 if (unlikely(hpage_collapse_test_exit(mm))) {
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07002307 progress++;
2308 break;
2309 }
Zach O'Keefea7f4e6e2022-07-06 16:59:25 -07002310 if (!hugepage_vma_check(vma, vma->vm_flags, false, false, true)) {
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07002311skip:
2312 progress++;
2313 continue;
2314 }
Yang Shi4fa68932022-06-16 10:48:35 -07002315 hstart = round_up(vma->vm_start, HPAGE_PMD_SIZE);
2316 hend = round_down(vma->vm_end, HPAGE_PMD_SIZE);
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07002317 if (khugepaged_scan.address > hend)
2318 goto skip;
2319 if (khugepaged_scan.address < hstart)
2320 khugepaged_scan.address = hstart;
2321 VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
2322
2323 while (khugepaged_scan.address < hend) {
Zach O'Keefe50ad2f22022-07-06 16:59:23 -07002324 bool mmap_locked = true;
2325
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07002326 cond_resched();
Zach O'Keefe7d2c4382022-07-06 16:59:28 -07002327 if (unlikely(hpage_collapse_test_exit(mm)))
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07002328 goto breakouterloop;
2329
2330 VM_BUG_ON(khugepaged_scan.address < hstart ||
2331 khugepaged_scan.address + HPAGE_PMD_SIZE >
2332 hend);
Song Liu99cb0db2019-09-23 15:38:00 -07002333 if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) {
Matthew Wilcox (Oracle)396bcc52020-04-06 20:04:35 -07002334 struct file *file = get_file(vma->vm_file);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002335 pgoff_t pgoff = linear_page_index(vma,
2336 khugepaged_scan.address);
Song Liu99cb0db2019-09-23 15:38:00 -07002337
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07002338 mmap_read_unlock(mm);
Zach O'Keefe34488392022-09-22 15:40:39 -07002339 *result = hpage_collapse_scan_file(mm,
2340 khugepaged_scan.address,
2341 file, pgoff, cc);
Zach O'Keefe50ad2f22022-07-06 16:59:23 -07002342 mmap_locked = false;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002343 fput(file);
2344 } else {
Zach O'Keefe7d2c4382022-07-06 16:59:28 -07002345 *result = hpage_collapse_scan_pmd(mm, vma,
2346 khugepaged_scan.address,
2347 &mmap_locked,
2348 cc);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002349 }
Zach O'Keefe58ac9a82022-09-22 15:40:38 -07002350 switch (*result) {
2351 case SCAN_PTE_MAPPED_HUGEPAGE: {
2352 pmd_t *pmd;
2353
2354 *result = find_pmd_or_thp_or_none(mm,
2355 khugepaged_scan.address,
2356 &pmd);
2357 if (*result != SCAN_SUCCEED)
2358 break;
2359 if (!khugepaged_add_pte_mapped_thp(mm,
2360 khugepaged_scan.address))
2361 break;
2362 } fallthrough;
2363 case SCAN_SUCCEED:
Zach O'Keefe50ad2f22022-07-06 16:59:23 -07002364 ++khugepaged_pages_collapsed;
Zach O'Keefe58ac9a82022-09-22 15:40:38 -07002365 break;
2366 default:
2367 break;
2368 }
2369
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07002370 /* move to next address */
2371 khugepaged_scan.address += HPAGE_PMD_SIZE;
2372 progress += HPAGE_PMD_NR;
Zach O'Keefe50ad2f22022-07-06 16:59:23 -07002373 if (!mmap_locked)
2374 /*
2375 * We released mmap_lock so break loop. Note
2376 * that we drop mmap_lock before all hugepage
2377 * allocations, so if allocation fails, we are
2378 * guaranteed to break here and report the
2379 * correct result back to caller.
2380 */
Michel Lespinassec1e8d7c2020-06-08 21:33:54 -07002381 goto breakouterloop_mmap_lock;
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07002382 if (progress >= pages)
2383 goto breakouterloop;
2384 }
2385 }
2386breakouterloop:
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07002387 mmap_read_unlock(mm); /* exit_mmap will destroy ptes after this */
Michel Lespinassec1e8d7c2020-06-08 21:33:54 -07002388breakouterloop_mmap_lock:
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07002389
2390 spin_lock(&khugepaged_mm_lock);
2391 VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
2392 /*
2393 * Release the current mm_slot if this mm is about to die, or
2394 * if we scanned all vmas of this mm.
2395 */
Zach O'Keefe7d2c4382022-07-06 16:59:28 -07002396 if (hpage_collapse_test_exit(mm) || !vma) {
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07002397 /*
2398 * Make sure that if mm_users is reaching zero while
2399 * khugepaged runs here, khugepaged_exit will find
2400 * mm_slot not pointing to the exiting mm.
2401 */
Qi Zhengb26e2702022-08-31 11:19:46 +08002402 if (slot->mm_node.next != &khugepaged_scan.mm_head) {
2403 slot = list_entry(slot->mm_node.next,
2404 struct mm_slot, mm_node);
2405 khugepaged_scan.mm_slot =
2406 mm_slot_entry(slot, struct khugepaged_mm_slot, slot);
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07002407 khugepaged_scan.address = 0;
2408 } else {
2409 khugepaged_scan.mm_slot = NULL;
2410 khugepaged_full_scans++;
2411 }
2412
2413 collect_mm_slot(mm_slot);
2414 }
2415
2416 return progress;
2417}
2418
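/*
 * There is work to do when at least one mm is queued for scanning and
 * THP is not globally disabled.
 */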
static int khugepaged_has_work(void)
{
        return !list_empty(&khugepaged_scan.mm_head) &&
                hugepage_flags_enabled();
}

static int khugepaged_wait_event(void)
{
        return !list_empty(&khugepaged_scan.mm_head) ||
               kthread_should_stop();
}

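/*
 * One scan pass: drain the per-CPU LRU caches, then keep calling
 * khugepaged_scan_mm_slot() until roughly khugepaged_pages_to_scan
 * pages have been considered.  pass_through_head caps how often the
 * scan position may wrap past the list head, and a huge page
 * allocation failure is retried once after khugepaged_alloc_sleep()
 * before the pass is abandoned.
 */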
static void khugepaged_do_scan(struct collapse_control *cc)
{
        unsigned int progress = 0, pass_through_head = 0;
        unsigned int pages = READ_ONCE(khugepaged_pages_to_scan);
        bool wait = true;
        int result = SCAN_SUCCEED;

        lru_add_drain_all();

        while (true) {
                cond_resched();

                if (unlikely(kthread_should_stop() || try_to_freeze()))
                        break;

                spin_lock(&khugepaged_mm_lock);
                if (!khugepaged_scan.mm_slot)
                        pass_through_head++;
                if (khugepaged_has_work() &&
                    pass_through_head < 2)
                        progress += khugepaged_scan_mm_slot(pages - progress,
                                                            &result, cc);
                else
                        progress = pages;
                spin_unlock(&khugepaged_mm_lock);

                if (progress >= pages)
                        break;

                if (result == SCAN_ALLOC_HUGE_PAGE_FAIL) {
                        /*
                         * If we fail to allocate the first time, try to
                         * sleep for a while.  If it fails again, cancel
                         * the scan.
                         */
                        if (!wait)
                                break;
                        wait = false;
                        khugepaged_alloc_sleep();
                }
        }
}

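/*
 * The timed sleep between passes ends early if the thread is being
 * stopped or if khugepaged_sleep_expire has been pulled forward (as
 * when the sleep interval is lowered via sysfs).
 */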
static bool khugepaged_should_wakeup(void)
{
        return kthread_should_stop() ||
               time_after_eq(jiffies, khugepaged_sleep_expire);
}

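/*
 * Sleep policy between passes: with queued work, take a timed,
 * freezable nap of scan_sleep_millisecs; with nothing queued, block
 * until khugepaged_wait_event() becomes true.
 */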
static void khugepaged_wait_work(void)
{
        if (khugepaged_has_work()) {
                const unsigned long scan_sleep_jiffies =
                        msecs_to_jiffies(khugepaged_scan_sleep_millisecs);

                if (!scan_sleep_jiffies)
                        return;

                khugepaged_sleep_expire = jiffies + scan_sleep_jiffies;
                wait_event_freezable_timeout(khugepaged_wait,
                                             khugepaged_should_wakeup(),
                                             scan_sleep_jiffies);
                return;
        }

        if (hugepage_flags_enabled())
                wait_event_freezable(khugepaged_wait, khugepaged_wait_event());
}

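/*
 * Kthread entry point: alternate scan passes and sleeps until stopped,
 * then drop any mm_slot still referenced by the interrupted scan so a
 * restarted daemon begins from a clean state.
 */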
static int khugepaged(void *none)
{
        struct khugepaged_mm_slot *mm_slot;

        set_freezable();
        set_user_nice(current, MAX_NICE);

        while (!kthread_should_stop()) {
                khugepaged_do_scan(&khugepaged_collapse_control);
                khugepaged_wait_work();
        }

        spin_lock(&khugepaged_mm_lock);
        mm_slot = khugepaged_scan.mm_slot;
        khugepaged_scan.mm_slot = NULL;
        if (mm_slot)
                collect_mm_slot(mm_slot);
        spin_unlock(&khugepaged_mm_lock);
        return 0;
}

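/*
 * Rough worked example for the calculation below, assuming 4 KiB pages,
 * 2 MiB pageblocks (pageblock_nr_pages == 512), MIGRATE_PCPTYPES == 3,
 * and a single populated zone:
 *
 *     recommended_min = 512 * 1 * 2      ->  1024 pages
 *                     + 512 * 1 * 3 * 3  ->  4608 pages
 *                     = 5632 pages << (PAGE_SHIFT - 10) = 22528 KiB
 *
 * i.e. about 22 MiB, subject to the 5%-of-lowmem cap.
 */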
static void set_recommended_min_free_kbytes(void)
{
        struct zone *zone;
        int nr_zones = 0;
        unsigned long recommended_min;

        if (!hugepage_flags_enabled()) {
                calculate_min_free_kbytes();
                goto update_wmarks;
        }

        for_each_populated_zone(zone) {
                /*
                 * We don't need to worry about fragmentation of
                 * ZONE_MOVABLE since it only has movable pages.
                 */
                if (zone_idx(zone) > gfp_zone(GFP_USER))
                        continue;

                nr_zones++;
        }

        /* Ensure 2 pageblocks are free to assist fragmentation avoidance */
        recommended_min = pageblock_nr_pages * nr_zones * 2;

        /*
         * Make sure that on average at least two pageblocks are almost free
         * of another type, one for a migratetype to fall back to and a
         * second to avoid subsequent fallbacks of other types.  There are 3
         * MIGRATE_TYPES we care about.
         */
        recommended_min += pageblock_nr_pages * nr_zones *
                           MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;

        /* Don't ever allow reserving more than 5% of the lowmem */
        recommended_min = min(recommended_min,
                              (unsigned long) nr_free_buffer_pages() / 20);
        recommended_min <<= (PAGE_SHIFT-10);

        if (recommended_min > min_free_kbytes) {
                if (user_min_free_kbytes >= 0)
                        pr_info("raising min_free_kbytes from %d to %lu to help transparent hugepage allocations\n",
                                min_free_kbytes, recommended_min);

                min_free_kbytes = recommended_min;
        }

update_wmarks:
        setup_per_zone_wmarks();
}

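/*
 * Start or stop the daemon to match the current THP "enabled" setting;
 * khugepaged_mutex serializes this against concurrent sysfs toggles.
 */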
int start_stop_khugepaged(void)
{
        int err = 0;

        mutex_lock(&khugepaged_mutex);
        if (hugepage_flags_enabled()) {
                if (!khugepaged_thread)
                        khugepaged_thread = kthread_run(khugepaged, NULL,
                                                        "khugepaged");
                if (IS_ERR(khugepaged_thread)) {
                        pr_err("khugepaged: kthread_run(khugepaged) failed\n");
                        err = PTR_ERR(khugepaged_thread);
                        khugepaged_thread = NULL;
                        goto fail;
                }

                if (!list_empty(&khugepaged_scan.mm_head))
                        wake_up_interruptible(&khugepaged_wait);
        } else if (khugepaged_thread) {
                kthread_stop(khugepaged_thread);
                khugepaged_thread = NULL;
        }
        set_recommended_min_free_kbytes();
fail:
        mutex_unlock(&khugepaged_mutex);
        return err;
}

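/*
 * Recompute min_free_kbytes (e.g. after memory hotplug changes zone
 * sizes), but only while khugepaged is actually running.
 */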
void khugepaged_min_free_kbytes_update(void)
{
        mutex_lock(&khugepaged_mutex);
        if (hugepage_flags_enabled() && khugepaged_thread)
                set_recommended_min_free_kbytes();
        mutex_unlock(&khugepaged_mutex);
}

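/*
 * Report whether current is the khugepaged daemon, letting callers
 * elsewhere (e.g. reclaim statistics) attribute its work separately
 * from other tasks'.
 */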
bool current_is_khugepaged(void)
{
        return kthread_func(current) == khugepaged;
}

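/*
 * Map a scan_result onto the errno returned by madvise(MADV_COLLAPSE):
 * transient resource pressure maps to -ENOMEM/-EBUSY/-EAGAIN, while
 * failures intrinsic to the address range map to -EINVAL.
 */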
static int madvise_collapse_errno(enum scan_result r)
{
        /*
         * MADV_COLLAPSE breaks from existing madvise(2) conventions to provide
         * actionable feedback to the caller, so that they may take an
         * appropriate fallback measure depending on the nature of the failure.
         */
        switch (r) {
        case SCAN_ALLOC_HUGE_PAGE_FAIL:
                return -ENOMEM;
        case SCAN_CGROUP_CHARGE_FAIL:
                return -EBUSY;
        /* Resource temporarily unavailable - trying again might succeed */
        case SCAN_PAGE_COUNT:
        case SCAN_PAGE_LOCK:
        case SCAN_PAGE_LRU:
        case SCAN_DEL_PAGE_LRU:
                return -EAGAIN;
        /*
         * Other: trying again is unlikely to succeed / the error is
         * intrinsic to the specified memory range.  khugepaged likely
         * won't be able to collapse either.
         */
        default:
                return -EINVAL;
        }
}

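/*
 * A minimal userspace sketch (illustrative only, assuming a kernel and
 * headers that define MADV_COLLAPSE):
 *
 *	#include <stdlib.h>
 *	#include <string.h>
 *	#include <sys/mman.h>
 *
 *	void *buf = aligned_alloc(2UL << 20, 2UL << 20);
 *	memset(buf, 0, 2UL << 20);	// fault in as small pages
 *	if (madvise(buf, 2UL << 20, MADV_COLLAPSE))
 *		perror("madvise");	// EAGAIN/EBUSY/ENOMEM/EINVAL
 *
 * 0 is returned only if every PMD-sized range in [start, end) ends up
 * backed by a huge page; otherwise the errno reflects the last failure.
 */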
int madvise_collapse(struct vm_area_struct *vma, struct vm_area_struct **prev,
                     unsigned long start, unsigned long end)
{
        struct collapse_control *cc;
        struct mm_struct *mm = vma->vm_mm;
        unsigned long hstart, hend, addr;
        int thps = 0, last_fail = SCAN_FAIL;
        bool mmap_locked = true;

        BUG_ON(vma->vm_start > start);
        BUG_ON(vma->vm_end < end);

        *prev = vma;

        if (!hugepage_vma_check(vma, vma->vm_flags, false, false, false))
                return -EINVAL;

        cc = kmalloc(sizeof(*cc), GFP_KERNEL);
        if (!cc)
                return -ENOMEM;
        cc->is_khugepaged = false;

        mmgrab(mm);
        lru_add_drain_all();

        hstart = (start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
        hend = end & HPAGE_PMD_MASK;

        for (addr = hstart; addr < hend; addr += HPAGE_PMD_SIZE) {
                int result = SCAN_FAIL;

                if (!mmap_locked) {
                        cond_resched();
                        mmap_read_lock(mm);
                        mmap_locked = true;
                        result = hugepage_vma_revalidate(mm, addr, false, &vma,
                                                         cc);
                        if (result != SCAN_SUCCEED) {
                                last_fail = result;
                                goto out_nolock;
                        }

                        hend = min(hend, vma->vm_end & HPAGE_PMD_MASK);
                }
                mmap_assert_locked(mm);
                memset(cc->node_load, 0, sizeof(cc->node_load));
                nodes_clear(cc->alloc_nmask);
                if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) {
                        struct file *file = get_file(vma->vm_file);
                        pgoff_t pgoff = linear_page_index(vma, addr);

                        mmap_read_unlock(mm);
                        mmap_locked = false;
                        result = hpage_collapse_scan_file(mm, addr, file, pgoff,
                                                          cc);
                        fput(file);
                } else {
                        result = hpage_collapse_scan_pmd(mm, vma, addr,
                                                         &mmap_locked, cc);
                }
                if (!mmap_locked)
                        *prev = NULL; /* Tell caller we dropped mmap_lock */

handle_result:
                switch (result) {
                case SCAN_SUCCEED:
                case SCAN_PMD_MAPPED:
                        ++thps;
                        break;
                case SCAN_PTE_MAPPED_HUGEPAGE:
                        BUG_ON(mmap_locked);
                        BUG_ON(*prev);
                        mmap_write_lock(mm);
                        result = collapse_pte_mapped_thp(mm, addr, true);
                        mmap_write_unlock(mm);
                        goto handle_result;
                /* Whitelisted set of results where continuing is OK */
                case SCAN_PMD_NULL:
                case SCAN_PTE_NON_PRESENT:
                case SCAN_PTE_UFFD_WP:
                case SCAN_PAGE_RO:
                case SCAN_LACK_REFERENCED_PAGE:
                case SCAN_PAGE_NULL:
                case SCAN_PAGE_COUNT:
                case SCAN_PAGE_LOCK:
                case SCAN_PAGE_COMPOUND:
                case SCAN_PAGE_LRU:
                case SCAN_DEL_PAGE_LRU:
                        last_fail = result;
                        break;
                default:
                        last_fail = result;
                        /* Other error, exit */
                        goto out_maybelock;
                }
        }

out_maybelock:
        /* Caller expects us to hold mmap_lock on return */
        if (!mmap_locked)
                mmap_read_lock(mm);
out_nolock:
        mmap_assert_locked(mm);
        mmdrop(mm);
        kfree(cc);

        return thps == ((hend - hstart) >> HPAGE_PMD_SHIFT) ? 0
                     : madvise_collapse_errno(last_fail);
}