// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/mmu_notifier.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/mm_inline.h>
#include <linux/kthread.h>
#include <linux/khugepaged.h>
#include <linux/freezer.h>
#include <linux/mman.h>
#include <linux/hashtable.h>
#include <linux/userfaultfd_k.h>
#include <linux/page_idle.h>
#include <linux/page_table_check.h>
#include <linux/swapops.h>
#include <linux/shmem_fs.h>

#include <asm/tlb.h>
#include <asm/pgalloc.h>
#include "internal.h"
#include "mm_slot.h"

enum scan_result {
	SCAN_FAIL,
	SCAN_SUCCEED,
	SCAN_PMD_NULL,
	SCAN_PMD_NONE,
	SCAN_PMD_MAPPED,
	SCAN_EXCEED_NONE_PTE,
	SCAN_EXCEED_SWAP_PTE,
	SCAN_EXCEED_SHARED_PTE,
	SCAN_PTE_NON_PRESENT,
	SCAN_PTE_UFFD_WP,
	SCAN_PTE_MAPPED_HUGEPAGE,
	SCAN_PAGE_RO,
	SCAN_LACK_REFERENCED_PAGE,
	SCAN_PAGE_NULL,
	SCAN_SCAN_ABORT,
	SCAN_PAGE_COUNT,
	SCAN_PAGE_LRU,
	SCAN_PAGE_LOCK,
	SCAN_PAGE_ANON,
	SCAN_PAGE_COMPOUND,
	SCAN_ANY_PROCESS,
	SCAN_VMA_NULL,
	SCAN_VMA_CHECK,
	SCAN_ADDRESS_RANGE,
	SCAN_DEL_PAGE_LRU,
	SCAN_ALLOC_HUGE_PAGE_FAIL,
	SCAN_CGROUP_CHARGE_FAIL,
	SCAN_TRUNCATED,
	SCAN_PAGE_HAS_PRIVATE,
	SCAN_STORE_FAILED,
	SCAN_COPY_MC,
	SCAN_PAGE_FILLED,
};

#define CREATE_TRACE_POINTS
#include <trace/events/huge_memory.h>

static struct task_struct *khugepaged_thread __read_mostly;
static DEFINE_MUTEX(khugepaged_mutex);

/* default scan 8*512 ptes (or vmas) every 30 seconds */
static unsigned int khugepaged_pages_to_scan __read_mostly;
static unsigned int khugepaged_pages_collapsed;
static unsigned int khugepaged_full_scans;
static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
/* during fragmentation poll the hugepage allocator once every minute */
static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
static unsigned long khugepaged_sleep_expire;
static DEFINE_SPINLOCK(khugepaged_mm_lock);
static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
/*
 * By default, collapse hugepages if there is at least one pte mapped like
 * it would have been mapped had the vma been large enough during the page
 * fault.
 *
 * Note that these are only respected if collapse was initiated by khugepaged.
 */
static unsigned int khugepaged_max_ptes_none __read_mostly;
static unsigned int khugepaged_max_ptes_swap __read_mostly;
static unsigned int khugepaged_max_ptes_shared __read_mostly;

#define MM_SLOTS_HASH_BITS 10
static __read_mostly DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);

static struct kmem_cache *mm_slot_cache __read_mostly;

#define MAX_PTE_MAPPED_THP 8

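/*
 * Per-collapse state shared by the khugepaged and directed-collapse paths.
 * is_khugepaged selects whether the max_ptes_* tunables below are enforced;
 * node_load and alloc_nmask track which nodes the scanned pages live on so
 * the new huge page can be allocated on (or near) the dominant node.
 */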
struct collapse_control {
	bool is_khugepaged;

	/* Num pages scanned per node */
	u32 node_load[MAX_NUMNODES];

	/* nodemask for allocation fallback */
	nodemask_t alloc_nmask;
};

/**
 * struct khugepaged_mm_slot - khugepaged information per mm that is being scanned
 * @slot: hash lookup from mm to mm_slot
 * @nr_pte_mapped_thp: number of pte mapped THP
 * @pte_mapped_thp: address array corresponding pte mapped THP
 */
struct khugepaged_mm_slot {
	struct mm_slot slot;

	/* pte-mapped THP in this mm */
	int nr_pte_mapped_thp;
	unsigned long pte_mapped_thp[MAX_PTE_MAPPED_THP];
};

/**
 * struct khugepaged_scan - cursor for scanning
 * @mm_head: the head of the mm list to scan
 * @mm_slot: the current mm_slot we are scanning
 * @address: the next address inside that to be scanned
 *
 * There is only the one khugepaged_scan instance of this cursor structure.
 */
struct khugepaged_scan {
	struct list_head mm_head;
	struct khugepaged_mm_slot *mm_slot;
	unsigned long address;
};

static struct khugepaged_scan khugepaged_scan = {
	.mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
};

#ifdef CONFIG_SYSFS
static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
					 struct kobj_attribute *attr,
					 char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_scan_sleep_millisecs);
}

static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
					  struct kobj_attribute *attr,
					  const char *buf, size_t count)
{
	unsigned int msecs;
	int err;

	err = kstrtouint(buf, 10, &msecs);
	if (err)
		return -EINVAL;

	khugepaged_scan_sleep_millisecs = msecs;
	khugepaged_sleep_expire = 0;
	wake_up_interruptible(&khugepaged_wait);

	return count;
}
static struct kobj_attribute scan_sleep_millisecs_attr =
	__ATTR_RW(scan_sleep_millisecs);

static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
					  struct kobj_attribute *attr,
					  char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
}

static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
					   struct kobj_attribute *attr,
					   const char *buf, size_t count)
{
	unsigned int msecs;
	int err;

	err = kstrtouint(buf, 10, &msecs);
	if (err)
		return -EINVAL;

	khugepaged_alloc_sleep_millisecs = msecs;
	khugepaged_sleep_expire = 0;
	wake_up_interruptible(&khugepaged_wait);

	return count;
}
static struct kobj_attribute alloc_sleep_millisecs_attr =
	__ATTR_RW(alloc_sleep_millisecs);

static ssize_t pages_to_scan_show(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_pages_to_scan);
}
static ssize_t pages_to_scan_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count)
{
	unsigned int pages;
	int err;

	err = kstrtouint(buf, 10, &pages);
	if (err || !pages)
		return -EINVAL;

	khugepaged_pages_to_scan = pages;

	return count;
}
static struct kobj_attribute pages_to_scan_attr =
	__ATTR_RW(pages_to_scan);

static ssize_t pages_collapsed_show(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_pages_collapsed);
}
static struct kobj_attribute pages_collapsed_attr =
	__ATTR_RO(pages_collapsed);

static ssize_t full_scans_show(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_full_scans);
}
static struct kobj_attribute full_scans_attr =
	__ATTR_RO(full_scans);

static ssize_t defrag_show(struct kobject *kobj,
			   struct kobj_attribute *attr, char *buf)
{
	return single_hugepage_flag_show(kobj, attr, buf,
					 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
}
static ssize_t defrag_store(struct kobject *kobj,
			    struct kobj_attribute *attr,
			    const char *buf, size_t count)
{
	return single_hugepage_flag_store(kobj, attr, buf, count,
					  TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
}
static struct kobj_attribute khugepaged_defrag_attr =
	__ATTR_RW(defrag);

/*
 * max_ptes_none controls if khugepaged should collapse hugepages over
 * any unmapped ptes in turn potentially increasing the memory
 * footprint of the vmas. When max_ptes_none is 0 khugepaged will not
 * reduce the available free memory in the system as it
 * runs. Increasing max_ptes_none will instead potentially reduce the
 * free memory in the system during the khugepaged scan.
 */
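/*
 * The max_ptes_* tunables below are exposed through the "khugepaged"
 * attribute group defined at the end of this section; on typical systems
 * that is /sys/kernel/mm/transparent_hugepage/khugepaged/.
 */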
static ssize_t max_ptes_none_show(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_none);
}
static ssize_t max_ptes_none_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count)
{
	int err;
	unsigned long max_ptes_none;

	err = kstrtoul(buf, 10, &max_ptes_none);
	if (err || max_ptes_none > HPAGE_PMD_NR - 1)
		return -EINVAL;

	khugepaged_max_ptes_none = max_ptes_none;

	return count;
}
static struct kobj_attribute khugepaged_max_ptes_none_attr =
	__ATTR_RW(max_ptes_none);

static ssize_t max_ptes_swap_show(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_swap);
}

static ssize_t max_ptes_swap_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count)
{
	int err;
	unsigned long max_ptes_swap;

	err = kstrtoul(buf, 10, &max_ptes_swap);
	if (err || max_ptes_swap > HPAGE_PMD_NR - 1)
		return -EINVAL;

	khugepaged_max_ptes_swap = max_ptes_swap;

	return count;
}

static struct kobj_attribute khugepaged_max_ptes_swap_attr =
	__ATTR_RW(max_ptes_swap);

static ssize_t max_ptes_shared_show(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_shared);
}

static ssize_t max_ptes_shared_store(struct kobject *kobj,
				     struct kobj_attribute *attr,
				     const char *buf, size_t count)
{
	int err;
	unsigned long max_ptes_shared;

	err = kstrtoul(buf, 10, &max_ptes_shared);
	if (err || max_ptes_shared > HPAGE_PMD_NR - 1)
		return -EINVAL;

	khugepaged_max_ptes_shared = max_ptes_shared;

	return count;
}

static struct kobj_attribute khugepaged_max_ptes_shared_attr =
	__ATTR_RW(max_ptes_shared);

static struct attribute *khugepaged_attr[] = {
	&khugepaged_defrag_attr.attr,
	&khugepaged_max_ptes_none_attr.attr,
	&khugepaged_max_ptes_swap_attr.attr,
	&khugepaged_max_ptes_shared_attr.attr,
	&pages_to_scan_attr.attr,
	&pages_collapsed_attr.attr,
	&full_scans_attr.attr,
	&scan_sleep_millisecs_attr.attr,
	&alloc_sleep_millisecs_attr.attr,
	NULL,
};

struct attribute_group khugepaged_attr_group = {
	.attrs = khugepaged_attr,
	.name = "khugepaged",
};
#endif /* CONFIG_SYSFS */

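/*
 * Called from madvise(MADV_HUGEPAGE/MADV_NOHUGEPAGE) to update the vma
 * flags and, for MADV_HUGEPAGE, to register the mm with khugepaged right
 * away rather than waiting for a future page fault to do so.
 */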
int hugepage_madvise(struct vm_area_struct *vma,
		     unsigned long *vm_flags, int advice)
{
	switch (advice) {
	case MADV_HUGEPAGE:
#ifdef CONFIG_S390
		/*
		 * qemu blindly sets MADV_HUGEPAGE on all allocations, but s390
		 * can't handle this properly after s390_enable_sie, so we simply
		 * ignore the madvise to prevent qemu from causing a SIGSEGV.
		 */
		if (mm_has_pgste(vma->vm_mm))
			return 0;
#endif
		*vm_flags &= ~VM_NOHUGEPAGE;
		*vm_flags |= VM_HUGEPAGE;
		/*
		 * If the vma becomes good for khugepaged to scan,
		 * register it here without waiting for a page fault that
		 * may not happen any time soon.
		 */
		khugepaged_enter_vma(vma, *vm_flags);
		break;
	case MADV_NOHUGEPAGE:
		*vm_flags &= ~VM_HUGEPAGE;
		*vm_flags |= VM_NOHUGEPAGE;
		/*
		 * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning
		 * this vma even if we leave the mm registered in khugepaged if
		 * it got registered before VM_NOHUGEPAGE was set.
		 */
		break;
	}

	return 0;
}

int __init khugepaged_init(void)
{
	mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
					  sizeof(struct khugepaged_mm_slot),
					  __alignof__(struct khugepaged_mm_slot),
					  0, NULL);
	if (!mm_slot_cache)
		return -ENOMEM;

	khugepaged_pages_to_scan = HPAGE_PMD_NR * 8;
	khugepaged_max_ptes_none = HPAGE_PMD_NR - 1;
	khugepaged_max_ptes_swap = HPAGE_PMD_NR / 8;
	khugepaged_max_ptes_shared = HPAGE_PMD_NR / 2;

	return 0;
}

void __init khugepaged_destroy(void)
{
	kmem_cache_destroy(mm_slot_cache);
}

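/*
 * Returns true once all users of the mm are gone (mm_users == 0), i.e.
 * the owning process has exited and the address space is being torn down.
 */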
static inline int hpage_collapse_test_exit(struct mm_struct *mm)
{
	return atomic_read(&mm->mm_users) == 0;
}

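/*
 * Register @mm with khugepaged: allocate a scan slot, hash it, add it to
 * the tail of the scan list and take a reference on the mm. The daemon is
 * woken only if the scan list was previously empty.
 */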
void __khugepaged_enter(struct mm_struct *mm)
{
	struct khugepaged_mm_slot *mm_slot;
	struct mm_slot *slot;
	int wakeup;

	mm_slot = mm_slot_alloc(mm_slot_cache);
	if (!mm_slot)
		return;

	slot = &mm_slot->slot;

	/* __khugepaged_exit() must not run from under us */
	VM_BUG_ON_MM(hpage_collapse_test_exit(mm), mm);
	if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
		mm_slot_free(mm_slot_cache, mm_slot);
		return;
	}

	spin_lock(&khugepaged_mm_lock);
	mm_slot_insert(mm_slots_hash, mm, slot);
	/*
	 * Insert just behind the scanning cursor, to let the area settle
	 * down a little.
	 */
	wakeup = list_empty(&khugepaged_scan.mm_head);
	list_add_tail(&slot->mm_node, &khugepaged_scan.mm_head);
	spin_unlock(&khugepaged_mm_lock);

	mmgrab(mm);
	if (wakeup)
		wake_up_interruptible(&khugepaged_wait);
}

void khugepaged_enter_vma(struct vm_area_struct *vma,
			  unsigned long vm_flags)
{
	if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags) &&
	    hugepage_flags_enabled()) {
		if (hugepage_vma_check(vma, vm_flags, false, false, true))
			__khugepaged_enter(vma->vm_mm);
	}
}

void __khugepaged_exit(struct mm_struct *mm)
{
	struct khugepaged_mm_slot *mm_slot;
	struct mm_slot *slot;
	int free = 0;

	spin_lock(&khugepaged_mm_lock);
	slot = mm_slot_lookup(mm_slots_hash, mm);
	mm_slot = mm_slot_entry(slot, struct khugepaged_mm_slot, slot);
	if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
		hash_del(&slot->hash);
		list_del(&slot->mm_node);
		free = 1;
	}
	spin_unlock(&khugepaged_mm_lock);

	if (free) {
		clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
		mm_slot_free(mm_slot_cache, mm_slot);
		mmdrop(mm);
	} else if (mm_slot) {
		/*
		 * This is required to serialize against
		 * hpage_collapse_test_exit() (which is guaranteed to run
		 * under mmap sem read mode). Stop here (after we return all
		 * pagetables will be destroyed) until khugepaged has finished
		 * working on the pagetables under the mmap_lock.
		 */
		mmap_write_lock(mm);
		mmap_write_unlock(mm);
	}
}

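/*
 * Undo the isolation performed during collapse: drop the NR_ISOLATED
 * accounting, unlock the folio and put it back on the LRU.
 */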
static void release_pte_folio(struct folio *folio)
{
	node_stat_mod_folio(folio,
			NR_ISOLATED_ANON + folio_is_file_lru(folio),
			-folio_nr_pages(folio));
	folio_unlock(folio);
	folio_putback_lru(folio);
}

static void release_pte_page(struct page *page)
{
	release_pte_folio(page_folio(page));
}

static void release_pte_pages(pte_t *pte, pte_t *_pte,
		struct list_head *compound_pagelist)
{
	struct folio *folio, *tmp;

	while (--_pte >= pte) {
		pte_t pteval = *_pte;
		unsigned long pfn;

		if (pte_none(pteval))
			continue;
		pfn = pte_pfn(pteval);
		if (is_zero_pfn(pfn))
			continue;
		folio = pfn_folio(pfn);
		if (folio_test_large(folio))
			continue;
		release_pte_folio(folio);
	}

	list_for_each_entry_safe(folio, tmp, compound_pagelist, lru) {
		list_del(&folio->lru);
		release_pte_folio(folio);
	}
}

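/*
 * A page is only safe to collapse if its refcount equals the references we
 * can account for (mapcount plus swapcache); anything extra indicates a GUP
 * or other external pin that must block the collapse.
 */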
static bool is_refcount_suitable(struct page *page)
{
	int expected_refcount;

	expected_refcount = total_mapcount(page);
	if (PageSwapCache(page))
		expected_refcount += compound_nr(page);

	return page_count(page) == expected_refcount;
}

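/*
 * Walk the HPAGE_PMD_NR ptes starting at @pte and try to lock and isolate
 * every mapped page so it cannot be freed or split while it is copied into
 * the new huge page. The max_ptes_none/max_ptes_shared limits are only
 * enforced when the collapse was initiated by khugepaged. On failure, all
 * pages isolated so far are released again via release_pte_pages().
 */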
static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
					unsigned long address,
					pte_t *pte,
					struct collapse_control *cc,
					struct list_head *compound_pagelist)
{
	struct page *page = NULL;
	pte_t *_pte;
	int none_or_zero = 0, shared = 0, result = SCAN_FAIL, referenced = 0;
	bool writable = false;

	for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
	     _pte++, address += PAGE_SIZE) {
		pte_t pteval = *_pte;
		if (pte_none(pteval) || (pte_present(pteval) &&
				is_zero_pfn(pte_pfn(pteval)))) {
			++none_or_zero;
			if (!userfaultfd_armed(vma) &&
			    (!cc->is_khugepaged ||
			     none_or_zero <= khugepaged_max_ptes_none)) {
				continue;
			} else {
				result = SCAN_EXCEED_NONE_PTE;
				count_vm_event(THP_SCAN_EXCEED_NONE_PTE);
				goto out;
			}
		}
		if (!pte_present(pteval)) {
			result = SCAN_PTE_NON_PRESENT;
			goto out;
		}
		if (pte_uffd_wp(pteval)) {
			result = SCAN_PTE_UFFD_WP;
			goto out;
		}
		page = vm_normal_page(vma, address, pteval);
		if (unlikely(!page) || unlikely(is_zone_device_page(page))) {
			result = SCAN_PAGE_NULL;
			goto out;
		}

		VM_BUG_ON_PAGE(!PageAnon(page), page);

		if (page_mapcount(page) > 1) {
			++shared;
			if (cc->is_khugepaged &&
			    shared > khugepaged_max_ptes_shared) {
				result = SCAN_EXCEED_SHARED_PTE;
				count_vm_event(THP_SCAN_EXCEED_SHARED_PTE);
				goto out;
			}
		}

		if (PageCompound(page)) {
			struct page *p;
			page = compound_head(page);

			/*
			 * Check if we have dealt with the compound page
			 * already
			 */
			list_for_each_entry(p, compound_pagelist, lru) {
				if (page == p)
					goto next;
			}
		}

		/*
		 * We can do it before isolate_lru_page because the
		 * page can't be freed from under us. NOTE: PG_lock
		 * is needed to serialize against split_huge_page
		 * when invoked from the VM.
		 */
		if (!trylock_page(page)) {
			result = SCAN_PAGE_LOCK;
			goto out;
		}

		/*
		 * Check if the page has any GUP (or other external) pins.
		 *
		 * The page table that maps the page has been already unlinked
		 * from the page table tree and this process cannot get
		 * an additional pin on the page.
		 *
		 * New pins can come later if the page is shared across fork,
		 * but not from this process. The other process cannot write to
		 * the page, only trigger CoW.
		 */
		if (!is_refcount_suitable(page)) {
			unlock_page(page);
			result = SCAN_PAGE_COUNT;
			goto out;
		}

		/*
		 * Isolate the page to avoid collapsing a hugepage
		 * currently in use by the VM.
		 */
		if (!isolate_lru_page(page)) {
			unlock_page(page);
			result = SCAN_DEL_PAGE_LRU;
			goto out;
		}
		mod_node_page_state(page_pgdat(page),
				NR_ISOLATED_ANON + page_is_file_lru(page),
				compound_nr(page));
		VM_BUG_ON_PAGE(!PageLocked(page), page);
		VM_BUG_ON_PAGE(PageLRU(page), page);

		if (PageCompound(page))
			list_add_tail(&page->lru, compound_pagelist);
next:
		/*
		 * If collapse was initiated by khugepaged, check that there
		 * are enough young ptes to justify collapsing the page.
		 */
		if (cc->is_khugepaged &&
		    (pte_young(pteval) || page_is_young(page) ||
		     PageReferenced(page) || mmu_notifier_test_young(vma->vm_mm,
								     address)))
			referenced++;

		if (pte_write(pteval))
			writable = true;
	}

	if (unlikely(!writable)) {
		result = SCAN_PAGE_RO;
	} else if (unlikely(cc->is_khugepaged && !referenced)) {
		result = SCAN_LACK_REFERENCED_PAGE;
	} else {
		result = SCAN_SUCCEED;
		trace_mm_collapse_huge_page_isolate(page, none_or_zero,
						    referenced, writable, result);
		return result;
	}
out:
	release_pte_pages(pte, _pte, compound_pagelist);
	trace_mm_collapse_huge_page_isolate(page, none_or_zero,
					    referenced, writable, result);
	return result;
}

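/*
 * Called after the page contents were successfully copied into the new huge
 * page: clear the old ptes, drop the rmap references and counters for the
 * original pages, and release them back to the LRU/allocator.
 */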
static void __collapse_huge_page_copy_succeeded(pte_t *pte,
						struct vm_area_struct *vma,
						unsigned long address,
						spinlock_t *ptl,
						struct list_head *compound_pagelist)
{
	struct page *src_page;
	struct page *tmp;
	pte_t *_pte;
	pte_t pteval;

	for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
	     _pte++, address += PAGE_SIZE) {
		pteval = *_pte;
		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
			add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
			if (is_zero_pfn(pte_pfn(pteval))) {
				/*
				 * ptl mostly unnecessary.
				 */
				spin_lock(ptl);
				ptep_clear(vma->vm_mm, address, _pte);
				spin_unlock(ptl);
			}
		} else {
			src_page = pte_page(pteval);
			if (!PageCompound(src_page))
				release_pte_page(src_page);
			/*
			 * ptl mostly unnecessary, but preempt has to
			 * be disabled to update the per-cpu stats
			 * inside page_remove_rmap().
			 */
			spin_lock(ptl);
			ptep_clear(vma->vm_mm, address, _pte);
			page_remove_rmap(src_page, vma, false);
			spin_unlock(ptl);
			free_page_and_swap_cache(src_page);
		}
	}

	list_for_each_entry_safe(src_page, tmp, compound_pagelist, lru) {
		list_del(&src_page->lru);
		mod_node_page_state(page_pgdat(src_page),
				    NR_ISOLATED_ANON + page_is_file_lru(src_page),
				    -compound_nr(src_page));
		unlock_page(src_page);
		free_swap_cache(src_page);
		putback_lru_page(src_page);
	}
}

static void __collapse_huge_page_copy_failed(pte_t *pte,
					     pmd_t *pmd,
					     pmd_t orig_pmd,
					     struct vm_area_struct *vma,
					     struct list_head *compound_pagelist)
{
	spinlock_t *pmd_ptl;

	/*
	 * Re-establish the PMD to point to the original page table
	 * entry. Restoring PMD needs to be done prior to releasing
	 * pages. Since pages are still isolated and locked here,
	 * acquiring anon_vma_lock_write is unnecessary.
	 */
	pmd_ptl = pmd_lock(vma->vm_mm, pmd);
	pmd_populate(vma->vm_mm, pmd, pmd_pgtable(orig_pmd));
	spin_unlock(pmd_ptl);
	/*
	 * Release both raw and compound pages isolated
	 * in __collapse_huge_page_isolate.
	 */
	release_pte_pages(pte, pte + HPAGE_PMD_NR, compound_pagelist);
}

/*
 * __collapse_huge_page_copy - attempts to copy memory contents from raw
 * pages to a hugepage. Cleans up the raw pages if copying succeeds;
 * otherwise restores the original page table and releases isolated raw pages.
 * Returns SCAN_SUCCEED if copying succeeds, otherwise returns SCAN_COPY_MC.
 *
 * @pte: starting of the PTEs to copy from
 * @page: the new hugepage to copy contents to
 * @pmd: pointer to the new hugepage's PMD
 * @orig_pmd: the original raw pages' PMD
 * @vma: the original raw pages' virtual memory area
 * @address: starting address to copy
 * @ptl: lock on raw pages' PTEs
 * @compound_pagelist: list that stores compound pages
 */
static int __collapse_huge_page_copy(pte_t *pte,
				     struct page *page,
				     pmd_t *pmd,
				     pmd_t orig_pmd,
				     struct vm_area_struct *vma,
				     unsigned long address,
				     spinlock_t *ptl,
				     struct list_head *compound_pagelist)
{
	struct page *src_page;
	pte_t *_pte;
	pte_t pteval;
	unsigned long _address;
	int result = SCAN_SUCCEED;

	/*
	 * Copying pages' contents is subject to memory poison at any iteration.
	 */
	for (_pte = pte, _address = address; _pte < pte + HPAGE_PMD_NR;
	     _pte++, page++, _address += PAGE_SIZE) {
		pteval = *_pte;
		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
			clear_user_highpage(page, _address);
			continue;
		}
		src_page = pte_page(pteval);
		if (copy_mc_user_highpage(page, src_page, _address, vma) > 0) {
			result = SCAN_COPY_MC;
			break;
		}
	}

	if (likely(result == SCAN_SUCCEED))
		__collapse_huge_page_copy_succeeded(pte, vma, address, ptl,
						    compound_pagelist);
	else
		__collapse_huge_page_copy_failed(pte, pmd, orig_pmd, vma,
						 compound_pagelist);

	return result;
}

static void khugepaged_alloc_sleep(void)
{
	DEFINE_WAIT(wait);

	add_wait_queue(&khugepaged_wait, &wait);
	__set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE);
	schedule_timeout(msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
	remove_wait_queue(&khugepaged_wait, &wait);
}

struct collapse_control khugepaged_collapse_control = {
	.is_khugepaged = true,
};

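/*
 * Decide whether to abort the scan because of NUMA placement: when
 * node_reclaim_mode is enabled, collapsing pages that span nodes farther
 * apart than node_reclaim_distance is treated like allocating remote
 * memory and is skipped.
 */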
static bool hpage_collapse_scan_abort(int nid, struct collapse_control *cc)
{
	int i;

	/*
	 * If node_reclaim_mode is disabled, then no extra effort is made to
	 * allocate memory locally.
	 */
	if (!node_reclaim_enabled())
		return false;

	/* If there is a count for this node already, it must be acceptable */
	if (cc->node_load[nid])
		return false;

	for (i = 0; i < MAX_NUMNODES; i++) {
		if (!cc->node_load[i])
			continue;
		if (node_distance(nid, i) > node_reclaim_distance)
			return true;
	}
	return false;
}

#define khugepaged_defrag()					\
	(transparent_hugepage_flags &				\
	 (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG))

/* Defrag for khugepaged will enter direct reclaim/compaction if necessary */
static inline gfp_t alloc_hugepage_khugepaged_gfpmask(void)
{
	return khugepaged_defrag() ? GFP_TRANSHUGE : GFP_TRANSHUGE_LIGHT;
}

#ifdef CONFIG_NUMA
static int hpage_collapse_find_target_node(struct collapse_control *cc)
{
	int nid, target_node = 0, max_value = 0;

	/* find first node with max normal pages hit */
	for (nid = 0; nid < MAX_NUMNODES; nid++)
		if (cc->node_load[nid] > max_value) {
			max_value = cc->node_load[nid];
			target_node = nid;
		}

	for_each_online_node(nid) {
		if (max_value == cc->node_load[nid])
			node_set(nid, cc->alloc_nmask);
	}

	return target_node;
}
#else
static int hpage_collapse_find_target_node(struct collapse_control *cc)
{
	return 0;
}
#endif

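/*
 * Allocate the new huge page on @node, allowing fallback to the nodes in
 * @nmask, and account the attempt in the THP_COLLAPSE_ALLOC* vmstats.
 */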
static bool hpage_collapse_alloc_page(struct page **hpage, gfp_t gfp, int node,
				      nodemask_t *nmask)
{
	*hpage = __alloc_pages(gfp, HPAGE_PMD_ORDER, node, nmask);
	if (unlikely(!*hpage)) {
		count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
		return false;
	}

	prep_transhuge_page(*hpage);
	count_vm_event(THP_COLLAPSE_ALLOC);
	return true;
}

/*
 * If mmap_lock was temporarily dropped, revalidate the vma before
 * continuing with the collapse.
 * Returns an enum scan_result value.
 */

static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
				   bool expect_anon,
				   struct vm_area_struct **vmap,
				   struct collapse_control *cc)
{
	struct vm_area_struct *vma;

	if (unlikely(hpage_collapse_test_exit(mm)))
		return SCAN_ANY_PROCESS;

	*vmap = vma = find_vma(mm, address);
	if (!vma)
		return SCAN_VMA_NULL;

	if (!transhuge_vma_suitable(vma, address))
		return SCAN_ADDRESS_RANGE;
	if (!hugepage_vma_check(vma, vma->vm_flags, false, false,
				cc->is_khugepaged))
		return SCAN_VMA_CHECK;
	/*
	 * Anon VMA expected, the address may be unmapped then
	 * remapped to file after khugepaged re-acquired the mmap_lock.
	 *
	 * hugepage_vma_check may return true for qualified file
	 * vmas.
	 */
	if (expect_anon && (!(*vmap)->anon_vma || !vma_is_anonymous(*vmap)))
		return SCAN_PAGE_ANON;
	return SCAN_SUCCEED;
}

/*
 * See pmd_trans_unstable() for how the result may change out from
 * underneath us, even if we hold mmap_lock in read.
 */
static int find_pmd_or_thp_or_none(struct mm_struct *mm,
				   unsigned long address,
				   pmd_t **pmd)
{
	pmd_t pmde;

	*pmd = mm_find_pmd(mm, address);
	if (!*pmd)
		return SCAN_PMD_NULL;

	pmde = pmdp_get_lockless(*pmd);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	/* See comments in pmd_none_or_trans_huge_or_clear_bad() */
	barrier();
#endif
	if (pmd_none(pmde))
		return SCAN_PMD_NONE;
	if (!pmd_present(pmde))
		return SCAN_PMD_NULL;
	if (pmd_trans_huge(pmde))
		return SCAN_PMD_MAPPED;
	if (pmd_devmap(pmde))
		return SCAN_PMD_NULL;
	if (pmd_bad(pmde))
		return SCAN_PMD_NULL;
	return SCAN_SUCCEED;
}

static int check_pmd_still_valid(struct mm_struct *mm,
				 unsigned long address,
				 pmd_t *pmd)
{
	pmd_t *new_pmd;
	int result = find_pmd_or_thp_or_none(mm, address, &new_pmd);

	if (result != SCAN_SUCCEED)
		return result;
	if (new_pmd != pmd)
		return SCAN_FAIL;
	return SCAN_SUCCEED;
}

/*
 * Bring missing pages in from swap, to complete THP collapse.
 * Only done if hpage_collapse_scan_pmd believes it is worthwhile.
 *
 * Called and returns without pte mapped or spinlocks held.
 * Note that if a failure result is returned, mmap_lock will have been
 * released.
 */

static int __collapse_huge_page_swapin(struct mm_struct *mm,
				       struct vm_area_struct *vma,
				       unsigned long haddr, pmd_t *pmd,
				       int referenced)
{
	int swapped_in = 0;
	vm_fault_t ret = 0;
	unsigned long address, end = haddr + (HPAGE_PMD_NR * PAGE_SIZE);

	for (address = haddr; address < end; address += PAGE_SIZE) {
		struct vm_fault vmf = {
			.vma = vma,
			.address = address,
			.pgoff = linear_page_index(vma, haddr),
			.flags = FAULT_FLAG_ALLOW_RETRY,
			.pmd = pmd,
		};

		vmf.pte = pte_offset_map(pmd, address);
		vmf.orig_pte = *vmf.pte;
		if (!is_swap_pte(vmf.orig_pte)) {
			pte_unmap(vmf.pte);
			continue;
		}
		ret = do_swap_page(&vmf);

		/*
		 * do_swap_page returns VM_FAULT_RETRY with released mmap_lock.
		 * Note we treat VM_FAULT_RETRY as VM_FAULT_ERROR here because
		 * we do not retry here and swap entry will remain in pagetable
		 * resulting in later failure.
		 */
		if (ret & VM_FAULT_RETRY) {
			trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
			/* Likely, but not guaranteed, that page lock failed */
			return SCAN_PAGE_LOCK;
		}
		if (ret & VM_FAULT_ERROR) {
			mmap_read_unlock(mm);
			trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
			return SCAN_FAIL;
		}
		swapped_in++;
	}

	/* Drain LRU add pagevec to remove extra pin on the swapped in pages */
	if (swapped_in)
		lru_add_drain();

	trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 1);
	return SCAN_SUCCEED;
}

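/*
 * Allocate the huge page for the collapse target and charge it to the mm's
 * memcg. The gfp mask depends on whether the collapse is driven by
 * khugepaged (which respects the defrag setting) or is a directed collapse
 * (which uses GFP_TRANSHUGE).
 */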
static int alloc_charge_hpage(struct page **hpage, struct mm_struct *mm,
			      struct collapse_control *cc)
{
	gfp_t gfp = (cc->is_khugepaged ? alloc_hugepage_khugepaged_gfpmask() :
		     GFP_TRANSHUGE);
	int node = hpage_collapse_find_target_node(cc);
	struct folio *folio;

	if (!hpage_collapse_alloc_page(hpage, gfp, node, &cc->alloc_nmask))
		return SCAN_ALLOC_HUGE_PAGE_FAIL;

	folio = page_folio(*hpage);
	if (unlikely(mem_cgroup_charge(folio, mm, gfp))) {
		folio_put(folio);
		*hpage = NULL;
		return SCAN_CGROUP_CHARGE_FAIL;
	}
	count_memcg_page_event(*hpage, THP_COLLAPSE_ALLOC);

	return SCAN_SUCCEED;
}

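/*
 * Perform the actual collapse for one PMD-sized range: allocate and charge
 * the huge page, swap in any missing pages, take mmap_lock for write,
 * unmap the old page table, copy the contents, and finally install the
 * huge PMD. Returns an enum scan_result value.
 */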
static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
			      int referenced, int unmapped,
			      struct collapse_control *cc)
{
	LIST_HEAD(compound_pagelist);
	pmd_t *pmd, _pmd;
	pte_t *pte;
	pgtable_t pgtable;
	struct page *hpage;
	spinlock_t *pmd_ptl, *pte_ptl;
	int result = SCAN_FAIL;
	struct vm_area_struct *vma;
	struct mmu_notifier_range range;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	/*
	 * Before allocating the hugepage, release the mmap_lock read lock.
	 * The allocation can take potentially a long time if it involves
	 * sync compaction, and we do not need to hold the mmap_lock during
	 * that. We will recheck the vma after taking it again in write mode.
	 */
	mmap_read_unlock(mm);

	result = alloc_charge_hpage(&hpage, mm, cc);
	if (result != SCAN_SUCCEED)
		goto out_nolock;

	mmap_read_lock(mm);
	result = hugepage_vma_revalidate(mm, address, true, &vma, cc);
	if (result != SCAN_SUCCEED) {
		mmap_read_unlock(mm);
		goto out_nolock;
	}

	result = find_pmd_or_thp_or_none(mm, address, &pmd);
	if (result != SCAN_SUCCEED) {
		mmap_read_unlock(mm);
		goto out_nolock;
	}

	if (unmapped) {
		/*
		 * __collapse_huge_page_swapin will return with mmap_lock
		 * released when it fails. So we jump out_nolock directly in
		 * that case. Continuing to collapse causes inconsistency.
		 */
		result = __collapse_huge_page_swapin(mm, vma, address, pmd,
						     referenced);
		if (result != SCAN_SUCCEED)
			goto out_nolock;
	}

	mmap_read_unlock(mm);
	/*
	 * Prevent all access to pagetables with the exception of
	 * gup_fast later handled by the ptep_clear_flush and the VM
	 * handled by the anon_vma lock + PG_lock.
	 */
	mmap_write_lock(mm);
	result = hugepage_vma_revalidate(mm, address, true, &vma, cc);
	if (result != SCAN_SUCCEED)
		goto out_up_write;
	/* check if the pmd is still valid */
	result = check_pmd_still_valid(mm, address, pmd);
	if (result != SCAN_SUCCEED)
		goto out_up_write;

	vma_start_write(vma);
	anon_vma_lock_write(vma->anon_vma);

	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, address,
				address + HPAGE_PMD_SIZE);
	mmu_notifier_invalidate_range_start(&range);

	pte = pte_offset_map(pmd, address);
	pte_ptl = pte_lockptr(mm, pmd);

	pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */
	/*
	 * This removes any huge TLB entry from the CPU so we won't allow
	 * huge and small TLB entries for the same virtual address to
	 * avoid the risk of CPU bugs in that area.
	 *
	 * Parallel fast GUP is fine since fast GUP will back off when
	 * it detects PMD is changed.
	 */
	_pmd = pmdp_collapse_flush(vma, address, pmd);
	spin_unlock(pmd_ptl);
	mmu_notifier_invalidate_range_end(&range);
	tlb_remove_table_sync_one();

	spin_lock(pte_ptl);
	result = __collapse_huge_page_isolate(vma, address, pte, cc,
					      &compound_pagelist);
	spin_unlock(pte_ptl);

	if (unlikely(result != SCAN_SUCCEED)) {
		pte_unmap(pte);
		spin_lock(pmd_ptl);
		BUG_ON(!pmd_none(*pmd));
		/*
		 * We can only use set_pmd_at when establishing
		 * hugepmds and never for establishing regular pmds that
		 * points to regular pagetables. Use pmd_populate for that
		 */
		pmd_populate(mm, pmd, pmd_pgtable(_pmd));
		spin_unlock(pmd_ptl);
		anon_vma_unlock_write(vma->anon_vma);
		goto out_up_write;
	}

	/*
	 * All pages are isolated and locked so anon_vma rmap
	 * can't run anymore.
	 */
	anon_vma_unlock_write(vma->anon_vma);

	result = __collapse_huge_page_copy(pte, hpage, pmd, _pmd,
					   vma, address, pte_ptl,
					   &compound_pagelist);
	pte_unmap(pte);
	if (unlikely(result != SCAN_SUCCEED))
		goto out_up_write;

	/*
	 * spin_lock() below is not the equivalent of smp_wmb(), but
	 * the smp_wmb() inside __SetPageUptodate() can be reused to
	 * avoid the copy_huge_page writes to become visible after
	 * the set_pmd_at() write.
	 */
	__SetPageUptodate(hpage);
	pgtable = pmd_pgtable(_pmd);

	_pmd = mk_huge_pmd(hpage, vma->vm_page_prot);
	_pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);

	spin_lock(pmd_ptl);
	BUG_ON(!pmd_none(*pmd));
	page_add_new_anon_rmap(hpage, vma, address);
	lru_cache_add_inactive_or_unevictable(hpage, vma);
	pgtable_trans_huge_deposit(mm, pmd, pgtable);
	set_pmd_at(mm, address, pmd, _pmd);
	update_mmu_cache_pmd(vma, address, pmd);
	spin_unlock(pmd_ptl);

	hpage = NULL;

	result = SCAN_SUCCEED;
out_up_write:
	mmap_write_unlock(mm);
out_nolock:
	if (hpage)
		put_page(hpage);
	trace_mm_collapse_huge_page(mm, result == SCAN_SUCCEED, result);
	return result;
}

Zach O'Keefe7d2c4382022-07-06 16:59:28 -07001237static int hpage_collapse_scan_pmd(struct mm_struct *mm,
1238 struct vm_area_struct *vma,
1239 unsigned long address, bool *mmap_locked,
1240 struct collapse_control *cc)
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001241{
1242 pmd_t *pmd;
1243 pte_t *pte, *_pte;
Zach O'Keefe50ad2f22022-07-06 16:59:23 -07001244 int result = SCAN_FAIL, referenced = 0;
Kirill A. Shutemov71a2c112020-06-03 16:00:30 -07001245 int none_or_zero = 0, shared = 0;
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001246 struct page *page = NULL;
1247 unsigned long _address;
1248 spinlock_t *ptl;
1249 int node = NUMA_NO_NODE, unmapped = 0;
Ebru Akagunduz0db501f2016-07-26 15:26:46 -07001250 bool writable = false;
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001251
1252 VM_BUG_ON(address & ~HPAGE_PMD_MASK);
1253
Zach O'Keefe50722802022-07-06 16:59:26 -07001254 result = find_pmd_or_thp_or_none(mm, address, &pmd);
1255 if (result != SCAN_SUCCEED)
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001256 goto out;
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001257
Zach O'Keefe34d6b472022-07-06 16:59:21 -07001258 memset(cc->node_load, 0, sizeof(cc->node_load));
Yang Shie031ff92022-11-08 10:43:56 -08001259 nodes_clear(cc->alloc_nmask);
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001260 pte = pte_offset_map_lock(mm, pmd, address, &ptl);
Miaohe Lin36ee2c72022-06-25 17:28:12 +08001261 for (_address = address, _pte = pte; _pte < pte + HPAGE_PMD_NR;
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001262 _pte++, _address += PAGE_SIZE) {
1263 pte_t pteval = *_pte;
1264 if (is_swap_pte(pteval)) {
Zach O'Keefed8ea7cc2022-07-06 16:59:24 -07001265 ++unmapped;
1266 if (!cc->is_khugepaged ||
1267 unmapped <= khugepaged_max_ptes_swap) {
Peter Xue1e267c2020-04-06 20:06:04 -07001268 /*
1269 * Always be strict with uffd-wp
1270 * enabled swap entries. Please see
1271 * comment below for pte_uffd_wp().
1272 */
Peter Xu2bad4662023-03-09 17:37:10 -05001273 if (pte_swp_uffd_wp_any(pteval)) {
Peter Xue1e267c2020-04-06 20:06:04 -07001274 result = SCAN_PTE_UFFD_WP;
1275 goto out_unmap;
1276 }
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001277 continue;
1278 } else {
1279 result = SCAN_EXCEED_SWAP_PTE;
Yang Yange9ea8742022-01-14 14:07:55 -08001280 count_vm_event(THP_SCAN_EXCEED_SWAP_PTE);
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001281 goto out_unmap;
1282 }
1283 }
1284 if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
Zach O'Keefed8ea7cc2022-07-06 16:59:24 -07001285 ++none_or_zero;
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001286 if (!userfaultfd_armed(vma) &&
Zach O'Keefed8ea7cc2022-07-06 16:59:24 -07001287 (!cc->is_khugepaged ||
1288 none_or_zero <= khugepaged_max_ptes_none)) {
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001289 continue;
1290 } else {
1291 result = SCAN_EXCEED_NONE_PTE;
Yang Yange9ea8742022-01-14 14:07:55 -08001292 count_vm_event(THP_SCAN_EXCEED_NONE_PTE);
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001293 goto out_unmap;
1294 }
1295 }
Peter Xue1e267c2020-04-06 20:06:04 -07001296 if (pte_uffd_wp(pteval)) {
1297 /*
1298 * Don't collapse the page if any of the small
1299 * PTEs are armed with uffd write protection.
1300 * Here we can also mark the new huge pmd as
1301 * write protected if any of the small ones is
Haitao Shi8958b242020-12-15 20:47:26 -08001302 * marked, but that could bring unknown
Peter Xue1e267c2020-04-06 20:06:04 -07001303 * userfault messages that fall outside of
1304 * the registered range. So, just be simple.
1305 */
1306 result = SCAN_PTE_UFFD_WP;
1307 goto out_unmap;
1308 }
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001309 if (pte_write(pteval))
1310 writable = true;
1311
1312 page = vm_normal_page(vma, _address, pteval);
Alex Sierra3218f872022-07-15 10:05:11 -05001313 if (unlikely(!page) || unlikely(is_zone_device_page(page))) {
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001314 result = SCAN_PAGE_NULL;
1315 goto out_unmap;
1316 }
1317
Zach O'Keefed8ea7cc2022-07-06 16:59:24 -07001318 if (page_mapcount(page) > 1) {
1319 ++shared;
1320 if (cc->is_khugepaged &&
1321 shared > khugepaged_max_ptes_shared) {
1322 result = SCAN_EXCEED_SHARED_PTE;
1323 count_vm_event(THP_SCAN_EXCEED_SHARED_PTE);
1324 goto out_unmap;
1325 }
Kirill A. Shutemov71a2c112020-06-03 16:00:30 -07001326 }
1327
Kirill A. Shutemov5503fbf2020-06-03 16:00:23 -07001328 page = compound_head(page);
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001329
1330 /*
1331 * Record which node the original page is from and save this
Zach O'Keefe34d6b472022-07-06 16:59:21 -07001332 * information to cc->node_load[].
Quanfa Fu0b8f0d82022-01-14 14:09:25 -08001333 * Khugepaged will allocate the hugepage from the node that has the max
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001334 * hit record.
1335 */
1336 node = page_to_nid(page);
Zach O'Keefe7d2c4382022-07-06 16:59:28 -07001337 if (hpage_collapse_scan_abort(node, cc)) {
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001338 result = SCAN_SCAN_ABORT;
1339 goto out_unmap;
1340 }
Zach O'Keefe34d6b472022-07-06 16:59:21 -07001341 cc->node_load[node]++;
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001342 if (!PageLRU(page)) {
1343 result = SCAN_PAGE_LRU;
1344 goto out_unmap;
1345 }
1346 if (PageLocked(page)) {
1347 result = SCAN_PAGE_LOCK;
1348 goto out_unmap;
1349 }
1350 if (!PageAnon(page)) {
1351 result = SCAN_PAGE_ANON;
1352 goto out_unmap;
1353 }
1354
1355 /*
Kirill A. Shutemov94456892020-06-03 16:00:20 -07001356 * Check if the page has any GUP (or other external) pins.
1357 *
Hugh Dickinscb67f422022-11-02 18:51:38 -07001358 * Here the check may be racy:
1359 * it may see total_mapcount > refcount in some cases?
Kirill A. Shutemov94456892020-06-03 16:00:20 -07001360 * But such a case is ephemeral, so we could always retry the collapse
 1361 * later. However, it may report a false positive if the page
 1362 * has excessive GUP pins (e.g. 512). Anyway, the same check
 1363 * will be done again later, so the risk seems low.
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001364 */
Kirill A. Shutemov94456892020-06-03 16:00:20 -07001365 if (!is_refcount_suitable(page)) {
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001366 result = SCAN_PAGE_COUNT;
1367 goto out_unmap;
1368 }
Zach O'Keefed8ea7cc2022-07-06 16:59:24 -07001369
1370 /*
 1371 * If collapse was initiated by khugepaged, check that there are
 1372 * enough young ptes to justify collapsing the page.
1373 */
1374 if (cc->is_khugepaged &&
1375 (pte_young(pteval) || page_is_young(page) ||
1376 PageReferenced(page) || mmu_notifier_test_young(vma->vm_mm,
1377 address)))
Ebru Akagunduz0db501f2016-07-26 15:26:46 -07001378 referenced++;
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001379 }
Kirill A. Shutemovffe945e2020-06-03 16:00:09 -07001380 if (!writable) {
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001381 result = SCAN_PAGE_RO;
Zach O'Keefed8ea7cc2022-07-06 16:59:24 -07001382 } else if (cc->is_khugepaged &&
1383 (!referenced ||
1384 (unmapped && referenced < HPAGE_PMD_NR / 2))) {
Kirill A. Shutemovffe945e2020-06-03 16:00:09 -07001385 result = SCAN_LACK_REFERENCED_PAGE;
1386 } else {
1387 result = SCAN_SUCCEED;
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001388 }
1389out_unmap:
1390 pte_unmap_unlock(pte, ptl);
Zach O'Keefe50ad2f22022-07-06 16:59:23 -07001391 if (result == SCAN_SUCCEED) {
1392 result = collapse_huge_page(mm, address, referenced,
1393 unmapped, cc);
Michel Lespinassec1e8d7c2020-06-08 21:33:54 -07001394 /* collapse_huge_page will return with the mmap_lock released */
Zach O'Keefe50ad2f22022-07-06 16:59:23 -07001395 *mmap_locked = false;
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001396 }
1397out:
1398 trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced,
1399 none_or_zero, result, unmapped);
Zach O'Keefe50ad2f22022-07-06 16:59:23 -07001400 return result;
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001401}
1402
Qi Zhengb26e2702022-08-31 11:19:46 +08001403static void collect_mm_slot(struct khugepaged_mm_slot *mm_slot)
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001404{
Qi Zhengb26e2702022-08-31 11:19:46 +08001405 struct mm_slot *slot = &mm_slot->slot;
1406 struct mm_struct *mm = slot->mm;
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001407
Lance Roy35f3aa32018-10-04 23:45:47 -07001408 lockdep_assert_held(&khugepaged_mm_lock);
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001409
Zach O'Keefe7d2c4382022-07-06 16:59:28 -07001410 if (hpage_collapse_test_exit(mm)) {
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001411 /* free mm_slot */
Qi Zhengb26e2702022-08-31 11:19:46 +08001412 hash_del(&slot->hash);
1413 list_del(&slot->mm_node);
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001414
1415 /*
1416 * Not strictly needed because the mm exited already.
1417 *
1418 * clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
1419 */
1420
1421 /* khugepaged_mm_lock actually not necessary for the below */
Qi Zhengb26e2702022-08-31 11:19:46 +08001422 mm_slot_free(mm_slot_cache, mm_slot);
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07001423 mmdrop(mm);
1424 }
1425}
1426
Matthew Wilcox (Oracle)396bcc52020-04-06 20:04:35 -07001427#ifdef CONFIG_SHMEM
Song Liu27e1f822019-09-23 15:38:30 -07001428/*
 1429 * Notify khugepaged that the given addr of the mm is a pte-mapped THP. Then
1430 * khugepaged should try to collapse the page table.
Zach O'Keefe34488392022-09-22 15:40:39 -07001431 *
 1432 * Note that the following race exists:
 1433 * (1) khugepaged calls khugepaged_collapse_pte_mapped_thps() for mm_struct A,
 1434 * emptying A's ->pte_mapped_thp[] array.
 1435 * (2) MADV_COLLAPSE collapses some file extent with target mm_struct B, and
 1436 * retract_page_tables() finds a VMA in mm_struct A mapping the same extent
 1437 * (at virtual address X) and adds an entry (for X) into mm_struct A's
 1438 * ->pte_mapped_thp[] array.
 1439 * (3) khugepaged calls hpage_collapse_scan_file() for mm_struct A at X,
 1440 * sees a pte-mapped THP (SCAN_PTE_MAPPED_HUGEPAGE) and adds an entry
 1441 * (for X) into mm_struct A's ->pte_mapped_thp[] array.
1442 * Thus, it's possible the same address is added multiple times for the same
1443 * mm_struct. Should this happen, we'll simply attempt
1444 * collapse_pte_mapped_thp() multiple times for the same address, under the same
1445 * exclusive mmap_lock, and assuming the first call is successful, subsequent
1446 * attempts will return quickly (without grabbing any additional locks) when
1447 * a huge pmd is found in find_pmd_or_thp_or_none(). Since this is a cheap
1448 * check, and since this is a rare occurrence, the cost of preventing this
1449 * "multiple-add" is thought to be more expensive than just handling it, should
1450 * it occur.
Song Liu27e1f822019-09-23 15:38:30 -07001451 */
Zach O'Keefe58ac9a82022-09-22 15:40:38 -07001452static bool khugepaged_add_pte_mapped_thp(struct mm_struct *mm,
Miaohe Lin081c3252022-06-25 17:28:15 +08001453 unsigned long addr)
Song Liu27e1f822019-09-23 15:38:30 -07001454{
Qi Zhengb26e2702022-08-31 11:19:46 +08001455 struct khugepaged_mm_slot *mm_slot;
1456 struct mm_slot *slot;
Zach O'Keefe58ac9a82022-09-22 15:40:38 -07001457 bool ret = false;
Song Liu27e1f822019-09-23 15:38:30 -07001458
1459 VM_BUG_ON(addr & ~HPAGE_PMD_MASK);
1460
1461 spin_lock(&khugepaged_mm_lock);
Qi Zhengb26e2702022-08-31 11:19:46 +08001462 slot = mm_slot_lookup(mm_slots_hash, mm);
1463 mm_slot = mm_slot_entry(slot, struct khugepaged_mm_slot, slot);
Zach O'Keefe58ac9a82022-09-22 15:40:38 -07001464 if (likely(mm_slot && mm_slot->nr_pte_mapped_thp < MAX_PTE_MAPPED_THP)) {
Song Liu27e1f822019-09-23 15:38:30 -07001465 mm_slot->pte_mapped_thp[mm_slot->nr_pte_mapped_thp++] = addr;
Zach O'Keefe58ac9a82022-09-22 15:40:38 -07001466 ret = true;
1467 }
Song Liu27e1f822019-09-23 15:38:30 -07001468 spin_unlock(&khugepaged_mm_lock);
Zach O'Keefe58ac9a82022-09-22 15:40:38 -07001469 return ret;
Song Liu27e1f822019-09-23 15:38:30 -07001470}
1471
Zach O'Keefe34488392022-09-22 15:40:39 -07001472/* hpage must be locked, and mmap_lock must be held in write */
1473static int set_huge_pmd(struct vm_area_struct *vma, unsigned long addr,
1474 pmd_t *pmdp, struct page *hpage)
1475{
1476 struct vm_fault vmf = {
1477 .vma = vma,
1478 .address = addr,
1479 .flags = 0,
1480 .pmd = pmdp,
1481 };
1482
1483 VM_BUG_ON(!PageTransHuge(hpage));
1484 mmap_assert_write_locked(vma->vm_mm);
1485
1486 if (do_set_pmd(&vmf, hpage))
1487 return SCAN_FAIL;
1488
1489 get_page(hpage);
1490 return SCAN_SUCCEED;
Song Liu27e1f822019-09-23 15:38:30 -07001491}
1492
Jann Horn8d3c1062022-11-25 22:37:12 +01001493/*
1494 * A note about locking:
1495 * Trying to take the page table spinlocks would be useless here because those
1496 * are only used to synchronize:
1497 *
1498 * - modifying terminal entries (ones that point to a data page, not to another
1499 * page table)
1500 * - installing *new* non-terminal entries
1501 *
1502 * Instead, we need roughly the same kind of protection as free_pgtables() or
1503 * mm_take_all_locks() (but only for a single VMA):
1504 * The mmap lock together with this VMA's rmap locks covers all paths towards
1505 * the page table entries we're messing with here, except for hardware page
1506 * table walks and lockless_pages_from_mm().
1507 */
Pasha Tatashine59a47b82022-02-03 20:49:20 -08001508static void collapse_and_free_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
1509 unsigned long addr, pmd_t *pmdp)
1510{
Pasha Tatashine59a47b82022-02-03 20:49:20 -08001511 pmd_t pmd;
Jann Hornf268f6c2022-11-25 22:37:14 +01001512 struct mmu_notifier_range range;
Pasha Tatashine59a47b82022-02-03 20:49:20 -08001513
Pasha Tatashin80110bb2022-02-03 20:49:24 -08001514 mmap_assert_write_locked(mm);
Jann Horn8d3c1062022-11-25 22:37:12 +01001515 if (vma->vm_file)
1516 lockdep_assert_held_write(&vma->vm_file->f_mapping->i_mmap_rwsem);
1517 /*
1518 * All anon_vmas attached to the VMA have the same root and are
1519 * therefore locked by the same lock.
1520 */
1521 if (vma->anon_vma)
1522 lockdep_assert_held_write(&vma->anon_vma->root->rwsem);
1523
Alistair Popple7d4a8be2023-01-10 13:57:22 +11001524 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, addr,
Jann Hornf268f6c2022-11-25 22:37:14 +01001525 addr + HPAGE_PMD_SIZE);
1526 mmu_notifier_invalidate_range_start(&range);
Pasha Tatashine59a47b82022-02-03 20:49:20 -08001527 pmd = pmdp_collapse_flush(vma, addr, pmdp);
Jann Horn2ba99c52022-11-25 22:37:13 +01001528 tlb_remove_table_sync_one();
Jann Hornf268f6c2022-11-25 22:37:14 +01001529 mmu_notifier_invalidate_range_end(&range);
Pasha Tatashine59a47b82022-02-03 20:49:20 -08001530 mm_dec_nr_ptes(mm);
Pasha Tatashin80110bb2022-02-03 20:49:24 -08001531 page_table_check_pte_clear_range(mm, addr, pmd);
Pasha Tatashine59a47b82022-02-03 20:49:20 -08001532 pte_free(mm, pmd_pgtable(pmd));
1533}
1534
Song Liu27e1f822019-09-23 15:38:30 -07001535/**
Alex Shi336e6b52020-12-14 19:12:01 -08001536 * collapse_pte_mapped_thp - Try to collapse a pte-mapped THP for mm at
1537 * address haddr.
1538 *
1539 * @mm: process address space where collapse happens
1540 * @addr: THP collapse address
Zach O'Keefe34488392022-09-22 15:40:39 -07001541 * @install_pmd: If a huge PMD should be installed
Song Liu27e1f822019-09-23 15:38:30 -07001542 *
1543 * This function checks whether all the PTEs in the PMD are pointing to the
 1544 * right THP. If so, retract the page table so the THP can refault in
Zach O'Keefe34488392022-09-22 15:40:39 -07001545 * as pmd-mapped. Possibly install a huge PMD mapping the THP.
Song Liu27e1f822019-09-23 15:38:30 -07001546 */
Zach O'Keefe34488392022-09-22 15:40:39 -07001547int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
1548 bool install_pmd)
Song Liu27e1f822019-09-23 15:38:30 -07001549{
1550 unsigned long haddr = addr & HPAGE_PMD_MASK;
Liam R. Howlett94d815b2022-09-06 19:48:50 +00001551 struct vm_area_struct *vma = vma_lookup(mm, haddr);
Hugh Dickins119a5fc2020-08-06 23:26:18 -07001552 struct page *hpage;
Song Liu27e1f822019-09-23 15:38:30 -07001553 pte_t *start_pte, *pte;
Pasha Tatashine59a47b82022-02-03 20:49:20 -08001554 pmd_t *pmd;
Song Liu27e1f822019-09-23 15:38:30 -07001555 spinlock_t *ptl;
Zach O'Keefe58ac9a82022-09-22 15:40:38 -07001556 int count = 0, result = SCAN_FAIL;
Song Liu27e1f822019-09-23 15:38:30 -07001557 int i;
1558
Zach O'Keefe58ac9a82022-09-22 15:40:38 -07001559 mmap_assert_write_locked(mm);
1560
Zach O'Keefe34488392022-09-22 15:40:39 -07001561 /* Fast check before locking page if already PMD-mapped */
Zach O'Keefe58ac9a82022-09-22 15:40:38 -07001562 result = find_pmd_or_thp_or_none(mm, haddr, &pmd);
Zach O'Keefe34488392022-09-22 15:40:39 -07001563 if (result == SCAN_PMD_MAPPED)
1564 return result;
Zach O'Keefe58ac9a82022-09-22 15:40:38 -07001565
Song Liu27e1f822019-09-23 15:38:30 -07001566 if (!vma || !vma->vm_file ||
Miaohe Linfef792a2021-05-04 18:34:15 -07001567 !range_in_vma(vma, haddr, haddr + HPAGE_PMD_SIZE))
Zach O'Keefe34488392022-09-22 15:40:39 -07001568 return SCAN_VMA_CHECK;
Song Liu27e1f822019-09-23 15:38:30 -07001569
1570 /*
Zach O'Keefea7f4e6e2022-07-06 16:59:25 -07001571 * If we are here, we've succeeded in replacing all the native pages
1572 * in the page cache with a single hugepage. If a mm were to fault-in
1573 * this memory (mapped by a suitably aligned VMA), we'd get the hugepage
1574 * and map it by a PMD, regardless of sysfs THP settings. As such, let's
1575 * analogously elide sysfs THP settings here.
Song Liu27e1f822019-09-23 15:38:30 -07001576 */
Zach O'Keefea7f4e6e2022-07-06 16:59:25 -07001577 if (!hugepage_vma_check(vma, vma->vm_flags, false, false, false))
Zach O'Keefe34488392022-09-22 15:40:39 -07001578 return SCAN_VMA_CHECK;
Song Liu27e1f822019-09-23 15:38:30 -07001579
Peter Xudeb4c932022-05-12 20:22:55 -07001580 /* Keep pmd pgtable for uffd-wp; see comment in retract_page_tables() */
1581 if (userfaultfd_wp(vma))
Zach O'Keefe34488392022-09-22 15:40:39 -07001582 return SCAN_PTE_UFFD_WP;
Peter Xudeb4c932022-05-12 20:22:55 -07001583
Hugh Dickins119a5fc2020-08-06 23:26:18 -07001584 hpage = find_lock_page(vma->vm_file->f_mapping,
1585 linear_page_index(vma, haddr));
1586 if (!hpage)
Zach O'Keefe34488392022-09-22 15:40:39 -07001587 return SCAN_PAGE_NULL;
Hugh Dickins119a5fc2020-08-06 23:26:18 -07001588
Zach O'Keefe34488392022-09-22 15:40:39 -07001589 if (!PageHead(hpage)) {
1590 result = SCAN_FAIL;
Hugh Dickins119a5fc2020-08-06 23:26:18 -07001591 goto drop_hpage;
Zach O'Keefe34488392022-09-22 15:40:39 -07001592 }
Hugh Dickins119a5fc2020-08-06 23:26:18 -07001593
Zach O'Keefe34488392022-09-22 15:40:39 -07001594 if (compound_order(hpage) != HPAGE_PMD_ORDER) {
1595 result = SCAN_PAGE_COMPOUND;
Hugh Dickins119a5fc2020-08-06 23:26:18 -07001596 goto drop_hpage;
Zach O'Keefe34488392022-09-22 15:40:39 -07001597 }
Zach O'Keefe780a4b62022-09-22 15:27:31 -07001598
Zach O'Keefe34488392022-09-22 15:40:39 -07001599 switch (result) {
1600 case SCAN_SUCCEED:
1601 break;
1602 case SCAN_PMD_NONE:
1603 /*
 1604 * In the MADV_COLLAPSE path, there is a possible race with khugepaged:
 1605 * all pte entries may have been removed and the pmd cleared. If so,
 1606 * skip all the pte checks and just update the pmd mapping.
1607 */
1608 goto maybe_install_pmd;
1609 default:
Song Liu27e1f822019-09-23 15:38:30 -07001610 goto drop_hpage;
Zach O'Keefe34488392022-09-22 15:40:39 -07001611 }
Song Liu27e1f822019-09-23 15:38:30 -07001612
Suren Baghdasaryan55fd6fc2023-02-27 09:36:14 -08001613 /* Lock the vma before taking i_mmap and page table locks */
1614 vma_start_write(vma);
1615
Jann Horn8d3c1062022-11-25 22:37:12 +01001616 /*
1617 * We need to lock the mapping so that from here on, only GUP-fast and
1618 * hardware page walks can access the parts of the page tables that
1619 * we're operating on.
1620 * See collapse_and_free_pmd().
1621 */
1622 i_mmap_lock_write(vma->vm_file->f_mapping);
1623
1624 /*
1625 * This spinlock should be unnecessary: Nobody else should be accessing
 1626 * the page tables under spinlock protection here; only
1627 * lockless_pages_from_mm() and the hardware page walker can access page
1628 * tables while all the high-level locks are held in write mode.
1629 */
Song Liu27e1f822019-09-23 15:38:30 -07001630 start_pte = pte_offset_map_lock(mm, pmd, haddr, &ptl);
Zach O'Keefe34488392022-09-22 15:40:39 -07001631 result = SCAN_FAIL;
Song Liu27e1f822019-09-23 15:38:30 -07001632
1633 /* step 1: check all mapped PTEs are to the right huge page */
1634 for (i = 0, addr = haddr, pte = start_pte;
1635 i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
1636 struct page *page;
1637
1638 /* empty pte, skip */
1639 if (pte_none(*pte))
1640 continue;
1641
1642 /* page swapped out, abort */
Zach O'Keefe34488392022-09-22 15:40:39 -07001643 if (!pte_present(*pte)) {
1644 result = SCAN_PTE_NON_PRESENT;
Song Liu27e1f822019-09-23 15:38:30 -07001645 goto abort;
Zach O'Keefe34488392022-09-22 15:40:39 -07001646 }
Song Liu27e1f822019-09-23 15:38:30 -07001647
1648 page = vm_normal_page(vma, addr, *pte);
Alex Sierra3218f872022-07-15 10:05:11 -05001649 if (WARN_ON_ONCE(page && is_zone_device_page(page)))
1650 page = NULL;
Song Liu27e1f822019-09-23 15:38:30 -07001651 /*
Hugh Dickins119a5fc2020-08-06 23:26:18 -07001652 * Note that uprobe, debugger, or MAP_PRIVATE may change the
1653 * page table, but the new page will not be a subpage of hpage.
Song Liu27e1f822019-09-23 15:38:30 -07001654 */
Hugh Dickins119a5fc2020-08-06 23:26:18 -07001655 if (hpage + i != page)
Song Liu27e1f822019-09-23 15:38:30 -07001656 goto abort;
1657 count++;
1658 }
1659
1660 /* step 2: adjust rmap */
1661 for (i = 0, addr = haddr, pte = start_pte;
1662 i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
1663 struct page *page;
1664
1665 if (pte_none(*pte))
1666 continue;
1667 page = vm_normal_page(vma, addr, *pte);
Alex Sierra3218f872022-07-15 10:05:11 -05001668 if (WARN_ON_ONCE(page && is_zone_device_page(page)))
1669 goto abort;
Hugh Dickinscea86fe2022-02-14 18:26:39 -08001670 page_remove_rmap(page, vma, false);
Song Liu27e1f822019-09-23 15:38:30 -07001671 }
1672
1673 pte_unmap_unlock(start_pte, ptl);
1674
1675 /* step 3: set proper refcount and mm_counters. */
Hugh Dickins119a5fc2020-08-06 23:26:18 -07001676 if (count) {
Song Liu27e1f822019-09-23 15:38:30 -07001677 page_ref_sub(hpage, count);
1678 add_mm_counter(vma->vm_mm, mm_counter_file(hpage), -count);
1679 }
1680
Zach O'Keefe34488392022-09-22 15:40:39 -07001681 /* step 4: remove pte entries */
Hugh Dickinsab0c3f12022-12-22 12:41:50 -08001682 /* we make no change to anon, but protect concurrent anon page lookup */
1683 if (vma->anon_vma)
1684 anon_vma_lock_write(vma->anon_vma);
1685
Pasha Tatashine59a47b82022-02-03 20:49:20 -08001686 collapse_and_free_pmd(mm, vma, haddr, pmd);
Zach O'Keefe34488392022-09-22 15:40:39 -07001687
Hugh Dickinsab0c3f12022-12-22 12:41:50 -08001688 if (vma->anon_vma)
1689 anon_vma_unlock_write(vma->anon_vma);
Jann Horn8d3c1062022-11-25 22:37:12 +01001690 i_mmap_unlock_write(vma->vm_file->f_mapping);
1691
Zach O'Keefe34488392022-09-22 15:40:39 -07001692maybe_install_pmd:
1693 /* step 5: install pmd entry */
1694 result = install_pmd
1695 ? set_huge_pmd(vma, haddr, pmd, hpage)
1696 : SCAN_SUCCEED;
1697
Hugh Dickins119a5fc2020-08-06 23:26:18 -07001698drop_hpage:
1699 unlock_page(hpage);
1700 put_page(hpage);
Zach O'Keefe34488392022-09-22 15:40:39 -07001701 return result;
Song Liu27e1f822019-09-23 15:38:30 -07001702
1703abort:
1704 pte_unmap_unlock(start_pte, ptl);
Jann Horn8d3c1062022-11-25 22:37:12 +01001705 i_mmap_unlock_write(vma->vm_file->f_mapping);
Hugh Dickins119a5fc2020-08-06 23:26:18 -07001706 goto drop_hpage;
Song Liu27e1f822019-09-23 15:38:30 -07001707}
1708
Qi Zhengb26e2702022-08-31 11:19:46 +08001709static void khugepaged_collapse_pte_mapped_thps(struct khugepaged_mm_slot *mm_slot)
Song Liu27e1f822019-09-23 15:38:30 -07001710{
Qi Zhengb26e2702022-08-31 11:19:46 +08001711 struct mm_slot *slot = &mm_slot->slot;
1712 struct mm_struct *mm = slot->mm;
Song Liu27e1f822019-09-23 15:38:30 -07001713 int i;
1714
1715 if (likely(mm_slot->nr_pte_mapped_thp == 0))
Miaohe Lin0edf61e2021-05-04 18:33:37 -07001716 return;
Song Liu27e1f822019-09-23 15:38:30 -07001717
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07001718 if (!mmap_write_trylock(mm))
Miaohe Lin0edf61e2021-05-04 18:33:37 -07001719 return;
Song Liu27e1f822019-09-23 15:38:30 -07001720
Zach O'Keefe7d2c4382022-07-06 16:59:28 -07001721 if (unlikely(hpage_collapse_test_exit(mm)))
Song Liu27e1f822019-09-23 15:38:30 -07001722 goto out;
1723
1724 for (i = 0; i < mm_slot->nr_pte_mapped_thp; i++)
Zach O'Keefe34488392022-09-22 15:40:39 -07001725 collapse_pte_mapped_thp(mm, mm_slot->pte_mapped_thp[i], false);
Song Liu27e1f822019-09-23 15:38:30 -07001726
1727out:
1728 mm_slot->nr_pte_mapped_thp = 0;
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07001729 mmap_write_unlock(mm);
Song Liu27e1f822019-09-23 15:38:30 -07001730}
1731
Zach O'Keefe34488392022-09-22 15:40:39 -07001732static int retract_page_tables(struct address_space *mapping, pgoff_t pgoff,
1733 struct mm_struct *target_mm,
1734 unsigned long target_addr, struct page *hpage,
1735 struct collapse_control *cc)
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001736{
1737 struct vm_area_struct *vma;
Zach O'Keefe34488392022-09-22 15:40:39 -07001738 int target_result = SCAN_FAIL;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001739
1740 i_mmap_lock_write(mapping);
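	/*
	 * Walk every VMA that maps this extent; for each one, try to retract
	 * its PTE-level page table so the THP can later refault PMD-mapped.
	 */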
1741 vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
Zach O'Keefe34488392022-09-22 15:40:39 -07001742 int result = SCAN_FAIL;
1743 struct mm_struct *mm = NULL;
1744 unsigned long addr = 0;
1745 pmd_t *pmd;
1746 bool is_target = false;
1747
Song Liu27e1f822019-09-23 15:38:30 -07001748 /*
1749 * Check vma->anon_vma to exclude MAP_PRIVATE mappings that
 1750 * got written to. These VMAs are likely not worth taking
Michel Lespinasse3e4e28c2020-06-08 21:33:51 -07001751 * mmap_write_lock(mm) for, as the PMD-mapping is likely to be split
Song Liu27e1f822019-09-23 15:38:30 -07001752 * later.
1753 *
Miaohe Lin36ee2c72022-06-25 17:28:12 +08001754 * Note that the vma->anon_vma check is racy: the fault path can set it
Michel Lespinassec1e8d7c2020-06-08 21:33:54 -07001755 * up after the check but before we take mmap_lock.
Song Liu27e1f822019-09-23 15:38:30 -07001756 * But the page lock would prevent establishing any new ptes of the
1757 * page, so we are safe.
1758 *
1759 * An alternative would be drop the check, but check that page
1760 * table is clear before calling pmdp_collapse_flush() under
1761 * ptl. It has higher chance to recover THP for the VMA, but
Jann Horn8d3c1062022-11-25 22:37:12 +01001762 * has higher cost too. It would also probably require locking
1763 * the anon_vma.
Song Liu27e1f822019-09-23 15:38:30 -07001764 */
Jann Horn023f47a2023-01-11 14:33:51 +01001765 if (READ_ONCE(vma->anon_vma)) {
Zach O'Keefe34488392022-09-22 15:40:39 -07001766 result = SCAN_PAGE_ANON;
1767 goto next;
1768 }
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001769 addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
Zach O'Keefe34488392022-09-22 15:40:39 -07001770 if (addr & ~HPAGE_PMD_MASK ||
1771 vma->vm_end < addr + HPAGE_PMD_SIZE) {
1772 result = SCAN_VMA_CHECK;
1773 goto next;
1774 }
Hugh Dickins18e77602020-08-06 23:26:22 -07001775 mm = vma->vm_mm;
Zach O'Keefe34488392022-09-22 15:40:39 -07001776 is_target = mm == target_mm && addr == target_addr;
1777 result = find_pmd_or_thp_or_none(mm, addr, &pmd);
1778 if (result != SCAN_SUCCEED)
1779 goto next;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001780 /*
Michel Lespinassec1e8d7c2020-06-08 21:33:54 -07001781 * We need exclusive mmap_lock to retract page table.
Song Liu27e1f822019-09-23 15:38:30 -07001782 *
1783 * We use trylock due to lock inversion: we need to acquire
Michel Lespinassec1e8d7c2020-06-08 21:33:54 -07001784 * mmap_lock while holding page lock. Fault path does it in
Song Liu27e1f822019-09-23 15:38:30 -07001785 * reverse order. Trylock is a way to avoid deadlock.
Zach O'Keefe34488392022-09-22 15:40:39 -07001786 *
1787 * Also, it's not MADV_COLLAPSE's job to collapse other
1788 * mappings - let khugepaged take care of them later.
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001789 */
Zach O'Keefe34488392022-09-22 15:40:39 -07001790 result = SCAN_PTE_MAPPED_HUGEPAGE;
1791 if ((cc->is_khugepaged || is_target) &&
1792 mmap_write_trylock(mm)) {
Suren Baghdasaryan55fd6fc2023-02-27 09:36:14 -08001793 /* trylock for the same lock inversion as above */
1794 if (!vma_try_start_write(vma))
1795 goto unlock_next;
1796
Peter Xudeb4c932022-05-12 20:22:55 -07001797 /*
Jann Horn023f47a2023-01-11 14:33:51 +01001798 * Re-check whether we have an ->anon_vma, because
1799 * collapse_and_free_pmd() requires that either no
1800 * ->anon_vma exists or the anon_vma is locked.
1801 * We already checked ->anon_vma above, but that check
1802 * is racy because ->anon_vma can be populated under the
1803 * mmap lock in read mode.
1804 */
1805 if (vma->anon_vma) {
1806 result = SCAN_PAGE_ANON;
1807 goto unlock_next;
1808 }
1809 /*
Peter Xudeb4c932022-05-12 20:22:55 -07001810 * When a vma is registered with uffd-wp, we can't
1811 * recycle the pmd pgtable because there can be pte
1812 * markers installed. Skip it only, so the rest mm/vma
1813 * can still have the same file mapped hugely, however
1814 * it'll always mapped in small page size for uffd-wp
1815 * registered ranges.
1816 */
Zach O'Keefe34488392022-09-22 15:40:39 -07001817 if (hpage_collapse_test_exit(mm)) {
1818 result = SCAN_ANY_PROCESS;
1819 goto unlock_next;
1820 }
1821 if (userfaultfd_wp(vma)) {
1822 result = SCAN_PTE_UFFD_WP;
1823 goto unlock_next;
1824 }
1825 collapse_and_free_pmd(mm, vma, addr, pmd);
1826 if (!cc->is_khugepaged && is_target)
1827 result = set_huge_pmd(vma, addr, pmd, hpage);
1828 else
1829 result = SCAN_SUCCEED;
1830
1831unlock_next:
Hugh Dickins18e77602020-08-06 23:26:22 -07001832 mmap_write_unlock(mm);
Zach O'Keefe34488392022-09-22 15:40:39 -07001833 goto next;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001834 }
Zach O'Keefe34488392022-09-22 15:40:39 -07001835 /*
1836 * Calling context will handle target mm/addr. Otherwise, let
1837 * khugepaged try again later.
1838 */
1839 if (!is_target) {
1840 khugepaged_add_pte_mapped_thp(mm, addr);
1841 continue;
1842 }
1843next:
1844 if (is_target)
1845 target_result = result;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001846 }
1847 i_mmap_unlock_write(mapping);
Zach O'Keefe34488392022-09-22 15:40:39 -07001848 return target_result;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001849}
1850
1851/**
Song Liu99cb0db2019-09-23 15:38:00 -07001852 * collapse_file - collapse filemap/tmpfs/shmem pages into a huge one.
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001853 *
Alex Shi336e6b52020-12-14 19:12:01 -08001854 * @mm: process address space where collapse happens
Zach O'Keefe34488392022-09-22 15:40:39 -07001855 * @addr: virtual collapse start address
Alex Shi336e6b52020-12-14 19:12:01 -08001856 * @file: file that collapse on
 1857 * @start: collapse start page cache index
Zach O'Keefe9710a78a2022-07-06 16:59:22 -07001858 * @cc: collapse context and scratchpad
Alex Shi336e6b52020-12-14 19:12:01 -08001859 *
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001860 * Basic scheme is simple, details are more complex:
Hugh Dickins87c460a2018-11-30 14:10:43 -08001861 * - allocate and lock a new huge page;
David Stevensa2e17cc2023-04-04 21:01:17 +09001862 * - scan page cache, locking old pages
Song Liu99cb0db2019-09-23 15:38:00 -07001863 * + swap/gup in pages if necessary;
David Stevensa2e17cc2023-04-04 21:01:17 +09001864 * - copy data to new page
1865 * - handle shmem holes
1866 * + re-validate that holes weren't filled by someone else
1867 * + check for userfaultfd
David Stevensac492b92023-04-04 21:01:16 +09001868 * - finalize updates to the page cache;
Matthew Wilcox77da9382017-12-04 14:56:08 -05001869 * - if replacing succeeds:
Hugh Dickins87c460a2018-11-30 14:10:43 -08001870 * + unlock huge page;
David Stevensa2e17cc2023-04-04 21:01:17 +09001871 * + free old pages;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001872 * - if replacing failed;
David Stevensa2e17cc2023-04-04 21:01:17 +09001873 * + unlock old pages
Hugh Dickins87c460a2018-11-30 14:10:43 -08001874 * + unlock and free huge page;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001875 */
Zach O'Keefe34488392022-09-22 15:40:39 -07001876static int collapse_file(struct mm_struct *mm, unsigned long addr,
1877 struct file *file, pgoff_t start,
1878 struct collapse_control *cc)
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001879{
Song Liu579c5712019-09-23 15:37:57 -07001880 struct address_space *mapping = file->f_mapping;
Zach O'Keefe50ad2f22022-07-06 16:59:23 -07001881 struct page *hpage;
Jiaqi Yan12904d92023-03-29 08:11:21 -07001882 struct page *page;
1883 struct page *tmp;
1884 struct folio *folio;
Gautam Menghani4c9473e2022-10-26 10:52:18 +05301885 pgoff_t index = 0, end = start + HPAGE_PMD_NR;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001886 LIST_HEAD(pagelist);
Matthew Wilcox77da9382017-12-04 14:56:08 -05001887 XA_STATE_ORDER(xas, &mapping->i_pages, start, HPAGE_PMD_ORDER);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001888 int nr_none = 0, result = SCAN_SUCCEED;
Song Liu99cb0db2019-09-23 15:38:00 -07001889 bool is_shmem = shmem_file(file);
Gautam Menghani4c9473e2022-10-26 10:52:18 +05301890 int nr = 0;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001891
Song Liu99cb0db2019-09-23 15:38:00 -07001892 VM_BUG_ON(!IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && !is_shmem);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001893 VM_BUG_ON(start & (HPAGE_PMD_NR - 1));
1894
Zach O'Keefe50ad2f22022-07-06 16:59:23 -07001895 result = alloc_charge_hpage(&hpage, mm, cc);
Zach O'Keefe9710a78a2022-07-06 16:59:22 -07001896 if (result != SCAN_SUCCEED)
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001897 goto out;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07001898
David Stevenscae106d2023-04-04 21:01:15 +09001899 __SetPageLocked(hpage);
1900 if (is_shmem)
1901 __SetPageSwapBacked(hpage);
1902 hpage->index = start;
1903 hpage->mapping = mapping;
1904
Matthew Wilcox (Oracle)6b24ca42020-06-27 22:19:08 -04001905 /*
1906 * Ensure we have slots for all the pages in the range. This is
 1907 * almost certainly a no-op because most of the pages must be present.
1908 */
Hugh Dickins95feeab2018-11-30 14:10:50 -08001909 do {
1910 xas_lock_irq(&xas);
1911 xas_create_range(&xas);
1912 if (!xas_error(&xas))
1913 break;
1914 xas_unlock_irq(&xas);
1915 if (!xas_nomem(&xas, GFP_KERNEL)) {
Hugh Dickins95feeab2018-11-30 14:10:50 -08001916 result = SCAN_FAIL;
David Stevenscae106d2023-04-04 21:01:15 +09001917 goto rollback;
Hugh Dickins95feeab2018-11-30 14:10:50 -08001918 }
1919 } while (1);
1920
Matthew Wilcox77da9382017-12-04 14:56:08 -05001921 xas_set(&xas, start);
1922 for (index = start; index < end; index++) {
Jiaqi Yan12904d92023-03-29 08:11:21 -07001923 page = xas_next(&xas);
Matthew Wilcox77da9382017-12-04 14:56:08 -05001924
1925 VM_BUG_ON(index != xas.xa_index);
Song Liu99cb0db2019-09-23 15:38:00 -07001926 if (is_shmem) {
1927 if (!page) {
1928 /*
1929 * Stop if extent has been truncated or
1930 * hole-punched, and is now completely
1931 * empty.
1932 */
1933 if (index == start) {
1934 if (!xas_next_entry(&xas, end - 1)) {
1935 result = SCAN_TRUNCATED;
1936 goto xa_locked;
1937 }
David Stevensac492b92023-04-04 21:01:16 +09001938 xas_set(&xas, index + 1);
Song Liu99cb0db2019-09-23 15:38:00 -07001939 }
1940 if (!shmem_charge(mapping->host, 1)) {
1941 result = SCAN_FAIL;
Hugh Dickins042a3082018-11-30 14:10:39 -08001942 goto xa_locked;
Hugh Dickins701270f2018-11-30 14:10:25 -08001943 }
Song Liu99cb0db2019-09-23 15:38:00 -07001944 nr_none++;
1945 continue;
Hugh Dickins701270f2018-11-30 14:10:25 -08001946 }
Song Liu99cb0db2019-09-23 15:38:00 -07001947
1948 if (xa_is_value(page) || !PageUptodate(page)) {
1949 xas_unlock_irq(&xas);
1950 /* swap in or instantiate fallocated page */
Matthew Wilcox (Oracle)7459c142022-09-02 20:46:27 +01001951 if (shmem_get_folio(mapping->host, index,
1952 &folio, SGP_NOALLOC)) {
Song Liu99cb0db2019-09-23 15:38:00 -07001953 result = SCAN_FAIL;
1954 goto xa_unlocked;
1955 }
David Stevensefa3d812023-04-04 21:01:14 +09001956 /* drain pagevecs to help isolate_lru_page() */
1957 lru_add_drain();
Matthew Wilcox (Oracle)7459c142022-09-02 20:46:27 +01001958 page = folio_file_page(folio, index);
Song Liu99cb0db2019-09-23 15:38:00 -07001959 } else if (trylock_page(page)) {
1960 get_page(page);
1961 xas_unlock_irq(&xas);
1962 } else {
1963 result = SCAN_PAGE_LOCK;
Hugh Dickins042a3082018-11-30 14:10:39 -08001964 goto xa_locked;
Matthew Wilcox77da9382017-12-04 14:56:08 -05001965 }
Song Liu99cb0db2019-09-23 15:38:00 -07001966 } else { /* !is_shmem */
1967 if (!page || xa_is_value(page)) {
1968 xas_unlock_irq(&xas);
1969 page_cache_sync_readahead(mapping, &file->f_ra,
1970 file, index,
David Howellse5a59d32020-09-04 16:36:16 -07001971 end - index);
Song Liu99cb0db2019-09-23 15:38:00 -07001972 /* drain pagevecs to help isolate_lru_page() */
1973 lru_add_drain();
1974 page = find_lock_page(mapping, index);
1975 if (unlikely(page == NULL)) {
1976 result = SCAN_FAIL;
1977 goto xa_unlocked;
1978 }
Song Liu75f36062019-11-30 17:57:19 -08001979 } else if (PageDirty(page)) {
1980 /*
1981 * khugepaged only works on read-only fd,
1982 * so this page is dirty because it hasn't
1983 * been flushed since first write. There
1984 * won't be new dirty pages.
1985 *
1986 * Trigger async flush here and hope the
1987 * writeback is done when khugepaged
1988 * revisits this page.
1989 *
1990 * This is a one-off situation. We are not
1991 * forcing writeback in loop.
1992 */
1993 xas_unlock_irq(&xas);
1994 filemap_flush(mapping);
1995 result = SCAN_FAIL;
1996 goto xa_unlocked;
Rongwei Wang74c42e12021-10-28 14:36:27 -07001997 } else if (PageWriteback(page)) {
1998 xas_unlock_irq(&xas);
1999 result = SCAN_FAIL;
2000 goto xa_unlocked;
Song Liu99cb0db2019-09-23 15:38:00 -07002001 } else if (trylock_page(page)) {
2002 get_page(page);
2003 xas_unlock_irq(&xas);
2004 } else {
2005 result = SCAN_PAGE_LOCK;
2006 goto xa_locked;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002007 }
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002008 }
2009
2010 /*
Matthew Wilcoxb93b0162018-04-10 16:36:56 -07002011 * The page must be locked, so we can drop the i_pages lock
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002012 * without racing with truncate.
2013 */
2014 VM_BUG_ON_PAGE(!PageLocked(page), page);
Song Liu4655e5e2019-11-15 17:34:53 -08002015
2016 /* make sure the page is up to date */
2017 if (unlikely(!PageUptodate(page))) {
2018 result = SCAN_FAIL;
2019 goto out_unlock;
2020 }
Hugh Dickins06a5e122018-11-30 14:10:47 -08002021
2022 /*
 2023 * If the file was truncated then extended, or hole-punched, before
2024 * we locked the first page, then a THP might be there already.
Zach O'Keefe58ac9a82022-09-22 15:40:38 -07002025 * This will be discovered on the first iteration.
Hugh Dickins06a5e122018-11-30 14:10:47 -08002026 */
2027 if (PageTransCompound(page)) {
Zach O'Keefe58ac9a82022-09-22 15:40:38 -07002028 struct page *head = compound_head(page);
2029
2030 result = compound_order(head) == HPAGE_PMD_ORDER &&
2031 head->index == start
2032 /* Maybe PMD-mapped */
2033 ? SCAN_PTE_MAPPED_HUGEPAGE
2034 : SCAN_PAGE_COMPOUND;
Hugh Dickins06a5e122018-11-30 14:10:47 -08002035 goto out_unlock;
2036 }
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002037
Vishal Moola (Oracle)64ab3192022-11-17 23:30:53 -08002038 folio = page_folio(page);
2039
2040 if (folio_mapping(folio) != mapping) {
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002041 result = SCAN_TRUNCATED;
2042 goto out_unlock;
2043 }
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002044
Vishal Moola (Oracle)64ab3192022-11-17 23:30:53 -08002045 if (!is_shmem && (folio_test_dirty(folio) ||
2046 folio_test_writeback(folio))) {
Song Liu4655e5e2019-11-15 17:34:53 -08002047 /*
2048 * khugepaged only works on read-only fd, so this
2049 * page is dirty because it hasn't been flushed
2050 * since first write.
2051 */
2052 result = SCAN_FAIL;
2053 goto out_unlock;
2054 }
2055
Baolin Wangbe2d5752023-02-15 18:39:34 +08002056 if (!folio_isolate_lru(folio)) {
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002057 result = SCAN_DEL_PAGE_LRU;
Hugh Dickins042a3082018-11-30 14:10:39 -08002058 goto out_unlock;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002059 }
2060
Vishal Moola (Oracle)64ab3192022-11-17 23:30:53 -08002061 if (folio_has_private(folio) &&
2062 !filemap_release_folio(folio, GFP_KERNEL)) {
Song Liu99cb0db2019-09-23 15:38:00 -07002063 result = SCAN_PAGE_HAS_PRIVATE;
Vishal Moola (Oracle)64ab3192022-11-17 23:30:53 -08002064 folio_putback_lru(folio);
Song Liu99cb0db2019-09-23 15:38:00 -07002065 goto out_unlock;
2066 }
2067
Vishal Moola (Oracle)64ab3192022-11-17 23:30:53 -08002068 if (folio_mapped(folio))
2069 try_to_unmap(folio,
Matthew Wilcox (Oracle)869f7ee2022-02-15 09:28:49 -05002070 TTU_IGNORE_MLOCK | TTU_BATCH_FLUSH);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002071
Matthew Wilcox77da9382017-12-04 14:56:08 -05002072 xas_lock_irq(&xas);
2073 xas_set(&xas, index);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002074
Matthew Wilcox77da9382017-12-04 14:56:08 -05002075 VM_BUG_ON_PAGE(page != xas_load(&xas), page);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002076
2077 /*
David Stevensa2e17cc2023-04-04 21:01:17 +09002078 * We control three references to the page:
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002079 * - we hold a pin on it;
Matthew Wilcox77da9382017-12-04 14:56:08 -05002080 * - one reference from page cache;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002081 * - one from isolate_lru_page;
David Stevensa2e17cc2023-04-04 21:01:17 +09002082 * If those are the only references, then any new usage of the
2083 * page will have to fetch it from the page cache. That requires
2084 * locking the page to handle truncate, so any new usage will be
 2085 * blocked until we unlock the page after collapse/during rollback.
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002086 */
David Stevensa2e17cc2023-04-04 21:01:17 +09002087 if (page_count(page) != 3) {
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002088 result = SCAN_PAGE_COUNT;
Hugh Dickins042a3082018-11-30 14:10:39 -08002089 xas_unlock_irq(&xas);
2090 putback_lru_page(page);
2091 goto out_unlock;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002092 }
2093
2094 /*
David Stevensa2e17cc2023-04-04 21:01:17 +09002095 * Accumulate the pages that are being collapsed.
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002096 */
2097 list_add_tail(&page->lru, &pagelist);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002098 continue;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002099out_unlock:
2100 unlock_page(page);
2101 put_page(page);
Hugh Dickins042a3082018-11-30 14:10:39 -08002102 goto xa_unlocked;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002103 }
2104
Jiaqi Yan12904d92023-03-29 08:11:21 -07002105 if (!is_shmem) {
Song Liu09d91cd2019-09-23 15:38:03 -07002106 filemap_nr_thps_inc(mapping);
Collin Fijalkovicheb6ecbe2021-06-30 18:51:32 -07002107 /*
2108 * Paired with smp_mb() in do_dentry_open() to ensure
2109 * i_writecount is up to date and the update to nr_thps is
2110 * visible. Ensures the page cache will be truncated if the
2111 * file is opened writable.
2112 */
2113 smp_mb();
2114 if (inode_is_open_for_write(mapping->host)) {
2115 result = SCAN_FAIL;
Collin Fijalkovicheb6ecbe2021-06-30 18:51:32 -07002116 filemap_nr_thps_dec(mapping);
Collin Fijalkovicheb6ecbe2021-06-30 18:51:32 -07002117 }
Song Liu09d91cd2019-09-23 15:38:03 -07002118 }
Song Liu99cb0db2019-09-23 15:38:00 -07002119
Hugh Dickins042a3082018-11-30 14:10:39 -08002120xa_locked:
2121 xas_unlock_irq(&xas);
Matthew Wilcox77da9382017-12-04 14:56:08 -05002122xa_unlocked:
Hugh Dickins042a3082018-11-30 14:10:39 -08002123
Hugh Dickins6d9df8a2022-02-14 18:40:55 -08002124 /*
2125 * If collapse is successful, flush must be done now before copying.
2126 * If collapse is unsuccessful, does flush actually need to be done?
2127 * Do it anyway, to clear the state.
2128 */
2129 try_to_unmap_flush();
2130
David Stevenscae106d2023-04-04 21:01:15 +09002131 if (result != SCAN_SUCCEED)
2132 goto rollback;
2133
2134 /*
David Stevensa2e17cc2023-04-04 21:01:17 +09002135 * The old pages are locked, so they won't change anymore.
David Stevenscae106d2023-04-04 21:01:15 +09002136 */
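	/*
	 * Copy each old page into its slot in hpage, clearing any holes.
	 * copy_mc_highpage() reports uncorrected memory errors instead of
	 * panicking; such an error becomes SCAN_COPY_MC and triggers rollback.
	 */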
2137 index = start;
2138 list_for_each_entry(page, &pagelist, lru) {
2139 while (index < page->index) {
Jiaqi Yan12904d92023-03-29 08:11:21 -07002140 clear_highpage(hpage + (index % HPAGE_PMD_NR));
2141 index++;
2142 }
David Stevenscae106d2023-04-04 21:01:15 +09002143 if (copy_mc_highpage(hpage + (page->index % HPAGE_PMD_NR), page) > 0) {
2144 result = SCAN_COPY_MC;
2145 goto rollback;
2146 }
2147 index++;
2148 }
2149 while (index < end) {
2150 clear_highpage(hpage + (index % HPAGE_PMD_NR));
2151 index++;
2152 }
2153
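	/*
	 * hpage now holds a copy of every present page in the range. If the
	 * range had holes (nr_none), re-check under the xarray lock that no
	 * one filled them while we were copying.
	 */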
David Stevensac492b92023-04-04 21:01:16 +09002154 if (nr_none) {
2155 struct vm_area_struct *vma;
2156 int nr_none_check = 0;
2157
2158 i_mmap_lock_read(mapping);
2159 xas_lock_irq(&xas);
2160
2161 xas_set(&xas, start);
2162 for (index = start; index < end; index++) {
2163 if (!xas_next(&xas)) {
2164 xas_store(&xas, XA_RETRY_ENTRY);
2165 if (xas_error(&xas)) {
2166 result = SCAN_STORE_FAILED;
2167 goto immap_locked;
2168 }
2169 nr_none_check++;
2170 }
2171 }
2172
2173 if (nr_none != nr_none_check) {
2174 result = SCAN_PAGE_FILLED;
2175 goto immap_locked;
2176 }
2177
2178 /*
2179 * If userspace observed a missing page in a VMA with a MODE_MISSING
2180 * userfaultfd, then it might expect a UFFD_EVENT_PAGEFAULT for that
2181 * page. If so, we need to roll back to avoid suppressing such an
2182 * event. Since wp/minor userfaultfds don't give userspace any
2183 * guarantees that the kernel doesn't fill a missing page with a zero
 2184 * page, they don't matter here.
2185 *
2186 * Any userfaultfds registered after this point will not be able to
2187 * observe any missing pages due to the previously inserted retry
2188 * entries.
2189 */
2190 vma_interval_tree_foreach(vma, &mapping->i_mmap, start, end) {
2191 if (userfaultfd_missing(vma)) {
2192 result = SCAN_EXCEED_NONE_PTE;
2193 goto immap_locked;
2194 }
2195 }
2196
2197immap_locked:
2198 i_mmap_unlock_read(mapping);
2199 if (result != SCAN_SUCCEED) {
2200 xas_set(&xas, start);
2201 for (index = start; index < end; index++) {
2202 if (xas_next(&xas) == XA_RETRY_ENTRY)
2203 xas_store(&xas, NULL);
2204 }
2205
2206 xas_unlock_irq(&xas);
2207 goto rollback;
2208 }
2209 } else {
2210 xas_lock_irq(&xas);
Jiaqi Yan12904d92023-03-29 08:11:21 -07002211 }
2212
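	/* Account the huge page before inserting it into the page cache. */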
2213 nr = thp_nr_pages(hpage);
David Stevenscae106d2023-04-04 21:01:15 +09002214 if (is_shmem)
2215 __mod_lruvec_page_state(hpage, NR_SHMEM_THPS, nr);
2216 else
2217 __mod_lruvec_page_state(hpage, NR_FILE_THPS, nr);
Jiaqi Yan12904d92023-03-29 08:11:21 -07002218
David Stevenscae106d2023-04-04 21:01:15 +09002219 if (nr_none) {
2220 __mod_lruvec_page_state(hpage, NR_FILE_PAGES, nr_none);
2221 /* nr_none is always 0 for non-shmem. */
2222 __mod_lruvec_page_state(hpage, NR_SHMEM, nr_none);
2223 }
Jiaqi Yan12904d92023-03-29 08:11:21 -07002224
David Stevensa2e17cc2023-04-04 21:01:17 +09002225 /*
2226 * Mark hpage as uptodate before inserting it into the page cache so
 2227 * that it isn't mistaken for a fallocated but unwritten page.
2228 */
David Stevenscae106d2023-04-04 21:01:15 +09002229 folio = page_folio(hpage);
2230 folio_mark_uptodate(folio);
2231 folio_ref_add(folio, HPAGE_PMD_NR - 1);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002232
David Stevenscae106d2023-04-04 21:01:15 +09002233 if (is_shmem)
2234 folio_mark_dirty(folio);
2235 folio_add_lru(folio);
Vishal Moola (Oracle)284a3442022-11-01 10:53:25 -07002236
David Stevensa2e17cc2023-04-04 21:01:17 +09002237 /* Join all the small entries into a single multi-index entry. */
2238 xas_set_order(&xas, start, HPAGE_PMD_ORDER);
2239 xas_store(&xas, hpage);
Hugh Dickins0175ab62023-04-22 21:47:20 -07002240 WARN_ON_ONCE(xas_error(&xas));
David Stevensa2e17cc2023-04-04 21:01:17 +09002241 xas_unlock_irq(&xas);
2242
David Stevenscae106d2023-04-04 21:01:15 +09002243 /*
2244 * Remove pte page tables, so we can re-fault the page as huge.
2245 */
2246 result = retract_page_tables(mapping, start, mm, addr, hpage,
2247 cc);
2248 unlock_page(hpage);
David Stevensac492b92023-04-04 21:01:16 +09002249
2250 /*
2251 * The collapse has succeeded, so free the old pages.
2252 */
2253 list_for_each_entry_safe(page, tmp, &pagelist, lru) {
2254 list_del(&page->lru);
2255 page->mapping = NULL;
David Stevensac492b92023-04-04 21:01:16 +09002256 ClearPageActive(page);
2257 ClearPageUnevictable(page);
2258 unlock_page(page);
David Stevensa2e17cc2023-04-04 21:01:17 +09002259 folio_put_refs(page_folio(page), 3);
David Stevensac492b92023-04-04 21:01:16 +09002260 }
2261
David Stevenscae106d2023-04-04 21:01:15 +09002262 goto out;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002263
David Stevenscae106d2023-04-04 21:01:15 +09002264rollback:
2265 /* Something went wrong: roll back page cache changes */
David Stevenscae106d2023-04-04 21:01:15 +09002266 if (nr_none) {
David Stevensa2e17cc2023-04-04 21:01:17 +09002267 xas_lock_irq(&xas);
David Stevenscae106d2023-04-04 21:01:15 +09002268 mapping->nrpages -= nr_none;
2269 shmem_uncharge(mapping->host, nr_none);
David Stevensa2e17cc2023-04-04 21:01:17 +09002270 xas_unlock_irq(&xas);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002271 }
Hugh Dickins042a3082018-11-30 14:10:39 -08002272
David Stevensa2e17cc2023-04-04 21:01:17 +09002273 list_for_each_entry_safe(page, tmp, &pagelist, lru) {
David Stevenscae106d2023-04-04 21:01:15 +09002274 list_del(&page->lru);
David Stevenscae106d2023-04-04 21:01:15 +09002275 unlock_page(page);
2276 putback_lru_page(page);
David Stevensa2e17cc2023-04-04 21:01:17 +09002277 put_page(page);
David Stevenscae106d2023-04-04 21:01:15 +09002278 }
David Stevenscae106d2023-04-04 21:01:15 +09002279 /*
 2280 * Undo the update of filemap_nr_thps_inc for non-SHMEM
 2281 * files only. This undo is not needed unless the failure is
2282 * due to SCAN_COPY_MC.
2283 */
2284 if (!is_shmem && result == SCAN_COPY_MC) {
2285 filemap_nr_thps_dec(mapping);
2286 /*
2287 * Paired with smp_mb() in do_dentry_open() to
2288 * ensure the update to nr_thps is visible.
2289 */
2290 smp_mb();
2291 }
2292
David Stevenscae106d2023-04-04 21:01:15 +09002293 hpage->mapping = NULL;
2294
2295 unlock_page(hpage);
2296 put_page(hpage);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002297out:
2298 VM_BUG_ON(!list_empty(&pagelist));
Gautam Menghani4c9473e2022-10-26 10:52:18 +05302299 trace_mm_khugepaged_collapse_file(mm, hpage, index, is_shmem, addr, file, nr, result);
Zach O'Keefe50ad2f22022-07-06 16:59:23 -07002300 return result;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002301}
2302
Zach O'Keefe34488392022-09-22 15:40:39 -07002303static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
2304 struct file *file, pgoff_t start,
2305 struct collapse_control *cc)
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002306{
2307 struct page *page = NULL;
Song Liu579c5712019-09-23 15:37:57 -07002308 struct address_space *mapping = file->f_mapping;
Matthew Wilcox85b392d2017-12-04 15:06:23 -05002309 XA_STATE(xas, &mapping->i_pages, start);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002310 int present, swap;
2311 int node = NUMA_NO_NODE;
2312 int result = SCAN_SUCCEED;
2313
2314 present = 0;
2315 swap = 0;
Zach O'Keefe34d6b472022-07-06 16:59:21 -07002316 memset(cc->node_load, 0, sizeof(cc->node_load));
Yang Shie031ff92022-11-08 10:43:56 -08002317 nodes_clear(cc->alloc_nmask);
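	/*
	 * Read-only pass over the page cache under RCU: count present pages,
	 * swap entries and per-node placement without taking any page locks.
	 */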
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002318 rcu_read_lock();
Matthew Wilcox85b392d2017-12-04 15:06:23 -05002319 xas_for_each(&xas, page, start + HPAGE_PMD_NR - 1) {
2320 if (xas_retry(&xas, page))
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002321 continue;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002322
Matthew Wilcox85b392d2017-12-04 15:06:23 -05002323 if (xa_is_value(page)) {
Zach O'Keefed8ea7cc2022-07-06 16:59:24 -07002324 ++swap;
2325 if (cc->is_khugepaged &&
2326 swap > khugepaged_max_ptes_swap) {
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002327 result = SCAN_EXCEED_SWAP_PTE;
Yang Yange9ea8742022-01-14 14:07:55 -08002328 count_vm_event(THP_SCAN_EXCEED_SWAP_PTE);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002329 break;
2330 }
2331 continue;
2332 }
2333
Matthew Wilcox (Oracle)6b24ca42020-06-27 22:19:08 -04002334 /*
Zach O'Keefe58ac9a82022-09-22 15:40:38 -07002335 * TODO: khugepaged should compact smaller compound pages
Matthew Wilcox (Oracle)6b24ca42020-06-27 22:19:08 -04002336 * into a PMD sized page
2337 */
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002338 if (PageTransCompound(page)) {
Zach O'Keefe58ac9a82022-09-22 15:40:38 -07002339 struct page *head = compound_head(page);
2340
2341 result = compound_order(head) == HPAGE_PMD_ORDER &&
2342 head->index == start
2343 /* Maybe PMD-mapped */
2344 ? SCAN_PTE_MAPPED_HUGEPAGE
2345 : SCAN_PAGE_COMPOUND;
2346 /*
2347 * For SCAN_PTE_MAPPED_HUGEPAGE, further processing
2348 * by the caller won't touch the page cache, and so
2349 * it's safe to skip LRU and refcount checks before
2350 * returning.
2351 */
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002352 break;
2353 }
2354
2355 node = page_to_nid(page);
Zach O'Keefe7d2c4382022-07-06 16:59:28 -07002356 if (hpage_collapse_scan_abort(node, cc)) {
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002357 result = SCAN_SCAN_ABORT;
2358 break;
2359 }
Zach O'Keefe34d6b472022-07-06 16:59:21 -07002360 cc->node_load[node]++;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002361
2362 if (!PageLRU(page)) {
2363 result = SCAN_PAGE_LRU;
2364 break;
2365 }
2366
Song Liu99cb0db2019-09-23 15:38:00 -07002367 if (page_count(page) !=
2368 1 + page_mapcount(page) + page_has_private(page)) {
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002369 result = SCAN_PAGE_COUNT;
2370 break;
2371 }
2372
2373 /*
2374 * We probably should check if the page is referenced here, but
2375 * nobody would transfer pte_young() to PageReferenced() for us.
2376 * And rmap walk here is just too costly...
2377 */
2378
2379 present++;
2380
2381 if (need_resched()) {
Matthew Wilcox85b392d2017-12-04 15:06:23 -05002382 xas_pause(&xas);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002383 cond_resched_rcu();
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002384 }
2385 }
2386 rcu_read_unlock();
2387
2388 if (result == SCAN_SUCCEED) {
Zach O'Keefed8ea7cc2022-07-06 16:59:24 -07002389 if (cc->is_khugepaged &&
2390 present < HPAGE_PMD_NR - khugepaged_max_ptes_none) {
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002391 result = SCAN_EXCEED_NONE_PTE;
Yang Yange9ea8742022-01-14 14:07:55 -08002392 count_vm_event(THP_SCAN_EXCEED_NONE_PTE);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002393 } else {
Zach O'Keefe34488392022-09-22 15:40:39 -07002394 result = collapse_file(mm, addr, file, start, cc);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002395 }
2396 }
2397
Gautam Menghani045634f2022-10-26 10:15:24 +05302398 trace_mm_khugepaged_scan_file(mm, page, file, present, swap, result);
Zach O'Keefe50ad2f22022-07-06 16:59:23 -07002399 return result;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002400}
2401#else
Zach O'Keefe34488392022-09-22 15:40:39 -07002402static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
2403 struct file *file, pgoff_t start,
2404 struct collapse_control *cc)
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002405{
2406 BUILD_BUG();
2407}
Song Liu27e1f822019-09-23 15:38:30 -07002408
Qi Zhengb26e2702022-08-31 11:19:46 +08002409static void khugepaged_collapse_pte_mapped_thps(struct khugepaged_mm_slot *mm_slot)
Song Liu27e1f822019-09-23 15:38:30 -07002410{
Song Liu27e1f822019-09-23 15:38:30 -07002411}
Zach O'Keefe58ac9a82022-09-22 15:40:38 -07002412
2413static bool khugepaged_add_pte_mapped_thp(struct mm_struct *mm,
2414 unsigned long addr)
2415{
2416 return false;
2417}
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002418#endif
2419
Zach O'Keefe50ad2f22022-07-06 16:59:23 -07002420static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
Zach O'Keefe34d6b472022-07-06 16:59:21 -07002421 struct collapse_control *cc)
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07002422 __releases(&khugepaged_mm_lock)
2423 __acquires(&khugepaged_mm_lock)
2424{
Matthew Wilcox (Oracle)68540502022-09-06 19:49:00 +00002425 struct vma_iterator vmi;
Qi Zhengb26e2702022-08-31 11:19:46 +08002426 struct khugepaged_mm_slot *mm_slot;
2427 struct mm_slot *slot;
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07002428 struct mm_struct *mm;
2429 struct vm_area_struct *vma;
2430 int progress = 0;
2431
2432 VM_BUG_ON(!pages);
Lance Roy35f3aa32018-10-04 23:45:47 -07002433 lockdep_assert_held(&khugepaged_mm_lock);
Zach O'Keefe50ad2f22022-07-06 16:59:23 -07002434 *result = SCAN_FAIL;
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07002435
Qi Zhengb26e2702022-08-31 11:19:46 +08002436 if (khugepaged_scan.mm_slot) {
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07002437 mm_slot = khugepaged_scan.mm_slot;
Qi Zhengb26e2702022-08-31 11:19:46 +08002438 slot = &mm_slot->slot;
2439 } else {
2440 slot = list_entry(khugepaged_scan.mm_head.next,
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07002441 struct mm_slot, mm_node);
Qi Zhengb26e2702022-08-31 11:19:46 +08002442 mm_slot = mm_slot_entry(slot, struct khugepaged_mm_slot, slot);
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07002443 khugepaged_scan.address = 0;
2444 khugepaged_scan.mm_slot = mm_slot;
2445 }
2446 spin_unlock(&khugepaged_mm_lock);
Song Liu27e1f822019-09-23 15:38:30 -07002447 khugepaged_collapse_pte_mapped_thps(mm_slot);
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07002448
Qi Zhengb26e2702022-08-31 11:19:46 +08002449 mm = slot->mm;
Yang Shi3b454ad2018-01-31 16:18:28 -08002450 /*
 2451	 * Don't wait for the mmap_lock (to avoid long wait times). Just move to
2452 * the next mm on the list.
2453 */
2454 vma = NULL;
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07002455 if (unlikely(!mmap_read_trylock(mm)))
Michel Lespinassec1e8d7c2020-06-08 21:33:54 -07002456 goto breakouterloop_mmap_lock;
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07002457
2458 progress++;
Matthew Wilcox (Oracle)68540502022-09-06 19:49:00 +00002459 if (unlikely(hpage_collapse_test_exit(mm)))
2460 goto breakouterloop;
2461
2462 vma_iter_init(&vmi, mm, khugepaged_scan.address);
2463 for_each_vma(vmi, vma) {
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07002464 unsigned long hstart, hend;
2465
2466 cond_resched();
Zach O'Keefe7d2c4382022-07-06 16:59:28 -07002467 if (unlikely(hpage_collapse_test_exit(mm))) {
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07002468 progress++;
2469 break;
2470 }
Zach O'Keefea7f4e6e2022-07-06 16:59:25 -07002471 if (!hugepage_vma_check(vma, vma->vm_flags, false, false, true)) {
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07002472skip:
2473 progress++;
2474 continue;
2475 }
Yang Shi4fa68932022-06-16 10:48:35 -07002476 hstart = round_up(vma->vm_start, HPAGE_PMD_SIZE);
2477 hend = round_down(vma->vm_end, HPAGE_PMD_SIZE);
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07002478 if (khugepaged_scan.address > hend)
2479 goto skip;
2480 if (khugepaged_scan.address < hstart)
2481 khugepaged_scan.address = hstart;
2482 VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
2483
2484 while (khugepaged_scan.address < hend) {
Zach O'Keefe50ad2f22022-07-06 16:59:23 -07002485 bool mmap_locked = true;
2486
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07002487 cond_resched();
Zach O'Keefe7d2c4382022-07-06 16:59:28 -07002488 if (unlikely(hpage_collapse_test_exit(mm)))
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07002489 goto breakouterloop;
2490
2491 VM_BUG_ON(khugepaged_scan.address < hstart ||
2492 khugepaged_scan.address + HPAGE_PMD_SIZE >
2493 hend);
Song Liu99cb0db2019-09-23 15:38:00 -07002494 if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) {
Matthew Wilcox (Oracle)396bcc52020-04-06 20:04:35 -07002495 struct file *file = get_file(vma->vm_file);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002496 pgoff_t pgoff = linear_page_index(vma,
2497 khugepaged_scan.address);
Song Liu99cb0db2019-09-23 15:38:00 -07002498
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07002499 mmap_read_unlock(mm);
Zach O'Keefe34488392022-09-22 15:40:39 -07002500 *result = hpage_collapse_scan_file(mm,
2501 khugepaged_scan.address,
2502 file, pgoff, cc);
Zach O'Keefe50ad2f22022-07-06 16:59:23 -07002503 mmap_locked = false;
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002504 fput(file);
2505 } else {
Zach O'Keefe7d2c4382022-07-06 16:59:28 -07002506 *result = hpage_collapse_scan_pmd(mm, vma,
2507 khugepaged_scan.address,
2508 &mmap_locked,
2509 cc);
Kirill A. Shutemovf3f0e1d2016-07-26 15:26:32 -07002510 }
Zach O'Keefe58ac9a82022-09-22 15:40:38 -07002511 switch (*result) {
2512 case SCAN_PTE_MAPPED_HUGEPAGE: {
2513 pmd_t *pmd;
2514
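				/*
				 * The file is already backed by a huge page,
				 * but this mm still maps it with ptes.  We
				 * cannot take mmap_lock for write here, so
				 * record the address and let a later
				 * khugepaged_collapse_pte_mapped_thps() pass
				 * retire the pte table; if it was recorded
				 * successfully, fall through and count this
				 * as a collapse.
				 */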
2515 *result = find_pmd_or_thp_or_none(mm,
2516 khugepaged_scan.address,
2517 &pmd);
2518 if (*result != SCAN_SUCCEED)
2519 break;
2520 if (!khugepaged_add_pte_mapped_thp(mm,
2521 khugepaged_scan.address))
2522 break;
2523 } fallthrough;
2524 case SCAN_SUCCEED:
Zach O'Keefe50ad2f22022-07-06 16:59:23 -07002525 ++khugepaged_pages_collapsed;
Zach O'Keefe58ac9a82022-09-22 15:40:38 -07002526 break;
2527 default:
2528 break;
2529 }
2530
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07002531 /* move to next address */
2532 khugepaged_scan.address += HPAGE_PMD_SIZE;
2533 progress += HPAGE_PMD_NR;
Zach O'Keefe50ad2f22022-07-06 16:59:23 -07002534 if (!mmap_locked)
2535 /*
 2536				 * We released mmap_lock, so break out of the loop. Note
2537 * that we drop mmap_lock before all hugepage
2538 * allocations, so if allocation fails, we are
2539 * guaranteed to break here and report the
 2540				 * correct result back to the caller.
2541 */
Michel Lespinassec1e8d7c2020-06-08 21:33:54 -07002542 goto breakouterloop_mmap_lock;
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07002543 if (progress >= pages)
2544 goto breakouterloop;
2545 }
2546 }
2547breakouterloop:
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07002548 mmap_read_unlock(mm); /* exit_mmap will destroy ptes after this */
Michel Lespinassec1e8d7c2020-06-08 21:33:54 -07002549breakouterloop_mmap_lock:
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07002550
2551 spin_lock(&khugepaged_mm_lock);
2552 VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
2553 /*
2554 * Release the current mm_slot if this mm is about to die, or
2555 * if we scanned all vmas of this mm.
2556 */
Zach O'Keefe7d2c4382022-07-06 16:59:28 -07002557 if (hpage_collapse_test_exit(mm) || !vma) {
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07002558 /*
2559 * Make sure that if mm_users is reaching zero while
2560 * khugepaged runs here, khugepaged_exit will find
2561 * mm_slot not pointing to the exiting mm.
2562 */
Qi Zhengb26e2702022-08-31 11:19:46 +08002563 if (slot->mm_node.next != &khugepaged_scan.mm_head) {
2564 slot = list_entry(slot->mm_node.next,
2565 struct mm_slot, mm_node);
2566 khugepaged_scan.mm_slot =
2567 mm_slot_entry(slot, struct khugepaged_mm_slot, slot);
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07002568 khugepaged_scan.address = 0;
2569 } else {
2570 khugepaged_scan.mm_slot = NULL;
2571 khugepaged_full_scans++;
2572 }
2573
2574 collect_mm_slot(mm_slot);
2575 }
2576
2577 return progress;
2578}
2579
2580static int khugepaged_has_work(void)
2581{
2582 return !list_empty(&khugepaged_scan.mm_head) &&
Yang Shi10640262022-06-16 10:48:39 -07002583 hugepage_flags_enabled();
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07002584}
2585
2586static int khugepaged_wait_event(void)
2587{
2588 return !list_empty(&khugepaged_scan.mm_head) ||
2589 kthread_should_stop();
2590}
2591
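/*
 * One scan pass of the khugepaged thread: drain the LRU pagevecs so the
 * PageLRU checks see current state, then call khugepaged_scan_mm_slot()
 * repeatedly until roughly 'pages' ptes have been covered.  A hugepage
 * allocation failure is tolerated once, after a short sleep; a second
 * failure cancels the pass.
 */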
Zach O'Keefe34d6b472022-07-06 16:59:21 -07002592static void khugepaged_do_scan(struct collapse_control *cc)
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07002593{
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07002594 unsigned int progress = 0, pass_through_head = 0;
Yanfei Xu89dc6a92021-05-04 18:34:12 -07002595 unsigned int pages = READ_ONCE(khugepaged_pages_to_scan);
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07002596 bool wait = true;
Zach O'Keefe50ad2f22022-07-06 16:59:23 -07002597 int result = SCAN_SUCCEED;
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07002598
Kirill A. Shutemova980df32020-06-03 16:00:12 -07002599 lru_add_drain_all();
2600
Yang Shic6a7f442022-07-06 16:59:20 -07002601 while (true) {
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07002602 cond_resched();
2603
2604 if (unlikely(kthread_should_stop() || try_to_freeze()))
2605 break;
2606
2607 spin_lock(&khugepaged_mm_lock);
2608 if (!khugepaged_scan.mm_slot)
2609 pass_through_head++;
2610 if (khugepaged_has_work() &&
2611 pass_through_head < 2)
2612 progress += khugepaged_scan_mm_slot(pages - progress,
Zach O'Keefe50ad2f22022-07-06 16:59:23 -07002613 &result, cc);
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07002614 else
2615 progress = pages;
2616 spin_unlock(&khugepaged_mm_lock);
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07002617
Yang Shic6a7f442022-07-06 16:59:20 -07002618 if (progress >= pages)
2619 break;
2620
Zach O'Keefe50ad2f22022-07-06 16:59:23 -07002621 if (result == SCAN_ALLOC_HUGE_PAGE_FAIL) {
Yang Shic6a7f442022-07-06 16:59:20 -07002622 /*
 2623			 * If we fail to allocate the first time, try to sleep for
 2624			 * a while. If allocation fails again, cancel the scan.
2625 */
2626 if (!wait)
2627 break;
2628 wait = false;
Yang Shic6a7f442022-07-06 16:59:20 -07002629 khugepaged_alloc_sleep();
2630 }
2631 }
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07002632}
2633
2634static bool khugepaged_should_wakeup(void)
2635{
2636 return kthread_should_stop() ||
2637 time_after_eq(jiffies, khugepaged_sleep_expire);
2638}
2639
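/*
 * Sleep between scan passes: with work pending, nap for the configured
 * scan-sleep interval (or return at once if that interval is zero); with
 * nothing queued and THP enabled, block until an mm is registered or the
 * kthread is asked to stop.
 */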
2640static void khugepaged_wait_work(void)
2641{
2642 if (khugepaged_has_work()) {
2643 const unsigned long scan_sleep_jiffies =
2644 msecs_to_jiffies(khugepaged_scan_sleep_millisecs);
2645
2646 if (!scan_sleep_jiffies)
2647 return;
2648
2649 khugepaged_sleep_expire = jiffies + scan_sleep_jiffies;
2650 wait_event_freezable_timeout(khugepaged_wait,
2651 khugepaged_should_wakeup(),
2652 scan_sleep_jiffies);
2653 return;
2654 }
2655
Yang Shi10640262022-06-16 10:48:39 -07002656 if (hugepage_flags_enabled())
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07002657 wait_event_freezable(khugepaged_wait, khugepaged_wait_event());
2658}
2659
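/*
 * Main loop of the khugepaged kernel thread: runs freezable and at the
 * lowest scheduling priority, alternating khugepaged_do_scan() with
 * khugepaged_wait_work() until kthread_stop(), then releases whatever
 * mm_slot the scan cursor still points at.
 */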
2660static int khugepaged(void *none)
2661{
Qi Zhengb26e2702022-08-31 11:19:46 +08002662 struct khugepaged_mm_slot *mm_slot;
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07002663
2664 set_freezable();
2665 set_user_nice(current, MAX_NICE);
2666
2667 while (!kthread_should_stop()) {
Zach O'Keefe34d6b472022-07-06 16:59:21 -07002668 khugepaged_do_scan(&khugepaged_collapse_control);
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07002669 khugepaged_wait_work();
2670 }
2671
2672 spin_lock(&khugepaged_mm_lock);
2673 mm_slot = khugepaged_scan.mm_slot;
2674 khugepaged_scan.mm_slot = NULL;
2675 if (mm_slot)
2676 collect_mm_slot(mm_slot);
2677 spin_unlock(&khugepaged_mm_lock);
2678 return 0;
2679}
2680
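/*
 * Keep enough memory free for anti-fragmentation: per eligible zone,
 * two pageblocks for a migratetype to fall back to, plus
 * MIGRATE_PCPTYPES * MIGRATE_PCPTYPES pageblocks to absorb subsequent
 * fallbacks.  As a rough worked example, assuming 4 KiB pages, 2 MiB
 * pageblocks (pageblock_nr_pages == 512), MIGRATE_PCPTYPES == 3 and two
 * populated zones below ZONE_MOVABLE:
 *
 *	recommended_min = 512 * 2 * 2 + 512 * 2 * 3 * 3 = 11264 pages
 *	                = 11264 << (PAGE_SHIFT - 10)   = 45056 KiB (~44 MiB)
 *
 * capped at 5% of lowmem (nr_free_buffer_pages() / 20).  With THP
 * disabled, the default min_free_kbytes is recalculated instead.
 */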
2681static void set_recommended_min_free_kbytes(void)
2682{
2683 struct zone *zone;
2684 int nr_zones = 0;
2685 unsigned long recommended_min;
2686
Yang Shi10640262022-06-16 10:48:39 -07002687 if (!hugepage_flags_enabled()) {
Liangcai Fanbd3400e2021-11-05 13:41:36 -07002688 calculate_min_free_kbytes();
2689 goto update_wmarks;
2690 }
2691
Joonsoo Kimb7d349c2018-04-10 16:30:27 -07002692 for_each_populated_zone(zone) {
2693 /*
2694 * We don't need to worry about fragmentation of
2695 * ZONE_MOVABLE since it only has movable pages.
2696 */
2697 if (zone_idx(zone) > gfp_zone(GFP_USER))
2698 continue;
2699
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07002700 nr_zones++;
Joonsoo Kimb7d349c2018-04-10 16:30:27 -07002701 }
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07002702
2703 /* Ensure 2 pageblocks are free to assist fragmentation avoidance */
2704 recommended_min = pageblock_nr_pages * nr_zones * 2;
2705
2706 /*
2707 * Make sure that on average at least two pageblocks are almost free
2708 * of another type, one for a migratetype to fall back to and a
 2709	 * second to avoid subsequent fallbacks of other types. There are 3
2710 * MIGRATE_TYPES we care about.
2711 */
2712 recommended_min += pageblock_nr_pages * nr_zones *
2713 MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;
2714
 2715	/* don't ever allow reserving more than 5% of the lowmem */
2716 recommended_min = min(recommended_min,
2717 (unsigned long) nr_free_buffer_pages() / 20);
2718 recommended_min <<= (PAGE_SHIFT-10);
2719
2720 if (recommended_min > min_free_kbytes) {
2721 if (user_min_free_kbytes >= 0)
2722 pr_info("raising min_free_kbytes from %d to %lu to help transparent hugepage allocations\n",
2723 min_free_kbytes, recommended_min);
2724
2725 min_free_kbytes = recommended_min;
2726 }
Liangcai Fanbd3400e2021-11-05 13:41:36 -07002727
2728update_wmarks:
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07002729 setup_per_zone_wmarks();
2730}
2731
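/*
 * Start or stop the khugepaged kernel thread so that it matches the
 * current THP "enabled" setting, then refresh min_free_kbytes to suit.
 * Serialized by khugepaged_mutex.
 */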
2732int start_stop_khugepaged(void)
2733{
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07002734 int err = 0;
2735
2736 mutex_lock(&khugepaged_mutex);
Yang Shi10640262022-06-16 10:48:39 -07002737 if (hugepage_flags_enabled()) {
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07002738 if (!khugepaged_thread)
2739 khugepaged_thread = kthread_run(khugepaged, NULL,
2740 "khugepaged");
2741 if (IS_ERR(khugepaged_thread)) {
2742 pr_err("khugepaged: kthread_run(khugepaged) failed\n");
2743 err = PTR_ERR(khugepaged_thread);
2744 khugepaged_thread = NULL;
2745 goto fail;
2746 }
2747
2748 if (!list_empty(&khugepaged_scan.mm_head))
2749 wake_up_interruptible(&khugepaged_wait);
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07002750 } else if (khugepaged_thread) {
2751 kthread_stop(khugepaged_thread);
2752 khugepaged_thread = NULL;
2753 }
Liangcai Fanbd3400e2021-11-05 13:41:36 -07002754 set_recommended_min_free_kbytes();
Kirill A. Shutemovb46e7562016-07-26 15:26:24 -07002755fail:
2756 mutex_unlock(&khugepaged_mutex);
2757 return err;
2758}
Vijay Balakrishna4aab2be2020-10-10 23:16:40 -07002759
2760void khugepaged_min_free_kbytes_update(void)
2761{
2762 mutex_lock(&khugepaged_mutex);
Yang Shi10640262022-06-16 10:48:39 -07002763 if (hugepage_flags_enabled() && khugepaged_thread)
Vijay Balakrishna4aab2be2020-10-10 23:16:40 -07002764 set_recommended_min_free_kbytes();
2765 mutex_unlock(&khugepaged_mutex);
2766}
Zach O'Keefe7d8faaf2022-07-06 16:59:27 -07002767
Johannes Weiner57e9cc52022-10-26 14:01:33 -04002768bool current_is_khugepaged(void)
2769{
2770 return kthread_func(current) == khugepaged;
2771}
2772
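/*
 * Translate an internal scan_result into the errno reported by
 * madvise(MADV_COLLAPSE): -ENOMEM for hugepage allocation failure,
 * -EBUSY for memcg charge failure or SCAN_EXCEED_NONE_PTE, -EAGAIN for
 * transient page states that a retry may resolve, and -EINVAL for
 * everything else.
 */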
Zach O'Keefe7d8faaf2022-07-06 16:59:27 -07002773static int madvise_collapse_errno(enum scan_result r)
2774{
2775 /*
2776 * MADV_COLLAPSE breaks from existing madvise(2) conventions to provide
 2777	 * actionable feedback to the caller, so it can take an appropriate
2778 * fallback measure depending on the nature of the failure.
2779 */
2780 switch (r) {
2781 case SCAN_ALLOC_HUGE_PAGE_FAIL:
2782 return -ENOMEM;
2783 case SCAN_CGROUP_CHARGE_FAIL:
David Stevensac492b92023-04-04 21:01:16 +09002784 case SCAN_EXCEED_NONE_PTE:
Zach O'Keefe7d8faaf2022-07-06 16:59:27 -07002785 return -EBUSY;
 2786	/* Resource temporarily unavailable - trying again might succeed */
Zach O'Keefeae63c892023-01-24 17:57:37 -08002787 case SCAN_PAGE_COUNT:
Zach O'Keefe7d8faaf2022-07-06 16:59:27 -07002788 case SCAN_PAGE_LOCK:
2789 case SCAN_PAGE_LRU:
Zach O'Keefe0f3e2a22022-09-22 11:46:50 -07002790 case SCAN_DEL_PAGE_LRU:
David Stevensac492b92023-04-04 21:01:16 +09002791 case SCAN_PAGE_FILLED:
Zach O'Keefe7d8faaf2022-07-06 16:59:27 -07002792 return -EAGAIN;
2793 /*
2794 * Other: Trying again likely not to succeed / error intrinsic to
2795 * specified memory range. khugepaged likely won't be able to collapse
2796 * either.
2797 */
2798 default:
2799 return -EINVAL;
2800 }
2801}
2802
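/*
 * Entry point for madvise(MADV_COLLAPSE): walk [start, end) in
 * HPAGE_PMD_SIZE steps, revalidating the VMA whenever mmap_lock was
 * dropped, and attempt a synchronous collapse of each aligned range on
 * the caller's behalf.  Returns 0 only if every range ends up backed by
 * a PMD mapping, otherwise an errno derived from the last failure.
 * A minimal userspace sketch (illustrative only; assumes the libc
 * headers define MADV_COLLAPSE):
 *
 *	void *p = aligned_alloc(2UL << 20, 2UL << 20);
 *	memset(p, 1, 2UL << 20);
 *	if (madvise(p, 2UL << 20, MADV_COLLAPSE))
 *		perror("MADV_COLLAPSE");
 */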
2803int madvise_collapse(struct vm_area_struct *vma, struct vm_area_struct **prev,
2804 unsigned long start, unsigned long end)
2805{
2806 struct collapse_control *cc;
2807 struct mm_struct *mm = vma->vm_mm;
2808 unsigned long hstart, hend, addr;
2809 int thps = 0, last_fail = SCAN_FAIL;
2810 bool mmap_locked = true;
2811
2812 BUG_ON(vma->vm_start > start);
2813 BUG_ON(vma->vm_end < end);
2814
2815 *prev = vma;
2816
Zach O'Keefe7d8faaf2022-07-06 16:59:27 -07002817 if (!hugepage_vma_check(vma, vma->vm_flags, false, false, false))
2818 return -EINVAL;
2819
2820 cc = kmalloc(sizeof(*cc), GFP_KERNEL);
2821 if (!cc)
2822 return -ENOMEM;
2823 cc->is_khugepaged = false;
Zach O'Keefe7d8faaf2022-07-06 16:59:27 -07002824
2825 mmgrab(mm);
2826 lru_add_drain_all();
2827
2828 hstart = (start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
2829 hend = end & HPAGE_PMD_MASK;
2830
2831 for (addr = hstart; addr < hend; addr += HPAGE_PMD_SIZE) {
2832 int result = SCAN_FAIL;
2833
2834 if (!mmap_locked) {
2835 cond_resched();
2836 mmap_read_lock(mm);
2837 mmap_locked = true;
Zach O'Keefe34488392022-09-22 15:40:39 -07002838 result = hugepage_vma_revalidate(mm, addr, false, &vma,
2839 cc);
Zach O'Keefe7d8faaf2022-07-06 16:59:27 -07002840 if (result != SCAN_SUCCEED) {
2841 last_fail = result;
2842 goto out_nolock;
2843 }
Yang Shi4d24de92022-09-14 09:22:20 -07002844
Zach O'Keefe52dc0312022-12-24 00:20:34 -08002845 hend = min(hend, vma->vm_end & HPAGE_PMD_MASK);
Zach O'Keefe7d8faaf2022-07-06 16:59:27 -07002846 }
2847 mmap_assert_locked(mm);
2848 memset(cc->node_load, 0, sizeof(cc->node_load));
Yang Shie031ff92022-11-08 10:43:56 -08002849 nodes_clear(cc->alloc_nmask);
Zach O'Keefe34488392022-09-22 15:40:39 -07002850 if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) {
2851 struct file *file = get_file(vma->vm_file);
2852 pgoff_t pgoff = linear_page_index(vma, addr);
2853
2854 mmap_read_unlock(mm);
2855 mmap_locked = false;
2856 result = hpage_collapse_scan_file(mm, addr, file, pgoff,
2857 cc);
2858 fput(file);
2859 } else {
2860 result = hpage_collapse_scan_pmd(mm, vma, addr,
2861 &mmap_locked, cc);
2862 }
Zach O'Keefe7d8faaf2022-07-06 16:59:27 -07002863 if (!mmap_locked)
2864 *prev = NULL; /* Tell caller we dropped mmap_lock */
2865
Zach O'Keefe34488392022-09-22 15:40:39 -07002866handle_result:
Zach O'Keefe7d8faaf2022-07-06 16:59:27 -07002867 switch (result) {
2868 case SCAN_SUCCEED:
2869 case SCAN_PMD_MAPPED:
2870 ++thps;
2871 break;
Zach O'Keefe34488392022-09-22 15:40:39 -07002872 case SCAN_PTE_MAPPED_HUGEPAGE:
2873 BUG_ON(mmap_locked);
2874 BUG_ON(*prev);
2875 mmap_write_lock(mm);
2876 result = collapse_pte_mapped_thp(mm, addr, true);
2877 mmap_write_unlock(mm);
2878 goto handle_result;
Zach O'Keefe7d8faaf2022-07-06 16:59:27 -07002879 /* Whitelisted set of results where continuing OK */
2880 case SCAN_PMD_NULL:
2881 case SCAN_PTE_NON_PRESENT:
2882 case SCAN_PTE_UFFD_WP:
2883 case SCAN_PAGE_RO:
2884 case SCAN_LACK_REFERENCED_PAGE:
2885 case SCAN_PAGE_NULL:
2886 case SCAN_PAGE_COUNT:
2887 case SCAN_PAGE_LOCK:
2888 case SCAN_PAGE_COMPOUND:
2889 case SCAN_PAGE_LRU:
Zach O'Keefe0f3e2a22022-09-22 11:46:50 -07002890 case SCAN_DEL_PAGE_LRU:
Zach O'Keefe7d8faaf2022-07-06 16:59:27 -07002891 last_fail = result;
2892 break;
2893 default:
2894 last_fail = result;
2895 /* Other error, exit */
2896 goto out_maybelock;
2897 }
2898 }
2899
2900out_maybelock:
2901 /* Caller expects us to hold mmap_lock on return */
2902 if (!mmap_locked)
2903 mmap_read_lock(mm);
2904out_nolock:
2905 mmap_assert_locked(mm);
2906 mmdrop(mm);
2907 kfree(cc);
2908
2909 return thps == ((hend - hstart) >> HPAGE_PMD_SHIFT) ? 0
2910 : madvise_collapse_errno(last_fail);
2911}