// SPDX-License-Identifier: GPL-2.0
/*
 * Memory Migration functionality - linux/mm/migrate.c
 *
 * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
 *
 * Page migration was first developed in the context of the memory hotplug
 * project. The main authors of the migration code are:
 *
 * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
 * Hirokazu Takahashi <taka@valinux.co.jp>
 * Dave Hansen <haveblue@us.ibm.com>
 * Christoph Lameter
 */

#include <linux/migrate.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/mm_inline.h>
#include <linux/nsproxy.h>
#include <linux/pagevec.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/writeback.h>
#include <linux/mempolicy.h>
#include <linux/vmalloc.h>
#include <linux/security.h>
#include <linux/backing-dev.h>
#include <linux/compaction.h>
#include <linux/syscalls.h>
#include <linux/compat.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>
#include <linux/gfp.h>
#include <linux/pagewalk.h>
#include <linux/pfn_t.h>
#include <linux/memremap.h>
#include <linux/userfaultfd_k.h>
#include <linux/balloon_compaction.h>
#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
#include <linux/page_owner.h>
#include <linux/sched/mm.h>
#include <linux/ptrace.h>
#include <linux/oom.h>
#include <linux/memory.h>
#include <linux/random.h>
#include <linux/sched/sysctl.h>

#include <asm/tlbflush.h>

#define CREATE_TRACE_POINTS
#include <trace/events/migrate.h>

#include "internal.h"

int isolate_movable_page(struct page *page, isolate_mode_t mode)
{
	struct address_space *mapping;

	/*
	 * Avoid burning cycles with pages that are still under __free_pages(),
	 * or that just got freed under us.
	 *
	 * In case we 'win' a race for a movable page being freed under us and
	 * raise its refcount, preventing __free_pages() from doing its job,
	 * the put_page() at the end of this block will take care of
	 * releasing this page, thus avoiding a nasty leak.
	 */
	if (unlikely(!get_page_unless_zero(page)))
		goto out;

	/*
	 * Check PageMovable before taking the page lock because the page's
	 * owner assumes that nobody touches the PG_lock of a newly allocated
	 * page, so unconditionally grabbing the lock would break the owner's
	 * assumptions.
	 */
	if (unlikely(!__PageMovable(page)))
		goto out_putpage;
	/*
	 * As movable pages are not isolated from LRU lists, concurrent
	 * compaction threads can race against page migration functions
	 * as well as against the release of a page.
	 *
	 * In order to avoid having an already isolated movable page
	 * being (wrongly) re-isolated while it is under migration,
	 * or to avoid attempting to isolate pages being released,
	 * let's be sure we have the page lock
	 * before proceeding with the movable page isolation steps.
	 */
	if (unlikely(!trylock_page(page)))
		goto out_putpage;

	if (!PageMovable(page) || PageIsolated(page))
		goto out_no_isolated;

	mapping = page_mapping(page);
	VM_BUG_ON_PAGE(!mapping, page);

	if (!mapping->a_ops->isolate_page(page, mode))
		goto out_no_isolated;

	/* Driver shouldn't use PG_isolated bit of page->flags */
	WARN_ON_ONCE(PageIsolated(page));
	SetPageIsolated(page);
	unlock_page(page);

	return 0;

out_no_isolated:
	unlock_page(page);
out_putpage:
	put_page(page);
out:
	return -EBUSY;
}

static void putback_movable_page(struct page *page)
{
	struct address_space *mapping;

	mapping = page_mapping(page);
	mapping->a_ops->putback_page(page);
	ClearPageIsolated(page);
}

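/*
 * Illustrative sketch, not part of this file: a driver that hands out
 * non-LRU movable pages is expected to mark them with __SetPageMovable()
 * and to provide the three address_space_operations callbacks used by the
 * helpers above and by move_to_new_page().  The callback slots below are
 * the real ones; the example_* functions are hypothetical placeholders.
 */
#if 0
static const struct address_space_operations example_movable_aops = {
	.isolate_page	= example_isolate_page,	/* called from isolate_movable_page() */
	.migratepage	= example_migratepage,	/* called from move_to_new_page() */
	.putback_page	= example_putback_page,	/* called from putback_movable_page() */
};
#endif
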
/*
 * Put previously isolated pages back onto the appropriate lists
 * from where they were once taken off for compaction/migration.
 *
 * This function shall be used whenever the isolated pageset has been
 * built from LRU, balloon or hugetlbfs pages. See isolate_migratepages_range()
 * and isolate_huge_page().
 */
void putback_movable_pages(struct list_head *l)
{
	struct page *page;
	struct page *page2;

	list_for_each_entry_safe(page, page2, l, lru) {
		if (unlikely(PageHuge(page))) {
			putback_active_hugepage(page);
			continue;
		}
		list_del(&page->lru);
		/*
		 * We isolated a non-LRU movable page, so we can use
		 * __PageMovable here because an LRU page's mapping cannot
		 * have PAGE_MAPPING_MOVABLE set.
		 */
		if (unlikely(__PageMovable(page))) {
			VM_BUG_ON_PAGE(!PageIsolated(page), page);
			lock_page(page);
			if (PageMovable(page))
				putback_movable_page(page);
			else
				ClearPageIsolated(page);
			unlock_page(page);
			put_page(page);
		} else {
			mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
					page_is_file_lru(page), -thp_nr_pages(page));
			putback_lru_page(page);
		}
	}
}

/*
 * Restore a potential migration pte to a working pte entry
 */
static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma,
		unsigned long addr, void *old)
{
	struct page_vma_mapped_walk pvmw = {
		.page = old,
		.vma = vma,
		.address = addr,
		.flags = PVMW_SYNC | PVMW_MIGRATION,
	};
	struct page *new;
	pte_t pte;
	swp_entry_t entry;

	VM_BUG_ON_PAGE(PageTail(page), page);
	while (page_vma_mapped_walk(&pvmw)) {
		if (PageKsm(page))
			new = page;
		else
			new = page - pvmw.page->index +
				linear_page_index(vma, pvmw.address);

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
		/* PMD-mapped THP migration entry */
		if (!pvmw.pte) {
			VM_BUG_ON_PAGE(PageHuge(page) || !PageTransCompound(page), page);
			remove_migration_pmd(&pvmw, new);
			continue;
		}
#endif

		get_page(new);
		pte = pte_mkold(mk_pte(new, READ_ONCE(vma->vm_page_prot)));
		if (pte_swp_soft_dirty(*pvmw.pte))
			pte = pte_mksoft_dirty(pte);

		/*
		 * Recheck VMA as permissions can change since migration started
		 */
		entry = pte_to_swp_entry(*pvmw.pte);
		if (is_writable_migration_entry(entry))
			pte = maybe_mkwrite(pte, vma);
		else if (pte_swp_uffd_wp(*pvmw.pte))
			pte = pte_mkuffd_wp(pte);

		if (unlikely(is_device_private_page(new))) {
			if (pte_write(pte))
				entry = make_writable_device_private_entry(
							page_to_pfn(new));
			else
				entry = make_readable_device_private_entry(
							page_to_pfn(new));
			pte = swp_entry_to_pte(entry);
			if (pte_swp_soft_dirty(*pvmw.pte))
				pte = pte_swp_mksoft_dirty(pte);
			if (pte_swp_uffd_wp(*pvmw.pte))
				pte = pte_swp_mkuffd_wp(pte);
		}

#ifdef CONFIG_HUGETLB_PAGE
		if (PageHuge(new)) {
			unsigned int shift = huge_page_shift(hstate_vma(vma));

			pte = pte_mkhuge(pte);
			pte = arch_make_huge_pte(pte, shift, vma->vm_flags);
			if (PageAnon(new))
				hugepage_add_anon_rmap(new, vma, pvmw.address);
			else
				page_dup_rmap(new, true);
			set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
		} else
#endif
		{
			if (PageAnon(new))
				page_add_anon_rmap(new, vma, pvmw.address, false);
			else
				page_add_file_rmap(new, false);
			set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
		}
		if (vma->vm_flags & VM_LOCKED && !PageTransCompound(new))
			mlock_vma_page(new);

		if (PageTransHuge(page) && PageMlocked(page))
			clear_page_mlock(page);

		/* No need to invalidate - it was non-present before */
		update_mmu_cache(vma, pvmw.address, pvmw.pte);
	}

	return true;
}

/*
 * Get rid of all migration entries and replace them by
 * references to the indicated page.
 */
void remove_migration_ptes(struct page *old, struct page *new, bool locked)
{
	struct rmap_walk_control rwc = {
		.rmap_one = remove_migration_pte,
		.arg = old,
	};

	if (locked)
		rmap_walk_locked(new, &rwc);
	else
		rmap_walk(new, &rwc);
}

/*
 * Something used the pte of a page under migration. We need to
 * get to the page and wait until migration is finished.
 * When we return from this function the fault will be retried.
 */
void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
				spinlock_t *ptl)
{
	pte_t pte;
	swp_entry_t entry;

	spin_lock(ptl);
	pte = *ptep;
	if (!is_swap_pte(pte))
		goto out;

	entry = pte_to_swp_entry(pte);
	if (!is_migration_entry(entry))
		goto out;

	migration_entry_wait_on_locked(entry, ptep, ptl);
	return;
out:
	pte_unmap_unlock(ptep, ptl);
}

void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
				unsigned long address)
{
	spinlock_t *ptl = pte_lockptr(mm, pmd);
	pte_t *ptep = pte_offset_map(pmd, address);
	__migration_entry_wait(mm, ptep, ptl);
}

void migration_entry_wait_huge(struct vm_area_struct *vma,
		struct mm_struct *mm, pte_t *pte)
{
	spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), mm, pte);
	__migration_entry_wait(mm, pte, ptl);
}

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd)
{
	spinlock_t *ptl;

	ptl = pmd_lock(mm, pmd);
	if (!is_pmd_migration_entry(*pmd))
		goto unlock;
	migration_entry_wait_on_locked(pmd_to_swp_entry(*pmd), NULL, ptl);
	return;
unlock:
	spin_unlock(ptl);
}
#endif

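/*
 * Illustrative sketch, not part of this file: the main consumer of
 * migration_entry_wait() is the page fault path.  A fault that hits a
 * migration entry simply waits for migration to finish and then lets the
 * fault be retried, roughly like the (simplified) do_swap_page() pattern
 * below; "vmf" is the usual struct vm_fault pointer.
 */
#if 0
	entry = pte_to_swp_entry(vmf->orig_pte);
	if (unlikely(is_migration_entry(entry))) {
		migration_entry_wait(vma->vm_mm, vmf->pmd, vmf->address);
		return 0;	/* fault is retried once the entry is gone */
	}
#endif
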
static int expected_page_refs(struct address_space *mapping, struct page *page)
{
	int expected_count = 1;

	/*
	 * Device private pages have an extra refcount as they are
	 * ZONE_DEVICE pages.
	 */
	expected_count += is_device_private_page(page);
	if (mapping)
		expected_count += compound_nr(page) + page_has_private(page);

	return expected_count;
}

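/*
 * Worked example for the count above (illustrative): a 4-page compound
 * page in the page cache with PagePrivate data is expected to hold
 * 1 (base reference held by the isolating caller) + 4 (compound_nr(),
 * one per page-cache slot) + 1 (private) = 6 references; a device
 * private page would contribute one more.
 */
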
/*
 * Replace the page in the mapping.
 *
 * The number of remaining references must be:
 * 1 for anonymous pages without a mapping
 * 2 for pages with a mapping
 * 3 for pages with a mapping and PagePrivate/PagePrivate2 set.
 */
int folio_migrate_mapping(struct address_space *mapping,
		struct folio *newfolio, struct folio *folio, int extra_count)
{
	XA_STATE(xas, &mapping->i_pages, folio_index(folio));
	struct zone *oldzone, *newzone;
	int dirty;
	int expected_count = expected_page_refs(mapping, &folio->page) + extra_count;
	long nr = folio_nr_pages(folio);

	if (!mapping) {
		/* Anonymous page without mapping */
		if (folio_ref_count(folio) != expected_count)
			return -EAGAIN;

		/* No turning back from here */
		newfolio->index = folio->index;
		newfolio->mapping = folio->mapping;
		if (folio_test_swapbacked(folio))
			__folio_set_swapbacked(newfolio);

		return MIGRATEPAGE_SUCCESS;
	}

	oldzone = folio_zone(folio);
	newzone = folio_zone(newfolio);

	xas_lock_irq(&xas);
	if (!folio_ref_freeze(folio, expected_count)) {
		xas_unlock_irq(&xas);
		return -EAGAIN;
	}

	/*
	 * Now we know that no one else is looking at the folio:
	 * no turning back from here.
	 */
	newfolio->index = folio->index;
	newfolio->mapping = folio->mapping;
	folio_ref_add(newfolio, nr); /* add cache reference */
	if (folio_test_swapbacked(folio)) {
		__folio_set_swapbacked(newfolio);
		if (folio_test_swapcache(folio)) {
			folio_set_swapcache(newfolio);
			newfolio->private = folio_get_private(folio);
		}
	} else {
		VM_BUG_ON_FOLIO(folio_test_swapcache(folio), folio);
	}

	/* Move dirty while page refs frozen and newpage not yet exposed */
	dirty = folio_test_dirty(folio);
	if (dirty) {
		folio_clear_dirty(folio);
		folio_set_dirty(newfolio);
	}

	xas_store(&xas, newfolio);

	/*
	 * Drop cache reference from old page by unfreezing
	 * to one less reference.
	 * We know this isn't the last reference.
	 */
	folio_ref_unfreeze(folio, expected_count - nr);

	xas_unlock(&xas);
	/* Leave irq disabled to prevent preemption while updating stats */

	/*
	 * If moved to a different zone then also account
	 * the page for that zone. Other VM counters will be
	 * taken care of when we establish references to the
	 * new page and drop references to the old page.
	 *
	 * Note that anonymous pages are accounted for
	 * via NR_FILE_PAGES and NR_ANON_MAPPED if they
	 * are mapped to swap space.
	 */
	if (newzone != oldzone) {
		struct lruvec *old_lruvec, *new_lruvec;
		struct mem_cgroup *memcg;

		memcg = folio_memcg(folio);
		old_lruvec = mem_cgroup_lruvec(memcg, oldzone->zone_pgdat);
		new_lruvec = mem_cgroup_lruvec(memcg, newzone->zone_pgdat);

		__mod_lruvec_state(old_lruvec, NR_FILE_PAGES, -nr);
		__mod_lruvec_state(new_lruvec, NR_FILE_PAGES, nr);
		if (folio_test_swapbacked(folio) && !folio_test_swapcache(folio)) {
			__mod_lruvec_state(old_lruvec, NR_SHMEM, -nr);
			__mod_lruvec_state(new_lruvec, NR_SHMEM, nr);
		}
#ifdef CONFIG_SWAP
		if (folio_test_swapcache(folio)) {
			__mod_lruvec_state(old_lruvec, NR_SWAPCACHE, -nr);
			__mod_lruvec_state(new_lruvec, NR_SWAPCACHE, nr);
		}
#endif
		if (dirty && mapping_can_writeback(mapping)) {
			__mod_lruvec_state(old_lruvec, NR_FILE_DIRTY, -nr);
			__mod_zone_page_state(oldzone, NR_ZONE_WRITE_PENDING, -nr);
			__mod_lruvec_state(new_lruvec, NR_FILE_DIRTY, nr);
			__mod_zone_page_state(newzone, NR_ZONE_WRITE_PENDING, nr);
		}
	}
	local_irq_enable();

	return MIGRATEPAGE_SUCCESS;
}
EXPORT_SYMBOL(folio_migrate_mapping);

/*
 * The expected number of remaining references is the same as that
 * of folio_migrate_mapping().
 */
int migrate_huge_page_move_mapping(struct address_space *mapping,
				   struct page *newpage, struct page *page)
{
	XA_STATE(xas, &mapping->i_pages, page_index(page));
	int expected_count;

	xas_lock_irq(&xas);
	expected_count = 2 + page_has_private(page);
	if (page_count(page) != expected_count || xas_load(&xas) != page) {
		xas_unlock_irq(&xas);
		return -EAGAIN;
	}

	if (!page_ref_freeze(page, expected_count)) {
		xas_unlock_irq(&xas);
		return -EAGAIN;
	}

	newpage->index = page->index;
	newpage->mapping = page->mapping;

	get_page(newpage);

	xas_store(&xas, newpage);

	page_ref_unfreeze(page, expected_count - 1);

	xas_unlock_irq(&xas);

	return MIGRATEPAGE_SUCCESS;
}

/*
 * Copy the flags and some other ancillary information
 */
void folio_migrate_flags(struct folio *newfolio, struct folio *folio)
{
	int cpupid;

	if (folio_test_error(folio))
		folio_set_error(newfolio);
	if (folio_test_referenced(folio))
		folio_set_referenced(newfolio);
	if (folio_test_uptodate(folio))
		folio_mark_uptodate(newfolio);
	if (folio_test_clear_active(folio)) {
		VM_BUG_ON_FOLIO(folio_test_unevictable(folio), folio);
		folio_set_active(newfolio);
	} else if (folio_test_clear_unevictable(folio))
		folio_set_unevictable(newfolio);
	if (folio_test_workingset(folio))
		folio_set_workingset(newfolio);
	if (folio_test_checked(folio))
		folio_set_checked(newfolio);
	if (folio_test_mappedtodisk(folio))
		folio_set_mappedtodisk(newfolio);

	/* Move dirty on pages not done by folio_migrate_mapping() */
	if (folio_test_dirty(folio))
		folio_set_dirty(newfolio);

	if (folio_test_young(folio))
		folio_set_young(newfolio);
	if (folio_test_idle(folio))
		folio_set_idle(newfolio);

	/*
	 * Copy NUMA information to the new page, to prevent over-eager
	 * future migrations of this same page.
	 */
	cpupid = page_cpupid_xchg_last(&folio->page, -1);
	page_cpupid_xchg_last(&newfolio->page, cpupid);

	folio_migrate_ksm(newfolio, folio);
	/*
	 * Please do not reorder this without considering how mm/ksm.c's
	 * get_ksm_page() depends upon ksm_migrate_page() and PageSwapCache().
	 */
	if (folio_test_swapcache(folio))
		folio_clear_swapcache(folio);
	folio_clear_private(folio);

	/* page->private contains hugetlb specific flags */
	if (!folio_test_hugetlb(folio))
		folio->private = NULL;

	/*
	 * If any waiters have accumulated on the new page then
	 * wake them up.
	 */
	if (folio_test_writeback(newfolio))
		folio_end_writeback(newfolio);

	/*
	 * PG_readahead shares the same bit with PG_reclaim. The above
	 * end_page_writeback() may clear PG_readahead mistakenly, so set the
	 * bit after that.
	 */
	if (folio_test_readahead(folio))
		folio_set_readahead(newfolio);

	folio_copy_owner(newfolio, folio);

	if (!folio_test_hugetlb(folio))
		mem_cgroup_migrate(folio, newfolio);
}
EXPORT_SYMBOL(folio_migrate_flags);

void folio_migrate_copy(struct folio *newfolio, struct folio *folio)
{
	folio_copy(newfolio, folio);
	folio_migrate_flags(newfolio, folio);
}
EXPORT_SYMBOL(folio_migrate_copy);

/************************************************************
 *                    Migration functions
 ***********************************************************/

/*
 * Common logic to directly migrate a single LRU page suitable for
 * pages that do not use PagePrivate/PagePrivate2.
 *
 * Pages are locked upon entry and exit.
 */
int migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page,
		enum migrate_mode mode)
{
	struct folio *newfolio = page_folio(newpage);
	struct folio *folio = page_folio(page);
	int rc;

	BUG_ON(folio_test_writeback(folio));	/* Writeback must be complete */

	rc = folio_migrate_mapping(mapping, newfolio, folio, 0);

	if (rc != MIGRATEPAGE_SUCCESS)
		return rc;

	if (mode != MIGRATE_SYNC_NO_COPY)
		folio_migrate_copy(newfolio, folio);
	else
		folio_migrate_flags(newfolio, folio);
	return MIGRATEPAGE_SUCCESS;
}
EXPORT_SYMBOL(migrate_page);

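/*
 * Usage note (illustrative): filesystems whose pages carry no private
 * data typically just point their address_space_operations at this
 * helper, e.g.
 *
 *	.migratepage	= migrate_page,
 *
 * and let folio_migrate_mapping()/folio_migrate_copy() do all the work.
 */
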
#ifdef CONFIG_BLOCK
/* Returns true if all buffers are successfully locked */
static bool buffer_migrate_lock_buffers(struct buffer_head *head,
							enum migrate_mode mode)
{
	struct buffer_head *bh = head;

	/* Simple case, sync compaction */
	if (mode != MIGRATE_ASYNC) {
		do {
			lock_buffer(bh);
			bh = bh->b_this_page;

		} while (bh != head);

		return true;
	}

	/* async case, we cannot block on lock_buffer so use trylock_buffer */
	do {
		if (!trylock_buffer(bh)) {
			/*
			 * We failed to lock the buffer and cannot stall in
			 * async migration. Release the taken locks
			 */
			struct buffer_head *failed_bh = bh;
			bh = head;
			while (bh != failed_bh) {
				unlock_buffer(bh);
				bh = bh->b_this_page;
			}
			return false;
		}

		bh = bh->b_this_page;
	} while (bh != head);
	return true;
}

static int __buffer_migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page, enum migrate_mode mode,
		bool check_refs)
{
	struct buffer_head *bh, *head;
	int rc;
	int expected_count;

	if (!page_has_buffers(page))
		return migrate_page(mapping, newpage, page, mode);

	/* Check whether page does not have extra refs before we do more work */
	expected_count = expected_page_refs(mapping, page);
	if (page_count(page) != expected_count)
		return -EAGAIN;

	head = page_buffers(page);
	if (!buffer_migrate_lock_buffers(head, mode))
		return -EAGAIN;

	if (check_refs) {
		bool busy;
		bool invalidated = false;

recheck_buffers:
		busy = false;
		spin_lock(&mapping->private_lock);
		bh = head;
		do {
			if (atomic_read(&bh->b_count)) {
				busy = true;
				break;
			}
			bh = bh->b_this_page;
		} while (bh != head);
		if (busy) {
			if (invalidated) {
				rc = -EAGAIN;
				goto unlock_buffers;
			}
			spin_unlock(&mapping->private_lock);
			invalidate_bh_lrus();
			invalidated = true;
			goto recheck_buffers;
		}
	}

	rc = migrate_page_move_mapping(mapping, newpage, page, 0);
	if (rc != MIGRATEPAGE_SUCCESS)
		goto unlock_buffers;

	attach_page_private(newpage, detach_page_private(page));

	bh = head;
	do {
		set_bh_page(bh, newpage, bh_offset(bh));
		bh = bh->b_this_page;

	} while (bh != head);

	if (mode != MIGRATE_SYNC_NO_COPY)
		migrate_page_copy(newpage, page);
	else
		migrate_page_states(newpage, page);

	rc = MIGRATEPAGE_SUCCESS;
unlock_buffers:
	if (check_refs)
		spin_unlock(&mapping->private_lock);
	bh = head;
	do {
		unlock_buffer(bh);
		bh = bh->b_this_page;

	} while (bh != head);

	return rc;
}

/*
 * Migration function for pages with buffers. This function can only be used
 * if the underlying filesystem guarantees that no other references to "page"
 * exist. For example attached buffer heads are accessed only under page lock.
 */
int buffer_migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page, enum migrate_mode mode)
{
	return __buffer_migrate_page(mapping, newpage, page, mode, false);
}
EXPORT_SYMBOL(buffer_migrate_page);

/*
 * Same as above except that this variant is more careful and checks that there
 * are also no buffer head references. This function is the right one for
 * mappings where buffer heads are directly looked up and referenced (such as
 * block device mappings).
 */
int buffer_migrate_page_norefs(struct address_space *mapping,
		struct page *newpage, struct page *page, enum migrate_mode mode)
{
	return __buffer_migrate_page(mapping, newpage, page, mode, true);
}
#endif

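/*
 * Usage note (illustrative): filesystems that keep buffer heads attached
 * to their pages typically set
 *
 *	.migratepage	= buffer_migrate_page,
 *
 * while block device mappings, where buffer heads are looked up and
 * referenced directly, use the stricter buffer_migrate_page_norefs()
 * variant instead.
 */
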
/*
 * Writeback a page to clean the dirty state
 */
static int writeout(struct address_space *mapping, struct page *page)
{
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
		.nr_to_write = 1,
		.range_start = 0,
		.range_end = LLONG_MAX,
		.for_reclaim = 1
	};
	int rc;

	if (!mapping->a_ops->writepage)
		/* No write method for the address space */
		return -EINVAL;

	if (!clear_page_dirty_for_io(page))
		/* Someone else already triggered a write */
		return -EAGAIN;

	/*
	 * A dirty page may imply that the underlying filesystem has
	 * the page on some queue. So the page must be clean for
	 * migration. Writeout may mean we lose the lock and the
	 * page state is no longer what we checked for earlier.
	 * At this point we know that the migration attempt cannot
	 * be successful.
	 */
	remove_migration_ptes(page, page, false);

	rc = mapping->a_ops->writepage(page, &wbc);

	if (rc != AOP_WRITEPAGE_ACTIVATE)
		/* unlocked. Relock */
		lock_page(page);

	return (rc < 0) ? -EIO : -EAGAIN;
}

/*
 * Default handling if a filesystem does not provide a migration function.
 */
static int fallback_migrate_page(struct address_space *mapping,
	struct page *newpage, struct page *page, enum migrate_mode mode)
{
	if (PageDirty(page)) {
		/* Only writeback pages in full synchronous migration */
		switch (mode) {
		case MIGRATE_SYNC:
		case MIGRATE_SYNC_NO_COPY:
			break;
		default:
			return -EBUSY;
		}
		return writeout(mapping, page);
	}

	/*
	 * Buffers may be managed in a filesystem specific way.
	 * We must have no buffers or drop them.
	 */
	if (page_has_private(page) &&
	    !try_to_release_page(page, GFP_KERNEL))
		return mode == MIGRATE_SYNC ? -EAGAIN : -EBUSY;

	return migrate_page(mapping, newpage, page, mode);
}

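/*
 * Summary of the fallback policy above (descriptive, derived from the
 * code): dirty pages are only written out under MIGRATE_SYNC or
 * MIGRATE_SYNC_NO_COPY and fail with -EBUSY otherwise; clean pages with
 * buffers that cannot be dropped fail with -EAGAIN in sync mode (worth
 * retrying) or -EBUSY; everything else is handled by migrate_page().
 */
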
/*
 * Move a page to a newly allocated page
 * The page is locked and all ptes have been successfully removed.
 *
 * The new page will have replaced the old page if this function
 * is successful.
 *
 * Return value:
 *   < 0 - error code
 *  MIGRATEPAGE_SUCCESS - success
 */
static int move_to_new_page(struct page *newpage, struct page *page,
				enum migrate_mode mode)
{
	struct address_space *mapping;
	int rc = -EAGAIN;
	bool is_lru = !__PageMovable(page);

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);

	mapping = page_mapping(page);

	if (likely(is_lru)) {
		if (!mapping)
			rc = migrate_page(mapping, newpage, page, mode);
		else if (mapping->a_ops->migratepage)
			/*
			 * Most pages have a mapping and most filesystems
			 * provide a migratepage callback. Anonymous pages
			 * are part of swap space which also has its own
			 * migratepage callback. This is the most common path
			 * for page migration.
			 */
			rc = mapping->a_ops->migratepage(mapping, newpage,
							page, mode);
		else
			rc = fallback_migrate_page(mapping, newpage,
							page, mode);
	} else {
		/*
		 * A non-LRU page could have been released after the
		 * isolation step. In that case, we shouldn't try migration.
		 */
		VM_BUG_ON_PAGE(!PageIsolated(page), page);
		if (!PageMovable(page)) {
			rc = MIGRATEPAGE_SUCCESS;
			ClearPageIsolated(page);
			goto out;
		}

		rc = mapping->a_ops->migratepage(mapping, newpage,
						page, mode);
		WARN_ON_ONCE(rc == MIGRATEPAGE_SUCCESS &&
			!PageIsolated(page));
	}

	/*
	 * When successful, old pagecache page->mapping must be cleared before
	 * page is freed; but stats require that PageAnon be left as PageAnon.
	 */
	if (rc == MIGRATEPAGE_SUCCESS) {
		if (__PageMovable(page)) {
			VM_BUG_ON_PAGE(!PageIsolated(page), page);

			/*
			 * We clear PG_movable under page_lock so any compactor
			 * cannot try to migrate this page.
			 */
			ClearPageIsolated(page);
		}

		/*
		 * Anonymous and movable page->mapping will be cleared by
		 * free_pages_prepare(), so don't reset it here; leaving it
		 * set keeps type checks such as PageAnon() working.
		 */
		if (!PageMappingFlags(page))
			page->mapping = NULL;

		if (likely(!is_zone_device_page(newpage)))
			flush_dcache_folio(page_folio(newpage));
	}
out:
	return rc;
}

static int __unmap_and_move(struct page *page, struct page *newpage,
				int force, enum migrate_mode mode)
{
	int rc = -EAGAIN;
	bool page_was_mapped = false;
	struct anon_vma *anon_vma = NULL;
	bool is_lru = !__PageMovable(page);

	if (!trylock_page(page)) {
		if (!force || mode == MIGRATE_ASYNC)
			goto out;

		/*
		 * It's not safe for direct compaction to call lock_page.
		 * For example, during page readahead pages are added locked
		 * to the LRU. Later, when the IO completes the pages are
		 * marked uptodate and unlocked. However, the queueing
		 * could be merging multiple pages for one bio (e.g.
		 * mpage_readahead). If an allocation happens for the
		 * second or third page, the process can end up locking
		 * the same page twice and deadlocking. Rather than
		 * trying to be clever about what pages can be locked,
		 * avoid the use of lock_page for direct compaction
		 * altogether.
		 */
		if (current->flags & PF_MEMALLOC)
			goto out;

		lock_page(page);
	}

	if (PageWriteback(page)) {
		/*
		 * Only in the case of a full synchronous migration is it
		 * necessary to wait for PageWriteback. In the async case,
		 * the retry loop is too short and in the sync-light case,
		 * the overhead of stalling is too much
		 */
		switch (mode) {
		case MIGRATE_SYNC:
		case MIGRATE_SYNC_NO_COPY:
			break;
		default:
			rc = -EBUSY;
			goto out_unlock;
		}
		if (!force)
			goto out_unlock;
		wait_on_page_writeback(page);
	}

	/*
	 * Once try_to_migrate() has brought page->mapcount down to 0, we
	 * would have no way of noticing if anon_vma were freed while we
	 * migrate the page. This get_anon_vma() delays freeing the anon_vma
	 * pointer until the end of migration. File cache pages are no
	 * problem because of page_lock(): file caches may use write_page()
	 * or lock_page() during migration, so only anon pages need this
	 * care.
	 *
	 * Only page_get_anon_vma() understands the subtleties of
	 * getting a hold on an anon_vma from outside one of its mms.
	 * But if we cannot get anon_vma, then we won't need it anyway,
	 * because that implies that the anon page is no longer mapped
	 * (and cannot be remapped so long as we hold the page lock).
	 */
	if (PageAnon(page) && !PageKsm(page))
		anon_vma = page_get_anon_vma(page);

	/*
	 * Block others from accessing the new page when we get around to
	 * establishing additional references. We are usually the only one
	 * holding a reference to newpage at this point. We used to have a BUG
	 * here if trylock_page(newpage) fails, but would like to allow for
	 * cases where there might be a race with the previous use of newpage.
	 * This is much like races on refcount of oldpage: just don't BUG().
	 */
	if (unlikely(!trylock_page(newpage)))
		goto out_unlock;

	if (unlikely(!is_lru)) {
		rc = move_to_new_page(newpage, page, mode);
		goto out_unlock_both;
	}

	/*
	 * Corner case handling:
	 * 1. When a new swap-cache page is read in, it is added to the LRU
	 * and treated as swapcache but it has no rmap yet.
	 * Calling try_to_unmap() against a page->mapping==NULL page will
	 * trigger a BUG. So handle it here.
	 * 2. An orphaned page (see truncate_cleanup_page) might have
	 * fs-private metadata. The page can be picked up due to memory
	 * offlining. Everywhere else except page reclaim, the page is
	 * invisible to the vm, so the page can not be migrated. So try to
	 * free the metadata, so the page can be freed.
	 */
	if (!page->mapping) {
		VM_BUG_ON_PAGE(PageAnon(page), page);
		if (page_has_private(page)) {
			try_to_free_buffers(page);
			goto out_unlock_both;
		}
	} else if (page_mapped(page)) {
		/* Establish migration ptes */
		VM_BUG_ON_PAGE(PageAnon(page) && !PageKsm(page) && !anon_vma,
				page);
		try_to_migrate(page, 0);
		page_was_mapped = true;
	}

	if (!page_mapped(page))
		rc = move_to_new_page(newpage, page, mode);

	if (page_was_mapped)
		remove_migration_ptes(page,
			rc == MIGRATEPAGE_SUCCESS ? newpage : page, false);

out_unlock_both:
	unlock_page(newpage);
out_unlock:
	/* Drop an anon_vma reference if we took one */
	if (anon_vma)
		put_anon_vma(anon_vma);
	unlock_page(page);
out:
	/*
	 * If migration is successful, decrease refcount of the newpage,
	 * which will not free the page because the new page owner increased
	 * the refcounter. As well, if it is an LRU page, add the page to the
	 * LRU list here. Use the old state of the isolated source page to
	 * determine if we migrated an LRU page. newpage was already unlocked
	 * and possibly modified by its owner - don't rely on the page
	 * state.
	 */
	if (rc == MIGRATEPAGE_SUCCESS) {
		if (unlikely(!is_lru))
			put_page(newpage);
		else
			putback_lru_page(newpage);
	}

	return rc;
}

Minchan Kim0dabec92011-10-31 17:06:57 -07001071/*
1072 * Obtain the lock on page, remove all ptes and migrate the page
1073 * to the newly allocated page in newpage.
1074 */
Linus Torvalds6ec44762020-07-08 10:48:35 -07001075static int unmap_and_move(new_page_t get_new_page,
Geert Uytterhoevenef2a5152015-04-14 15:44:22 -07001076 free_page_t put_new_page,
1077 unsigned long private, struct page *page,
Naoya Horiguchiadd05ce2015-06-24 16:56:50 -07001078 int force, enum migrate_mode mode,
Yang Shidd4ae782020-12-14 19:13:06 -08001079 enum migrate_reason reason,
1080 struct list_head *ret)
Minchan Kim0dabec92011-10-31 17:06:57 -07001081{
Hugh Dickins2def7422015-11-05 18:49:46 -08001082 int rc = MIGRATEPAGE_SUCCESS;
Yang Shi74d4a572019-11-30 17:57:12 -08001083 struct page *newpage = NULL;
Minchan Kim0dabec92011-10-31 17:06:57 -07001084
Michal Hocko94723aa2018-04-10 16:30:07 -07001085 if (!thp_migration_supported() && PageTransHuge(page))
Yang Shid532e2e2020-12-14 19:13:16 -08001086 return -ENOSYS;
Michal Hocko94723aa2018-04-10 16:30:07 -07001087
Minchan Kim0dabec92011-10-31 17:06:57 -07001088 if (page_count(page) == 1) {
1089 /* page was freed from under us. So we are done. */
Minchan Kimc6c919e2016-07-26 15:23:02 -07001090 ClearPageActive(page);
1091 ClearPageUnevictable(page);
Minchan Kimbda807d2016-07-26 15:23:05 -07001092 if (unlikely(__PageMovable(page))) {
1093 lock_page(page);
1094 if (!PageMovable(page))
andrew.yang356ea382022-03-22 14:46:08 -07001095 ClearPageIsolated(page);
Minchan Kimbda807d2016-07-26 15:23:05 -07001096 unlock_page(page);
1097 }
Minchan Kim0dabec92011-10-31 17:06:57 -07001098 goto out;
1099 }
1100
Yang Shi74d4a572019-11-30 17:57:12 -08001101 newpage = get_new_page(page, private);
1102 if (!newpage)
1103 return -ENOMEM;
1104
Hugh Dickins9c620e22013-02-22 16:35:14 -08001105 rc = __unmap_and_move(page, newpage, force, mode);
Minchan Kimc6c919e2016-07-26 15:23:02 -07001106 if (rc == MIGRATEPAGE_SUCCESS)
Vlastimil Babka7cd12b42016-03-15 14:56:18 -07001107 set_page_owner_migrate_reason(newpage, reason);
Rafael Aquinibf6bddf12012-12-11 16:02:42 -08001108
Minchan Kim0dabec92011-10-31 17:06:57 -07001109out:
Christoph Lametere24f0b82006-06-23 02:03:51 -07001110 if (rc != -EAGAIN) {
Minchan Kim0dabec92011-10-31 17:06:57 -07001111 /*
1112 * A page that has been migrated has all references
1113 * removed and will be freed. A page that has not been
Ralph Campbellc23a0c92020-01-30 22:14:41 -08001114 * migrated will have kept its references and be restored.
Minchan Kim0dabec92011-10-31 17:06:57 -07001115 */
1116 list_del(&page->lru);
Christoph Lametere24f0b82006-06-23 02:03:51 -07001117 }
David Rientjes68711a72014-06-04 16:08:25 -07001118
Christoph Lameter95a402c2006-06-23 02:03:53 -07001119 /*
Minchan Kimc6c919e2016-07-26 15:23:02 -07001120	 * If migration is successful, release the reference grabbed during
1121	 * isolation. Otherwise, restore the page to the right list unless
1122	 * we want to retry.
Christoph Lameter95a402c2006-06-23 02:03:53 -07001123 */
Minchan Kimc6c919e2016-07-26 15:23:02 -07001124 if (rc == MIGRATEPAGE_SUCCESS) {
Yang Shidd4ae782020-12-14 19:13:06 -08001125 /*
1126		 * Compaction can also migrate non-LRU pages, which are
1127		 * not accounted to NR_ISOLATED_*. They can be recognized
1128		 * as __PageMovable.
1129 */
1130 if (likely(!__PageMovable(page)))
1131 mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
1132 page_is_file_lru(page), -thp_nr_pages(page));
1133
Oscar Salvador79f5f8f2020-10-15 20:07:09 -07001134 if (reason != MR_MEMORY_FAILURE)
Minchan Kimc6c919e2016-07-26 15:23:02 -07001135 /*
Oscar Salvador79f5f8f2020-10-15 20:07:09 -07001136 * We release the page in page_handle_poison.
Minchan Kimc6c919e2016-07-26 15:23:02 -07001137 */
Oscar Salvador79f5f8f2020-10-15 20:07:09 -07001138 put_page(page);
Minchan Kimc6c919e2016-07-26 15:23:02 -07001139 } else {
Yang Shidd4ae782020-12-14 19:13:06 -08001140 if (rc != -EAGAIN)
1141 list_add_tail(&page->lru, ret);
Minchan Kimbda807d2016-07-26 15:23:05 -07001142
Minchan Kimc6c919e2016-07-26 15:23:02 -07001143 if (put_new_page)
1144 put_new_page(newpage, private);
1145 else
1146 put_page(newpage);
1147 }
David Rientjes68711a72014-06-04 16:08:25 -07001148
Christoph Lametere24f0b82006-06-23 02:03:51 -07001149 return rc;
1150}
1151
1152/*
Naoya Horiguchi290408d2010-09-08 10:19:35 +09001153 * Counterpart of unmap_and_move() for hugepage migration.
1154 *
1155 * This function doesn't wait for the completion of hugepage I/O
1156 * because there is no race between I/O and migration for hugepages.
1157 * Note that currently hugepage I/O occurs only in direct I/O
1158 * where no lock is held and PG_writeback is irrelevant,
1159 * and the writeback status of all subpages is counted in the reference
1160 * count of the head page (i.e. if all subpages of a 2MB hugepage are
1161 * under direct I/O, the reference count of the head page is 512 and a bit more.)
1162 * This means that when we try to migrate a hugepage whose subpages are
1163 * doing direct I/O, some references remain after try_to_unmap() and
1164 * hugepage migration fails without data corruption.
1165 *
1166 * There is also no race when direct I/O is issued on a page under migration,
1167 * because then the pte is replaced with a migration swap entry and the direct
1168 * I/O code will wait in the page fault for migration to complete.
1169 */
1170static int unmap_and_move_huge_page(new_page_t get_new_page,
David Rientjes68711a72014-06-04 16:08:25 -07001171 free_page_t put_new_page, unsigned long private,
1172 struct page *hpage, int force,
Yang Shidd4ae782020-12-14 19:13:06 -08001173 enum migrate_mode mode, int reason,
1174 struct list_head *ret)
Naoya Horiguchi290408d2010-09-08 10:19:35 +09001175{
Hugh Dickins2def7422015-11-05 18:49:46 -08001176 int rc = -EAGAIN;
Hugh Dickins2ebba6b2014-12-12 16:56:19 -08001177 int page_was_mapped = 0;
Joonsoo Kim32665f22014-01-21 15:51:15 -08001178 struct page *new_hpage;
Naoya Horiguchi290408d2010-09-08 10:19:35 +09001179 struct anon_vma *anon_vma = NULL;
Mike Kravetzc0d03812020-04-01 21:11:05 -07001180 struct address_space *mapping = NULL;
Naoya Horiguchi290408d2010-09-08 10:19:35 +09001181
Naoya Horiguchi83467ef2013-09-11 14:22:11 -07001182 /*
Anshuman Khandual7ed2c312019-03-05 15:43:44 -08001183	 * Migratability of hugepages depends on the architecture and the hugepage size.
Naoya Horiguchi83467ef2013-09-11 14:22:11 -07001184	 * This check is necessary because some callers of hugepage migration,
1185	 * like soft offline and memory hotremove, don't walk through page
1186	 * tables or check whether the hugepage is pmd-based or not before
1187	 * kicking off migration.
1188 */
Naoya Horiguchi100873d2014-06-04 16:10:56 -07001189 if (!hugepage_migration_supported(page_hstate(hpage))) {
Yang Shidd4ae782020-12-14 19:13:06 -08001190 list_move_tail(&hpage->lru, ret);
Naoya Horiguchi83467ef2013-09-11 14:22:11 -07001191 return -ENOSYS;
Joonsoo Kim32665f22014-01-21 15:51:15 -08001192 }
Naoya Horiguchi83467ef2013-09-11 14:22:11 -07001193
Muchun Song71a64f62021-02-04 18:32:17 -08001194 if (page_count(hpage) == 1) {
1195 /* page was freed from under us. So we are done. */
1196 putback_active_hugepage(hpage);
1197 return MIGRATEPAGE_SUCCESS;
1198 }
1199
Michal Hocko666feb22018-04-10 16:30:03 -07001200 new_hpage = get_new_page(hpage, private);
Naoya Horiguchi290408d2010-09-08 10:19:35 +09001201 if (!new_hpage)
1202 return -ENOMEM;
1203
Naoya Horiguchi290408d2010-09-08 10:19:35 +09001204 if (!trylock_page(hpage)) {
Jérôme Glisse2916ecc2017-09-08 16:12:06 -07001205 if (!force)
Naoya Horiguchi290408d2010-09-08 10:19:35 +09001206 goto out;
Jérôme Glisse2916ecc2017-09-08 16:12:06 -07001207 switch (mode) {
1208 case MIGRATE_SYNC:
1209 case MIGRATE_SYNC_NO_COPY:
1210 break;
1211 default:
1212 goto out;
1213 }
Naoya Horiguchi290408d2010-09-08 10:19:35 +09001214 lock_page(hpage);
1215 }
1216
Mike Kravetzcb6acd02019-02-28 16:22:02 -08001217 /*
1218 * Check for pages which are in the process of being freed. Without
1219 * page_mapping() set, hugetlbfs specific move page routine will not
1220 * be called and we could leak usage counts for subpools.
1221 */
Muchun Song6acfb5b2021-06-30 18:51:29 -07001222 if (hugetlb_page_subpool(hpage) && !page_mapping(hpage)) {
Mike Kravetzcb6acd02019-02-28 16:22:02 -08001223 rc = -EBUSY;
1224 goto out_unlock;
1225 }
1226
Peter Zijlstra746b18d2011-05-24 17:12:10 -07001227 if (PageAnon(hpage))
1228 anon_vma = page_get_anon_vma(hpage);
Naoya Horiguchi290408d2010-09-08 10:19:35 +09001229
Hugh Dickins7db76712015-11-05 18:49:49 -08001230 if (unlikely(!trylock_page(new_hpage)))
1231 goto put_anon;
1232
Hugh Dickins2ebba6b2014-12-12 16:56:19 -08001233 if (page_mapped(hpage)) {
Mike Kravetz336bf302020-11-13 22:52:16 -08001234 bool mapping_locked = false;
Alistair Popplea98a2f02021-06-30 18:54:16 -07001235 enum ttu_flags ttu = 0;
Mike Kravetzc0d03812020-04-01 21:11:05 -07001236
Mike Kravetz336bf302020-11-13 22:52:16 -08001237 if (!PageAnon(hpage)) {
1238 /*
1239			 * In shared mappings, try_to_unmap could potentially
1240			 * call huge_pmd_unshare. Because of this, take the
1241			 * semaphore in write mode here and set TTU_RMAP_LOCKED
1242			 * to let lower levels know we have taken the lock.
1243 */
1244 mapping = hugetlb_page_mapping_lock_write(hpage);
1245 if (unlikely(!mapping))
1246 goto unlock_put_anon;
1247
1248 mapping_locked = true;
1249 ttu |= TTU_RMAP_LOCKED;
1250 }
1251
Alistair Popplea98a2f02021-06-30 18:54:16 -07001252 try_to_migrate(hpage, ttu);
Hugh Dickins2ebba6b2014-12-12 16:56:19 -08001253 page_was_mapped = 1;
Mike Kravetz336bf302020-11-13 22:52:16 -08001254
1255 if (mapping_locked)
1256 i_mmap_unlock_write(mapping);
Hugh Dickins2ebba6b2014-12-12 16:56:19 -08001257 }
Naoya Horiguchi290408d2010-09-08 10:19:35 +09001258
1259 if (!page_mapped(hpage))
Hugh Dickins5c3f9a62015-11-05 18:49:53 -08001260 rc = move_to_new_page(new_hpage, hpage, mode);
Naoya Horiguchi290408d2010-09-08 10:19:35 +09001261
Mike Kravetz336bf302020-11-13 22:52:16 -08001262 if (page_was_mapped)
Hugh Dickins5c3f9a62015-11-05 18:49:53 -08001263 remove_migration_ptes(hpage,
Mike Kravetz336bf302020-11-13 22:52:16 -08001264 rc == MIGRATEPAGE_SUCCESS ? new_hpage : hpage, false);
Naoya Horiguchi290408d2010-09-08 10:19:35 +09001265
Mike Kravetzc0d03812020-04-01 21:11:05 -07001266unlock_put_anon:
Hugh Dickins7db76712015-11-05 18:49:49 -08001267 unlock_page(new_hpage);
1268
1269put_anon:
Hugh Dickinsfd4a4662011-01-13 15:47:31 -08001270 if (anon_vma)
Peter Zijlstra9e601092011-03-22 16:32:46 -07001271 put_anon_vma(anon_vma);
Aneesh Kumar K.V8e6ac7f2012-07-31 16:42:27 -07001272
Hugh Dickins2def7422015-11-05 18:49:46 -08001273 if (rc == MIGRATEPAGE_SUCCESS) {
Michal Hockoab5ac902018-01-31 16:20:48 -08001274 move_hugetlb_state(hpage, new_hpage, reason);
Hugh Dickins2def7422015-11-05 18:49:46 -08001275 put_new_page = NULL;
1276 }
Aneesh Kumar K.V8e6ac7f2012-07-31 16:42:27 -07001277
Mike Kravetzcb6acd02019-02-28 16:22:02 -08001278out_unlock:
Naoya Horiguchi290408d2010-09-08 10:19:35 +09001279 unlock_page(hpage);
Hillf Danton09761332011-12-08 14:34:20 -08001280out:
Yang Shidd4ae782020-12-14 19:13:06 -08001281 if (rc == MIGRATEPAGE_SUCCESS)
Naoya Horiguchib8ec1ce2013-09-11 14:22:01 -07001282 putback_active_hugepage(hpage);
Miaohe Lina04840c2021-05-04 18:37:07 -07001283 else if (rc != -EAGAIN)
Yang Shidd4ae782020-12-14 19:13:06 -08001284 list_move_tail(&hpage->lru, ret);
David Rientjes68711a72014-06-04 16:08:25 -07001285
1286 /*
1287 * If migration was not successful and there's a freeing callback, use
1288 * it. Otherwise, put_page() will drop the reference grabbed during
1289 * isolation.
1290 */
Hugh Dickins2def7422015-11-05 18:49:46 -08001291 if (put_new_page)
David Rientjes68711a72014-06-04 16:08:25 -07001292 put_new_page(new_hpage, private);
1293 else
Naoya Horiguchi3aaa76e2015-09-22 14:59:14 -07001294 putback_active_hugepage(new_hpage);
David Rientjes68711a72014-06-04 16:08:25 -07001295
Naoya Horiguchi290408d2010-09-08 10:19:35 +09001296 return rc;
1297}
1298
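/*
 * Try to split a THP and queue its tail pages on @from so the base pages
 * can be retried individually.  On success, reset the list cursor so the
 * outer iteration continues safely after the split head page.
 */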
Yang Shid532e2e2020-12-14 19:13:16 -08001299static inline int try_split_thp(struct page *page, struct page **page2,
1300 struct list_head *from)
1301{
1302 int rc = 0;
1303
1304 lock_page(page);
1305 rc = split_huge_page_to_list(page, from);
1306 unlock_page(page);
1307 if (!rc)
1308 list_safe_reset_next(page, *page2, lru);
1309
1310 return rc;
1311}
1312
Naoya Horiguchi290408d2010-09-08 10:19:35 +09001313/*
Srivatsa S. Bhatc73e5c92013-04-29 15:08:16 -07001314 * migrate_pages - migrate the pages specified in a list, to the free pages
1315 * supplied as the target for the page migration
Christoph Lameterb20a3502006-03-22 00:09:12 -08001316 *
Srivatsa S. Bhatc73e5c92013-04-29 15:08:16 -07001317 * @from: The list of pages to be migrated.
1318 * @get_new_page: The function used to allocate free pages to be used
1319 * as the target of the page migration.
David Rientjes68711a72014-06-04 16:08:25 -07001320 * @put_new_page: The function used to free target pages if migration
1321 * fails, or NULL if no special handling is necessary.
Srivatsa S. Bhatc73e5c92013-04-29 15:08:16 -07001322 * @private: Private data to be passed on to get_new_page()
1323 * @mode: The migration mode that specifies the constraints for
1324 * page migration, if any.
1325 * @reason: The reason for page migration.
Baolin Wangb5bade92022-01-14 14:08:34 -08001326 * @ret_succeeded: Set to the number of normal pages migrated successfully if
Yang Shi5ac95882021-09-02 14:59:13 -07001327 * the caller passes a non-NULL pointer.
Christoph Lameterb20a3502006-03-22 00:09:12 -08001328 *
Srivatsa S. Bhatc73e5c92013-04-29 15:08:16 -07001329 * The function returns after 10 attempts or if no pages are movable any more
1330 * because the list has become empty or no retryable pages exist.
Yang Shidd4ae782020-12-14 19:13:06 -08001331 * It is the caller's responsibility to call putback_movable_pages() to return pages
1332 * to the LRU or free list only if ret != 0.
Christoph Lameterb20a3502006-03-22 00:09:12 -08001333 *
Baolin Wang5d39a7e2022-01-14 14:08:37 -08001334 * Returns the number of {normal, THP, hugetlb} pages that were not migrated, or
1335 * an error code. The number of THP splits will be considered as the number of
1336 * non-migrated THPs, no matter how many subpages of the THP are migrated successfully.
Christoph Lameterb20a3502006-03-22 00:09:12 -08001337 */
Hugh Dickins9c620e22013-02-22 16:35:14 -08001338int migrate_pages(struct list_head *from, new_page_t get_new_page,
David Rientjes68711a72014-06-04 16:08:25 -07001339 free_page_t put_new_page, unsigned long private,
Yang Shi5ac95882021-09-02 14:59:13 -07001340 enum migrate_mode mode, int reason, unsigned int *ret_succeeded)
Christoph Lameterb20a3502006-03-22 00:09:12 -08001341{
Christoph Lametere24f0b82006-06-23 02:03:51 -07001342 int retry = 1;
Anshuman Khandual1a5bae22020-08-11 18:31:51 -07001343 int thp_retry = 1;
Christoph Lameterb20a3502006-03-22 00:09:12 -08001344 int nr_failed = 0;
Baolin Wangb5bade92022-01-14 14:08:34 -08001345 int nr_failed_pages = 0;
Mel Gorman5647bc22012-10-19 10:46:20 +01001346 int nr_succeeded = 0;
Anshuman Khandual1a5bae22020-08-11 18:31:51 -07001347 int nr_thp_succeeded = 0;
1348 int nr_thp_failed = 0;
1349 int nr_thp_split = 0;
Christoph Lameterb20a3502006-03-22 00:09:12 -08001350 int pass = 0;
Anshuman Khandual1a5bae22020-08-11 18:31:51 -07001351 bool is_thp = false;
Christoph Lameterb20a3502006-03-22 00:09:12 -08001352 struct page *page;
1353 struct page *page2;
Anshuman Khandual1a5bae22020-08-11 18:31:51 -07001354 int rc, nr_subpages;
Yang Shidd4ae782020-12-14 19:13:06 -08001355 LIST_HEAD(ret_pages);
Baolin Wangb5bade92022-01-14 14:08:34 -08001356 LIST_HEAD(thp_split_pages);
Yang Shib0b515b2021-06-30 18:51:48 -07001357 bool nosplit = (reason == MR_NUMA_MISPLACED);
Baolin Wangb5bade92022-01-14 14:08:34 -08001358 bool no_subpage_counting = false;
Christoph Lameterb20a3502006-03-22 00:09:12 -08001359
Liam Mark7bc1aec2021-05-04 18:37:25 -07001360 trace_mm_migrate_pages_start(mode, reason);
1361
Baolin Wangb5bade92022-01-14 14:08:34 -08001362thp_subpage_migration:
Anshuman Khandual1a5bae22020-08-11 18:31:51 -07001363 for (pass = 0; pass < 10 && (retry || thp_retry); pass++) {
Christoph Lametere24f0b82006-06-23 02:03:51 -07001364 retry = 0;
Anshuman Khandual1a5bae22020-08-11 18:31:51 -07001365 thp_retry = 0;
Christoph Lameterb20a3502006-03-22 00:09:12 -08001366
Christoph Lametere24f0b82006-06-23 02:03:51 -07001367 list_for_each_entry_safe(page, page2, from, lru) {
Michal Hocko94723aa2018-04-10 16:30:07 -07001368retry:
Anshuman Khandual1a5bae22020-08-11 18:31:51 -07001369 /*
1370			 * THP statistics are based on the source huge page.
1371 * Capture required information that might get lost
1372 * during migration.
1373 */
Zi Yan6c5c7b92020-09-25 21:19:14 -07001374 is_thp = PageTransHuge(page) && !PageHuge(page);
Baolin Wang5d39a7e2022-01-14 14:08:37 -08001375 nr_subpages = compound_nr(page);
Christoph Lametere24f0b82006-06-23 02:03:51 -07001376 cond_resched();
Christoph Lameterb20a3502006-03-22 00:09:12 -08001377
Naoya Horiguchi31caf662013-09-11 14:21:59 -07001378 if (PageHuge(page))
1379 rc = unmap_and_move_huge_page(get_new_page,
David Rientjes68711a72014-06-04 16:08:25 -07001380 put_new_page, private, page,
Yang Shidd4ae782020-12-14 19:13:06 -08001381 pass > 2, mode, reason,
1382 &ret_pages);
Naoya Horiguchi31caf662013-09-11 14:21:59 -07001383 else
David Rientjes68711a72014-06-04 16:08:25 -07001384 rc = unmap_and_move(get_new_page, put_new_page,
Naoya Horiguchiadd05ce2015-06-24 16:56:50 -07001385 private, page, pass > 2, mode,
Yang Shidd4ae782020-12-14 19:13:06 -08001386 reason, &ret_pages);
1387 /*
1388 * The rules are:
1389			 *	Success: non-hugetlb page will be freed, hugetlb
1390 * page will be put back
1391 * -EAGAIN: stay on the from list
1392 * -ENOMEM: stay on the from list
1393 * Other errno: put on ret_pages list then splice to
1394 * from list
1395 */
Christoph Lametere24f0b82006-06-23 02:03:51 -07001396 switch(rc) {
Yang Shid532e2e2020-12-14 19:13:16 -08001397 /*
1398 * THP migration might be unsupported or the
1399			 * allocation could've failed, so we should
1400			 * retry the same page with the THP split
1401			 * into base pages.
1402 *
1403 * Head page is retried immediately and tail
1404 * pages are added to the tail of the list so
1405 * we encounter them after the rest of the list
1406 * is processed.
1407 */
1408 case -ENOSYS:
1409 /* THP migration is unsupported */
1410 if (is_thp) {
Baolin Wangb5bade92022-01-14 14:08:34 -08001411 nr_thp_failed++;
1412 if (!try_split_thp(page, &page2, &thp_split_pages)) {
Yang Shid532e2e2020-12-14 19:13:16 -08001413 nr_thp_split++;
1414 goto retry;
1415 }
1416
Baolin Wangb5bade92022-01-14 14:08:34 -08001417 nr_failed_pages += nr_subpages;
Yang Shid532e2e2020-12-14 19:13:16 -08001418 break;
1419 }
1420
1421 /* Hugetlb migration is unsupported */
Baolin Wangb5bade92022-01-14 14:08:34 -08001422 if (!no_subpage_counting)
1423 nr_failed++;
Baolin Wang5d39a7e2022-01-14 14:08:37 -08001424 nr_failed_pages += nr_subpages;
Yang Shid532e2e2020-12-14 19:13:16 -08001425 break;
Christoph Lameter95a402c2006-06-23 02:03:53 -07001426 case -ENOMEM:
Michal Hocko94723aa2018-04-10 16:30:07 -07001427 /*
Yang Shid532e2e2020-12-14 19:13:16 -08001428 * When memory is low, don't bother to try to migrate
1429 * other pages, just exit.
Yang Shib0b515b2021-06-30 18:51:48 -07001430 * THP NUMA faulting doesn't split THP to retry.
Michal Hocko94723aa2018-04-10 16:30:07 -07001431 */
Yang Shib0b515b2021-06-30 18:51:48 -07001432 if (is_thp && !nosplit) {
Baolin Wangb5bade92022-01-14 14:08:34 -08001433 nr_thp_failed++;
1434 if (!try_split_thp(page, &page2, &thp_split_pages)) {
Anshuman Khandual1a5bae22020-08-11 18:31:51 -07001435 nr_thp_split++;
Michal Hocko94723aa2018-04-10 16:30:07 -07001436 goto retry;
1437 }
Zi Yan6c5c7b92020-09-25 21:19:14 -07001438
Baolin Wangb5bade92022-01-14 14:08:34 -08001439 nr_failed_pages += nr_subpages;
Anshuman Khandual1a5bae22020-08-11 18:31:51 -07001440 goto out;
1441 }
Baolin Wangb5bade92022-01-14 14:08:34 -08001442
1443 if (!no_subpage_counting)
1444 nr_failed++;
Baolin Wang5d39a7e2022-01-14 14:08:37 -08001445 nr_failed_pages += nr_subpages;
Christoph Lameter95a402c2006-06-23 02:03:53 -07001446 goto out;
Christoph Lametere24f0b82006-06-23 02:03:51 -07001447 case -EAGAIN:
Anshuman Khandual1a5bae22020-08-11 18:31:51 -07001448 if (is_thp) {
1449 thp_retry++;
1450 break;
1451 }
Christoph Lameter2d1db3b2006-06-23 02:03:33 -07001452 retry++;
Christoph Lametere24f0b82006-06-23 02:03:51 -07001453 break;
Rafael Aquini78bd5202012-12-11 16:02:31 -08001454 case MIGRATEPAGE_SUCCESS:
Baolin Wang5d39a7e2022-01-14 14:08:37 -08001455 nr_succeeded += nr_subpages;
Anshuman Khandual1a5bae22020-08-11 18:31:51 -07001456 if (is_thp) {
1457 nr_thp_succeeded++;
Anshuman Khandual1a5bae22020-08-11 18:31:51 -07001458 break;
1459 }
Christoph Lametere24f0b82006-06-23 02:03:51 -07001460 break;
1461 default:
Naoya Horiguchi354a3362014-01-21 15:51:14 -08001462 /*
Yang Shid532e2e2020-12-14 19:13:16 -08001463 * Permanent failure (-EBUSY, etc.):
Naoya Horiguchi354a3362014-01-21 15:51:14 -08001464			 * unlike the -EAGAIN case, the failed page is
1465			 * removed from the migration page list and not
1466			 * retried in the next outer loop.
1467 */
Anshuman Khandual1a5bae22020-08-11 18:31:51 -07001468 if (is_thp) {
1469 nr_thp_failed++;
Baolin Wangb5bade92022-01-14 14:08:34 -08001470 nr_failed_pages += nr_subpages;
Anshuman Khandual1a5bae22020-08-11 18:31:51 -07001471 break;
1472 }
Baolin Wangb5bade92022-01-14 14:08:34 -08001473
1474 if (!no_subpage_counting)
1475 nr_failed++;
Baolin Wang5d39a7e2022-01-14 14:08:37 -08001476 nr_failed_pages += nr_subpages;
Christoph Lametere24f0b82006-06-23 02:03:51 -07001477 break;
Christoph Lameter2d1db3b2006-06-23 02:03:33 -07001478 }
Christoph Lameterb20a3502006-03-22 00:09:12 -08001479 }
1480 }
Baolin Wangb5bade92022-01-14 14:08:34 -08001481 nr_failed += retry;
Anshuman Khandual1a5bae22020-08-11 18:31:51 -07001482 nr_thp_failed += thp_retry;
Baolin Wangb5bade92022-01-14 14:08:34 -08001483 /*
1484	 * Try to migrate the subpages of fail-to-migrate THPs. No nr_failed
1485	 * counting is done in this round, since all subpages of a THP are
1486	 * counted as one failure in the first round.
1487 */
1488 if (!list_empty(&thp_split_pages)) {
1489 /*
1490 * Move non-migrated pages (after 10 retries) to ret_pages
1491 * to avoid migrating them again.
1492 */
1493 list_splice_init(from, &ret_pages);
1494 list_splice_init(&thp_split_pages, from);
1495 no_subpage_counting = true;
1496 retry = 1;
1497 goto thp_subpage_migration;
1498 }
1499
1500 rc = nr_failed + nr_thp_failed;
Christoph Lameter95a402c2006-06-23 02:03:53 -07001501out:
Yang Shidd4ae782020-12-14 19:13:06 -08001502 /*
1503	 * Put the permanently failed pages back on the migration list; they
1504	 * will be moved to the right list by the caller.
1505 */
1506 list_splice(&ret_pages, from);
1507
Anshuman Khandual1a5bae22020-08-11 18:31:51 -07001508 count_vm_events(PGMIGRATE_SUCCESS, nr_succeeded);
Baolin Wangb5bade92022-01-14 14:08:34 -08001509 count_vm_events(PGMIGRATE_FAIL, nr_failed_pages);
Anshuman Khandual1a5bae22020-08-11 18:31:51 -07001510 count_vm_events(THP_MIGRATION_SUCCESS, nr_thp_succeeded);
1511 count_vm_events(THP_MIGRATION_FAIL, nr_thp_failed);
1512 count_vm_events(THP_MIGRATION_SPLIT, nr_thp_split);
Baolin Wangb5bade92022-01-14 14:08:34 -08001513 trace_mm_migrate_pages(nr_succeeded, nr_failed_pages, nr_thp_succeeded,
Anshuman Khandual1a5bae22020-08-11 18:31:51 -07001514 nr_thp_failed, nr_thp_split, mode, reason);
Mel Gorman7b2a2d42012-10-19 14:07:31 +01001515
Yang Shi5ac95882021-09-02 14:59:13 -07001516 if (ret_succeeded)
1517 *ret_succeeded = nr_succeeded;
1518
Rafael Aquini78bd5202012-12-11 16:02:31 -08001519 return rc;
Christoph Lameterb20a3502006-03-22 00:09:12 -08001520}
1521
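/*
 * Generic migration target allocator.  @private carries a pointer to a
 * struct migration_target_control (target node, nodemask and gfp mask);
 * hugetlb and THP source pages get a matching hugetlb or THP target.
 */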
Joonsoo Kim19fc7be2020-08-11 18:37:25 -07001522struct page *alloc_migration_target(struct page *page, unsigned long private)
Joonsoo Kimb4b38222020-08-11 18:37:14 -07001523{
Joonsoo Kim19fc7be2020-08-11 18:37:25 -07001524 struct migration_target_control *mtc;
1525 gfp_t gfp_mask;
Joonsoo Kimb4b38222020-08-11 18:37:14 -07001526 unsigned int order = 0;
1527 struct page *new_page = NULL;
Joonsoo Kim19fc7be2020-08-11 18:37:25 -07001528 int nid;
1529 int zidx;
1530
1531 mtc = (struct migration_target_control *)private;
1532 gfp_mask = mtc->gfp_mask;
1533 nid = mtc->nid;
1534 if (nid == NUMA_NO_NODE)
1535 nid = page_to_nid(page);
Joonsoo Kimb4b38222020-08-11 18:37:14 -07001536
Joonsoo Kimd92bbc22020-08-11 18:37:17 -07001537 if (PageHuge(page)) {
1538 struct hstate *h = page_hstate(compound_head(page));
1539
Joonsoo Kim19fc7be2020-08-11 18:37:25 -07001540 gfp_mask = htlb_modify_alloc_mask(h, gfp_mask);
1541 return alloc_huge_page_nodemask(h, nid, mtc->nmask, gfp_mask);
Joonsoo Kimd92bbc22020-08-11 18:37:17 -07001542 }
Joonsoo Kimb4b38222020-08-11 18:37:14 -07001543
1544 if (PageTransHuge(page)) {
Joonsoo Kim9933a0c2020-08-11 18:37:20 -07001545 /*
1546 * clear __GFP_RECLAIM to make the migration callback
1547 * consistent with regular THP allocations.
1548 */
1549 gfp_mask &= ~__GFP_RECLAIM;
Joonsoo Kimb4b38222020-08-11 18:37:14 -07001550 gfp_mask |= GFP_TRANSHUGE;
1551 order = HPAGE_PMD_ORDER;
1552 }
Joonsoo Kim19fc7be2020-08-11 18:37:25 -07001553 zidx = zone_idx(page_zone(page));
1554 if (is_highmem_idx(zidx) || zidx == ZONE_MOVABLE)
Joonsoo Kimb4b38222020-08-11 18:37:14 -07001555 gfp_mask |= __GFP_HIGHMEM;
1556
Matthew Wilcox (Oracle)84172f42021-04-29 23:01:15 -07001557 new_page = __alloc_pages(gfp_mask, order, nid, mtc->nmask);
Joonsoo Kimb4b38222020-08-11 18:37:14 -07001558
1559 if (new_page && PageTransHuge(new_page))
1560 prep_transhuge_page(new_page);
1561
1562 return new_page;
1563}
1564
Christoph Lameter742755a2006-06-23 02:03:55 -07001565#ifdef CONFIG_NUMA
Christoph Lameter742755a2006-06-23 02:03:55 -07001566
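/*
 * Write @value into @nr consecutive slots of the user status array,
 * starting at index @start.  Returns -EFAULT if the copy to user space fails.
 */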
Michal Hockoa49bd4d2018-04-10 16:29:59 -07001567static int store_status(int __user *status, int start, int value, int nr)
Christoph Lameter742755a2006-06-23 02:03:55 -07001568{
Michal Hockoa49bd4d2018-04-10 16:29:59 -07001569 while (nr-- > 0) {
1570 if (put_user(value, status + start))
1571 return -EFAULT;
1572 start++;
1573 }
Christoph Lameter742755a2006-06-23 02:03:55 -07001574
Michal Hockoa49bd4d2018-04-10 16:29:59 -07001575 return 0;
1576}
Christoph Lameter742755a2006-06-23 02:03:55 -07001577
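/*
 * Migrate the isolated pages on @pagelist to @node.  Pages that could not
 * be migrated are put back by putback_movable_pages().
 */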
Michal Hockoa49bd4d2018-04-10 16:29:59 -07001578static int do_move_pages_to_node(struct mm_struct *mm,
1579 struct list_head *pagelist, int node)
1580{
1581 int err;
Joonsoo Kima0976312020-08-11 18:37:28 -07001582 struct migration_target_control mtc = {
1583 .nid = node,
1584 .gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
1585 };
Christoph Lameter742755a2006-06-23 02:03:55 -07001586
Joonsoo Kima0976312020-08-11 18:37:28 -07001587 err = migrate_pages(pagelist, alloc_migration_target, NULL,
Yang Shi5ac95882021-09-02 14:59:13 -07001588 (unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL, NULL);
Michal Hockoa49bd4d2018-04-10 16:29:59 -07001589 if (err)
1590 putback_movable_pages(pagelist);
1591 return err;
Christoph Lameter742755a2006-06-23 02:03:55 -07001592}
1593
1594/*
Michal Hockoa49bd4d2018-04-10 16:29:59 -07001595 * Resolves the given address to a struct page, isolates it from the LRU and
1596 * puts it on the given pagelist.
Yang Shie0153fc2020-01-04 12:59:46 -08001597 * Returns:
1598 * errno - if the page cannot be found/isolated
1599 * 0 - when it doesn't have to be migrated because it is already on the
1600 * target node
1601 * 1 - when it has been queued
Christoph Lameter742755a2006-06-23 02:03:55 -07001602 */
Michal Hockoa49bd4d2018-04-10 16:29:59 -07001603static int add_page_for_migration(struct mm_struct *mm, unsigned long addr,
1604 int node, struct list_head *pagelist, bool migrate_all)
Christoph Lameter742755a2006-06-23 02:03:55 -07001605{
Michal Hockoa49bd4d2018-04-10 16:29:59 -07001606 struct vm_area_struct *vma;
1607 struct page *page;
Christoph Lameter742755a2006-06-23 02:03:55 -07001608 int err;
Christoph Lameter742755a2006-06-23 02:03:55 -07001609
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07001610 mmap_read_lock(mm);
Michal Hockoa49bd4d2018-04-10 16:29:59 -07001611 err = -EFAULT;
1612 vma = find_vma(mm, addr);
1613 if (!vma || addr < vma->vm_start || !vma_migratable(vma))
1614 goto out;
Christoph Lameter742755a2006-06-23 02:03:55 -07001615
Michal Hockoa49bd4d2018-04-10 16:29:59 -07001616 /* FOLL_DUMP to ignore special (like zero) pages */
Miaohe Lin87d27622022-03-22 14:45:29 -07001617 page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);
Christoph Lameter742755a2006-06-23 02:03:55 -07001618
Michal Hockoa49bd4d2018-04-10 16:29:59 -07001619 err = PTR_ERR(page);
1620 if (IS_ERR(page))
1621 goto out;
Christoph Lameter742755a2006-06-23 02:03:55 -07001622
Michal Hockoa49bd4d2018-04-10 16:29:59 -07001623 err = -ENOENT;
1624 if (!page)
1625 goto out;
Christoph Lameter742755a2006-06-23 02:03:55 -07001626
Brice Gogline78bbfa2008-10-18 20:27:15 -07001627 err = 0;
Michal Hockoa49bd4d2018-04-10 16:29:59 -07001628 if (page_to_nid(page) == node)
1629 goto out_putpage;
Christoph Lameter742755a2006-06-23 02:03:55 -07001630
Michal Hockoa49bd4d2018-04-10 16:29:59 -07001631 err = -EACCES;
1632 if (page_mapcount(page) > 1 && !migrate_all)
1633 goto out_putpage;
1634
1635 if (PageHuge(page)) {
1636 if (PageHead(page)) {
1637 isolate_huge_page(page, pagelist);
Yang Shie0153fc2020-01-04 12:59:46 -08001638 err = 1;
Michal Hockoa49bd4d2018-04-10 16:29:59 -07001639 }
1640 } else {
1641 struct page *head;
1642
1643 head = compound_head(page);
1644 err = isolate_lru_page(head);
1645 if (err)
1646 goto out_putpage;
1647
Yang Shie0153fc2020-01-04 12:59:46 -08001648 err = 1;
Michal Hockoa49bd4d2018-04-10 16:29:59 -07001649 list_add_tail(&head->lru, pagelist);
1650 mod_node_page_state(page_pgdat(head),
Huang Ying9de4f222020-04-06 20:04:41 -07001651 NR_ISOLATED_ANON + page_is_file_lru(head),
Matthew Wilcox (Oracle)6c357842020-08-14 17:30:37 -07001652 thp_nr_pages(head));
Michal Hockoa49bd4d2018-04-10 16:29:59 -07001653 }
1654out_putpage:
1655 /*
1656 * Either remove the duplicate refcount from
1657 * isolate_lru_page() or drop the page ref if it was
1658 * not isolated.
1659 */
1660 put_page(page);
1661out:
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07001662 mmap_read_unlock(mm);
Christoph Lameter742755a2006-06-23 02:03:55 -07001663 return err;
1664}
1665
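/*
 * Flush the pages queued on @pagelist to @node and fill the user status
 * array for the [start, i) range.  Returns 0 on success, a negative errno,
 * or a positive count of non-migrated pages that also includes the pages
 * not attempted yet.
 */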
Wei Yang7ca87832020-04-06 20:04:12 -07001666static int move_pages_and_store_status(struct mm_struct *mm, int node,
1667 struct list_head *pagelist, int __user *status,
1668 int start, int i, unsigned long nr_pages)
1669{
1670 int err;
1671
Wei Yang5d7ae892020-04-06 20:04:15 -07001672 if (list_empty(pagelist))
1673 return 0;
1674
Wei Yang7ca87832020-04-06 20:04:12 -07001675 err = do_move_pages_to_node(mm, pagelist, node);
1676 if (err) {
1677 /*
1678		 * A positive err means the number of pages that
1679		 * failed to migrate.  Since we are going to
1680		 * abort and return the number of non-migrated
Long Liab9dd4f2020-12-14 19:12:52 -08001681		 * pages, we need to include the rest of the
Wei Yang7ca87832020-04-06 20:04:12 -07001682		 * nr_pages that have not been attempted as
1683		 * well.
1684 */
1685 if (err > 0)
1686 err += nr_pages - i - 1;
1687 return err;
1688 }
1689 return store_status(status, start, node, i - start);
1690}
1691
Christoph Lameter742755a2006-06-23 02:03:55 -07001692/*
Brice Goglin5e9a0f02008-10-18 20:27:17 -07001693 * Migrate an array of page addresses onto an array of nodes and fill
1694 * the corresponding array of status.
1695 */
Christoph Lameter3268c632012-03-21 16:34:06 -07001696static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
Brice Goglin5e9a0f02008-10-18 20:27:17 -07001697 unsigned long nr_pages,
1698 const void __user * __user *pages,
1699 const int __user *nodes,
1700 int __user *status, int flags)
1701{
Michal Hockoa49bd4d2018-04-10 16:29:59 -07001702 int current_node = NUMA_NO_NODE;
1703 LIST_HEAD(pagelist);
1704 int start, i;
1705 int err = 0, err1;
Brice Goglin35282a22009-06-16 15:32:43 -07001706
Minchan Kim361a2a22021-05-04 18:36:57 -07001707 lru_cache_disable();
Brice Goglin35282a22009-06-16 15:32:43 -07001708
Michal Hockoa49bd4d2018-04-10 16:29:59 -07001709 for (i = start = 0; i < nr_pages; i++) {
1710 const void __user *p;
1711 unsigned long addr;
1712 int node;
Brice Goglin5e9a0f02008-10-18 20:27:17 -07001713
Michal Hockoa49bd4d2018-04-10 16:29:59 -07001714 err = -EFAULT;
1715 if (get_user(p, pages + i))
1716 goto out_flush;
1717 if (get_user(node, nodes + i))
1718 goto out_flush;
Andrey Konovalov057d33892019-09-25 16:48:30 -07001719 addr = (unsigned long)untagged_addr(p);
Brice Goglin5e9a0f02008-10-18 20:27:17 -07001720
Michal Hockoa49bd4d2018-04-10 16:29:59 -07001721 err = -ENODEV;
1722 if (node < 0 || node >= MAX_NUMNODES)
1723 goto out_flush;
1724 if (!node_state(node, N_MEMORY))
1725 goto out_flush;
Brice Goglin3140a222009-01-06 14:38:57 -08001726
Michal Hockoa49bd4d2018-04-10 16:29:59 -07001727 err = -EACCES;
1728 if (!node_isset(node, task_nodes))
1729 goto out_flush;
Brice Goglin5e9a0f02008-10-18 20:27:17 -07001730
Michal Hockoa49bd4d2018-04-10 16:29:59 -07001731 if (current_node == NUMA_NO_NODE) {
1732 current_node = node;
1733 start = i;
1734 } else if (node != current_node) {
Wei Yang7ca87832020-04-06 20:04:12 -07001735 err = move_pages_and_store_status(mm, current_node,
1736 &pagelist, status, start, i, nr_pages);
Michal Hockoa49bd4d2018-04-10 16:29:59 -07001737 if (err)
1738 goto out;
1739 start = i;
1740 current_node = node;
Brice Goglin3140a222009-01-06 14:38:57 -08001741 }
Brice Goglin5e9a0f02008-10-18 20:27:17 -07001742
Michal Hockoa49bd4d2018-04-10 16:29:59 -07001743 /*
1744 * Errors in the page lookup or isolation are not fatal and we simply
1745 * report them via status
1746 */
1747 err = add_page_for_migration(mm, addr, current_node,
1748 &pagelist, flags & MPOL_MF_MOVE_ALL);
Yang Shie0153fc2020-01-04 12:59:46 -08001749
Wei Yangd08221a2020-04-06 20:04:18 -07001750 if (err > 0) {
Yang Shie0153fc2020-01-04 12:59:46 -08001751 /* The page is successfully queued for migration */
1752 continue;
1753 }
Brice Goglin3140a222009-01-06 14:38:57 -08001754
Wei Yangd08221a2020-04-06 20:04:18 -07001755 /*
John Hubbard65462462022-03-22 14:39:40 -07001756 * The move_pages() man page does not have an -EEXIST choice, so
1757 * use -EFAULT instead.
1758 */
1759 if (err == -EEXIST)
1760 err = -EFAULT;
1761
1762 /*
Wei Yangd08221a2020-04-06 20:04:18 -07001763 * If the page is already on the target node (!err), store the
1764 * node, otherwise, store the err.
1765 */
1766 err = store_status(status, i, err ? : current_node, 1);
Michal Hockoa49bd4d2018-04-10 16:29:59 -07001767 if (err)
1768 goto out_flush;
Brice Goglin3140a222009-01-06 14:38:57 -08001769
Wei Yang7ca87832020-04-06 20:04:12 -07001770 err = move_pages_and_store_status(mm, current_node, &pagelist,
1771 status, start, i, nr_pages);
Wei Yang4afdace2020-04-06 20:04:09 -07001772 if (err)
1773 goto out;
Michal Hockoa49bd4d2018-04-10 16:29:59 -07001774 current_node = NUMA_NO_NODE;
Brice Goglin3140a222009-01-06 14:38:57 -08001775 }
Michal Hockoa49bd4d2018-04-10 16:29:59 -07001776out_flush:
1777 /* Make sure we do not overwrite the existing error */
Wei Yang7ca87832020-04-06 20:04:12 -07001778 err1 = move_pages_and_store_status(mm, current_node, &pagelist,
1779 status, start, i, nr_pages);
Wei Yangdfe9aa22020-01-30 22:11:14 -08001780 if (err >= 0)
Michal Hockoa49bd4d2018-04-10 16:29:59 -07001781 err = err1;
Brice Goglin5e9a0f02008-10-18 20:27:17 -07001782out:
Minchan Kim361a2a22021-05-04 18:36:57 -07001783 lru_cache_enable();
Brice Goglin5e9a0f02008-10-18 20:27:17 -07001784 return err;
1785}
1786
1787/*
Brice Goglin2f007e72008-10-18 20:27:16 -07001788 * Determine the nodes of an array of pages and store them in an array of status.
Christoph Lameter742755a2006-06-23 02:03:55 -07001789 */
Brice Goglin80bba122008-12-09 13:14:23 -08001790static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
1791 const void __user **pages, int *status)
Christoph Lameter742755a2006-06-23 02:03:55 -07001792{
Brice Goglin2f007e72008-10-18 20:27:16 -07001793 unsigned long i;
Brice Goglin2f007e72008-10-18 20:27:16 -07001794
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07001795 mmap_read_lock(mm);
Christoph Lameter742755a2006-06-23 02:03:55 -07001796
Brice Goglin2f007e72008-10-18 20:27:16 -07001797 for (i = 0; i < nr_pages; i++) {
Brice Goglin80bba122008-12-09 13:14:23 -08001798 unsigned long addr = (unsigned long)(*pages);
Christoph Lameter742755a2006-06-23 02:03:55 -07001799 struct vm_area_struct *vma;
1800 struct page *page;
KOSAKI Motohiroc095adb2008-12-16 16:06:43 +09001801 int err = -EFAULT;
Brice Goglin2f007e72008-10-18 20:27:16 -07001802
Liam Howlett059b8b42021-06-28 19:39:44 -07001803 vma = vma_lookup(mm, addr);
1804 if (!vma)
Christoph Lameter742755a2006-06-23 02:03:55 -07001805 goto set_status;
1806
Kirill A. Shutemovd8998442015-09-04 15:47:53 -07001807 /* FOLL_DUMP to ignore special (like zero) pages */
1808 page = follow_page(vma, addr, FOLL_DUMP);
Linus Torvalds89f5b7d2008-06-20 11:18:25 -07001809
1810 err = PTR_ERR(page);
1811 if (IS_ERR(page))
1812 goto set_status;
1813
Kirill A. Shutemovd8998442015-09-04 15:47:53 -07001814 err = page ? page_to_nid(page) : -ENOENT;
Christoph Lameter742755a2006-06-23 02:03:55 -07001815set_status:
Brice Goglin80bba122008-12-09 13:14:23 -08001816 *status = err;
1817
1818 pages++;
1819 status++;
1820 }
1821
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07001822 mmap_read_unlock(mm);
Brice Goglin80bba122008-12-09 13:14:23 -08001823}
1824
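/*
 * Fetch a chunk of page pointers from a compat (32-bit) user array and
 * widen them to native pointers.
 */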
Arnd Bergmann5b1b5612021-09-08 15:18:17 -07001825static int get_compat_pages_array(const void __user *chunk_pages[],
1826 const void __user * __user *pages,
1827 unsigned long chunk_nr)
1828{
1829 compat_uptr_t __user *pages32 = (compat_uptr_t __user *)pages;
1830 compat_uptr_t p;
1831 int i;
1832
1833 for (i = 0; i < chunk_nr; i++) {
1834 if (get_user(p, pages32 + i))
1835 return -EFAULT;
1836 chunk_pages[i] = compat_ptr(p);
1837 }
1838
1839 return 0;
1840}
1841
Brice Goglin80bba122008-12-09 13:14:23 -08001842/*
1843 * Determine the nodes of a user array of pages and store them in
1844 * a user array of status.
1845 */
1846static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
1847 const void __user * __user *pages,
1848 int __user *status)
1849{
1850#define DO_PAGES_STAT_CHUNK_NR 16
1851 const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR];
1852 int chunk_status[DO_PAGES_STAT_CHUNK_NR];
Brice Goglin80bba122008-12-09 13:14:23 -08001853
H. Peter Anvin87b8d1a2010-02-18 16:13:40 -08001854 while (nr_pages) {
1855 unsigned long chunk_nr;
Brice Goglin80bba122008-12-09 13:14:23 -08001856
H. Peter Anvin87b8d1a2010-02-18 16:13:40 -08001857 chunk_nr = nr_pages;
1858 if (chunk_nr > DO_PAGES_STAT_CHUNK_NR)
1859 chunk_nr = DO_PAGES_STAT_CHUNK_NR;
1860
Arnd Bergmann5b1b5612021-09-08 15:18:17 -07001861 if (in_compat_syscall()) {
1862 if (get_compat_pages_array(chunk_pages, pages,
1863 chunk_nr))
1864 break;
1865 } else {
1866 if (copy_from_user(chunk_pages, pages,
1867 chunk_nr * sizeof(*chunk_pages)))
1868 break;
1869 }
Brice Goglin80bba122008-12-09 13:14:23 -08001870
1871 do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);
1872
H. Peter Anvin87b8d1a2010-02-18 16:13:40 -08001873 if (copy_to_user(status, chunk_status, chunk_nr * sizeof(*status)))
1874 break;
Christoph Lameter742755a2006-06-23 02:03:55 -07001875
H. Peter Anvin87b8d1a2010-02-18 16:13:40 -08001876 pages += chunk_nr;
1877 status += chunk_nr;
1878 nr_pages -= chunk_nr;
1879 }
1880 return nr_pages ? -EFAULT : 0;
Christoph Lameter742755a2006-06-23 02:03:55 -07001881}
1882
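/*
 * Look up the mm_struct of the process targeted by move_pages() and its
 * allowed node mask, performing the ptrace and security checks when @pid
 * does not refer to the calling process.
 */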
Miaohe Lin4dc200c2020-10-17 16:14:03 -07001883static struct mm_struct *find_mm_struct(pid_t pid, nodemask_t *mem_nodes)
1884{
1885 struct task_struct *task;
1886 struct mm_struct *mm;
1887
1888 /*
1889	 * There is no need to check if the current process has the right to modify
1890	 * the specified process when they are the same.
1891 */
1892 if (!pid) {
1893 mmget(current->mm);
1894 *mem_nodes = cpuset_mems_allowed(current);
1895 return current->mm;
1896 }
1897
1898 /* Find the mm_struct */
1899 rcu_read_lock();
1900 task = find_task_by_vpid(pid);
1901 if (!task) {
1902 rcu_read_unlock();
1903 return ERR_PTR(-ESRCH);
1904 }
1905 get_task_struct(task);
1906
1907 /*
1908 * Check if this process has the right to modify the specified
1909 * process. Use the regular "ptrace_may_access()" checks.
1910 */
1911 if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
1912 rcu_read_unlock();
1913 mm = ERR_PTR(-EPERM);
1914 goto out;
1915 }
1916 rcu_read_unlock();
1917
1918 mm = ERR_PTR(security_task_movememory(task));
1919 if (IS_ERR(mm))
1920 goto out;
1921 *mem_nodes = cpuset_mems_allowed(task);
1922 mm = get_task_mm(task);
1923out:
1924 put_task_struct(task);
1925 if (!mm)
1926 mm = ERR_PTR(-EINVAL);
1927 return mm;
1928}
1929
Christoph Lameter742755a2006-06-23 02:03:55 -07001930/*
1931 * Move a list of pages in the address space of the currently executing
1932 * process.
1933 */
Dominik Brodowski7addf442018-03-17 16:08:03 +01001934static int kernel_move_pages(pid_t pid, unsigned long nr_pages,
1935 const void __user * __user *pages,
1936 const int __user *nodes,
1937 int __user *status, int flags)
Christoph Lameter742755a2006-06-23 02:03:55 -07001938{
Christoph Lameter742755a2006-06-23 02:03:55 -07001939 struct mm_struct *mm;
Brice Goglin5e9a0f02008-10-18 20:27:17 -07001940 int err;
Christoph Lameter3268c632012-03-21 16:34:06 -07001941 nodemask_t task_nodes;
Christoph Lameter742755a2006-06-23 02:03:55 -07001942
1943 /* Check flags */
1944 if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL))
1945 return -EINVAL;
1946
1947 if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
1948 return -EPERM;
1949
Miaohe Lin4dc200c2020-10-17 16:14:03 -07001950 mm = find_mm_struct(pid, &task_nodes);
1951 if (IS_ERR(mm))
1952 return PTR_ERR(mm);
Sasha Levin6e8b09e2012-04-25 16:01:53 -07001953
1954 if (nodes)
1955 err = do_pages_move(mm, task_nodes, nr_pages, pages,
1956 nodes, status, flags);
1957 else
1958 err = do_pages_stat(mm, nr_pages, pages, status);
Christoph Lameter3268c632012-03-21 16:34:06 -07001959
1960 mmput(mm);
1961 return err;
Christoph Lameter742755a2006-06-23 02:03:55 -07001962}
Christoph Lameter742755a2006-06-23 02:03:55 -07001963
Dominik Brodowski7addf442018-03-17 16:08:03 +01001964SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
1965 const void __user * __user *, pages,
1966 const int __user *, nodes,
1967 int __user *, status, int, flags)
1968{
1969 return kernel_move_pages(pid, nr_pages, pages, nodes, status, flags);
1970}
1971
Peter Zijlstra7039e1d2012-10-25 14:16:34 +02001972#ifdef CONFIG_NUMA_BALANCING
1973/*
1974 * Returns true if this is a safe migration target node for misplaced NUMA
1975 * pages. Currently it only checks the watermarks, which is crude.
1976 */
1977static bool migrate_balanced_pgdat(struct pglist_data *pgdat,
Mel Gorman3abef4e2013-02-22 16:34:27 -08001978 unsigned long nr_migrate_pages)
Peter Zijlstra7039e1d2012-10-25 14:16:34 +02001979{
1980 int z;
Mel Gorman599d0c92016-07-28 15:45:31 -07001981
Peter Zijlstra7039e1d2012-10-25 14:16:34 +02001982 for (z = pgdat->nr_zones - 1; z >= 0; z--) {
1983 struct zone *zone = pgdat->node_zones + z;
1984
1985 if (!populated_zone(zone))
1986 continue;
1987
Peter Zijlstra7039e1d2012-10-25 14:16:34 +02001988 /* Avoid waking kswapd by allocating pages_to_migrate pages. */
1989 if (!zone_watermark_ok(zone, 0,
1990 high_wmark_pages(zone) +
1991 nr_migrate_pages,
Huang Yingbfe9d002019-11-30 17:57:28 -08001992 ZONE_MOVABLE, 0))
Peter Zijlstra7039e1d2012-10-25 14:16:34 +02001993 continue;
1994 return true;
1995 }
1996 return false;
1997}
1998
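/*
 * Allocate an order-0 target page on the destination node for a NUMA
 * hinting fault migration.  The allocation is opportunistic: no reclaim,
 * no retries, no warnings.
 */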
1999static struct page *alloc_misplaced_dst_page(struct page *page,
Michal Hocko666feb22018-04-10 16:30:03 -07002000 unsigned long data)
Peter Zijlstra7039e1d2012-10-25 14:16:34 +02002001{
2002 int nid = (int) data;
2003 struct page *newpage;
2004
Vlastimil Babka96db8002015-09-08 15:03:50 -07002005 newpage = __alloc_pages_node(nid,
Johannes Weinere97ca8e52014-03-10 15:49:43 -07002006 (GFP_HIGHUSER_MOVABLE |
2007 __GFP_THISNODE | __GFP_NOMEMALLOC |
2008 __GFP_NORETRY | __GFP_NOWARN) &
Mel Gorman8479eba2016-02-26 15:19:31 -08002009 ~__GFP_RECLAIM, 0);
Hillf Dantonbac03822012-11-27 14:46:24 +00002010
Peter Zijlstra7039e1d2012-10-25 14:16:34 +02002011 return newpage;
2012}
2013
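/*
 * THP counterpart of alloc_misplaced_dst_page(): allocate and prepare a
 * huge page on the destination node.
 */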
Yang Shic5b5a3d2021-06-30 18:51:42 -07002014static struct page *alloc_misplaced_dst_page_thp(struct page *page,
2015 unsigned long data)
2016{
2017 int nid = (int) data;
2018 struct page *newpage;
2019
2020 newpage = alloc_pages_node(nid, (GFP_TRANSHUGE_LIGHT | __GFP_THISNODE),
2021 HPAGE_PMD_ORDER);
2022 if (!newpage)
2023 goto out;
2024
2025 prep_transhuge_page(newpage);
2026
2027out:
2028 return newpage;
2029}
2030
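/*
 * Isolate @page from the LRU so it can be migrated towards @pgdat.
 * Returns 1 if the page was isolated, 0 if migration should be skipped;
 * on success the caller's extra page reference is dropped, as isolation
 * takes a reference of its own.
 */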
Mel Gorman1c30e012014-01-21 15:50:58 -08002031static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
Mel Gormanb32967f2012-11-19 12:35:47 +00002032{
Hugh Dickins340ef392013-02-22 16:34:33 -08002033 int page_lru;
Baolin Wang2b9b6242021-09-08 15:18:01 -07002034 int nr_pages = thp_nr_pages(page);
Huang Yingc574bbe2022-03-22 14:46:23 -07002035 int order = compound_order(page);
Mel Gormanb32967f2012-11-19 12:35:47 +00002036
Huang Yingc574bbe2022-03-22 14:46:23 -07002037 VM_BUG_ON_PAGE(order && !PageTransHuge(page), page);
Mel Gorman3abef4e2013-02-22 16:34:27 -08002038
Yang Shi662aeea2021-06-30 18:51:51 -07002039 /* Do not migrate THP mapped by multiple processes */
2040 if (PageTransHuge(page) && total_mapcount(page) > 1)
2041 return 0;
2042
Mel Gormanb32967f2012-11-19 12:35:47 +00002043 /* Avoid migrating to a node that is nearly full */
Huang Yingc574bbe2022-03-22 14:46:23 -07002044 if (!migrate_balanced_pgdat(pgdat, nr_pages)) {
2045 int z;
2046
2047 if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING))
2048 return 0;
2049 for (z = pgdat->nr_zones - 1; z >= 0; z--) {
2050 if (populated_zone(pgdat->node_zones + z))
2051 break;
2052 }
2053 wakeup_kswapd(pgdat->node_zones + z, 0, order, ZONE_MOVABLE);
Hugh Dickins340ef392013-02-22 16:34:33 -08002054 return 0;
Huang Yingc574bbe2022-03-22 14:46:23 -07002055 }
Mel Gormanb32967f2012-11-19 12:35:47 +00002056
Hugh Dickins340ef392013-02-22 16:34:33 -08002057 if (isolate_lru_page(page))
2058 return 0;
Mel Gormanb32967f2012-11-19 12:35:47 +00002059
Huang Ying9de4f222020-04-06 20:04:41 -07002060 page_lru = page_is_file_lru(page);
Mel Gorman599d0c92016-07-28 15:45:31 -07002061 mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + page_lru,
Baolin Wang2b9b6242021-09-08 15:18:01 -07002062 nr_pages);
Hugh Dickins340ef392013-02-22 16:34:33 -08002063
2064 /*
2065 * Isolating the page has taken another reference, so the
2066 * caller's reference can be safely dropped without the page
2067 * disappearing underneath us during migration.
Mel Gormanb32967f2012-11-19 12:35:47 +00002068 */
2069 put_page(page);
Hugh Dickins340ef392013-02-22 16:34:33 -08002070 return 1;
Mel Gormanb32967f2012-11-19 12:35:47 +00002071}
2072
Mel Gormana8f60772012-11-14 21:41:46 +00002073/*
Peter Zijlstra7039e1d2012-10-25 14:16:34 +02002074 * Attempt to migrate a misplaced page to the specified destination
2075 * node. Caller is expected to have an elevated reference count on
2076 * the page that will be dropped by this function before returning.
2077 */
Mel Gorman1bc115d2013-10-07 11:29:05 +01002078int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
2079 int node)
Peter Zijlstra7039e1d2012-10-25 14:16:34 +02002080{
Mel Gormana8f60772012-11-14 21:41:46 +00002081 pg_data_t *pgdat = NODE_DATA(node);
Hugh Dickins340ef392013-02-22 16:34:33 -08002082 int isolated;
Mel Gormanb32967f2012-11-19 12:35:47 +00002083 int nr_remaining;
Huang Yinge39bb6b2022-03-22 14:46:20 -07002084 unsigned int nr_succeeded;
Peter Zijlstra7039e1d2012-10-25 14:16:34 +02002085 LIST_HEAD(migratepages);
Yang Shic5b5a3d2021-06-30 18:51:42 -07002086 new_page_t *new;
2087 bool compound;
Aneesh Kumar K.Vb5916c02021-07-29 14:53:47 -07002088 int nr_pages = thp_nr_pages(page);
Yang Shic5b5a3d2021-06-30 18:51:42 -07002089
2090 /*
2091	 * A PTE-mapped THP or HugeTLB page can't reach here, so the page could
2092	 * be either a base page or a THP. And it must be the head page if it
2093	 * is a THP.
2094 */
2095 compound = PageTransHuge(page);
2096
2097 if (compound)
2098 new = alloc_misplaced_dst_page_thp;
2099 else
2100 new = alloc_misplaced_dst_page;
Peter Zijlstra7039e1d2012-10-25 14:16:34 +02002101
2102 /*
Mel Gorman1bc115d2013-10-07 11:29:05 +01002103 * Don't migrate file pages that are mapped in multiple processes
2104 * with execute permissions as they are probably shared libraries.
Peter Zijlstra7039e1d2012-10-25 14:16:34 +02002105 */
Miaohe Lin7ee820e2021-05-04 18:37:16 -07002106 if (page_mapcount(page) != 1 && page_is_file_lru(page) &&
2107 (vma->vm_flags & VM_EXEC))
Peter Zijlstra7039e1d2012-10-25 14:16:34 +02002108 goto out;
Peter Zijlstra7039e1d2012-10-25 14:16:34 +02002109
Mel Gormana8f60772012-11-14 21:41:46 +00002110 /*
Mel Gorman09a913a2018-04-10 16:29:20 -07002111 * Also do not migrate dirty pages as not all filesystems can move
2112 * dirty pages in MIGRATE_ASYNC mode which is a waste of cycles.
2113 */
Huang Ying9de4f222020-04-06 20:04:41 -07002114 if (page_is_file_lru(page) && PageDirty(page))
Mel Gorman09a913a2018-04-10 16:29:20 -07002115 goto out;
2116
Mel Gormanb32967f2012-11-19 12:35:47 +00002117 isolated = numamigrate_isolate_page(pgdat, page);
2118 if (!isolated)
2119 goto out;
Peter Zijlstra7039e1d2012-10-25 14:16:34 +02002120
Mel Gormanb32967f2012-11-19 12:35:47 +00002121 list_add(&page->lru, &migratepages);
Yang Shic5b5a3d2021-06-30 18:51:42 -07002122 nr_remaining = migrate_pages(&migratepages, *new, NULL, node,
Huang Yinge39bb6b2022-03-22 14:46:20 -07002123 MIGRATE_ASYNC, MR_NUMA_MISPLACED,
2124 &nr_succeeded);
Mel Gormanb32967f2012-11-19 12:35:47 +00002125 if (nr_remaining) {
Joonsoo Kim59c82b72014-01-21 15:51:17 -08002126 if (!list_empty(&migratepages)) {
2127 list_del(&page->lru);
Yang Shic5fc5c32021-06-30 18:51:45 -07002128 mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
2129 page_is_file_lru(page), -nr_pages);
Joonsoo Kim59c82b72014-01-21 15:51:17 -08002130 putback_lru_page(page);
2131 }
Mel Gormanb32967f2012-11-19 12:35:47 +00002132 isolated = 0;
Huang Yinge39bb6b2022-03-22 14:46:20 -07002133 }
2134 if (nr_succeeded) {
2135 count_vm_numa_events(NUMA_PAGE_MIGRATE, nr_succeeded);
2136 if (!node_is_toptier(page_to_nid(page)) && node_is_toptier(node))
2137 mod_node_page_state(pgdat, PGPROMOTE_SUCCESS,
2138 nr_succeeded);
2139 }
Peter Zijlstra7039e1d2012-10-25 14:16:34 +02002140 BUG_ON(!list_empty(&migratepages));
Peter Zijlstra7039e1d2012-10-25 14:16:34 +02002141 return isolated;
Hugh Dickins340ef392013-02-22 16:34:33 -08002142
2143out:
2144 put_page(page);
2145 return 0;
Peter Zijlstra7039e1d2012-10-25 14:16:34 +02002146}
Mel Gorman220018d2012-12-05 09:32:56 +00002147#endif /* CONFIG_NUMA_BALANCING */
Peter Zijlstra7039e1d2012-10-25 14:16:34 +02002148#endif /* CONFIG_NUMA */
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002149
Christoph Hellwig9b2ed9c2019-08-14 09:59:28 +02002150#ifdef CONFIG_DEVICE_PRIVATE
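/*
 * Mark the range [start, end) as not migratable by recording zeroed
 * src and dst entries for every page in the range.
 */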
Jérôme Glisse8315ada2017-09-08 16:12:21 -07002151static int migrate_vma_collect_skip(unsigned long start,
2152 unsigned long end,
2153 struct mm_walk *walk)
2154{
2155 struct migrate_vma *migrate = walk->private;
2156 unsigned long addr;
2157
Ralph Campbell872ea702020-01-30 22:14:38 -08002158 for (addr = start; addr < end; addr += PAGE_SIZE) {
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002159 migrate->dst[migrate->npages] = 0;
2160 migrate->src[migrate->npages++] = 0;
2161 }
2162
2163 return 0;
2164}
2165
Miaohe Lin843e1be2021-05-04 18:37:13 -07002166static int migrate_vma_collect_hole(unsigned long start,
2167 unsigned long end,
2168 __always_unused int depth,
2169 struct mm_walk *walk)
2170{
2171 struct migrate_vma *migrate = walk->private;
2172 unsigned long addr;
2173
2174 /* Only allow populating anonymous memory. */
2175 if (!vma_is_anonymous(walk->vma))
2176 return migrate_vma_collect_skip(start, end, walk);
2177
2178 for (addr = start; addr < end; addr += PAGE_SIZE) {
2179 migrate->src[migrate->npages] = MIGRATE_PFN_MIGRATE;
2180 migrate->dst[migrate->npages] = 0;
2181 migrate->npages++;
2182 migrate->cpages++;
2183 }
2184
2185 return 0;
2186}
2187
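/*
 * Collect one pmd worth of ptes: huge pages are split first, then a
 * migrate pfn is recorded for every migratable pte and, when the page can
 * be locked right away, the pte is replaced with a migration entry so the
 * unmap step is already done for it.
 */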
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002188static int migrate_vma_collect_pmd(pmd_t *pmdp,
2189 unsigned long start,
2190 unsigned long end,
2191 struct mm_walk *walk)
2192{
2193 struct migrate_vma *migrate = walk->private;
2194 struct vm_area_struct *vma = walk->vma;
2195 struct mm_struct *mm = vma->vm_mm;
Jérôme Glisse8c3328f2017-09-08 16:12:13 -07002196 unsigned long addr = start, unmapped = 0;
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002197 spinlock_t *ptl;
2198 pte_t *ptep;
2199
2200again:
2201 if (pmd_none(*pmdp))
Steven Priceb7a16c72020-02-03 17:36:03 -08002202 return migrate_vma_collect_hole(start, end, -1, walk);
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002203
2204 if (pmd_trans_huge(*pmdp)) {
2205 struct page *page;
2206
2207 ptl = pmd_lock(mm, pmdp);
2208 if (unlikely(!pmd_trans_huge(*pmdp))) {
2209 spin_unlock(ptl);
2210 goto again;
2211 }
2212
2213 page = pmd_page(*pmdp);
2214 if (is_huge_zero_page(page)) {
2215 spin_unlock(ptl);
2216 split_huge_pmd(vma, pmdp, addr);
2217 if (pmd_trans_unstable(pmdp))
Jérôme Glisse8315ada2017-09-08 16:12:21 -07002218 return migrate_vma_collect_skip(start, end,
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002219 walk);
2220 } else {
2221 int ret;
2222
2223 get_page(page);
2224 spin_unlock(ptl);
2225 if (unlikely(!trylock_page(page)))
Jérôme Glisse8315ada2017-09-08 16:12:21 -07002226 return migrate_vma_collect_skip(start, end,
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002227 walk);
2228 ret = split_huge_page(page);
2229 unlock_page(page);
2230 put_page(page);
Jérôme Glisse8315ada2017-09-08 16:12:21 -07002231 if (ret)
2232 return migrate_vma_collect_skip(start, end,
2233 walk);
2234 if (pmd_none(*pmdp))
Steven Priceb7a16c72020-02-03 17:36:03 -08002235 return migrate_vma_collect_hole(start, end, -1,
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002236 walk);
2237 }
2238 }
2239
2240 if (unlikely(pmd_bad(*pmdp)))
Jérôme Glisse8315ada2017-09-08 16:12:21 -07002241 return migrate_vma_collect_skip(start, end, walk);
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002242
2243 ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);
Jérôme Glisse8c3328f2017-09-08 16:12:13 -07002244 arch_enter_lazy_mmu_mode();
2245
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002246 for (; addr < end; addr += PAGE_SIZE, ptep++) {
Christoph Hellwig800bb1c2020-03-16 20:32:14 +01002247 unsigned long mpfn = 0, pfn;
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002248 struct page *page;
Jérôme Glisse8c3328f2017-09-08 16:12:13 -07002249 swp_entry_t entry;
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002250 pte_t pte;
2251
2252 pte = *ptep;
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002253
Jérôme Glissea5430dd2017-09-08 16:12:17 -07002254 if (pte_none(pte)) {
Ralph Campbell0744f282020-08-11 18:31:41 -07002255 if (vma_is_anonymous(vma)) {
2256 mpfn = MIGRATE_PFN_MIGRATE;
2257 migrate->cpages++;
2258 }
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002259 goto next;
2260 }
2261
Jérôme Glissea5430dd2017-09-08 16:12:17 -07002262 if (!pte_present(pte)) {
Jérôme Glissea5430dd2017-09-08 16:12:17 -07002263 /*
2264			 * Only care about unaddressable device page special
2265			 * page table entries. Other special swap entries are not
2266			 * migratable, and we ignore regular swapped pages.
2267 */
2268 entry = pte_to_swp_entry(pte);
2269 if (!is_device_private_entry(entry))
2270 goto next;
2271
Alistair Poppleaf5cdaf2021-06-30 18:54:06 -07002272 page = pfn_swap_entry_to_page(entry);
Ralph Campbell51431922020-07-23 15:30:00 -07002273 if (!(migrate->flags &
2274 MIGRATE_VMA_SELECT_DEVICE_PRIVATE) ||
2275 page->pgmap->owner != migrate->pgmap_owner)
Christoph Hellwig800bb1c2020-03-16 20:32:14 +01002276 goto next;
2277
Christoph Hellwig06d462b2019-08-14 09:59:27 +02002278 mpfn = migrate_pfn(page_to_pfn(page)) |
2279 MIGRATE_PFN_MIGRATE;
Alistair Popple4dd845b2021-06-30 18:54:09 -07002280 if (is_writable_device_private_entry(entry))
Jérôme Glissea5430dd2017-09-08 16:12:17 -07002281 mpfn |= MIGRATE_PFN_WRITE;
2282 } else {
Ralph Campbell51431922020-07-23 15:30:00 -07002283 if (!(migrate->flags & MIGRATE_VMA_SELECT_SYSTEM))
Christoph Hellwig800bb1c2020-03-16 20:32:14 +01002284 goto next;
Pingfan Liu276f7562019-09-23 15:37:38 -07002285 pfn = pte_pfn(pte);
Jérôme Glisse8315ada2017-09-08 16:12:21 -07002286 if (is_zero_pfn(pfn)) {
2287 mpfn = MIGRATE_PFN_MIGRATE;
2288 migrate->cpages++;
Jérôme Glisse8315ada2017-09-08 16:12:21 -07002289 goto next;
2290 }
Christoph Hellwig25b29952019-06-13 22:50:49 +02002291 page = vm_normal_page(migrate->vma, addr, pte);
Jérôme Glissea5430dd2017-09-08 16:12:17 -07002292 mpfn = migrate_pfn(pfn) | MIGRATE_PFN_MIGRATE;
2293 mpfn |= pte_write(pte) ? MIGRATE_PFN_WRITE : 0;
2294 }
2295
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002296 /* FIXME support THP */
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002297 if (!page || !page->mapping || PageTransCompound(page)) {
Pingfan Liu276f7562019-09-23 15:37:38 -07002298 mpfn = 0;
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002299 goto next;
2300 }
2301
2302 /*
2303		 * By getting a reference on the page we pin it and that blocks
2304		 * any kind of migration. A side effect is that it "freezes" the
2305		 * pte.
2306		 *
2307		 * We drop this reference after isolating the page from the lru
2308		 * for non-device pages (device pages are not on the lru and thus
2309		 * can't be dropped from it).
2310 */
2311 get_page(page);
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002312
Jérôme Glisse8c3328f2017-09-08 16:12:13 -07002313 /*
2314		 * Optimize for the common case where the page is only mapped once
2315 * in one process. If we can lock the page, then we can safely
2316 * set up a special migration page table entry now.
2317 */
2318 if (trylock_page(page)) {
2319 pte_t swp_pte;
2320
Alistair Poppleab092432021-11-10 20:32:40 -08002321 migrate->cpages++;
Jérôme Glisse8c3328f2017-09-08 16:12:13 -07002322 ptep_get_and_clear(mm, addr, ptep);
2323
2324 /* Setup special migration page table entry */
Alistair Popple4dd845b2021-06-30 18:54:09 -07002325 if (mpfn & MIGRATE_PFN_WRITE)
2326 entry = make_writable_migration_entry(
2327 page_to_pfn(page));
2328 else
2329 entry = make_readable_migration_entry(
2330 page_to_pfn(page));
Jérôme Glisse8c3328f2017-09-08 16:12:13 -07002331 swp_pte = swp_entry_to_pte(entry);
Alistair Popplead7df762020-09-04 16:36:01 -07002332 if (pte_present(pte)) {
2333 if (pte_soft_dirty(pte))
2334 swp_pte = pte_swp_mksoft_dirty(swp_pte);
2335 if (pte_uffd_wp(pte))
2336 swp_pte = pte_swp_mkuffd_wp(swp_pte);
2337 } else {
2338 if (pte_swp_soft_dirty(pte))
2339 swp_pte = pte_swp_mksoft_dirty(swp_pte);
2340 if (pte_swp_uffd_wp(pte))
2341 swp_pte = pte_swp_mkuffd_wp(swp_pte);
2342 }
Jérôme Glisse8c3328f2017-09-08 16:12:13 -07002343 set_pte_at(mm, addr, ptep, swp_pte);
2344
2345 /*
2346			 * drop the page refcount. The page won't be freed, as we took
2347 * drop page refcount. Page won't be freed, as we took
2348 * a reference just above.
2349 */
2350 page_remove_rmap(page, false);
2351 put_page(page);
Jérôme Glissea5430dd2017-09-08 16:12:17 -07002352
2353 if (pte_present(pte))
2354 unmapped++;
Alistair Poppleab092432021-11-10 20:32:40 -08002355 } else {
2356 put_page(page);
2357 mpfn = 0;
Jérôme Glisse8c3328f2017-09-08 16:12:13 -07002358 }
2359
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002360next:
Jérôme Glissea5430dd2017-09-08 16:12:17 -07002361 migrate->dst[migrate->npages] = 0;
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002362 migrate->src[migrate->npages++] = mpfn;
2363 }
Jérôme Glisse8c3328f2017-09-08 16:12:13 -07002364 arch_leave_lazy_mmu_mode();
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002365 pte_unmap_unlock(ptep - 1, ptl);
2366
Jérôme Glisse8c3328f2017-09-08 16:12:13 -07002367 /* Only flush the TLB if we actually modified any entries */
2368 if (unmapped)
2369 flush_tlb_range(walk->vma, start, end);
2370
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002371 return 0;
2372}
2373
Christoph Hellwig7b86ac32019-08-28 16:19:54 +02002374static const struct mm_walk_ops migrate_vma_walk_ops = {
2375 .pmd_entry = migrate_vma_collect_pmd,
2376 .pte_hole = migrate_vma_collect_hole,
2377};
2378
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002379/*
2380 * migrate_vma_collect() - collect pages over a range of virtual addresses
2381 * @migrate: migrate struct containing all migration information
2382 *
2383 * This will walk the CPU page table. For each virtual address backed by a
2384 * valid page, it updates the src array and takes a reference on the page, in
2385 * order to pin the page until we lock it and unmap it.
2386 */
2387static void migrate_vma_collect(struct migrate_vma *migrate)
2388{
Jérôme Glisseac46d4f2018-12-28 00:38:09 -08002389 struct mmu_notifier_range range;
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002390
Ralph Campbell998427b2020-07-23 15:30:01 -07002391 /*
2392 * Note that the pgmap_owner is passed to the mmu notifier callback so
2393 * that the registered device driver can skip invalidating device
2394 * private page mappings that won't be migrated.
2395 */
Alistair Popple6b49bf62021-06-30 18:54:19 -07002396 mmu_notifier_range_init_owner(&range, MMU_NOTIFY_MIGRATE, 0,
2397 migrate->vma, migrate->vma->vm_mm, migrate->start, migrate->end,
Ralph Campbellc1a06df2020-08-06 23:17:09 -07002398 migrate->pgmap_owner);
Jérôme Glisseac46d4f2018-12-28 00:38:09 -08002399 mmu_notifier_invalidate_range_start(&range);
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002400
Christoph Hellwig7b86ac32019-08-28 16:19:54 +02002401 walk_page_range(migrate->vma->vm_mm, migrate->start, migrate->end,
2402 &migrate_vma_walk_ops, migrate);
2403
2404 mmu_notifier_invalidate_range_end(&range);
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002405 migrate->end = migrate->start + (migrate->npages << PAGE_SHIFT);
2406}
2407
2408/*
2409 * migrate_vma_check_page() - check if page is pinned or not
2410 * @page: struct page to check
2411 *
2412 * Pinned pages cannot be migrated. This is the same test as in
Matthew Wilcox (Oracle)34170132021-05-07 07:28:40 -04002413 * folio_migrate_mapping(), except that here we allow migration of a
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002414 * ZONE_DEVICE page.
2415 */
2416static bool migrate_vma_check_page(struct page *page)
2417{
2418 /*
2419 * One extra ref because caller holds an extra reference, either from
2420 * isolate_lru_page() for a regular page, or migrate_vma_collect() for
2421 * a device page.
2422 */
2423 int extra = 1;
2424
2425 /*
2426 * FIXME support THP (transparent huge pages); they are a bit more complex to
2427 * check than regular pages because they can be mapped with a pmd
2428 * or with a pte (split pte mapping).
2429 */
2430 if (PageCompound(page))
2431 return false;
2432
Jérôme Glissea5430dd2017-09-08 16:12:17 -07002433	/* Pages from ZONE_DEVICE have one extra reference */
Alistair Poppleffa65752022-01-21 22:10:46 -08002434 if (is_zone_device_page(page))
2435 extra++;
Jérôme Glissea5430dd2017-09-08 16:12:17 -07002436
Jérôme Glissedf6ad692017-09-08 16:12:24 -07002437	/* For file-backed pages */
2438 if (page_mapping(page))
2439 extra += 1 + page_has_private(page);
2440
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002441 if ((page_count(page) - extra) > page_mapcount(page))
2442 return false;
2443
2444 return true;
2445}
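/*
 * Worked example (illustrative note, not in the original source): an
 * anonymous page that reaches this check fully unmapped holds only the
 * caller's reference, so page_count() == 1, extra == 1 and
 * page_mapcount() == 0; (1 - 1) > 0 is false and the page is considered
 * migratable.  Any additional reference (e.g. a GUP pin) raises
 * page_count() to 2, (2 - 1) > 0 is true and the page is treated as
 * pinned.
 */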
2446
2447/*
Alistair Poppleab092432021-11-10 20:32:40 -08002448 * migrate_vma_unmap() - replace page mapping with special migration pte entry
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002449 * @migrate: migrate struct containing all migration information
2450 *
Alistair Poppleab092432021-11-10 20:32:40 -08002451 * Isolate pages from the LRU and replace mappings (CPU page table pte) with a
2452 * special migration pte entry and check whether each page has been pinned. Pinned pages are
2453 * restored because we cannot migrate them.
2454 *
2455 * This is the last step before we call the device driver callback to allocate
2456 * destination memory and copy contents of original page over to new page.
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002457 */
Alistair Poppleab092432021-11-10 20:32:40 -08002458static void migrate_vma_unmap(struct migrate_vma *migrate)
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002459{
2460 const unsigned long npages = migrate->npages;
Colin Ian Kingf1e8db02022-01-14 14:08:53 -08002461 unsigned long i, restore = 0;
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002462 bool allow_drain = true;
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002463
2464 lru_add_drain();
2465
Alistair Poppleab092432021-11-10 20:32:40 -08002466 for (i = 0; i < npages; i++) {
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002467 struct page *page = migrate_pfn_to_page(migrate->src[i]);
2468
2469 if (!page)
2470 continue;
2471
Jérôme Glissea5430dd2017-09-08 16:12:17 -07002472 /* ZONE_DEVICE pages are not on LRU */
2473 if (!is_zone_device_page(page)) {
2474 if (!PageLRU(page) && allow_drain) {
2475 /* Drain CPU's pagevec */
2476 lru_add_drain_all();
2477 allow_drain = false;
Jérôme Glisse8c3328f2017-09-08 16:12:13 -07002478 }
Jérôme Glissea5430dd2017-09-08 16:12:17 -07002479
2480 if (isolate_lru_page(page)) {
Alistair Poppleab092432021-11-10 20:32:40 -08002481 migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
2482 migrate->cpages--;
2483 restore++;
Jérôme Glissea5430dd2017-09-08 16:12:17 -07002484 continue;
2485 }
2486
2487 /* Drop the reference we took in collect */
2488 put_page(page);
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002489 }
2490
Alistair Poppleab092432021-11-10 20:32:40 -08002491 if (page_mapped(page))
Alistair Popplea98a2f02021-06-30 18:54:16 -07002492 try_to_migrate(page, 0);
Jérôme Glisse8c3328f2017-09-08 16:12:13 -07002493
Alistair Poppleab092432021-11-10 20:32:40 -08002494 if (page_mapped(page) || !migrate_vma_check_page(page)) {
2495 if (!is_zone_device_page(page)) {
2496 get_page(page);
2497 putback_lru_page(page);
2498 }
2499
2500 migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
2501 migrate->cpages--;
2502 restore++;
Jérôme Glisse8c3328f2017-09-08 16:12:13 -07002503 continue;
Alistair Poppleab092432021-11-10 20:32:40 -08002504 }
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002505 }
2506
Colin Ian Kingf1e8db02022-01-14 14:08:53 -08002507 for (i = 0; i < npages && restore; i++) {
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002508 struct page *page = migrate_pfn_to_page(migrate->src[i]);
2509
2510 if (!page || (migrate->src[i] & MIGRATE_PFN_MIGRATE))
2511 continue;
2512
2513 remove_migration_ptes(page, page, false);
2514
2515 migrate->src[i] = 0;
2516 unlock_page(page);
Alistair Poppleab092432021-11-10 20:32:40 -08002517 put_page(page);
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002518 restore--;
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002519 }
2520}
2521
Christoph Hellwiga7d1f222019-08-14 09:59:19 +02002522/**
2523 * migrate_vma_setup() - prepare to migrate a range of memory
Randy Dunlapeaf444d2020-08-11 18:33:08 -07002524 * @args: contains the vma, start, and pfns arrays for the migration
Christoph Hellwiga7d1f222019-08-14 09:59:19 +02002525 *
2526 * Returns: negative errno on failures, 0 when 0 or more pages were migrated
2527 * without an error.
2528 *
2529 * Prepare to migrate a range of virtual addresses by collecting all
2530 * the pages backing each virtual address in the range, saving them inside the
2531 * src array. Then lock those pages and unmap them. Once the pages are locked
2532 * and unmapped, check whether each page is pinned or not. Pages that aren't
2533 * pinned have the MIGRATE_PFN_MIGRATE flag set (by this function) in the
2534 * corresponding src array entry. Any pages that are pinned are then restored by
2535 * remapping and unlocking them.
2536 *
2537 * The caller should then allocate destination memory and copy source memory to
2538 * it for all those entries (ie with MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE
2539 * flag set). Once these are allocated and copied, the caller must update each
2540 * corresponding entry in the dst array with the pfn value of the destination
Alistair Poppleab092432021-11-10 20:32:40 -08002541 * page and with MIGRATE_PFN_VALID. Destination pages must be locked via
2542 * lock_page().
Christoph Hellwiga7d1f222019-08-14 09:59:19 +02002543 *
2544 * Note that the caller does not have to migrate all the pages that are marked
2545 * with MIGRATE_PFN_MIGRATE flag in src array unless this is a migration from
2546 * device memory to system memory. If the caller cannot migrate a device page
2547 * back to system memory, then it must return VM_FAULT_SIGBUS, which has severe
2548 * consequences for the userspace process, so it must be avoided if at all
2549 * possible.
2550 *
2551 * For empty entries inside CPU page table (pte_none() or pmd_none() is true) we
2552 * do set MIGRATE_PFN_MIGRATE flag inside the corresponding source array thus
Ingo Molnarf0953a12021-05-06 18:06:47 -07002553 * allowing the caller to allocate device memory for those unbacked virtual
2554 * addresses. For this the caller simply has to allocate device memory and
Christoph Hellwiga7d1f222019-08-14 09:59:19 +02002555 * properly set the destination entry like for regular migration. Note that
Ingo Molnarf0953a12021-05-06 18:06:47 -07002556 * this can still fail, and thus inside the device driver you must check if the
2557 * migration was successful for those entries after calling migrate_vma_pages(),
Christoph Hellwiga7d1f222019-08-14 09:59:19 +02002558 * just like for regular migration.
2559 *
2560 * After that, the callers must call migrate_vma_pages() to go over each entry
2561 * in the src array that has the MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE flag
2562 * set. If the corresponding entry in dst array has MIGRATE_PFN_VALID flag set,
2563 * then migrate_vma_pages() to migrate struct page information from the source
2564 * struct page to the destination struct page. If it fails to migrate the
2565 * struct page information, then it clears the MIGRATE_PFN_MIGRATE flag in the
2566 * src array.
2567 *
2568 * At this point all successfully migrated pages have an entry in the src
2569 * array with MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE flag set and the dst
2570 * array entry with MIGRATE_PFN_VALID flag set.
2571 *
2572 * Once migrate_vma_pages() returns the caller may inspect which pages were
2573 * successfully migrated, and which were not. Successfully migrated pages will
2574 * have the MIGRATE_PFN_MIGRATE flag set for their src array entry.
2575 *
2576 * It is safe to update device page table after migrate_vma_pages() because
Michel Lespinassec1e8d7c2020-06-08 21:33:54 -07002577 * both destination and source page are still locked, and the mmap_lock is held
Christoph Hellwiga7d1f222019-08-14 09:59:19 +02002578 * in read mode (hence no one can unmap the range being migrated).
2579 *
2580 * Once the caller is done cleaning up things and updating its page table (if it
2581 * chose to do so, this is not an obligation) it finally calls
2582 * migrate_vma_finalize() to update the CPU page table to point to new pages
2583 * for successfully migrated pages or otherwise restore the CPU page table to
2584 * point to the original source pages.
2585 */
2586int migrate_vma_setup(struct migrate_vma *args)
2587{
2588 long nr_pages = (args->end - args->start) >> PAGE_SHIFT;
2589
2590 args->start &= PAGE_MASK;
2591 args->end &= PAGE_MASK;
2592 if (!args->vma || is_vm_hugetlb_page(args->vma) ||
2593 (args->vma->vm_flags & VM_SPECIAL) || vma_is_dax(args->vma))
2594 return -EINVAL;
2595 if (nr_pages <= 0)
2596 return -EINVAL;
2597 if (args->start < args->vma->vm_start ||
2598 args->start >= args->vma->vm_end)
2599 return -EINVAL;
2600 if (args->end <= args->vma->vm_start || args->end > args->vma->vm_end)
2601 return -EINVAL;
2602 if (!args->src || !args->dst)
2603 return -EINVAL;
2604
2605 memset(args->src, 0, sizeof(*args->src) * nr_pages);
2606 args->cpages = 0;
2607 args->npages = 0;
2608
2609 migrate_vma_collect(args);
2610
2611 if (args->cpages)
Christoph Hellwiga7d1f222019-08-14 09:59:19 +02002612 migrate_vma_unmap(args);
2613
2614 /*
2615 * At this point pages are locked and unmapped, and thus they have
2616 * stable content and can safely be copied to destination memory that
2617 * is allocated by the drivers.
2618 */
2619 return 0;
2620
2621}
2622EXPORT_SYMBOL(migrate_vma_setup);
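
/*
 * Illustrative sketch (not part of mm/migrate.c): the driver-side flow
 * described above, migrating a small range of system memory into device
 * private memory.  example_alloc_device_page() and example_copy_to_device()
 * are hypothetical driver helpers; real users of this API are device
 * drivers such as lib/test_hmm.c or nouveau.
 */
static struct page *example_alloc_device_page(void);		/* hypothetical */
static void example_copy_to_device(struct page *dpage,
				   struct page *spage);		/* hypothetical */

static int example_migrate_range_to_device(struct vm_area_struct *vma,
					   unsigned long start,
					   void *pgmap_owner)
{
	/* Assumes [start, start + 32 pages) lies inside @vma. */
	unsigned long src_pfns[32] = { 0 };
	unsigned long dst_pfns[32] = { 0 };
	struct migrate_vma args = {
		.vma		= vma,
		.start		= start,
		.end		= start + 32 * PAGE_SIZE,
		.src		= src_pfns,
		.dst		= dst_pfns,
		.pgmap_owner	= pgmap_owner,
		.flags		= MIGRATE_VMA_SELECT_SYSTEM,
	};
	unsigned long i;
	int ret;

	ret = migrate_vma_setup(&args);
	if (ret)
		return ret;

	for (i = 0; i < args.npages; i++) {
		struct page *spage = migrate_pfn_to_page(args.src[i]);
		struct page *dpage;

		/* Skip entries that could not be collected or are pinned. */
		if (!(args.src[i] & MIGRATE_PFN_MIGRATE))
			continue;

		dpage = example_alloc_device_page();
		if (!dpage)
			continue;

		/* The caller copies the data; the core uses MIGRATE_SYNC_NO_COPY. */
		if (spage)
			example_copy_to_device(dpage, spage);

		/* Destination pages must be locked, as documented above. */
		lock_page(dpage);
		args.dst[i] = migrate_pfn(page_to_pfn(dpage));
	}

	migrate_vma_pages(&args);
	/* A real driver would update its device page tables here. */
	migrate_vma_finalize(&args);
	return 0;
}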
2623
Ralph Campbell34290e22020-01-30 22:14:44 -08002624/*
2625 * This code closely matches the code in:
2626 * __handle_mm_fault()
2627 * handle_pte_fault()
2628 * do_anonymous_page()
2629 * to map in an anonymous zero page but the struct page will be a ZONE_DEVICE
2630 * private page.
2631 */
Jérôme Glisse8315ada2017-09-08 16:12:21 -07002632static void migrate_vma_insert_page(struct migrate_vma *migrate,
2633 unsigned long addr,
2634 struct page *page,
Stephen Zhangd85c6db2020-12-14 19:13:20 -08002635 unsigned long *src)
Jérôme Glisse8315ada2017-09-08 16:12:21 -07002636{
2637 struct vm_area_struct *vma = migrate->vma;
2638 struct mm_struct *mm = vma->vm_mm;
Jérôme Glisse8315ada2017-09-08 16:12:21 -07002639 bool flush = false;
2640 spinlock_t *ptl;
2641 pte_t entry;
2642 pgd_t *pgdp;
2643 p4d_t *p4dp;
2644 pud_t *pudp;
2645 pmd_t *pmdp;
2646 pte_t *ptep;
2647
2648 /* Only allow populating anonymous memory */
2649 if (!vma_is_anonymous(vma))
2650 goto abort;
2651
2652 pgdp = pgd_offset(mm, addr);
2653 p4dp = p4d_alloc(mm, pgdp, addr);
2654 if (!p4dp)
2655 goto abort;
2656 pudp = pud_alloc(mm, p4dp, addr);
2657 if (!pudp)
2658 goto abort;
2659 pmdp = pmd_alloc(mm, pudp, addr);
2660 if (!pmdp)
2661 goto abort;
2662
2663 if (pmd_trans_huge(*pmdp) || pmd_devmap(*pmdp))
2664 goto abort;
2665
2666 /*
2667 * Use pte_alloc() instead of pte_alloc_map(). We can't run
2668 * pte_offset_map() on pmds where a huge pmd might be created
2669 * from a different thread.
2670 *
Michel Lespinasse3e4e28c2020-06-08 21:33:51 -07002671 * pte_alloc_map() is safe to use under mmap_write_lock(mm) or when
Jérôme Glisse8315ada2017-09-08 16:12:21 -07002672 * parallel threads are excluded by other means.
2673 *
Michel Lespinasse3e4e28c2020-06-08 21:33:51 -07002674 * Here we only have mmap_read_lock(mm).
Jérôme Glisse8315ada2017-09-08 16:12:21 -07002675 */
Joel Fernandes (Google)4cf58922019-01-03 15:28:34 -08002676 if (pte_alloc(mm, pmdp))
Jérôme Glisse8315ada2017-09-08 16:12:21 -07002677 goto abort;
2678
2679 /* See the comment in pte_alloc_one_map() */
2680 if (unlikely(pmd_trans_unstable(pmdp)))
2681 goto abort;
2682
2683 if (unlikely(anon_vma_prepare(vma)))
2684 goto abort;
Matthew Wilcox (Oracle)8f425e42021-06-25 09:27:04 -04002685 if (mem_cgroup_charge(page_folio(page), vma->vm_mm, GFP_KERNEL))
Jérôme Glisse8315ada2017-09-08 16:12:21 -07002686 goto abort;
2687
2688 /*
2689 * The memory barrier inside __SetPageUptodate makes sure that
2690 * preceding stores to the page contents become visible before
2691 * the set_pte_at() write.
2692 */
2693 __SetPageUptodate(page);
2694
Jérôme Glissedf6ad692017-09-08 16:12:24 -07002695 if (is_zone_device_page(page)) {
2696 if (is_device_private_page(page)) {
2697 swp_entry_t swp_entry;
Jérôme Glisse8315ada2017-09-08 16:12:21 -07002698
Alistair Popple4dd845b2021-06-30 18:54:09 -07002699 if (vma->vm_flags & VM_WRITE)
2700 swp_entry = make_writable_device_private_entry(
2701 page_to_pfn(page));
2702 else
2703 swp_entry = make_readable_device_private_entry(
2704 page_to_pfn(page));
Jérôme Glissedf6ad692017-09-08 16:12:24 -07002705 entry = swp_entry_to_pte(swp_entry);
Miaohe Lin34f5e9b2021-05-04 18:37:10 -07002706 } else {
2707 /*
2708 * For now we only support migrating to un-addressable
2709 * device memory.
2710 */
2711 pr_warn_once("Unsupported ZONE_DEVICE page type.\n");
2712 goto abort;
Jérôme Glissedf6ad692017-09-08 16:12:24 -07002713 }
Jérôme Glisse8315ada2017-09-08 16:12:21 -07002714 } else {
2715 entry = mk_pte(page, vma->vm_page_prot);
2716 if (vma->vm_flags & VM_WRITE)
2717 entry = pte_mkwrite(pte_mkdirty(entry));
2718 }
2719
2720 ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);
2721
Ralph Campbell34290e22020-01-30 22:14:44 -08002722 if (check_stable_address_space(mm))
2723 goto unlock_abort;
2724
Jérôme Glisse8315ada2017-09-08 16:12:21 -07002725 if (pte_present(*ptep)) {
2726 unsigned long pfn = pte_pfn(*ptep);
2727
Ralph Campbellc23a0c92020-01-30 22:14:41 -08002728 if (!is_zero_pfn(pfn))
2729 goto unlock_abort;
Jérôme Glisse8315ada2017-09-08 16:12:21 -07002730 flush = true;
Ralph Campbellc23a0c92020-01-30 22:14:41 -08002731 } else if (!pte_none(*ptep))
2732 goto unlock_abort;
Jérôme Glisse8315ada2017-09-08 16:12:21 -07002733
2734 /*
Ralph Campbellc23a0c92020-01-30 22:14:41 -08002735 * Check for userfaultfd but do not deliver the fault. Instead,
Jérôme Glisse8315ada2017-09-08 16:12:21 -07002736 * just back off.
2737 */
Ralph Campbellc23a0c92020-01-30 22:14:41 -08002738 if (userfaultfd_missing(vma))
2739 goto unlock_abort;
Jérôme Glisse8315ada2017-09-08 16:12:21 -07002740
2741 inc_mm_counter(mm, MM_ANONPAGES);
Johannes Weinerbe5d0a72020-06-03 16:01:57 -07002742 page_add_new_anon_rmap(page, vma, addr, false);
Jérôme Glisse8315ada2017-09-08 16:12:21 -07002743 if (!is_zone_device_page(page))
Joonsoo Kimb5181542020-08-11 18:30:40 -07002744 lru_cache_add_inactive_or_unevictable(page, vma);
Jérôme Glisse8315ada2017-09-08 16:12:21 -07002745 get_page(page);
2746
2747 if (flush) {
2748 flush_cache_page(vma, addr, pte_pfn(*ptep));
2749 ptep_clear_flush_notify(vma, addr, ptep);
2750 set_pte_at_notify(mm, addr, ptep, entry);
2751 update_mmu_cache(vma, addr, ptep);
2752 } else {
2753 /* No need to invalidate - it was non-present before */
2754 set_pte_at(mm, addr, ptep, entry);
2755 update_mmu_cache(vma, addr, ptep);
2756 }
2757
2758 pte_unmap_unlock(ptep, ptl);
2759 *src = MIGRATE_PFN_MIGRATE;
2760 return;
2761
Ralph Campbellc23a0c92020-01-30 22:14:41 -08002762unlock_abort:
2763 pte_unmap_unlock(ptep, ptl);
Jérôme Glisse8315ada2017-09-08 16:12:21 -07002764abort:
2765 *src &= ~MIGRATE_PFN_MIGRATE;
2766}
2767
Christoph Hellwiga7d1f222019-08-14 09:59:19 +02002768/**
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002769 * migrate_vma_pages() - migrate meta-data from src page to dst page
2770 * @migrate: migrate struct containing all migration information
2771 *
2772 * This migrates struct page meta-data from source struct page to destination
2773 * struct page. This effectively finishes the migration from source page to the
2774 * destination page.
2775 */
Christoph Hellwiga7d1f222019-08-14 09:59:19 +02002776void migrate_vma_pages(struct migrate_vma *migrate)
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002777{
2778 const unsigned long npages = migrate->npages;
2779 const unsigned long start = migrate->start;
Jérôme Glisseac46d4f2018-12-28 00:38:09 -08002780 struct mmu_notifier_range range;
2781 unsigned long addr, i;
Jérôme Glisse8315ada2017-09-08 16:12:21 -07002782 bool notified = false;
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002783
2784 for (i = 0, addr = start; i < npages; addr += PAGE_SIZE, i++) {
2785 struct page *newpage = migrate_pfn_to_page(migrate->dst[i]);
2786 struct page *page = migrate_pfn_to_page(migrate->src[i]);
2787 struct address_space *mapping;
2788 int r;
2789
Jérôme Glisse8315ada2017-09-08 16:12:21 -07002790 if (!newpage) {
2791 migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002792 continue;
Jérôme Glisse8315ada2017-09-08 16:12:21 -07002793 }
2794
2795 if (!page) {
Ralph Campbellc23a0c92020-01-30 22:14:41 -08002796 if (!(migrate->src[i] & MIGRATE_PFN_MIGRATE))
Jérôme Glisse8315ada2017-09-08 16:12:21 -07002797 continue;
Jérôme Glisse8315ada2017-09-08 16:12:21 -07002798 if (!notified) {
Jérôme Glisse8315ada2017-09-08 16:12:21 -07002799 notified = true;
Jérôme Glisseac46d4f2018-12-28 00:38:09 -08002800
Alistair Popple6b49bf62021-06-30 18:54:19 -07002801 mmu_notifier_range_init_owner(&range,
2802 MMU_NOTIFY_MIGRATE, 0, migrate->vma,
2803 migrate->vma->vm_mm, addr, migrate->end,
Ralph Campbell5e5dda82020-12-14 19:12:55 -08002804 migrate->pgmap_owner);
Jérôme Glisseac46d4f2018-12-28 00:38:09 -08002805 mmu_notifier_invalidate_range_start(&range);
Jérôme Glisse8315ada2017-09-08 16:12:21 -07002806 }
2807 migrate_vma_insert_page(migrate, addr, newpage,
Stephen Zhangd85c6db2020-12-14 19:13:20 -08002808 &migrate->src[i]);
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002809 continue;
Jérôme Glisse8315ada2017-09-08 16:12:21 -07002810 }
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002811
2812 mapping = page_mapping(page);
2813
Jérôme Glissea5430dd2017-09-08 16:12:17 -07002814 if (is_zone_device_page(newpage)) {
2815 if (is_device_private_page(newpage)) {
2816 /*
2817 * For now only support private anonymous when
2818 * migrating to un-addressable device memory.
2819 */
2820 if (mapping) {
2821 migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
2822 continue;
2823 }
Christoph Hellwig25b29952019-06-13 22:50:49 +02002824 } else {
Jérôme Glissea5430dd2017-09-08 16:12:17 -07002825 /*
2826 * Other types of ZONE_DEVICE page are not
2827 * supported.
2828 */
2829 migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
2830 continue;
2831 }
2832 }
2833
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002834 r = migrate_page(mapping, newpage, page, MIGRATE_SYNC_NO_COPY);
2835 if (r != MIGRATEPAGE_SUCCESS)
2836 migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
2837 }
Jérôme Glisse8315ada2017-09-08 16:12:21 -07002838
Jérôme Glisse4645b9f2017-11-15 17:34:11 -08002839 /*
2840 * No need to double call mmu_notifier->invalidate_range() callback as
2841 * the above ptep_clear_flush_notify() inside migrate_vma_insert_page()
2842 * did already call it.
2843 */
Jérôme Glisse8315ada2017-09-08 16:12:21 -07002844 if (notified)
Jérôme Glisseac46d4f2018-12-28 00:38:09 -08002845 mmu_notifier_invalidate_range_only_end(&range);
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002846}
Christoph Hellwiga7d1f222019-08-14 09:59:19 +02002847EXPORT_SYMBOL(migrate_vma_pages);
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002848
Christoph Hellwiga7d1f222019-08-14 09:59:19 +02002849/**
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002850 * migrate_vma_finalize() - restore CPU page table entry
2851 * @migrate: migrate struct containing all migration information
2852 *
2853 * This replaces the special migration pte entry with either a mapping to the
2854 * new page if migration was successful for that page, or to the original page
2855 * otherwise.
2856 *
2857 * This also unlocks the pages and puts them back on the lru or, for device
2858 * pages, drops the extra refcount.
2859 */
Christoph Hellwiga7d1f222019-08-14 09:59:19 +02002860void migrate_vma_finalize(struct migrate_vma *migrate)
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002861{
2862 const unsigned long npages = migrate->npages;
2863 unsigned long i;
2864
2865 for (i = 0; i < npages; i++) {
2866 struct page *newpage = migrate_pfn_to_page(migrate->dst[i]);
2867 struct page *page = migrate_pfn_to_page(migrate->src[i]);
2868
Jérôme Glisse8315ada2017-09-08 16:12:21 -07002869 if (!page) {
2870 if (newpage) {
2871 unlock_page(newpage);
2872 put_page(newpage);
2873 }
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002874 continue;
Jérôme Glisse8315ada2017-09-08 16:12:21 -07002875 }
2876
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002877 if (!(migrate->src[i] & MIGRATE_PFN_MIGRATE) || !newpage) {
2878 if (newpage) {
2879 unlock_page(newpage);
2880 put_page(newpage);
2881 }
2882 newpage = page;
2883 }
2884
2885 remove_migration_ptes(page, newpage, false);
2886 unlock_page(page);
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002887
Jérôme Glissea5430dd2017-09-08 16:12:17 -07002888 if (is_zone_device_page(page))
2889 put_page(page);
2890 else
2891 putback_lru_page(page);
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002892
2893 if (newpage != page) {
2894 unlock_page(newpage);
Jérôme Glissea5430dd2017-09-08 16:12:17 -07002895 if (is_zone_device_page(newpage))
2896 put_page(newpage);
2897 else
2898 putback_lru_page(newpage);
Jérôme Glisse8763cb42017-09-08 16:12:09 -07002899 }
2900 }
2901}
Christoph Hellwiga7d1f222019-08-14 09:59:19 +02002902EXPORT_SYMBOL(migrate_vma_finalize);
Christoph Hellwig9b2ed9c2019-08-14 09:59:28 +02002903#endif /* CONFIG_DEVICE_PRIVATE */
Dave Hansen79c28a42021-09-02 14:59:06 -07002904
Huang Yingdcee9bf52022-01-14 14:08:49 -08002905/*
2906 * node_demotion[] example:
2907 *
2908 * Consider a system with two sockets. Each socket has
2909 * three classes of memory attached: fast, medium and slow.
2910 * Each memory class is placed in its own NUMA node. The
2911 * CPUs are placed in the node with the "fast" memory. The
2912 * 6 NUMA nodes (0-5) might be split among the sockets like
2913 * this:
2914 *
2915 * Socket A: 0, 1, 2
2916 * Socket B: 3, 4, 5
2917 *
2918 * When Node 0 fills up, its memory should be migrated to
2919 * Node 1. When Node 1 fills up, it should be migrated to
2920 * Node 2. The migration path starts on the nodes with the
2921 * processors (since allocations default to this node) and
2922 * fast memory, progress through medium and end with the
2923 * slow memory:
2924 *
2925 * 0 -> 1 -> 2 -> stop
2926 * 3 -> 4 -> 5 -> stop
2927 *
2928 * This is represented in the node_demotion[] like this:
2929 *
2930 * { nr=1, nodes[0]=1 }, // Node 0 migrates to 1
2931 * { nr=1, nodes[0]=2 }, // Node 1 migrates to 2
2932 * { nr=0, nodes[0]=-1 }, // Node 2 does not migrate
2933 * { nr=1, nodes[0]=4 }, // Node 3 migrates to 4
2934 * { nr=1, nodes[0]=5 }, // Node 4 migrates to 5
2935 * { nr=0, nodes[0]=-1 }, // Node 5 does not migrate
2936 *
2937 * Moreover, some systems may have multiple slow memory nodes.
2938 * Suppose a system has one socket with 3 memory nodes: node 0
2939 * is fast memory, and nodes 1 and 2 are both slow memory
2940 * nodes at the same distance from the fast memory node.
2941 * The migration path should then be:
2942 *
2943 * 0 -> 1/2 -> stop
2944 *
2945 * This is represented in the node_demotion[] like this:
2946 * { nr=2, {nodes[0]=1, nodes[1]=2} }, // Node 0 migrates to node 1 and node 2
2947 * { nr=0, nodes[0]=-1, }, // Node 1 does not migrate
2948 * { nr=0, nodes[0]=-1, }, // Node 2 does not migrate
2949 */
2950
2951/*
2952 * Writes to this array occur without locking. Cycles are
2953 * not allowed: Node X demotes to Y which demotes to X...
2954 *
2955 * If multiple reads are performed, a single rcu_read_lock()
2956 * must be held over all reads to ensure that no cycles are
2957 * observed.
2958 */
2959#define DEFAULT_DEMOTION_TARGET_NODES 15
2960
2961#if MAX_NUMNODES < DEFAULT_DEMOTION_TARGET_NODES
2962#define DEMOTION_TARGET_NODES (MAX_NUMNODES - 1)
2963#else
2964#define DEMOTION_TARGET_NODES DEFAULT_DEMOTION_TARGET_NODES
2965#endif
2966
2967struct demotion_nodes {
2968 unsigned short nr;
2969 short nodes[DEMOTION_TARGET_NODES];
2970};
2971
2972static struct demotion_nodes *node_demotion __read_mostly;
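
/*
 * Illustrative only (not in the original source): the two-socket example
 * from the comment above, written out as initializer data.  In reality
 * node_demotion is allocated in migrate_on_reclaim_init() and filled at
 * runtime by __set_migration_target_nodes(); nothing is statically
 * initialized like this.
 */
static const struct demotion_nodes example_two_socket_demotion[6] __maybe_unused = {
	[0] = { .nr = 1, .nodes = { 1 } },		/* Node 0 demotes to node 1 */
	[1] = { .nr = 1, .nodes = { 2 } },		/* Node 1 demotes to node 2 */
	[2] = { .nr = 0, .nodes = { NUMA_NO_NODE } },	/* Node 2 is terminal */
	[3] = { .nr = 1, .nodes = { 4 } },		/* Node 3 demotes to node 4 */
	[4] = { .nr = 1, .nodes = { 5 } },		/* Node 4 demotes to node 5 */
	[5] = { .nr = 0, .nodes = { NUMA_NO_NODE } },	/* Node 5 is terminal */
};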
2973
2974/**
2975 * next_demotion_node() - Get the next node in the demotion path
2976 * @node: The starting node to lookup the next node
2977 *
2978 * Return: node id for next memory node in the demotion path hierarchy
2979 * from @node; NUMA_NO_NODE if @node is terminal. This does not keep
2980 * @node online or guarantee that it *continues* to be the next demotion
2981 * target.
2982 */
2983int next_demotion_node(int node)
2984{
2985 struct demotion_nodes *nd;
2986 unsigned short target_nr, index;
2987 int target;
2988
2989 if (!node_demotion)
2990 return NUMA_NO_NODE;
2991
2992 nd = &node_demotion[node];
2993
2994 /*
2995 * node_demotion[] is updated without excluding this
2996 * function from running. RCU doesn't provide any
2997 * compiler barriers, so the READ_ONCE() is required
2998 * to avoid compiler reordering or read merging.
2999 *
3000 * Make sure to use RCU over entire code blocks if
3001 * node_demotion[] reads need to be consistent.
3002 */
3003 rcu_read_lock();
3004 target_nr = READ_ONCE(nd->nr);
3005
3006 switch (target_nr) {
3007 case 0:
3008 target = NUMA_NO_NODE;
3009 goto out;
3010 case 1:
3011 index = 0;
3012 break;
3013 default:
3014 /*
3015 * If there are multiple target nodes, just select one
3016 * target node randomly.
3017 *
3018 * We could instead use round-robin to select the target
3019 * node, but that would need another variable in
3020 * node_demotion[] to record the last selected target node,
3021 * which may cause cache ping-pong as it changes. Per-cpu
3022 * data could avoid that caching issue, but seems more
3023 * complicated. So selecting a target node randomly seems
3024 * better for now.
3025 */
3026 index = get_random_int() % target_nr;
3027 break;
3028 }
3029
3030 target = READ_ONCE(nd->nodes[index]);
3031
3032out:
3033 rcu_read_unlock();
3034 return target;
3035}
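
/*
 * Illustrative sketch (not part of mm/migrate.c): how a reclaim path can
 * consult the demotion table.  demote_alloc_page() is a hypothetical
 * allocation callback; the real consumer of next_demotion_node() is
 * demote_page_list() in mm/vmscan.c.
 */
static struct page *demote_alloc_page(struct page *page,
				      unsigned long private);	/* hypothetical */

static unsigned int example_demote_page_list(struct list_head *demote_pages,
					     struct pglist_data *pgdat)
{
	int target_nid = next_demotion_node(pgdat->node_id);
	unsigned int nr_succeeded = 0;

	if (list_empty(demote_pages) || target_nid == NUMA_NO_NODE)
		return 0;

	/* Demotion turns reclaim into migration towards slower memory. */
	migrate_pages(demote_pages, demote_alloc_page, NULL,
		      (unsigned long)target_nid, MIGRATE_ASYNC,
		      MR_DEMOTION, &nr_succeeded);

	return nr_succeeded;
}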
3036
Dave Hansen76af6a02021-10-18 15:15:32 -07003037#if defined(CONFIG_HOTPLUG_CPU)
Dave Hansen79c28a42021-09-02 14:59:06 -07003038/* Disable reclaim-based migration. */
3039static void __disable_all_migrate_targets(void)
3040{
Baolin Wangac16ec82022-01-14 14:08:43 -08003041 int node, i;
Dave Hansen79c28a42021-09-02 14:59:06 -07003042
Baolin Wangac16ec82022-01-14 14:08:43 -08003043 if (!node_demotion)
3044 return;
3045
3046 for_each_online_node(node) {
3047 node_demotion[node].nr = 0;
3048 for (i = 0; i < DEMOTION_TARGET_NODES; i++)
3049 node_demotion[node].nodes[i] = NUMA_NO_NODE;
3050 }
Dave Hansen79c28a42021-09-02 14:59:06 -07003051}
3052
3053static void disable_all_migrate_targets(void)
3054{
3055 __disable_all_migrate_targets();
3056
3057 /*
3058 * Ensure that the "disable" is visible across the system.
3059 * Readers will see either a combination of before+disable
3060 * state or disable+after. They will never see before and
3061 * after state together.
3062 *
3063 * The before+after state together might have cycles and
3064 * could cause readers to do things like loop until this
3065 * function finishes. This ensures they can only see a
3066 * single "bad" read and would, for instance, only loop
3067 * once.
3068 */
3069 synchronize_rcu();
3070}
3071
3072/*
3073 * Find an automatic demotion target for 'node'.
3074 * Failing here is OK. It might just indicate
3075 * being at the end of a chain.
3076 */
Baolin Wangac16ec82022-01-14 14:08:43 -08003077static int establish_migrate_target(int node, nodemask_t *used,
3078 int best_distance)
Dave Hansen79c28a42021-09-02 14:59:06 -07003079{
Baolin Wangac16ec82022-01-14 14:08:43 -08003080 int migration_target, index, val;
3081 struct demotion_nodes *nd;
Dave Hansen79c28a42021-09-02 14:59:06 -07003082
Baolin Wangac16ec82022-01-14 14:08:43 -08003083 if (!node_demotion)
Dave Hansen79c28a42021-09-02 14:59:06 -07003084 return NUMA_NO_NODE;
3085
Baolin Wangac16ec82022-01-14 14:08:43 -08003086 nd = &node_demotion[node];
3087
Dave Hansen79c28a42021-09-02 14:59:06 -07003088 migration_target = find_next_best_node(node, used);
3089 if (migration_target == NUMA_NO_NODE)
3090 return NUMA_NO_NODE;
3091
Baolin Wangac16ec82022-01-14 14:08:43 -08003092 /*
3093 * If a migration target has already been set for this node,
3094 * that target is at the best distance from it. Still check
3095 * whether this node can also be demoted to other target
3096 * nodes that are at the same best distance.
3097 */
3098 if (best_distance != -1) {
3099 val = node_distance(node, migration_target);
3100 if (val > best_distance)
Huang Yingfc892132022-03-22 14:46:05 -07003101 goto out_clear;
Baolin Wangac16ec82022-01-14 14:08:43 -08003102 }
3103
3104 index = nd->nr;
3105 if (WARN_ONCE(index >= DEMOTION_TARGET_NODES,
3106 "Exceeds maximum demotion target nodes\n"))
Huang Yingfc892132022-03-22 14:46:05 -07003107 goto out_clear;
Baolin Wangac16ec82022-01-14 14:08:43 -08003108
3109 nd->nodes[index] = migration_target;
3110 nd->nr++;
Dave Hansen79c28a42021-09-02 14:59:06 -07003111
3112 return migration_target;
Huang Yingfc892132022-03-22 14:46:05 -07003113out_clear:
3114 node_clear(migration_target, *used);
3115 return NUMA_NO_NODE;
Dave Hansen79c28a42021-09-02 14:59:06 -07003116}
3117
3118/*
3119 * When memory fills up on a node, memory contents can be
3120 * automatically migrated to another node instead of
3121 * discarded at reclaim.
3122 *
3123 * Establish a "migration path" which will start at nodes
3124 * with CPUs and will follow the priorities used to build the
3125 * page allocator zonelists.
3126 *
3127 * The difference here is that cycles must be avoided. If
3128 * node0 migrates to node1, then neither node1, nor anything
Baolin Wangac16ec82022-01-14 14:08:43 -08003129 * node1 migrates to can migrate to node0. Also, one node can
3130 * migrate to multiple nodes if those target nodes are all at
3131 * the same best distance from the source node.
Dave Hansen79c28a42021-09-02 14:59:06 -07003132 *
3133 * This function can run simultaneously with readers of
3134 * node_demotion[]. However, it can not run simultaneously
3135 * with itself. Exclusion is provided by memory hotplug events
3136 * being single-threaded.
3137 */
3138static void __set_migration_target_nodes(void)
3139{
3140 nodemask_t next_pass = NODE_MASK_NONE;
3141 nodemask_t this_pass = NODE_MASK_NONE;
3142 nodemask_t used_targets = NODE_MASK_NONE;
Baolin Wangac16ec82022-01-14 14:08:43 -08003143 int node, best_distance;
Dave Hansen79c28a42021-09-02 14:59:06 -07003144
3145 /*
3146 * Avoid any oddities like cycles that could occur
3147 * from changes in the topology. This will leave
3148 * a momentary gap when migration is disabled.
3149 */
3150 disable_all_migrate_targets();
3151
3152 /*
3153 * Allocations go close to CPUs, first. Assume that
3154 * the migration path starts at the nodes with CPUs.
3155 */
3156 next_pass = node_states[N_CPU];
3157again:
3158 this_pass = next_pass;
3159 next_pass = NODE_MASK_NONE;
3160 /*
3161 * To avoid cycles in the migration "graph", ensure
3162 * that migration sources are not future targets by
3163 * setting them in 'used_targets'. Do this only
3164 * once per pass so that multiple source nodes can
3165 * share a target node.
3166 *
3167 * 'used_targets' will become unavailable in future
3168 * passes. This limits some opportunities for
3169 * multiple source nodes to share a destination.
3170 */
3171 nodes_or(used_targets, used_targets, this_pass);
Dave Hansen79c28a42021-09-02 14:59:06 -07003172
Baolin Wangac16ec82022-01-14 14:08:43 -08003173 for_each_node_mask(node, this_pass) {
3174 best_distance = -1;
Dave Hansen79c28a42021-09-02 14:59:06 -07003175
3176 /*
Baolin Wangac16ec82022-01-14 14:08:43 -08003177 * Try to set up the migration path for the node. There can be
3178 * multiple target nodes, so loop to find all of them as long
3179 * as they share the same best node distance.
Dave Hansen79c28a42021-09-02 14:59:06 -07003180 */
Baolin Wangac16ec82022-01-14 14:08:43 -08003181 do {
3182 int target_node =
3183 establish_migrate_target(node, &used_targets,
3184 best_distance);
3185
3186 if (target_node == NUMA_NO_NODE)
3187 break;
3188
3189 if (best_distance == -1)
3190 best_distance = node_distance(node, target_node);
3191
3192 /*
3193 * Visit targets from this pass in the next pass.
3194 * Eventually, every node will have been part of
3195 * a pass, and will become set in 'used_targets'.
3196 */
3197 node_set(target_node, next_pass);
3198 } while (1);
Dave Hansen79c28a42021-09-02 14:59:06 -07003199 }
3200 /*
3201 * 'next_pass' contains nodes which became migration
3202 * targets in this pass. Make additional passes until
3203 * no more migrations targets are available.
3204 */
3205 if (!nodes_empty(next_pass))
3206 goto again;
3207}
3208
3209/*
3210 * For callers that do not hold get_online_mems() already.
3211 */
Dave Hansen79c28a42021-09-02 14:59:06 -07003212static void set_migration_target_nodes(void)
3213{
3214 get_online_mems();
3215 __set_migration_target_nodes();
3216 put_online_mems();
3217}
Dave Hansen884a6e52021-09-02 14:59:09 -07003218
3219/*
Dave Hansen884a6e52021-09-02 14:59:09 -07003220 * This leaves migrate-on-reclaim transiently disabled between
3221 * the MEM_GOING_OFFLINE and MEM_OFFLINE events. This runs
3222 * whether reclaim-based migration is enabled or not, which
3223 * ensures that the user can turn reclaim-based migration on or off at
3224 * any time without needing to recalculate migration targets.
3225 *
3226 * These callbacks already hold get_online_mems(). That is why
3227 * __set_migration_target_nodes() can be used as opposed to
3228 * set_migration_target_nodes().
3229 */
3230static int __meminit migrate_on_reclaim_callback(struct notifier_block *self,
Dave Hansen295be912021-10-18 15:15:29 -07003231 unsigned long action, void *_arg)
Dave Hansen884a6e52021-09-02 14:59:09 -07003232{
Dave Hansen295be912021-10-18 15:15:29 -07003233 struct memory_notify *arg = _arg;
3234
3235 /*
3236 * Only update the node migration order when a node is
3237 * changing status, like online->offline. This avoids
3238 * the overhead of synchronize_rcu() in most cases.
3239 */
3240 if (arg->status_change_nid < 0)
3241 return notifier_from_errno(0);
3242
Dave Hansen884a6e52021-09-02 14:59:09 -07003243 switch (action) {
3244 case MEM_GOING_OFFLINE:
3245 /*
3246 * Make sure there are no transient states where
3247 * an offline node is a migration target. This
3248 * will leave migration disabled until the offline
3249 * completes and the MEM_OFFLINE case below runs.
3250 */
3251 disable_all_migrate_targets();
3252 break;
3253 case MEM_OFFLINE:
3254 case MEM_ONLINE:
3255 /*
3256 * Recalculate the target nodes once the node
3257 * reaches its final state (online or offline).
3258 */
3259 __set_migration_target_nodes();
3260 break;
3261 case MEM_CANCEL_OFFLINE:
3262 /*
3263 * MEM_GOING_OFFLINE disabled all the migration
3264 * targets. Reenable them.
3265 */
3266 __set_migration_target_nodes();
3267 break;
3268 case MEM_GOING_ONLINE:
3269 case MEM_CANCEL_ONLINE:
3270 break;
3271 }
3272
3273 return notifier_from_errno(0);
3274}
3275
Dave Hansen76af6a02021-10-18 15:15:32 -07003276/*
3277 * React to hotplug events that might affect the migration targets
3278 * like events that online or offline NUMA nodes.
3279 *
3280 * The ordering is also currently dependent on which nodes have
3281 * CPUs. That means we need CPU on/offline notification too.
3282 */
3283static int migration_online_cpu(unsigned int cpu)
3284{
3285 set_migration_target_nodes();
3286 return 0;
3287}
3288
3289static int migration_offline_cpu(unsigned int cpu)
3290{
3291 set_migration_target_nodes();
3292 return 0;
3293}
3294
Dave Hansen884a6e52021-09-02 14:59:09 -07003295static int __init migrate_on_reclaim_init(void)
3296{
3297 int ret;
3298
Baolin Wangac16ec82022-01-14 14:08:43 -08003299 node_demotion = kmalloc_array(nr_node_ids,
3300 sizeof(struct demotion_nodes),
3301 GFP_KERNEL);
3302 WARN_ON(!node_demotion);
3303
Huang Yinga6a02512021-10-18 15:15:35 -07003304 ret = cpuhp_setup_state_nocalls(CPUHP_MM_DEMOTION_DEAD, "mm/demotion:offline",
3305 NULL, migration_offline_cpu);
Dave Hansen884a6e52021-09-02 14:59:09 -07003306 /*
3307 * In the unlikely case that this fails, the automatic
3308 * migration targets may become suboptimal for nodes
3309 * where N_CPU changes. With such a small impact in a
3310 * rare case, do not bother trying to do anything special.
3311 */
3312 WARN_ON(ret < 0);
Huang Yinga6a02512021-10-18 15:15:35 -07003313 ret = cpuhp_setup_state(CPUHP_AP_MM_DEMOTION_ONLINE, "mm/demotion:online",
3314 migration_online_cpu, NULL);
3315 WARN_ON(ret < 0);
Dave Hansen884a6e52021-09-02 14:59:09 -07003316
3317 hotplug_memory_notifier(migrate_on_reclaim_callback, 100);
3318 return 0;
3319}
3320late_initcall(migrate_on_reclaim_init);
Dave Hansen76af6a02021-10-18 15:15:32 -07003321#endif /* CONFIG_HOTPLUG_CPU */
Yang Shi20f9ba42021-11-05 13:43:35 -07003322
3323bool numa_demotion_enabled = false;
3324
3325#ifdef CONFIG_SYSFS
3326static ssize_t numa_demotion_enabled_show(struct kobject *kobj,
3327 struct kobj_attribute *attr, char *buf)
3328{
3329 return sysfs_emit(buf, "%s\n",
3330 numa_demotion_enabled ? "true" : "false");
3331}
3332
3333static ssize_t numa_demotion_enabled_store(struct kobject *kobj,
3334 struct kobj_attribute *attr,
3335 const char *buf, size_t count)
3336{
3337 if (!strncmp(buf, "true", 4) || !strncmp(buf, "1", 1))
3338 numa_demotion_enabled = true;
3339 else if (!strncmp(buf, "false", 5) || !strncmp(buf, "0", 1))
3340 numa_demotion_enabled = false;
3341 else
3342 return -EINVAL;
3343
3344 return count;
3345}
3346
3347static struct kobj_attribute numa_demotion_enabled_attr =
3348 __ATTR(demotion_enabled, 0644, numa_demotion_enabled_show,
3349 numa_demotion_enabled_store);
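
/*
 * Illustrative note (not in the original source): once numa_init_sysfs()
 * below registers the "numa" kobject under /sys/kernel/mm, this attribute
 * appears as /sys/kernel/mm/numa/demotion_enabled, so for example:
 *
 *	echo true > /sys/kernel/mm/numa/demotion_enabled
 *
 * enables reclaim-based demotion at runtime.
 */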
3350
3351static struct attribute *numa_attrs[] = {
3352 &numa_demotion_enabled_attr.attr,
3353 NULL,
3354};
3355
3356static const struct attribute_group numa_attr_group = {
3357 .attrs = numa_attrs,
3358};
3359
3360static int __init numa_init_sysfs(void)
3361{
3362 int err;
3363 struct kobject *numa_kobj;
3364
3365 numa_kobj = kobject_create_and_add("numa", mm_kobj);
3366 if (!numa_kobj) {
3367 pr_err("failed to create numa kobject\n");
3368 return -ENOMEM;
3369 }
3370 err = sysfs_create_group(numa_kobj, &numa_attr_group);
3371 if (err) {
3372 pr_err("failed to register numa group\n");
3373 goto delete_obj;
3374 }
3375 return 0;
3376
3377delete_obj:
3378 kobject_put(numa_kobj);
3379 return err;
3380}
3381subsys_initcall(numa_init_sysfs);
3382#endif