// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/mm/swap_state.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *
 *  Rewritten to use page cache, (C) 1998 Stephen Tweedie
 */
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/migrate.h>
#include <linux/vmalloc.h>
#include <linux/swap_slots.h>
#include <linux/huge_mm.h>
#include <linux/shmem_fs.h>
#include "internal.h"
#include "swap.h"

/*
 * swapper_space is a fiction, retained to simplify the path through
 * vmscan's shrink_page_list.
 */
static const struct address_space_operations swap_aops = {
        .writepage      = swap_writepage,
        .dirty_folio    = noop_dirty_folio,
#ifdef CONFIG_MIGRATION
        .migrate_folio  = migrate_folio,
#endif
};

struct address_space *swapper_spaces[MAX_SWAPFILES] __read_mostly;
static unsigned int nr_swapper_spaces[MAX_SWAPFILES] __read_mostly;
static bool enable_vma_readahead __read_mostly = true;

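/*
 * Per-VMA readahead state is packed into a single atomic_long_t
 * (vma->swap_readahead_info): the page-aligned fault address, the
 * current readahead window and the recent hit count, encoded and
 * decoded with the SWAP_RA_* macros below.
 */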
#define SWAP_RA_WIN_SHIFT       (PAGE_SHIFT / 2)
#define SWAP_RA_HITS_MASK       ((1UL << SWAP_RA_WIN_SHIFT) - 1)
#define SWAP_RA_HITS_MAX        SWAP_RA_HITS_MASK
#define SWAP_RA_WIN_MASK        (~PAGE_MASK & ~SWAP_RA_HITS_MASK)

#define SWAP_RA_HITS(v)         ((v) & SWAP_RA_HITS_MASK)
#define SWAP_RA_WIN(v)          (((v) & SWAP_RA_WIN_MASK) >> SWAP_RA_WIN_SHIFT)
#define SWAP_RA_ADDR(v)         ((v) & PAGE_MASK)

#define SWAP_RA_VAL(addr, win, hits)                            \
        (((addr) & PAGE_MASK) |                                 \
         (((win) << SWAP_RA_WIN_SHIFT) & SWAP_RA_WIN_MASK) |    \
         ((hits) & SWAP_RA_HITS_MASK))

/* Initial readahead hits is 4 to start up with a small window */
#define GET_SWAP_RA_VAL(vma)                                    \
        (atomic_long_read(&(vma)->swap_readahead_info) ? : 4)

static atomic_t swapin_readahead_hits = ATOMIC_INIT(4);

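/* Dump swap-cache usage and free/total swap counters to the kernel log. */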
void show_swap_cache_info(void)
{
        printk("%lu pages in swap cache\n", total_swapcache_pages());
        printk("Free swap  = %ldkB\n",
                get_nr_swap_pages() << (PAGE_SHIFT - 10));
        printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
}

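/*
 * Return the shadow (workingset) entry stored at @entry's slot in the
 * swap cache, or NULL if the slot is empty or holds a real page.
 */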
void *get_shadow_from_swap_cache(swp_entry_t entry)
{
        struct address_space *address_space = swap_address_space(entry);
        pgoff_t idx = swp_offset(entry);
        struct page *page;

        page = xa_load(&address_space->i_pages, idx);
        if (xa_is_value(page))
                return page;
        return NULL;
}

/*
 * add_to_swap_cache resembles filemap_add_folio on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
int add_to_swap_cache(struct folio *folio, swp_entry_t entry,
                        gfp_t gfp, void **shadowp)
{
        struct address_space *address_space = swap_address_space(entry);
        pgoff_t idx = swp_offset(entry);
        XA_STATE_ORDER(xas, &address_space->i_pages, idx, folio_order(folio));
        unsigned long i, nr = folio_nr_pages(folio);
        void *old;

        xas_set_update(&xas, workingset_update_node);

        VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
        VM_BUG_ON_FOLIO(folio_test_swapcache(folio), folio);
        VM_BUG_ON_FOLIO(!folio_test_swapbacked(folio), folio);

        folio_ref_add(folio, nr);
        folio_set_swapcache(folio);

        do {
                xas_lock_irq(&xas);
                xas_create_range(&xas);
                if (xas_error(&xas))
                        goto unlock;
                for (i = 0; i < nr; i++) {
                        VM_BUG_ON_FOLIO(xas.xa_index != idx + i, folio);
                        old = xas_load(&xas);
                        if (xa_is_value(old)) {
                                if (shadowp)
                                        *shadowp = old;
                        }
                        set_page_private(folio_page(folio, i), entry.val + i);
                        xas_store(&xas, folio);
                        xas_next(&xas);
                }
                address_space->nrpages += nr;
                __node_stat_mod_folio(folio, NR_FILE_PAGES, nr);
                __lruvec_stat_mod_folio(folio, NR_SWAPCACHE, nr);
unlock:
                xas_unlock_irq(&xas);
        } while (xas_nomem(&xas, gfp));

        if (!xas_error(&xas))
                return 0;

        folio_clear_swapcache(folio);
        folio_ref_sub(folio, nr);
        return xas_error(&xas);
}

/*
 * This must be called only on folios that have
 * been verified to be in the swap cache.
 */
void __delete_from_swap_cache(struct folio *folio,
                        swp_entry_t entry, void *shadow)
{
        struct address_space *address_space = swap_address_space(entry);
        int i;
        long nr = folio_nr_pages(folio);
        pgoff_t idx = swp_offset(entry);
        XA_STATE(xas, &address_space->i_pages, idx);

        xas_set_update(&xas, workingset_update_node);

        VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
        VM_BUG_ON_FOLIO(!folio_test_swapcache(folio), folio);
        VM_BUG_ON_FOLIO(folio_test_writeback(folio), folio);

        for (i = 0; i < nr; i++) {
                void *entry = xas_store(&xas, shadow);
                VM_BUG_ON_PAGE(entry != folio, entry);
                set_page_private(folio_page(folio, i), 0);
                xas_next(&xas);
        }
        folio_clear_swapcache(folio);
        address_space->nrpages -= nr;
        __node_stat_mod_folio(folio, NR_FILE_PAGES, -nr);
        __lruvec_stat_mod_folio(folio, NR_SWAPCACHE, -nr);
}

/**
 * add_to_swap - allocate swap space for a folio
 * @folio: folio we want to move to swap
 *
 * Allocate swap space for the folio and add the folio to the
 * swap cache.
 *
 * Context: Caller needs to hold the folio lock.
 * Return: Whether the folio was added to the swap cache.
 */
bool add_to_swap(struct folio *folio)
{
        swp_entry_t entry;
        int err;

        VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
        VM_BUG_ON_FOLIO(!folio_test_uptodate(folio), folio);

        entry = folio_alloc_swap(folio);
        if (!entry.val)
                return false;

        /*
         * XArray node allocations from PF_MEMALLOC contexts could
         * completely exhaust the page allocator. __GFP_NOMEMALLOC
         * stops emergency reserves from being allocated.
         *
         * TODO: this could cause a theoretical memory reclaim
         * deadlock in the swap out path.
         */
        /*
         * Add it to the swap cache.
         */
        err = add_to_swap_cache(folio, entry,
                        __GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN, NULL);
        if (err)
                /*
                 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
                 * clear SWAP_HAS_CACHE flag.
                 */
                goto fail;
        /*
         * Normally the folio will be dirtied in unmap because its
         * pte should be dirty. A special case is MADV_FREE pages: the
         * pte may have its dirty bit cleared while the folio's
         * SwapBacked flag is still set, because clearing the dirty bit
         * and the SwapBacked flag is not done under any lock. For such
         * a folio, unmap will not set the dirty bit, so folio reclaim
         * will not write the folio out. This can cause data corruption
         * when the folio is swapped in later. Always setting the dirty
         * flag for the folio solves the problem.
         */
        folio_mark_dirty(folio);

        return true;

fail:
        put_swap_folio(folio, entry);
        return false;
}

/*
 * This must be called only on folios that have
 * been verified to be in the swap cache and locked.
 * It will never put the folio into the free list,
 * the caller has a reference on the folio.
 */
void delete_from_swap_cache(struct folio *folio)
{
        swp_entry_t entry = folio_swap_entry(folio);
        struct address_space *address_space = swap_address_space(entry);

        xa_lock_irq(&address_space->i_pages);
        __delete_from_swap_cache(folio, entry, NULL);
        xa_unlock_irq(&address_space->i_pages);

        put_swap_folio(folio, entry);
        folio_ref_sub(folio, folio_nr_pages(folio));
}

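/*
 * Remove any shadow entries left in the swap cache for swap offsets
 * [begin, end] of swap device @type. The swap cache is split into one
 * address_space per SWAP_ADDRESS_SPACE_PAGES chunk, so walk it chunk
 * by chunk.
 */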
void clear_shadow_from_swap_cache(int type, unsigned long begin,
                                unsigned long end)
{
        unsigned long curr = begin;
        void *old;

        for (;;) {
                swp_entry_t entry = swp_entry(type, curr);
                struct address_space *address_space = swap_address_space(entry);
                XA_STATE(xas, &address_space->i_pages, curr);

                xas_set_update(&xas, workingset_update_node);

                xa_lock_irq(&address_space->i_pages);
                xas_for_each(&xas, old, end) {
                        if (!xa_is_value(old))
                                continue;
                        xas_store(&xas, NULL);
                }
                xa_unlock_irq(&address_space->i_pages);

                /* search the next swapcache until we meet end */
                curr >>= SWAP_ADDRESS_SPACE_SHIFT;
                curr++;
                curr <<= SWAP_ADDRESS_SPACE_SHIFT;
                if (curr > end)
                        break;
        }
}

/*
 * If we are the only user, then try to free up the swap cache.
 *
 * It's ok to check the swapcache flag without the folio lock
 * here because we are going to recheck again inside
 * folio_free_swap() _with_ the lock.
 * - Marcelo
 */
void free_swap_cache(struct page *page)
{
        struct folio *folio = page_folio(page);

        if (folio_test_swapcache(folio) && !folio_mapped(folio) &&
            folio_trylock(folio)) {
                folio_free_swap(folio);
                folio_unlock(folio);
        }
}

/*
 * Perform a free_page(), also freeing any swap cache associated with
 * this page if it is the last user of the page.
 */
void free_page_and_swap_cache(struct page *page)
{
        free_swap_cache(page);
        if (!is_huge_zero_page(page))
                put_page(page);
}

/*
 * Passed an array of pages, drop them all from swapcache and then release
 * them. They are removed from the LRU and freed if this is their last use.
 */
void free_pages_and_swap_cache(struct encoded_page **pages, int nr)
{
        lru_add_drain();
        for (int i = 0; i < nr; i++)
                free_swap_cache(encoded_page_ptr(pages[i]));
        release_pages(pages, nr);
}

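/*
 * VMA-based readahead is used only when it is enabled via sysfs and
 * no rotational (spinning) swap device is in use.
 */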
static inline bool swap_use_vma_readahead(void)
{
        return READ_ONCE(enable_vma_readahead) && !atomic_read(&nr_rotate_swap);
}

/*
 * Lookup a swap entry in the swap cache. A found folio will be returned
 * unlocked and with its refcount incremented - we rely on the kernel
 * lock getting page table operations atomic even if we drop the folio
 * lock before returning.
 *
 * Caller must lock the swap device or hold a reference to keep it valid.
 */
struct folio *swap_cache_get_folio(swp_entry_t entry,
                struct vm_area_struct *vma, unsigned long addr)
{
        struct folio *folio;

        folio = filemap_get_folio(swap_address_space(entry), swp_offset(entry));
        if (folio) {
                bool vma_ra = swap_use_vma_readahead();
                bool readahead;

                /*
                 * At the moment, we don't support PG_readahead for anon THP
                 * so let's bail out rather than confusing the readahead stat.
                 */
                if (unlikely(folio_test_large(folio)))
                        return folio;

                readahead = folio_test_clear_readahead(folio);
                if (vma && vma_ra) {
                        unsigned long ra_val;
                        int win, hits;

                        ra_val = GET_SWAP_RA_VAL(vma);
                        win = SWAP_RA_WIN(ra_val);
                        hits = SWAP_RA_HITS(ra_val);
                        if (readahead)
                                hits = min_t(int, hits + 1, SWAP_RA_HITS_MAX);
                        atomic_long_set(&vma->swap_readahead_info,
                                        SWAP_RA_VAL(addr, win, hits));
                }

                if (readahead) {
                        count_vm_event(SWAP_RA_HIT);
                        if (!vma || !vma_ra)
                                atomic_inc(&swapin_readahead_hits);
                }
        }

        return folio;
}

/**
 * filemap_get_incore_folio - Find and get a folio from the page or swap caches.
 * @mapping: The address_space to search.
 * @index: The page cache index.
 *
 * This differs from filemap_get_folio() in that it will also look for the
 * folio in the swap cache.
 *
 * Return: The found folio or %NULL.
 */
struct folio *filemap_get_incore_folio(struct address_space *mapping,
                pgoff_t index)
{
        swp_entry_t swp;
        struct swap_info_struct *si;
        struct folio *folio = __filemap_get_folio(mapping, index, FGP_ENTRY, 0);

        if (!xa_is_value(folio))
                goto out;
        if (!shmem_mapping(mapping))
                return NULL;

        swp = radix_to_swp_entry(folio);
        /* There might be swapin error entries in shmem mapping. */
        if (non_swap_entry(swp))
                return NULL;
        /* Prevent swapoff from happening to us */
        si = get_swap_device(swp);
        if (!si)
                return NULL;
        index = swp_offset(swp);
        folio = filemap_get_folio(swap_address_space(swp), index);
        put_swap_device(si);
out:
        return folio;
}

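/*
 * Look up @entry in the swap cache; if it is not present, allocate a
 * new folio, claim the swap slot with swapcache_prepare(), insert the
 * folio into the swap cache and the LRU, and return it locked with
 * *new_page_allocated set so the caller can start the read. Returns
 * the already-cached page (unlocked) when someone else added it first,
 * or NULL if the entry is no longer in use or allocation failed.
 */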
struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
                        struct vm_area_struct *vma, unsigned long addr,
                        bool *new_page_allocated)
{
        struct swap_info_struct *si;
        struct folio *folio;
        void *shadow = NULL;

        *new_page_allocated = false;

        for (;;) {
                int err;
                /*
                 * First check the swap cache. Since this is normally
                 * called after swap_cache_get_folio() failed, re-calling
                 * that would confuse statistics.
                 */
                si = get_swap_device(entry);
                if (!si)
                        return NULL;
                folio = filemap_get_folio(swap_address_space(entry),
                                                swp_offset(entry));
                put_swap_device(si);
                if (folio)
                        return folio_file_page(folio, swp_offset(entry));

                /*
                 * Just skip read ahead for unused swap slot.
                 * During swap_off when swap_slot_cache is disabled,
                 * we have to handle the race between putting
                 * swap entry in swap cache and marking swap slot
                 * as SWAP_HAS_CACHE. That's done in later part of code or
                 * else swap_off will be aborted if we return NULL.
                 */
                if (!__swp_swapcount(entry) && swap_slot_cache_enabled)
                        return NULL;

                /*
                 * Get a new page to read into from swap. Allocate it now,
                 * before marking swap_map SWAP_HAS_CACHE, when -EEXIST will
                 * cause any racers to loop around until we add it to cache.
                 */
                folio = vma_alloc_folio(gfp_mask, 0, vma, addr, false);
                if (!folio)
                        return NULL;

                /*
                 * Swap entry may have been freed since our caller observed it.
                 */
                err = swapcache_prepare(entry);
                if (!err)
                        break;

                folio_put(folio);
                if (err != -EEXIST)
                        return NULL;

                /*
                 * We might race against __delete_from_swap_cache(), and
                 * stumble across a swap_map entry whose SWAP_HAS_CACHE
                 * has not yet been cleared. Or race against another
                 * __read_swap_cache_async(), which has set SWAP_HAS_CACHE
                 * in swap_map, but not yet added its page to swap cache.
                 */
                schedule_timeout_uninterruptible(1);
        }

        /*
         * The swap entry is ours to swap in. Prepare the new page.
         */

        __folio_set_locked(folio);
        __folio_set_swapbacked(folio);

        if (mem_cgroup_swapin_charge_folio(folio, NULL, gfp_mask, entry))
                goto fail_unlock;

        /* May fail (-ENOMEM) if XArray node allocation failed. */
        if (add_to_swap_cache(folio, entry, gfp_mask & GFP_RECLAIM_MASK, &shadow))
                goto fail_unlock;

        mem_cgroup_swapin_uncharge_swap(entry);

        if (shadow)
                workingset_refault(folio, shadow);

        /* Caller will initiate read into locked folio */
        folio_add_lru(folio);
        *new_page_allocated = true;
        return &folio->page;

fail_unlock:
        put_swap_folio(folio, entry);
        folio_unlock(folio);
        folio_put(folio);
        return NULL;
}

/*
 * Locate a page of swap in physical memory, reserving swap cache space
 * and reading the disk if it is not already cached.
 * A failure return means that either the page allocation failed or that
 * the swap entry is no longer in use.
 */
struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
                                   struct vm_area_struct *vma,
                                   unsigned long addr, bool do_poll,
                                   struct swap_iocb **plug)
{
        bool page_was_allocated;
        struct page *retpage = __read_swap_cache_async(entry, gfp_mask,
                        vma, addr, &page_was_allocated);

        if (page_was_allocated)
                swap_readpage(retpage, do_poll, plug);

        return retpage;
}

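/*
 * Compute the next readahead window from the recent hit count and the
 * previous window, capped at @max_pages.
 */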
static unsigned int __swapin_nr_pages(unsigned long prev_offset,
                                      unsigned long offset,
                                      int hits,
                                      int max_pages,
                                      int prev_win)
{
        unsigned int pages, last_ra;

        /*
         * This heuristic has been found to work well on both sequential and
         * random loads, swapping to hard disk or to SSD: please don't ask
         * what the "+ 2" means, it just happens to work well, that's all.
         */
        pages = hits + 2;
        if (pages == 2) {
                /*
                 * We can have no readahead hits to judge by: but must not get
                 * stuck here forever, so check for an adjacent offset instead
                 * (and don't even bother to check whether swap type is same).
                 */
                if (offset != prev_offset + 1 && offset != prev_offset - 1)
                        pages = 1;
        } else {
                unsigned int roundup = 4;
                while (roundup < pages)
                        roundup <<= 1;
                pages = roundup;
        }

        if (pages > max_pages)
                pages = max_pages;

        /* Don't shrink readahead too fast */
        last_ra = prev_win / 2;
        if (pages < last_ra)
                pages = last_ra;

        return pages;
}

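/*
 * Size the cluster readahead window from the global hit counter,
 * bounded by (1 << page_cluster).
 */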
static unsigned long swapin_nr_pages(unsigned long offset)
{
        static unsigned long prev_offset;
        unsigned int hits, pages, max_pages;
        static atomic_t last_readahead_pages;

        max_pages = 1 << READ_ONCE(page_cluster);
        if (max_pages <= 1)
                return 1;

        hits = atomic_xchg(&swapin_readahead_hits, 0);
        pages = __swapin_nr_pages(READ_ONCE(prev_offset), offset, hits,
                                  max_pages,
                                  atomic_read(&last_readahead_pages));
        if (!hits)
                WRITE_ONCE(prev_offset, offset);
        atomic_set(&last_readahead_pages, pages);

        return pages;
}

/**
 * swap_cluster_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vmf: fault information
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read an aligned block of
 * (1 << page_cluster) entries in the swap area. This method is chosen
 * because it doesn't cost us any seek time. We also make sure to queue
 * the 'original' request together with the readahead ones...
 *
 * This has been extended to use the NUMA policies from the mm triggering
 * the readahead.
 *
 * Caller must hold read mmap_lock if vmf->vma is not NULL.
 */
struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
                                struct vm_fault *vmf)
{
        struct page *page;
        unsigned long entry_offset = swp_offset(entry);
        unsigned long offset = entry_offset;
        unsigned long start_offset, end_offset;
        unsigned long mask;
        struct swap_info_struct *si = swp_swap_info(entry);
        struct blk_plug plug;
        struct swap_iocb *splug = NULL;
        bool do_poll = true, page_allocated;
        struct vm_area_struct *vma = vmf->vma;
        unsigned long addr = vmf->address;

        mask = swapin_nr_pages(offset) - 1;
        if (!mask)
                goto skip;

        do_poll = false;
        /* Read a page_cluster sized and aligned cluster around offset. */
        start_offset = offset & ~mask;
        end_offset = offset | mask;
        if (!start_offset)      /* First page is swap header. */
                start_offset++;
        if (end_offset >= si->max)
                end_offset = si->max - 1;

        blk_start_plug(&plug);
        for (offset = start_offset; offset <= end_offset ; offset++) {
                /* Ok, do the async read-ahead now */
                page = __read_swap_cache_async(
                        swp_entry(swp_type(entry), offset),
                        gfp_mask, vma, addr, &page_allocated);
                if (!page)
                        continue;
                if (page_allocated) {
                        swap_readpage(page, false, &splug);
                        if (offset != entry_offset) {
                                SetPageReadahead(page);
                                count_vm_event(SWAP_RA);
                        }
                }
                put_page(page);
        }
        blk_finish_plug(&plug);
        swap_read_unplug(splug);

        lru_add_drain();        /* Push any new pages onto the LRU now */
skip:
        /* The page was likely read above, so no need for plugging here */
        return read_swap_cache_async(entry, gfp_mask, vma, addr, do_poll, NULL);
}

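/*
 * Set up the swap cache for swap device @type: one address_space per
 * SWAP_ADDRESS_SPACE_PAGES chunk of the device, so that xarray lock
 * contention is spread across the device.
 */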
int init_swap_address_space(unsigned int type, unsigned long nr_pages)
{
        struct address_space *spaces, *space;
        unsigned int i, nr;

        nr = DIV_ROUND_UP(nr_pages, SWAP_ADDRESS_SPACE_PAGES);
        spaces = kvcalloc(nr, sizeof(struct address_space), GFP_KERNEL);
        if (!spaces)
                return -ENOMEM;
        for (i = 0; i < nr; i++) {
                space = spaces + i;
                xa_init_flags(&space->i_pages, XA_FLAGS_LOCK_IRQ);
                atomic_set(&space->i_mmap_writable, 0);
                space->a_ops = &swap_aops;
                /* swap cache doesn't use writeback related tags */
                mapping_set_no_writeback_tags(space);
        }
        nr_swapper_spaces[type] = nr;
        swapper_spaces[type] = spaces;

        return 0;
}

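/* Tear down the (now empty) swap cache address spaces for @type. */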
void exit_swap_address_space(unsigned int type)
{
        int i;
        struct address_space *spaces = swapper_spaces[type];

        for (i = 0; i < nr_swapper_spaces[type]; i++)
                VM_WARN_ON_ONCE(!mapping_empty(&spaces[i]));
        kvfree(spaces);
        nr_swapper_spaces[type] = 0;
        swapper_spaces[type] = NULL;
}

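/*
 * Work out the VMA readahead window around the faulting address and
 * take a snapshot of the PTEs covering it, since the page table may be
 * unmapped while the readahead pages are read in.
 */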
static void swap_ra_info(struct vm_fault *vmf,
                         struct vma_swap_readahead *ra_info)
{
        struct vm_area_struct *vma = vmf->vma;
        unsigned long ra_val;
        unsigned long faddr, pfn, fpfn, lpfn, rpfn;
        unsigned long start, end;
        pte_t *pte, *orig_pte;
        unsigned int max_win, hits, prev_win, win;
#ifndef CONFIG_64BIT
        pte_t *tpte;
#endif

        max_win = 1 << min_t(unsigned int, READ_ONCE(page_cluster),
                             SWAP_RA_ORDER_CEILING);
        if (max_win == 1) {
                ra_info->win = 1;
                return;
        }

        faddr = vmf->address;
        fpfn = PFN_DOWN(faddr);
        ra_val = GET_SWAP_RA_VAL(vma);
        pfn = PFN_DOWN(SWAP_RA_ADDR(ra_val));
        prev_win = SWAP_RA_WIN(ra_val);
        hits = SWAP_RA_HITS(ra_val);
        ra_info->win = win = __swapin_nr_pages(pfn, fpfn, hits,
                                               max_win, prev_win);
        atomic_long_set(&vma->swap_readahead_info,
                        SWAP_RA_VAL(faddr, win, 0));

        if (win == 1)
                return;

        /* Copy the PTEs because the page table may be unmapped */
        orig_pte = pte = pte_offset_map(vmf->pmd, faddr);
        if (fpfn == pfn + 1) {
                lpfn = fpfn;
                rpfn = fpfn + win;
        } else if (pfn == fpfn + 1) {
                lpfn = fpfn - win + 1;
                rpfn = fpfn + 1;
        } else {
                unsigned int left = (win - 1) / 2;

                lpfn = fpfn - left;
                rpfn = fpfn + win - left;
        }
        start = max3(lpfn, PFN_DOWN(vma->vm_start),
                     PFN_DOWN(faddr & PMD_MASK));
        end = min3(rpfn, PFN_DOWN(vma->vm_end),
                   PFN_DOWN((faddr & PMD_MASK) + PMD_SIZE));

        ra_info->nr_pte = end - start;
        ra_info->offset = fpfn - start;
        pte -= ra_info->offset;
#ifdef CONFIG_64BIT
        ra_info->ptes = pte;
#else
        tpte = ra_info->ptes;
        for (pfn = start; pfn != end; pfn++)
                *tpte++ = *pte++;
#endif
        pte_unmap(orig_pte);
}

/**
 * swap_vma_readahead - swap in pages in hope we need them soon
 * @fentry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vmf: fault information
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read in a few pages whose
 * virtual addresses are around the fault address in the same vma.
 *
 * Caller must hold read mmap_lock if vmf->vma is not NULL.
 *
 */
static struct page *swap_vma_readahead(swp_entry_t fentry, gfp_t gfp_mask,
                                       struct vm_fault *vmf)
{
        struct blk_plug plug;
        struct swap_iocb *splug = NULL;
        struct vm_area_struct *vma = vmf->vma;
        struct page *page;
        pte_t *pte, pentry;
        swp_entry_t entry;
        unsigned int i;
        bool page_allocated;
        struct vma_swap_readahead ra_info = {
                .win = 1,
        };

        swap_ra_info(vmf, &ra_info);
        if (ra_info.win == 1)
                goto skip;

        blk_start_plug(&plug);
        for (i = 0, pte = ra_info.ptes; i < ra_info.nr_pte;
             i++, pte++) {
                pentry = *pte;
                if (!is_swap_pte(pentry))
                        continue;
                entry = pte_to_swp_entry(pentry);
                if (unlikely(non_swap_entry(entry)))
                        continue;
                page = __read_swap_cache_async(entry, gfp_mask, vma,
                                               vmf->address, &page_allocated);
                if (!page)
                        continue;
                if (page_allocated) {
                        swap_readpage(page, false, &splug);
                        if (i != ra_info.offset) {
                                SetPageReadahead(page);
                                count_vm_event(SWAP_RA);
                        }
                }
                put_page(page);
        }
        blk_finish_plug(&plug);
        swap_read_unplug(splug);
        lru_add_drain();
skip:
        /* The page was likely read above, so no need for plugging here */
        return read_swap_cache_async(fentry, gfp_mask, vma, vmf->address,
                                     ra_info.win == 1, NULL);
}

/**
 * swapin_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vmf: fault information
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * It's the main entry function for swap readahead. Depending on the
 * configuration, it reads ahead blocks using either cluster-based
 * (i.e., physical disk based) or VMA-based (i.e., virtual addresses
 * around the faulting address) readahead.
 */
struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
                                struct vm_fault *vmf)
{
        return swap_use_vma_readahead() ?
                        swap_vma_readahead(entry, gfp_mask, vmf) :
                        swap_cluster_readahead(entry, gfp_mask, vmf);
}

#ifdef CONFIG_SYSFS
static ssize_t vma_ra_enabled_show(struct kobject *kobj,
                                     struct kobj_attribute *attr, char *buf)
{
        return sysfs_emit(buf, "%s\n",
                          enable_vma_readahead ? "true" : "false");
}
static ssize_t vma_ra_enabled_store(struct kobject *kobj,
                                      struct kobj_attribute *attr,
                                      const char *buf, size_t count)
{
        ssize_t ret;

        ret = kstrtobool(buf, &enable_vma_readahead);
        if (ret)
                return ret;

        return count;
}
static struct kobj_attribute vma_ra_enabled_attr = __ATTR_RW(vma_ra_enabled);

static struct attribute *swap_attrs[] = {
        &vma_ra_enabled_attr.attr,
        NULL,
};

static const struct attribute_group swap_attr_group = {
        .attrs = swap_attrs,
};

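/*
 * Register /sys/kernel/mm/swap/ with the vma_ra_enabled knob that
 * toggles VMA-based swap readahead.
 */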
static int __init swap_init_sysfs(void)
{
        int err;
        struct kobject *swap_kobj;

        swap_kobj = kobject_create_and_add("swap", mm_kobj);
        if (!swap_kobj) {
                pr_err("failed to create swap kobject\n");
                return -ENOMEM;
        }
        err = sysfs_create_group(swap_kobj, &swap_attr_group);
        if (err) {
                pr_err("failed to register swap group\n");
                goto delete_obj;
        }
        return 0;

delete_obj:
        kobject_put(swap_kobj);
        return err;
}
subsys_initcall(swap_init_sysfs);
#endif