blob: fc2f6ade7f80b399707bcc67c44f813aea0b846d [file] [log] [blame]
NeilBrown014bb1d2022-05-09 18:20:47 -07001/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef _MM_SWAP_H
3#define _MM_SWAP_H
4
Hugh Dickinsddc1a5c2023-10-19 13:39:08 -07005struct mempolicy;
6
NeilBrown014bb1d2022-05-09 18:20:47 -07007#ifdef CONFIG_SWAP
8#include <linux/blk_types.h> /* for bio_end_io_t */
9
10/* linux/mm/page_io.c */
NeilBrowne1209d32022-05-09 18:20:48 -070011int sio_pool_init(void);
NeilBrown5169b842022-05-09 18:20:49 -070012struct swap_iocb;
Matthew Wilcox (Oracle)c9bdf762023-12-13 21:58:39 +000013void swap_read_folio(struct folio *folio, bool do_poll,
14 struct swap_iocb **plug);
NeilBrown5169b842022-05-09 18:20:49 -070015void __swap_read_unplug(struct swap_iocb *plug);
static inline void swap_read_unplug(struct swap_iocb *plug)
{
	/*
	 * A plug is only allocated when reads were actually batched;
	 * skip the out-of-line call in the common (no-plug) case.
	 */
	if (unlikely(plug))
		__swap_read_unplug(plug);
}
NeilBrown22826792022-05-09 18:20:49 -070021void swap_write_unplug(struct swap_iocb *sio);
NeilBrown014bb1d2022-05-09 18:20:47 -070022int swap_writepage(struct page *page, struct writeback_control *wbc);
Matthew Wilcox (Oracle)b99b4e02023-12-13 21:58:31 +000023void __swap_writepage(struct folio *folio, struct writeback_control *wbc);
NeilBrown014bb1d2022-05-09 18:20:47 -070024
25/* linux/mm/swap_state.c */
26/* One swap address space for each 64M swap space */
27#define SWAP_ADDRESS_SPACE_SHIFT 14
28#define SWAP_ADDRESS_SPACE_PAGES (1 << SWAP_ADDRESS_SPACE_SHIFT)
29extern struct address_space *swapper_spaces[];
30#define swap_address_space(entry) \
31 (&swapper_spaces[swp_type(entry)][swp_offset(entry) \
32 >> SWAP_ADDRESS_SPACE_SHIFT])
33
34void show_swap_cache_info(void);
Matthew Wilcox (Oracle)09c02e52022-05-12 20:23:02 -070035bool add_to_swap(struct folio *folio);
NeilBrown014bb1d2022-05-09 18:20:47 -070036void *get_shadow_from_swap_cache(swp_entry_t entry);
Matthew Wilcox (Oracle)a4c366f2022-09-02 20:46:08 +010037int add_to_swap_cache(struct folio *folio, swp_entry_t entry,
NeilBrown014bb1d2022-05-09 18:20:47 -070038 gfp_t gfp, void **shadowp);
Matthew Wilcox (Oracle)ceff9d32022-06-17 18:50:20 +010039void __delete_from_swap_cache(struct folio *folio,
NeilBrown014bb1d2022-05-09 18:20:47 -070040 swp_entry_t entry, void *shadow);
Matthew Wilcox (Oracle)75fa68a2022-06-17 18:50:19 +010041void delete_from_swap_cache(struct folio *folio);
NeilBrown014bb1d2022-05-09 18:20:47 -070042void clear_shadow_from_swap_cache(int type, unsigned long begin,
43 unsigned long end);
Kairui Song13ddaf22024-02-07 02:25:59 +080044void swapcache_clear(struct swap_info_struct *si, swp_entry_t entry);
Matthew Wilcox (Oracle)c9edc242022-09-02 20:46:15 +010045struct folio *swap_cache_get_folio(swp_entry_t entry,
46 struct vm_area_struct *vma, unsigned long addr);
Matthew Wilcox (Oracle)524984f2022-10-19 19:33:31 +010047struct folio *filemap_get_incore_folio(struct address_space *mapping,
48 pgoff_t index);
NeilBrown014bb1d2022-05-09 18:20:47 -070049
Matthew Wilcox (Oracle)6e034922023-12-13 21:58:41 +000050struct folio *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
51 struct vm_area_struct *vma, unsigned long addr,
52 struct swap_iocb **plug);
Matthew Wilcox (Oracle)96c7b0b2023-12-13 21:58:30 +000053struct folio *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_flags,
54 struct mempolicy *mpol, pgoff_t ilx, bool *new_page_allocated,
55 bool skip_if_exists);
Matthew Wilcox (Oracle)a4575c42023-12-13 21:58:42 +000056struct folio *swap_cluster_readahead(swp_entry_t entry, gfp_t flag,
57 struct mempolicy *mpol, pgoff_t ilx);
NeilBrown014bb1d2022-05-09 18:20:47 -070058struct page *swapin_readahead(swp_entry_t entry, gfp_t flag,
59 struct vm_fault *vmf);
60
Matthew Wilcox (Oracle)b98c3592022-06-17 18:50:18 +010061static inline unsigned int folio_swap_flags(struct folio *folio)
NeilBrownd791ea62022-05-09 18:20:48 -070062{
Matthew Wilcox (Oracle)69fe7d62023-12-13 21:58:40 +000063 return swp_swap_info(folio->swap)->flags;
NeilBrownd791ea62022-05-09 18:20:48 -070064}
NeilBrown014bb1d2022-05-09 18:20:47 -070065#else /* CONFIG_SWAP */
NeilBrown5169b842022-05-09 18:20:49 -070066struct swap_iocb;
Matthew Wilcox (Oracle)c9bdf762023-12-13 21:58:39 +000067static inline void swap_read_folio(struct folio *folio, bool do_poll,
Christoph Hellwiga8c14082023-01-25 14:34:31 +010068 struct swap_iocb **plug)
NeilBrown014bb1d2022-05-09 18:20:47 -070069{
NeilBrown014bb1d2022-05-09 18:20:47 -070070}
/* CONFIG_SWAP=n: no plugged swap writes exist to flush. */
static inline void swap_write_unplug(struct swap_iocb *sio)
{
}

75static inline struct address_space *swap_address_space(swp_entry_t entry)
76{
77 return NULL;
78}
79
/* CONFIG_SWAP=n: nothing to report. */
static inline void show_swap_cache_info(void)
{
}

Matthew Wilcox (Oracle)a4575c42023-12-13 21:58:42 +000084static inline struct folio *swap_cluster_readahead(swp_entry_t entry,
Hugh Dickinsddc1a5c2023-10-19 13:39:08 -070085 gfp_t gfp_mask, struct mempolicy *mpol, pgoff_t ilx)
NeilBrown014bb1d2022-05-09 18:20:47 -070086{
87 return NULL;
88}
89
90static inline struct page *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask,
91 struct vm_fault *vmf)
92{
93 return NULL;
94}
95
/* CONFIG_SWAP=n: report success without doing any I/O. */
static inline int swap_writepage(struct page *p, struct writeback_control *wbc)
{
	return 0;
}

Kairui Song13ddaf22024-02-07 02:25:59 +0800101static inline void swapcache_clear(struct swap_info_struct *si, swp_entry_t entry)
102{
103}
104
Matthew Wilcox (Oracle)c9edc242022-09-02 20:46:15 +0100105static inline struct folio *swap_cache_get_folio(swp_entry_t entry,
106 struct vm_area_struct *vma, unsigned long addr)
107{
108 return NULL;
109}
110
NeilBrown014bb1d2022-05-09 18:20:47 -0700111static inline
Matthew Wilcox (Oracle)524984f2022-10-19 19:33:31 +0100112struct folio *filemap_get_incore_folio(struct address_space *mapping,
113 pgoff_t index)
NeilBrown014bb1d2022-05-09 18:20:47 -0700114{
Matthew Wilcox (Oracle)524984f2022-10-19 19:33:31 +0100115 return filemap_get_folio(mapping, index);
NeilBrown014bb1d2022-05-09 18:20:47 -0700116}
117
Matthew Wilcox (Oracle)09c02e52022-05-12 20:23:02 -0700118static inline bool add_to_swap(struct folio *folio)
NeilBrown014bb1d2022-05-09 18:20:47 -0700119{
Matthew Wilcox (Oracle)09c02e52022-05-12 20:23:02 -0700120 return false;
NeilBrown014bb1d2022-05-09 18:20:47 -0700121}
122
123static inline void *get_shadow_from_swap_cache(swp_entry_t entry)
124{
125 return NULL;
126}
127
Matthew Wilcox (Oracle)a4c366f2022-09-02 20:46:08 +0100128static inline int add_to_swap_cache(struct folio *folio, swp_entry_t entry,
NeilBrown014bb1d2022-05-09 18:20:47 -0700129 gfp_t gfp_mask, void **shadowp)
130{
131 return -1;
132}
133
Matthew Wilcox (Oracle)ceff9d32022-06-17 18:50:20 +0100134static inline void __delete_from_swap_cache(struct folio *folio,
NeilBrown014bb1d2022-05-09 18:20:47 -0700135 swp_entry_t entry, void *shadow)
136{
137}
138
/* CONFIG_SWAP=n: nothing is ever in the swap cache to delete. */
static inline void delete_from_swap_cache(struct folio *folio)
{
}

/* CONFIG_SWAP=n: no shadow entries exist in the given range. */
static inline void clear_shadow_from_swap_cache(int type, unsigned long begin,
						unsigned long end)
{
}

/* CONFIG_SWAP=n: there is no backing swap device, so no flags. */
static inline unsigned int folio_swap_flags(struct folio *folio)
{
	return 0;
}
NeilBrown014bb1d2022-05-09 18:20:47 -0700152#endif /* CONFIG_SWAP */
153#endif /* _MM_SWAP_H */