/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _MM_SWAP_H
#define _MM_SWAP_H

#ifdef CONFIG_SWAP
#include <linux/blk_types.h> /* for bio_end_io_t */

/* linux/mm/page_io.c */
int sio_pool_init(void);
struct swap_iocb;
int swap_readpage(struct page *page, bool do_poll,
		  struct swap_iocb **plug);
void __swap_read_unplug(struct swap_iocb *plug);
static inline void swap_read_unplug(struct swap_iocb *plug)
{
	if (unlikely(plug))
		__swap_read_unplug(plug);
}
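/*
 * Usage sketch (illustrative, not a declaration): callers that issue a
 * batch of swap reads pass the same plug pointer to each call so that
 * adjacent requests can be merged into one struct swap_iocb, then
 * release it once at the end.  Assuming the caller already has a
 * swap-cache page:
 *
 *	struct swap_iocb *plug = NULL;
 *
 *	swap_readpage(page, false, &plug);	// may queue I/O on *plug
 *	...more swap_readpage() calls sharing &plug...
 *	swap_read_unplug(plug);			// submit anything batched
 */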
void swap_write_unplug(struct swap_iocb *sio);
int swap_writepage(struct page *page, struct writeback_control *wbc);
void end_swap_bio_write(struct bio *bio);
int __swap_writepage(struct page *page, struct writeback_control *wbc,
		     bio_end_io_t end_write_func);
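/*
 * How these fit together, as a hedged sketch based on mm/page_io.c:
 * swap_writepage() performs its checks and then typically ends in
 *
 *	return __swap_writepage(page, wbc, end_swap_bio_write);
 *
 * so end_swap_bio_write() runs as the bio completion callback; a caller
 * with special completion needs may pass its own bio_end_io_t instead.
 */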

/* linux/mm/swap_state.c */
/* One swap address space for each 64M swap space */
#define SWAP_ADDRESS_SPACE_SHIFT	14
#define SWAP_ADDRESS_SPACE_PAGES	(1 << SWAP_ADDRESS_SPACE_SHIFT)
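/*
 * Worked example for the comment above, assuming the common 4 KiB
 * PAGE_SIZE: 1 << 14 = 16384 pages per address space, and
 * 16384 * 4 KiB = 64 MiB of swap space each.
 */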
extern struct address_space *swapper_spaces[];
#define swap_address_space(entry)			    \
	(&swapper_spaces[swp_type(entry)][swp_offset(entry) \
		>> SWAP_ADDRESS_SPACE_SHIFT])
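/*
 * Illustrative only: for an entry with swp_type() == 1 and
 * swp_offset() == 0x5000, this evaluates to
 * &swapper_spaces[1][0x5000 >> 14], i.e. the second 64M chunk of
 * swap device 1.
 */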

void show_swap_cache_info(void);
bool add_to_swap(struct folio *folio);
void *get_shadow_from_swap_cache(swp_entry_t entry);
int add_to_swap_cache(struct page *page, swp_entry_t entry,
		      gfp_t gfp, void **shadowp);
void __delete_from_swap_cache(struct page *page,
			      swp_entry_t entry, void *shadow);
void delete_from_swap_cache(struct page *page);
void clear_shadow_from_swap_cache(int type, unsigned long begin,
				  unsigned long end);
void free_swap_cache(struct page *page);
struct page *lookup_swap_cache(swp_entry_t entry,
			       struct vm_area_struct *vma,
			       unsigned long addr);
struct page *find_get_incore_page(struct address_space *mapping, pgoff_t index);

struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
				   struct vm_area_struct *vma,
				   unsigned long addr,
				   bool do_poll,
				   struct swap_iocb **plug);
struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
				     struct vm_area_struct *vma,
				     unsigned long addr,
				     bool *new_page_allocated);
struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t flag,
				    struct vm_fault *vmf);
struct page *swapin_readahead(swp_entry_t entry, gfp_t flag,
			      struct vm_fault *vmf);
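/*
 * Note: swapin_readahead() is the general entry point; it dispatches
 * either to swap_cluster_readahead() above or to the VMA-based
 * readahead variant in mm/swap_state.c, depending on whether VMA
 * readahead is enabled.
 */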

static inline unsigned int folio_swap_flags(struct folio *folio)
{
	return page_swap_info(&folio->page)->flags;
}
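/*
 * Hypothetical usage sketch (the caller and check are assumptions, but
 * SWP_FS_OPS is a real flag): reclaim code can inspect the backing
 * swap device's flags without touching swap internals directly, e.g.
 *
 *	if (folio_swap_flags(folio) & SWP_FS_OPS)
 *		...swap lives on a filesystem, use its ->swap_rw path...
 */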
#else /* CONFIG_SWAP */
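/*
 * CONFIG_SWAP=n: stub out the API above with no-op inlines of the same
 * signatures so callers compile unchanged; lookups return NULL and the
 * read/write entry points report success without doing anything.
 */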
struct swap_iocb;
static inline int swap_readpage(struct page *page, bool do_poll,
				struct swap_iocb **plug)
{
	return 0;
}
static inline void swap_write_unplug(struct swap_iocb *sio)
{
}

static inline struct address_space *swap_address_space(swp_entry_t entry)
{
	return NULL;
}

static inline void free_swap_cache(struct page *page)
{
}

static inline void show_swap_cache_info(void)
{
}

static inline struct page *swap_cluster_readahead(swp_entry_t entry,
			gfp_t gfp_mask, struct vm_fault *vmf)
{
	return NULL;
}

static inline struct page *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask,
			struct vm_fault *vmf)
{
	return NULL;
}

static inline int swap_writepage(struct page *p, struct writeback_control *wbc)
{
	return 0;
}

static inline struct page *lookup_swap_cache(swp_entry_t swp,
					     struct vm_area_struct *vma,
					     unsigned long addr)
{
	return NULL;
}

static inline
struct page *find_get_incore_page(struct address_space *mapping, pgoff_t index)
{
	return find_get_page(mapping, index);
}
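/*
 * Without CONFIG_SWAP no page can be in a swap cache, so the plain
 * page-cache lookup above is sufficient here.
 */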

static inline bool add_to_swap(struct folio *folio)
{
	return false;
}

static inline void *get_shadow_from_swap_cache(swp_entry_t entry)
{
	return NULL;
}

static inline int add_to_swap_cache(struct page *page, swp_entry_t entry,
					gfp_t gfp_mask, void **shadowp)
{
	return -1;
}

static inline void __delete_from_swap_cache(struct page *page,
					swp_entry_t entry, void *shadow)
{
}

static inline void delete_from_swap_cache(struct page *page)
{
}

static inline void clear_shadow_from_swap_cache(int type, unsigned long begin,
						unsigned long end)
{
}

static inline unsigned int folio_swap_flags(struct folio *folio)
{
	return 0;
}
#endif /* CONFIG_SWAP */
#endif /* _MM_SWAP_H */