// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/ext4/readpage.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 * Copyright (C) 2015, Google, Inc.
 *
 * This was originally taken from fs/mpage.c
 *
 * The ext4_mpage_readpages() function here is intended to
 * replace mpage_readahead() in the general case, not just for
 * encrypted files.  It has some limitations (see below), where it
 * will fall back to block_read_full_folio(), but these limitations
 * should only be hit when page_size != block_size.
 *
 * This will allow us to attach a callback function to support ext4
 * encryption.
 *
 * If anything unusual happens, such as:
 *
 * - encountering a folio which has buffers
 * - encountering a folio which has a non-hole after a hole
 * - encountering a folio with non-contiguous blocks
 *
 * then this code just gives up and calls the buffer_head-based read function.
 * It does handle a folio which has holes at the end - that is a common case:
 * the end-of-file on blocksize < PAGE_SIZE setups.
 */
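
/*
 * Overview of the read completion flow implemented below:
 *
 *   submit_bio() -> mpage_end_io() -> bio_post_read_processing()
 *	-> decrypt_work() on the fscrypt workqueue (if STEP_DECRYPT is set)
 *	-> verity_work() on the fsverity workqueue (if STEP_VERITY is set)
 *	-> __read_end_io(), which marks each folio uptodate (or not) and
 *	   unlocks it
 */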

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/kdev_t.h>
#include <linux/gfp.h>
#include <linux/bio.h>
#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/highmem.h>
#include <linux/prefetch.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>

#include "ext4.h"

#define NUM_PREALLOC_POST_READ_CTXS	128

static struct kmem_cache *bio_post_read_ctx_cache;
static mempool_t *bio_post_read_ctx_pool;

/* postprocessing steps for read bios */
enum bio_post_read_step {
	STEP_INITIAL = 0,
	STEP_DECRYPT,
	STEP_VERITY,
	STEP_MAX,
};
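
/*
 * Each step doubles as an ordinal (ctx->cur_step) and a bit position
 * (ctx->enabled_steps), so enabled steps always run in declaration order:
 * decryption first, then fs-verity verification.
 */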

struct bio_post_read_ctx {
	struct bio *bio;
	struct work_struct work;
	unsigned int cur_step;
	unsigned int enabled_steps;
};

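/*
 * The ctx is allocated in ext4_set_bio_post_read_ctx() and hangs off
 * bio->bi_private.  __read_end_io() below is the final completion step for
 * every read bio, reached either directly from mpage_end_io() when no
 * postprocessing is needed, or from the last enabled postprocessing step.
 */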
static void __read_end_io(struct bio *bio)
{
	struct folio_iter fi;

	bio_for_each_folio_all(fi, bio) {
		struct folio *folio = fi.folio;

		if (bio->bi_status)
			folio_clear_uptodate(folio);
		else
			folio_mark_uptodate(folio);
		folio_unlock(folio);
	}
	if (bio->bi_private)
		mempool_free(bio->bi_private, bio_post_read_ctx_pool);
	bio_put(bio);
}

static void bio_post_read_processing(struct bio_post_read_ctx *ctx);

static void decrypt_work(struct work_struct *work)
{
	struct bio_post_read_ctx *ctx =
		container_of(work, struct bio_post_read_ctx, work);
	struct bio *bio = ctx->bio;

	if (fscrypt_decrypt_bio(bio))
		bio_post_read_processing(ctx);
	else
		__read_end_io(bio);
}

static void verity_work(struct work_struct *work)
{
	struct bio_post_read_ctx *ctx =
		container_of(work, struct bio_post_read_ctx, work);
	struct bio *bio = ctx->bio;

	/*
	 * fsverity_verify_bio() may call readahead() again, and although verity
	 * will be disabled for that, decryption may still be needed, causing
	 * another bio_post_read_ctx to be allocated.  So to guarantee that
	 * mempool_alloc() never deadlocks we must free the current ctx first.
	 * This is safe because verity is the last post-read step.
	 */
	BUILD_BUG_ON(STEP_VERITY + 1 != STEP_MAX);
	mempool_free(ctx, bio_post_read_ctx_pool);
	bio->bi_private = NULL;

	fsverity_verify_bio(bio);

	__read_end_io(bio);
}

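/*
 * Advance to the next enabled postprocessing step and hand the bio to the
 * matching workqueue; once no enabled steps remain, complete the read.
 */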
static void bio_post_read_processing(struct bio_post_read_ctx *ctx)
{
	/*
	 * We use different work queues for decryption and for verity because
	 * verity may require reading metadata pages that need decryption, and
	 * we shouldn't recurse to the same workqueue.
	 */
	switch (++ctx->cur_step) {
	case STEP_DECRYPT:
		if (ctx->enabled_steps & (1 << STEP_DECRYPT)) {
			INIT_WORK(&ctx->work, decrypt_work);
			fscrypt_enqueue_decrypt_work(&ctx->work);
			return;
		}
		ctx->cur_step++;
		fallthrough;
	case STEP_VERITY:
		if (ctx->enabled_steps & (1 << STEP_VERITY)) {
			INIT_WORK(&ctx->work, verity_work);
			fsverity_enqueue_verify_work(&ctx->work);
			return;
		}
		ctx->cur_step++;
		fallthrough;
	default:
		__read_end_io(ctx->bio);
	}
}

static bool bio_post_read_required(struct bio *bio)
{
	return bio->bi_private && !bio->bi_status;
}

/*
 * I/O completion handler for multipage BIOs.
 *
 * The mpage code never puts partial pages into a BIO (except for end-of-file).
 * If a page does not map to a contiguous run of blocks then it simply falls
 * back to block_read_full_folio().
 *
 * Why is this?  If a page's completion depends on a number of different BIOs
 * which can complete in any order (or at the same time) then determining the
 * status of that page is hard.  See end_buffer_async_read() for the details.
 * There is no point in duplicating all that complexity.
 */
static void mpage_end_io(struct bio *bio)
{
	if (bio_post_read_required(bio)) {
		struct bio_post_read_ctx *ctx = bio->bi_private;

		ctx->cur_step = STEP_INITIAL;
		bio_post_read_processing(ctx);
		return;
	}
	__read_end_io(bio);
}

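/*
 * fs-verity only covers data within i_size; pages past EOF (where ext4
 * stores the Merkle tree itself) must not be run through verification.
 */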
static inline bool ext4_need_verity(const struct inode *inode, pgoff_t idx)
{
	return fsverity_active(inode) &&
	       idx < DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
}

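/*
 * Prepare the optional postprocessing context for a read bio.  If neither
 * fs-layer decryption nor fs-verity is needed, bio->bi_private stays NULL
 * and mpage_end_io() completes the bio directly; files using inline
 * (blk-crypto) encryption take that path too, since their data is
 * decrypted below the filesystem before the bio completes.
 */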
static void ext4_set_bio_post_read_ctx(struct bio *bio,
				       const struct inode *inode,
				       pgoff_t first_idx)
{
	unsigned int post_read_steps = 0;

	if (fscrypt_inode_uses_fs_layer_crypto(inode))
		post_read_steps |= 1 << STEP_DECRYPT;

	if (ext4_need_verity(inode, first_idx))
		post_read_steps |= 1 << STEP_VERITY;

	if (post_read_steps) {
		/* Due to the mempool, this never fails. */
		struct bio_post_read_ctx *ctx =
			mempool_alloc(bio_post_read_ctx_pool, GFP_NOFS);

		ctx->bio = bio;
		ctx->enabled_steps = post_read_steps;
		bio->bi_private = ctx;
	}
}

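/*
 * For fs-verity files, reads are allowed past i_size so that the Merkle
 * tree (which ext4 stores beyond EOF) can be fetched through this same
 * path; everything else is limited to i_size as usual.
 */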
static inline loff_t ext4_readpage_limit(struct inode *inode)
{
	if (IS_ENABLED(CONFIG_FS_VERITY) && IS_VERITY(inode))
		return inode->i_sb->s_maxbytes;

	return i_size_read(inode);
}

int ext4_mpage_readpages(struct inode *inode,
		struct readahead_control *rac, struct folio *folio)
{
	struct bio *bio = NULL;
	sector_t last_block_in_bio = 0;

	const unsigned blkbits = inode->i_blkbits;
	const unsigned blocks_per_page = PAGE_SIZE >> blkbits;
	const unsigned blocksize = 1 << blkbits;
	sector_t next_block;
	sector_t block_in_file;
	sector_t last_block;
	sector_t last_block_in_file;
	sector_t blocks[MAX_BUF_PER_PAGE];
	unsigned page_block;
	struct block_device *bdev = inode->i_sb->s_bdev;
	int length;
	unsigned relative_block = 0;
	struct ext4_map_blocks map;
	unsigned int nr_pages = rac ? readahead_count(rac) : 1;

	map.m_pblk = 0;
	map.m_lblk = 0;
	map.m_len = 0;
	map.m_flags = 0;

	for (; nr_pages; nr_pages--) {
		int fully_mapped = 1;
		unsigned first_hole = blocks_per_page;

		if (rac)
			folio = readahead_folio(rac);
		prefetchw(&folio->flags);

		if (folio_buffers(folio))
			goto confused;

		block_in_file = next_block =
			(sector_t)folio->index << (PAGE_SHIFT - blkbits);
		last_block = block_in_file + nr_pages * blocks_per_page;
		last_block_in_file = (ext4_readpage_limit(inode) +
				      blocksize - 1) >> blkbits;
		if (last_block > last_block_in_file)
			last_block = last_block_in_file;
		page_block = 0;

		/*
		 * Map blocks using the previous result first.
		 */
		if ((map.m_flags & EXT4_MAP_MAPPED) &&
		    block_in_file > map.m_lblk &&
		    block_in_file < (map.m_lblk + map.m_len)) {
			unsigned map_offset = block_in_file - map.m_lblk;
			unsigned last = map.m_len - map_offset;

			for (relative_block = 0; ; relative_block++) {
				if (relative_block == last) {
					/* needed? */
					map.m_flags &= ~EXT4_MAP_MAPPED;
					break;
				}
				if (page_block == blocks_per_page)
					break;
				blocks[page_block] = map.m_pblk + map_offset +
					relative_block;
				page_block++;
				block_in_file++;
			}
		}

		/*
		 * Then do more ext4_map_blocks() calls until we are
		 * done with this folio.
		 */
		while (page_block < blocks_per_page) {
			if (block_in_file < last_block) {
				map.m_lblk = block_in_file;
				map.m_len = last_block - block_in_file;

				if (ext4_map_blocks(NULL, inode, &map, 0) < 0) {
				set_error_page:
					folio_set_error(folio);
					folio_zero_segment(folio, 0,
							   folio_size(folio));
					folio_unlock(folio);
					goto next_page;
				}
			}
			if ((map.m_flags & EXT4_MAP_MAPPED) == 0) {
				fully_mapped = 0;
				if (first_hole == blocks_per_page)
					first_hole = page_block;
				page_block++;
				block_in_file++;
				continue;
			}
			if (first_hole != blocks_per_page)
				goto confused;		/* hole -> non-hole */

			/* Contiguous blocks? */
			if (page_block && blocks[page_block-1] != map.m_pblk-1)
				goto confused;
			for (relative_block = 0; ; relative_block++) {
				if (relative_block == map.m_len) {
					/* needed? */
					map.m_flags &= ~EXT4_MAP_MAPPED;
					break;
				} else if (page_block == blocks_per_page)
					break;
				blocks[page_block] = map.m_pblk+relative_block;
				page_block++;
				block_in_file++;
			}
		}
		if (first_hole != blocks_per_page) {
			folio_zero_segment(folio, first_hole << blkbits,
					   folio_size(folio));
			if (first_hole == 0) {
				if (ext4_need_verity(inode, folio->index) &&
				    !fsverity_verify_folio(folio))
					goto set_error_page;
				folio_mark_uptodate(folio);
				folio_unlock(folio);
				continue;
			}
		} else if (fully_mapped) {
			folio_set_mappedtodisk(folio);
		}

		/*
		 * This folio will go into the BIO.  Do we need to send this
		 * BIO off first?  We must if the new block is not physically
		 * contiguous with the last block already in the BIO, or if
		 * fscrypt cannot merge the new block into the same crypt
		 * context.
		 */
		if (bio && (last_block_in_bio != blocks[0] - 1 ||
			    !fscrypt_mergeable_bio(bio, inode, next_block))) {
		submit_and_realloc:
			submit_bio(bio);
			bio = NULL;
		}
		if (bio == NULL) {
			/*
			 * bio_alloc will _always_ be able to allocate a bio if
			 * __GFP_DIRECT_RECLAIM is set, see bio_alloc_bioset().
			 */
			bio = bio_alloc(bdev, bio_max_segs(nr_pages),
					REQ_OP_READ, GFP_KERNEL);
			fscrypt_set_bio_crypt_ctx(bio, inode, next_block,
						  GFP_KERNEL);
			ext4_set_bio_post_read_ctx(bio, inode, folio->index);
			bio->bi_iter.bi_sector = blocks[0] << (blkbits - 9);
			bio->bi_end_io = mpage_end_io;
			if (rac)
				bio->bi_opf |= REQ_RAHEAD;
		}

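		/*
		 * Only the blocks before the first hole need to come from
		 * disk; the tail of the folio was already zeroed above.
		 */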
		length = first_hole << blkbits;
		if (!bio_add_folio(bio, folio, length, 0))
			goto submit_and_realloc;

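		/*
		 * If the mapping ended on a boundary (EXT4_MAP_BOUNDARY: the
		 * next logical block sits behind a metadata block, so it
		 * cannot be contiguous on disk) or the folio ends in a hole,
		 * no more folios can be merged into this bio; submit it now.
		 */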
		if (((map.m_flags & EXT4_MAP_BOUNDARY) &&
		     (relative_block == map.m_len)) ||
		    (first_hole != blocks_per_page)) {
			submit_bio(bio);
			bio = NULL;
		} else
			last_block_in_bio = blocks[blocks_per_page - 1];
		continue;
	confused:
		if (bio) {
			submit_bio(bio);
			bio = NULL;
		}
		if (!folio_test_uptodate(folio))
			block_read_full_folio(folio, ext4_get_block);
		else
			folio_unlock(folio);
	next_page:
		; /* A label shall be followed by a statement until C23 */
	}
	if (bio)
		submit_bio(bio);
	return 0;
}
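
/*
 * For reference, a rough sketch of how the two entry points in
 * fs/ext4/inode.c are expected to call this (simplified; see the real
 * callers there):
 *
 *	static int ext4_read_folio(struct file *file, struct folio *folio)
 *	{
 *		return ext4_mpage_readpages(folio->mapping->host, NULL, folio);
 *	}
 *
 *	static void ext4_readahead(struct readahead_control *rac)
 *	{
 *		ext4_mpage_readpages(rac->mapping->host, rac, NULL);
 *	}
 */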

int __init ext4_init_post_read_processing(void)
{
	bio_post_read_ctx_cache = KMEM_CACHE(bio_post_read_ctx, SLAB_RECLAIM_ACCOUNT);

	if (!bio_post_read_ctx_cache)
		goto fail;
	bio_post_read_ctx_pool =
		mempool_create_slab_pool(NUM_PREALLOC_POST_READ_CTXS,
					 bio_post_read_ctx_cache);
	if (!bio_post_read_ctx_pool)
		goto fail_free_cache;
	return 0;

fail_free_cache:
	kmem_cache_destroy(bio_post_read_ctx_cache);
fail:
	return -ENOMEM;
}

void ext4_exit_post_read_processing(void)
{
	mempool_destroy(bio_post_read_ctx_pool);
	kmem_cache_destroy(bio_post_read_ctx_cache);
}