// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/data.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/blk-crypto.h>
#include <linux/swap.h>
#include <linux/prefetch.h>
#include <linux/uio.h>
#include <linux/cleancache.h>
#include <linux/sched/signal.h>
#include <linux/fiemap.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "iostat.h"
#include <trace/events/f2fs.h>

#define NUM_PREALLOC_POST_READ_CTXS	128

static struct kmem_cache *bio_post_read_ctx_cache;
static struct kmem_cache *bio_entry_slab;
static mempool_t *bio_post_read_ctx_pool;
static struct bio_set f2fs_bioset;

#define	F2FS_BIO_POOL_SIZE	NR_CURSEG_TYPE

int __init f2fs_init_bioset(void)
{
	if (bioset_init(&f2fs_bioset, F2FS_BIO_POOL_SIZE,
					0, BIOSET_NEED_BVECS))
		return -ENOMEM;
	return 0;
}

void f2fs_destroy_bioset(void)
{
	bioset_exit(&f2fs_bioset);
}

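/*
 * Return true if writeback of this page must complete before a checkpoint
 * can finish: meta/node inode pages, directory data, atomic-file and
 * quota-file data, and pages tagged for GC writeback all fall into this
 * category.
 */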
static bool __is_cp_guaranteed(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode;
	struct f2fs_sb_info *sbi;

	if (!mapping)
		return false;

	inode = mapping->host;
	sbi = F2FS_I_SB(inode);

	if (inode->i_ino == F2FS_META_INO(sbi) ||
			inode->i_ino == F2FS_NODE_INO(sbi) ||
			S_ISDIR(inode->i_mode))
		return true;

	if (f2fs_is_compressed_page(page))
		return false;
	if ((S_ISREG(inode->i_mode) &&
			(f2fs_is_atomic_file(inode) || IS_NOQUOTA(inode))) ||
			page_private_gcing(page))
		return true;
	return false;
}

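/* Map a page that is under read I/O to the counter type used for accounting. */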
static enum count_type __read_io_type(struct page *page)
{
	struct address_space *mapping = page_file_mapping(page);

	if (mapping) {
		struct inode *inode = mapping->host;
		struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

		if (inode->i_ino == F2FS_META_INO(sbi))
			return F2FS_RD_META;

		if (inode->i_ino == F2FS_NODE_INO(sbi))
			return F2FS_RD_NODE;
	}
	return F2FS_RD_DATA;
}

/* postprocessing steps for read bios */
enum bio_post_read_step {
#ifdef CONFIG_FS_ENCRYPTION
	STEP_DECRYPT	= 1 << 0,
#else
	STEP_DECRYPT	= 0,	/* compile out the decryption-related code */
#endif
#ifdef CONFIG_F2FS_FS_COMPRESSION
	STEP_DECOMPRESS	= 1 << 1,
#else
	STEP_DECOMPRESS	= 0,	/* compile out the decompression-related code */
#endif
#ifdef CONFIG_FS_VERITY
	STEP_VERITY	= 1 << 2,
#else
	STEP_VERITY	= 0,	/* compile out the verity-related code */
#endif
};

struct bio_post_read_ctx {
	struct bio *bio;
	struct f2fs_sb_info *sbi;
	struct work_struct work;
	unsigned int enabled_steps;
	block_t fs_blkaddr;
};

static void f2fs_finish_read_bio(struct bio *bio)
{
	struct bio_vec *bv;
	struct bvec_iter_all iter_all;

	/*
	 * Update and unlock the bio's pagecache pages, and put the
	 * decompression context for any compressed pages.
	 */
	bio_for_each_segment_all(bv, bio, iter_all) {
		struct page *page = bv->bv_page;

		if (f2fs_is_compressed_page(page)) {
			if (bio->bi_status)
				f2fs_end_read_compressed_page(page, true, 0);
			f2fs_put_page_dic(page);
			continue;
		}

		/* PG_error was set if decryption or verity failed. */
		if (bio->bi_status || PageError(page)) {
			ClearPageUptodate(page);
			/* will re-read again later */
			ClearPageError(page);
		} else {
			SetPageUptodate(page);
		}
		dec_page_count(F2FS_P_SB(page), __read_io_type(page));
		unlock_page(page);
	}

	if (bio->bi_private)
		mempool_free(bio->bi_private, bio_post_read_ctx_pool);
	bio_put(bio);
}

static void f2fs_verify_bio(struct work_struct *work)
{
	struct bio_post_read_ctx *ctx =
		container_of(work, struct bio_post_read_ctx, work);
	struct bio *bio = ctx->bio;
	bool may_have_compressed_pages = (ctx->enabled_steps & STEP_DECOMPRESS);

	/*
	 * fsverity_verify_bio() may call readpages() again, and while verity
	 * will be disabled for this, decryption and/or decompression may still
	 * be needed, resulting in another bio_post_read_ctx being allocated.
	 * So to prevent deadlocks we need to release the current ctx to the
	 * mempool first. This assumes that verity is the last post-read step.
	 */
	mempool_free(ctx, bio_post_read_ctx_pool);
	bio->bi_private = NULL;

	/*
	 * Verify the bio's pages with fs-verity. Exclude compressed pages,
	 * as those were handled separately by f2fs_end_read_compressed_page().
	 */
	if (may_have_compressed_pages) {
		struct bio_vec *bv;
		struct bvec_iter_all iter_all;

		bio_for_each_segment_all(bv, bio, iter_all) {
			struct page *page = bv->bv_page;

			if (!f2fs_is_compressed_page(page) &&
			    !PageError(page) && !fsverity_verify_page(page))
				SetPageError(page);
		}
	} else {
		fsverity_verify_bio(bio);
	}

	f2fs_finish_read_bio(bio);
}

/*
 * If the bio's data needs to be verified with fs-verity, then enqueue the
 * verity work for the bio. Otherwise finish the bio now.
 *
 * Note that to avoid deadlocks, the verity work can't be done on the
 * decryption/decompression workqueue. This is because verifying the data pages
 * can involve reading verity metadata pages from the file, and these verity
 * metadata pages may be encrypted and/or compressed.
 */
static void f2fs_verify_and_finish_bio(struct bio *bio)
{
	struct bio_post_read_ctx *ctx = bio->bi_private;

	if (ctx && (ctx->enabled_steps & STEP_VERITY)) {
		INIT_WORK(&ctx->work, f2fs_verify_bio);
		fsverity_enqueue_verify_work(&ctx->work);
	} else {
		f2fs_finish_read_bio(bio);
	}
}

/*
 * Handle STEP_DECOMPRESS by decompressing any compressed clusters whose last
 * remaining page was read by @ctx->bio.
 *
 * Note that a bio may span clusters (even a mix of compressed and uncompressed
 * clusters) or be for just part of a cluster. STEP_DECOMPRESS just indicates
 * that the bio includes at least one compressed page. The actual decompression
 * is done on a per-cluster basis, not a per-bio basis.
 */
static void f2fs_handle_step_decompress(struct bio_post_read_ctx *ctx)
{
	struct bio_vec *bv;
	struct bvec_iter_all iter_all;
	bool all_compressed = true;
	block_t blkaddr = ctx->fs_blkaddr;

	bio_for_each_segment_all(bv, ctx->bio, iter_all) {
		struct page *page = bv->bv_page;

		/* PG_error was set if decryption failed. */
		if (f2fs_is_compressed_page(page))
			f2fs_end_read_compressed_page(page, PageError(page),
						blkaddr);
		else
			all_compressed = false;

		blkaddr++;
	}

	/*
	 * Optimization: if all the bio's pages are compressed, then scheduling
	 * the per-bio verity work is unnecessary, as verity will be fully
	 * handled at the compression cluster level.
	 */
	if (all_compressed)
		ctx->enabled_steps &= ~STEP_VERITY;
}

static void f2fs_post_read_work(struct work_struct *work)
{
	struct bio_post_read_ctx *ctx =
		container_of(work, struct bio_post_read_ctx, work);

	if (ctx->enabled_steps & STEP_DECRYPT)
		fscrypt_decrypt_bio(ctx->bio);

	if (ctx->enabled_steps & STEP_DECOMPRESS)
		f2fs_handle_step_decompress(ctx);

	f2fs_verify_and_finish_bio(ctx->bio);
}

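/*
 * Read completion handler: on error, finish the bio immediately; otherwise
 * defer decryption/decompression to the post-read workqueue, or go straight
 * to the verity/finish path when no such step is pending.
 */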
static void f2fs_read_end_io(struct bio *bio)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(bio_first_page_all(bio));
	struct bio_post_read_ctx *ctx;

	iostat_update_and_unbind_ctx(bio, 0);
	ctx = bio->bi_private;

	if (time_to_inject(sbi, FAULT_READ_IO)) {
		f2fs_show_injection_info(sbi, FAULT_READ_IO);
		bio->bi_status = BLK_STS_IOERR;
	}

	if (bio->bi_status) {
		f2fs_finish_read_bio(bio);
		return;
	}

	if (ctx && (ctx->enabled_steps & (STEP_DECRYPT | STEP_DECOMPRESS))) {
		INIT_WORK(&ctx->work, f2fs_post_read_work);
		queue_work(ctx->sbi->post_read_wq, &ctx->work);
	} else {
		f2fs_verify_and_finish_bio(bio);
	}
}

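/*
 * Write completion handler: release dummy alignment pages, finalize
 * encryption bounce pages, propagate errors, and wake up the checkpoint
 * waiter once all checkpointed data has been written back.
 */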
static void f2fs_write_end_io(struct bio *bio)
{
	struct f2fs_sb_info *sbi;
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	iostat_update_and_unbind_ctx(bio, 1);
	sbi = bio->bi_private;

	if (time_to_inject(sbi, FAULT_WRITE_IO)) {
		f2fs_show_injection_info(sbi, FAULT_WRITE_IO);
		bio->bi_status = BLK_STS_IOERR;
	}

	bio_for_each_segment_all(bvec, bio, iter_all) {
		struct page *page = bvec->bv_page;
		enum count_type type = WB_DATA_TYPE(page);

		if (page_private_dummy(page)) {
			clear_page_private_dummy(page);
			unlock_page(page);
			mempool_free(page, sbi->write_io_dummy);

			if (unlikely(bio->bi_status))
				f2fs_stop_checkpoint(sbi, true);
			continue;
		}

		fscrypt_finalize_bounce_page(&page);

#ifdef CONFIG_F2FS_FS_COMPRESSION
		if (f2fs_is_compressed_page(page)) {
			f2fs_compress_write_end_io(bio, page);
			continue;
		}
#endif

		if (unlikely(bio->bi_status)) {
			mapping_set_error(page->mapping, -EIO);
			if (type == F2FS_WB_CP_DATA)
				f2fs_stop_checkpoint(sbi, true);
		}

		f2fs_bug_on(sbi, page->mapping == NODE_MAPPING(sbi) &&
					page->index != nid_of_node(page));

		dec_page_count(sbi, type);
		if (f2fs_in_warm_node_list(sbi, page))
			f2fs_del_fsync_node_entry(sbi, page);
		clear_page_private_gcing(page);
		end_page_writeback(page);
	}
	if (!get_pages(sbi, F2FS_WB_CP_DATA) &&
				wq_has_sleeper(&sbi->cp_wait))
		wake_up(&sbi->cp_wait);

	bio_put(bio);
}

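/*
 * Map a filesystem block address to its backing block device and, when a
 * bio is given, set the device and device-relative sector on that bio.
 */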
struct block_device *f2fs_target_device(struct f2fs_sb_info *sbi,
				block_t blk_addr, struct bio *bio)
{
	struct block_device *bdev = sbi->sb->s_bdev;
	int i;

	if (f2fs_is_multi_device(sbi)) {
		for (i = 0; i < sbi->s_ndevs; i++) {
			if (FDEV(i).start_blk <= blk_addr &&
			    FDEV(i).end_blk >= blk_addr) {
				blk_addr -= FDEV(i).start_blk;
				bdev = FDEV(i).bdev;
				break;
			}
		}
	}
	if (bio) {
		bio_set_dev(bio, bdev);
		bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blk_addr);
	}
	return bdev;
}

int f2fs_target_device_index(struct f2fs_sb_info *sbi, block_t blkaddr)
{
	int i;

	if (!f2fs_is_multi_device(sbi))
		return 0;

	for (i = 0; i < sbi->s_ndevs; i++)
		if (FDEV(i).start_blk <= blkaddr && FDEV(i).end_blk >= blkaddr)
			return i;
	return 0;
}

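/* Allocate a bio from f2fs_bioset and wire up the matching end_io handler. */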
static struct bio *__bio_alloc(struct f2fs_io_info *fio, int npages)
{
	struct f2fs_sb_info *sbi = fio->sbi;
	struct bio *bio;

	bio = bio_alloc_bioset(GFP_NOIO, npages, &f2fs_bioset);

	f2fs_target_device(sbi, fio->new_blkaddr, bio);
	if (is_read_io(fio->op)) {
		bio->bi_end_io = f2fs_read_end_io;
		bio->bi_private = NULL;
	} else {
		bio->bi_end_io = f2fs_write_end_io;
		bio->bi_private = sbi;
		bio->bi_write_hint = f2fs_io_type_to_rw_hint(sbi,
						fio->type, fio->temp);
	}
	iostat_alloc_and_bind_ctx(sbi, bio, NULL);

	if (fio->io_wbc)
		wbc_init_bio(fio->io_wbc, bio);

	return bio;
}

static void f2fs_set_bio_crypt_ctx(struct bio *bio, const struct inode *inode,
				  pgoff_t first_idx,
				  const struct f2fs_io_info *fio,
				  gfp_t gfp_mask)
{
	/*
	 * The f2fs garbage collector sets ->encrypted_page when it wants to
	 * read/write raw data without encryption.
	 */
	if (!fio || !fio->encrypted_page)
		fscrypt_set_bio_crypt_ctx(bio, inode, first_idx, gfp_mask);
}

static bool f2fs_crypt_mergeable_bio(struct bio *bio, const struct inode *inode,
				     pgoff_t next_idx,
				     const struct f2fs_io_info *fio)
{
	/*
	 * The f2fs garbage collector sets ->encrypted_page when it wants to
	 * read/write raw data without encryption.
	 */
	if (fio && fio->encrypted_page)
		return !bio_has_crypt_ctx(bio);

	return fscrypt_mergeable_bio(bio, inode, next_idx);
}

static inline void __submit_bio(struct f2fs_sb_info *sbi,
				struct bio *bio, enum page_type type)
{
	if (!is_read_io(bio_op(bio))) {
		unsigned int start;

		if (type != DATA && type != NODE)
			goto submit_io;

		if (f2fs_lfs_mode(sbi) && current->plug)
			blk_finish_plug(current->plug);

		if (!F2FS_IO_ALIGNED(sbi))
			goto submit_io;

		start = bio->bi_iter.bi_size >> F2FS_BLKSIZE_BITS;
		start %= F2FS_IO_SIZE(sbi);

		if (start == 0)
			goto submit_io;

		/* fill dummy pages */
		for (; start < F2FS_IO_SIZE(sbi); start++) {
			struct page *page =
				mempool_alloc(sbi->write_io_dummy,
					      GFP_NOIO | __GFP_NOFAIL);
			f2fs_bug_on(sbi, !page);

			lock_page(page);

			zero_user_segment(page, 0, PAGE_SIZE);
			set_page_private_dummy(page);

			if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE)
				f2fs_bug_on(sbi, 1);
		}
		/*
		 * In the NODE case, we lose the next block address chain.
		 * So, we need to do a checkpoint in f2fs_sync_file.
		 */
		if (type == NODE)
			set_sbi_flag(sbi, SBI_NEED_CP);
	}
submit_io:
	if (is_read_io(bio_op(bio)))
		trace_f2fs_submit_read_bio(sbi->sb, type, bio);
	else
		trace_f2fs_submit_write_bio(sbi->sb, type, bio);

	iostat_update_submit_ctx(bio, type);
	submit_bio(bio);
}

void f2fs_submit_bio(struct f2fs_sb_info *sbi,
				struct bio *bio, enum page_type type)
{
	__submit_bio(sbi, bio, type);
}

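/*
 * Apply the per-temperature REQ_META/REQ_FUA bits configured via
 * sbi->data_io_flag or sbi->node_io_flag to this I/O's op_flags.
 */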
static void __attach_io_flag(struct f2fs_io_info *fio)
{
	struct f2fs_sb_info *sbi = fio->sbi;
	unsigned int temp_mask = (1 << NR_TEMP_TYPE) - 1;
	unsigned int io_flag, fua_flag, meta_flag;

	if (fio->type == DATA)
		io_flag = sbi->data_io_flag;
	else if (fio->type == NODE)
		io_flag = sbi->node_io_flag;
	else
		return;

	fua_flag = io_flag & temp_mask;
	meta_flag = (io_flag >> NR_TEMP_TYPE) & temp_mask;

	/*
	 * data/node io flag bits per temp:
	 *      REQ_META     |      REQ_FUA      |
	 *    5 |    4 |   3 |    2 |    1 |   0 |
	 * Cold | Warm | Hot | Cold | Warm | Hot |
	 */
	if ((1 << fio->temp) & meta_flag)
		fio->op_flags |= REQ_META;
	if ((1 << fio->temp) & fua_flag)
		fio->op_flags |= REQ_FUA;
}

static void __submit_merged_bio(struct f2fs_bio_info *io)
{
	struct f2fs_io_info *fio = &io->fio;

	if (!io->bio)
		return;

	__attach_io_flag(fio);
	bio_set_op_attrs(io->bio, fio->op, fio->op_flags);

	if (is_read_io(fio->op))
		trace_f2fs_prepare_read_bio(io->sbi->sb, fio->type, io->bio);
	else
		trace_f2fs_prepare_write_bio(io->sbi->sb, fio->type, io->bio);

	__submit_bio(io->sbi, io->bio, fio->type);
	io->bio = NULL;
}

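/*
 * Return true if @bio already carries a page that matches the given inode,
 * page, or node ino filter; with no filter, any non-NULL bio matches.
 */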
static bool __has_merged_page(struct bio *bio, struct inode *inode,
						struct page *page, nid_t ino)
{
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	if (!bio)
		return false;

	if (!inode && !page && !ino)
		return true;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		struct page *target = bvec->bv_page;

		if (fscrypt_is_bounce_page(target)) {
			target = fscrypt_pagecache_page(target);
			if (IS_ERR(target))
				continue;
		}
		if (f2fs_is_compressed_page(target)) {
			target = f2fs_compress_control_page(target);
			if (IS_ERR(target))
				continue;
		}

		if (inode && inode == target->mapping->host)
			return true;
		if (page && page == target)
			return true;
		if (ino && ino == ino_of_node(target))
			return true;
	}

	return false;
}

static void __f2fs_submit_merged_write(struct f2fs_sb_info *sbi,
				enum page_type type, enum temp_type temp)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(type);
	struct f2fs_bio_info *io = sbi->write_io[btype] + temp;

	down_write(&io->io_rwsem);

	/* change META to META_FLUSH in the checkpoint procedure */
	if (type >= META_FLUSH) {
		io->fio.type = META_FLUSH;
		io->fio.op = REQ_OP_WRITE;
		io->fio.op_flags = REQ_META | REQ_PRIO | REQ_SYNC;
		if (!test_opt(sbi, NOBARRIER))
			io->fio.op_flags |= REQ_PREFLUSH | REQ_FUA;
	}
	__submit_merged_bio(io);
	up_write(&io->io_rwsem);
}

static void __submit_merged_write_cond(struct f2fs_sb_info *sbi,
				struct inode *inode, struct page *page,
				nid_t ino, enum page_type type, bool force)
{
	enum temp_type temp;
	bool ret = true;

	for (temp = HOT; temp < NR_TEMP_TYPE; temp++) {
		if (!force) {
			enum page_type btype = PAGE_TYPE_OF_BIO(type);
			struct f2fs_bio_info *io = sbi->write_io[btype] + temp;

			down_read(&io->io_rwsem);
			ret = __has_merged_page(io->bio, inode, page, ino);
			up_read(&io->io_rwsem);
		}
		if (ret)
			__f2fs_submit_merged_write(sbi, type, temp);

		/* TODO: use HOT temp only for meta pages now. */
		if (type >= META)
			break;
	}
}

void f2fs_submit_merged_write(struct f2fs_sb_info *sbi, enum page_type type)
{
	__submit_merged_write_cond(sbi, NULL, NULL, 0, type, true);
}

void f2fs_submit_merged_write_cond(struct f2fs_sb_info *sbi,
				struct inode *inode, struct page *page,
				nid_t ino, enum page_type type)
{
	__submit_merged_write_cond(sbi, inode, page, ino, type, false);
}

void f2fs_flush_merged_writes(struct f2fs_sb_info *sbi)
{
	f2fs_submit_merged_write(sbi, DATA);
	f2fs_submit_merged_write(sbi, NODE);
	f2fs_submit_merged_write(sbi, META);
}

/*
 * Fill the locked page with data located in the block address.
 * A caller needs to unlock the page on failure.
 */
int f2fs_submit_page_bio(struct f2fs_io_info *fio)
{
	struct bio *bio;
	struct page *page = fio->encrypted_page ?
			fio->encrypted_page : fio->page;

	if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr,
			fio->is_por ? META_POR : (__is_meta_io(fio) ?
			META_GENERIC : DATA_GENERIC_ENHANCE)))
		return -EFSCORRUPTED;

	trace_f2fs_submit_page_bio(page, fio);

	/* Allocate a new bio */
	bio = __bio_alloc(fio, 1);

	f2fs_set_bio_crypt_ctx(bio, fio->page->mapping->host,
			       fio->page->index, fio, GFP_NOIO);

	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
		bio_put(bio);
		return -EFAULT;
	}

	if (fio->io_wbc && !is_read_io(fio->op))
		wbc_account_cgroup_owner(fio->io_wbc, page, PAGE_SIZE);

	__attach_io_flag(fio);
	bio_set_op_attrs(bio, fio->op, fio->op_flags);

	inc_page_count(fio->sbi, is_read_io(fio->op) ?
			__read_io_type(page) : WB_DATA_TYPE(fio->page));

	__submit_bio(fio->sbi, bio, fio->type);
	return 0;
}

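/*
 * A page may join an existing bio only if the bio stays under the optional
 * max_io_bytes limit, the new block is physically contiguous with the last
 * one, and the target block device is unchanged.
 */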
static bool page_is_mergeable(struct f2fs_sb_info *sbi, struct bio *bio,
				block_t last_blkaddr, block_t cur_blkaddr)
{
	if (unlikely(sbi->max_io_bytes &&
			bio->bi_iter.bi_size >= sbi->max_io_bytes))
		return false;
	if (last_blkaddr + 1 != cur_blkaddr)
		return false;
	return bio->bi_bdev == f2fs_target_device(sbi, cur_blkaddr, NULL);
}

static bool io_type_is_mergeable(struct f2fs_bio_info *io,
						struct f2fs_io_info *fio)
{
	if (io->fio.op != fio->op)
		return false;
	return io->fio.op_flags == fio->op_flags;
}

static bool io_is_mergeable(struct f2fs_sb_info *sbi, struct bio *bio,
					struct f2fs_bio_info *io,
					struct f2fs_io_info *fio,
					block_t last_blkaddr,
					block_t cur_blkaddr)
{
	if (F2FS_IO_ALIGNED(sbi) && (fio->type == DATA || fio->type == NODE)) {
		unsigned int filled_blocks =
				F2FS_BYTES_TO_BLK(bio->bi_iter.bi_size);
		unsigned int io_size = F2FS_IO_SIZE(sbi);
		unsigned int left_vecs = bio->bi_max_vecs - bio->bi_vcnt;

		/* I/Os in the bio are aligned, but the remaining vector space is not enough */
		if (!(filled_blocks % io_size) && left_vecs < io_size)
			return false;
	}
	if (!page_is_mergeable(sbi, bio, last_blkaddr, cur_blkaddr))
		return false;
	return io_type_is_mergeable(io, fio);
}

static void add_bio_entry(struct f2fs_sb_info *sbi, struct bio *bio,
				struct page *page, enum temp_type temp)
{
	struct f2fs_bio_info *io = sbi->write_io[DATA] + temp;
	struct bio_entry *be;

	be = f2fs_kmem_cache_alloc(bio_entry_slab, GFP_NOFS, true, NULL);
	be->bio = bio;
	bio_get(bio);

	if (bio_add_page(bio, page, PAGE_SIZE, 0) != PAGE_SIZE)
		f2fs_bug_on(sbi, 1);

	down_write(&io->bio_list_lock);
	list_add_tail(&be->list, &io->bio_list);
	up_write(&io->bio_list_lock);
}

static void del_bio_entry(struct bio_entry *be)
{
	list_del(&be->list);
	kmem_cache_free(bio_entry_slab, be);
}

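/*
 * Try to merge an in-place-update page into the bio cached on the
 * per-temperature bio list; if the page cannot be merged, the cached bio
 * is submitted and -EAGAIN tells the caller to allocate a new bio.
 */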
static int add_ipu_page(struct f2fs_io_info *fio, struct bio **bio,
							struct page *page)
{
	struct f2fs_sb_info *sbi = fio->sbi;
	enum temp_type temp;
	bool found = false;
	int ret = -EAGAIN;

	for (temp = HOT; temp < NR_TEMP_TYPE && !found; temp++) {
		struct f2fs_bio_info *io = sbi->write_io[DATA] + temp;
		struct list_head *head = &io->bio_list;
		struct bio_entry *be;

		down_write(&io->bio_list_lock);
		list_for_each_entry(be, head, list) {
			if (be->bio != *bio)
				continue;

			found = true;

			f2fs_bug_on(sbi, !page_is_mergeable(sbi, *bio,
							    *fio->last_block,
							    fio->new_blkaddr));
			if (f2fs_crypt_mergeable_bio(*bio,
					fio->page->mapping->host,
					fio->page->index, fio) &&
			    bio_add_page(*bio, page, PAGE_SIZE, 0) ==
					PAGE_SIZE) {
				ret = 0;
				break;
			}

			/* page can't be merged into bio; submit the bio */
			del_bio_entry(be);
			__submit_bio(sbi, *bio, DATA);
			break;
		}
		up_write(&io->bio_list_lock);
	}

	if (ret) {
		bio_put(*bio);
		*bio = NULL;
	}

	return ret;
}

void f2fs_submit_merged_ipu_write(struct f2fs_sb_info *sbi,
					struct bio **bio, struct page *page)
{
	enum temp_type temp;
	bool found = false;
	struct bio *target = bio ? *bio : NULL;

	for (temp = HOT; temp < NR_TEMP_TYPE && !found; temp++) {
		struct f2fs_bio_info *io = sbi->write_io[DATA] + temp;
		struct list_head *head = &io->bio_list;
		struct bio_entry *be;

		if (list_empty(head))
			continue;

		down_read(&io->bio_list_lock);
		list_for_each_entry(be, head, list) {
			if (target)
				found = (target == be->bio);
			else
				found = __has_merged_page(be->bio, NULL,
								page, 0);
			if (found)
				break;
		}
		up_read(&io->bio_list_lock);

		if (!found)
			continue;

		found = false;

		down_write(&io->bio_list_lock);
		list_for_each_entry(be, head, list) {
			if (target)
				found = (target == be->bio);
			else
				found = __has_merged_page(be->bio, NULL,
								page, 0);
			if (found) {
				target = be->bio;
				del_bio_entry(be);
				break;
			}
		}
		up_write(&io->bio_list_lock);
	}

	if (found)
		__submit_bio(sbi, target, DATA);
	if (bio && *bio) {
		bio_put(*bio);
		*bio = NULL;
	}
}

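/*
 * Write one page through the merged in-place-update path: append it to a
 * cached bio when the block is contiguous and crypto-compatible, otherwise
 * start a new bio and stash it on the per-temperature bio list.
 */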
int f2fs_merge_page_bio(struct f2fs_io_info *fio)
{
	struct bio *bio = *fio->bio;
	struct page *page = fio->encrypted_page ?
			fio->encrypted_page : fio->page;

	if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr,
			__is_meta_io(fio) ? META_GENERIC : DATA_GENERIC))
		return -EFSCORRUPTED;

	trace_f2fs_submit_page_bio(page, fio);

	if (bio && !page_is_mergeable(fio->sbi, bio, *fio->last_block,
						fio->new_blkaddr))
		f2fs_submit_merged_ipu_write(fio->sbi, &bio, NULL);
alloc_new:
	if (!bio) {
		bio = __bio_alloc(fio, BIO_MAX_VECS);
		__attach_io_flag(fio);
		f2fs_set_bio_crypt_ctx(bio, fio->page->mapping->host,
				       fio->page->index, fio, GFP_NOIO);
		bio_set_op_attrs(bio, fio->op, fio->op_flags);

		add_bio_entry(fio->sbi, bio, page, fio->temp);
	} else {
		if (add_ipu_page(fio, &bio, page))
			goto alloc_new;
	}

	if (fio->io_wbc)
		wbc_account_cgroup_owner(fio->io_wbc, page, PAGE_SIZE);

	inc_page_count(fio->sbi, WB_DATA_TYPE(page));

	*fio->last_block = fio->new_blkaddr;
	*fio->bio = bio;

	return 0;
}

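/*
 * Queue one page into the per-(type, temperature) write bio, first
 * submitting the current bio whenever the new page cannot be merged
 * into it.
 */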
void f2fs_submit_page_write(struct f2fs_io_info *fio)
{
	struct f2fs_sb_info *sbi = fio->sbi;
	enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
	struct f2fs_bio_info *io = sbi->write_io[btype] + fio->temp;
	struct page *bio_page;

	f2fs_bug_on(sbi, is_read_io(fio->op));

	down_write(&io->io_rwsem);
next:
	if (fio->in_list) {
		spin_lock(&io->io_lock);
		if (list_empty(&io->io_list)) {
			spin_unlock(&io->io_lock);
			goto out;
		}
		fio = list_first_entry(&io->io_list,
						struct f2fs_io_info, list);
		list_del(&fio->list);
		spin_unlock(&io->io_lock);
	}

	verify_fio_blkaddr(fio);

	if (fio->encrypted_page)
		bio_page = fio->encrypted_page;
	else if (fio->compressed_page)
		bio_page = fio->compressed_page;
	else
		bio_page = fio->page;

	/* set submitted = true as a return value */
	fio->submitted = true;

	inc_page_count(sbi, WB_DATA_TYPE(bio_page));

	if (io->bio &&
	    (!io_is_mergeable(sbi, io->bio, io, fio, io->last_block_in_bio,
			      fio->new_blkaddr) ||
	     !f2fs_crypt_mergeable_bio(io->bio, fio->page->mapping->host,
				       bio_page->index, fio)))
		__submit_merged_bio(io);
alloc_new:
	if (io->bio == NULL) {
		if (F2FS_IO_ALIGNED(sbi) &&
				(fio->type == DATA || fio->type == NODE) &&
				fio->new_blkaddr & F2FS_IO_SIZE_MASK(sbi)) {
			dec_page_count(sbi, WB_DATA_TYPE(bio_page));
			fio->retry = true;
			goto skip;
		}
		io->bio = __bio_alloc(fio, BIO_MAX_VECS);
		f2fs_set_bio_crypt_ctx(io->bio, fio->page->mapping->host,
				       bio_page->index, fio, GFP_NOIO);
		io->fio = *fio;
	}

	if (bio_add_page(io->bio, bio_page, PAGE_SIZE, 0) < PAGE_SIZE) {
		__submit_merged_bio(io);
		goto alloc_new;
	}

	if (fio->io_wbc)
		wbc_account_cgroup_owner(fio->io_wbc, bio_page, PAGE_SIZE);

	io->last_block_in_bio = fio->new_blkaddr;

	trace_f2fs_submit_page_write(fio->page, fio);
skip:
	if (fio->in_list)
		goto next;
out:
	if (is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN) ||
				!f2fs_is_checkpoint_ready(sbi))
		__submit_merged_bio(io);
	up_write(&io->io_rwsem);
}

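/*
 * Allocate a read bio for @nr_pages and, when decryption, decompression,
 * or verity post-processing may be needed, attach a bio_post_read_ctx.
 */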
static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr,
				      unsigned nr_pages, unsigned op_flag,
				      pgoff_t first_idx, bool for_write)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct bio *bio;
	struct bio_post_read_ctx *ctx = NULL;
	unsigned int post_read_steps = 0;

	bio = bio_alloc_bioset(for_write ? GFP_NOIO : GFP_KERNEL,
			       bio_max_segs(nr_pages), &f2fs_bioset);
	if (!bio)
		return ERR_PTR(-ENOMEM);

	f2fs_set_bio_crypt_ctx(bio, inode, first_idx, NULL, GFP_NOFS);

	f2fs_target_device(sbi, blkaddr, bio);
	bio->bi_end_io = f2fs_read_end_io;
	bio_set_op_attrs(bio, REQ_OP_READ, op_flag);

	if (fscrypt_inode_uses_fs_layer_crypto(inode))
		post_read_steps |= STEP_DECRYPT;

	if (f2fs_need_verity(inode, first_idx))
		post_read_steps |= STEP_VERITY;

	/*
	 * STEP_DECOMPRESS is handled specially, since a compressed file might
	 * contain both compressed and uncompressed clusters. We'll allocate a
	 * bio_post_read_ctx if the file is compressed, but the caller is
	 * responsible for enabling STEP_DECOMPRESS if it's actually needed.
	 */

	if (post_read_steps || f2fs_compressed_file(inode)) {
		/* Due to the mempool, this never fails. */
		ctx = mempool_alloc(bio_post_read_ctx_pool, GFP_NOFS);
		ctx->bio = bio;
		ctx->sbi = sbi;
		ctx->enabled_steps = post_read_steps;
		ctx->fs_blkaddr = blkaddr;
		bio->bi_private = ctx;
	}
	iostat_alloc_and_bind_ctx(sbi, bio, ctx);

	return bio;
}

/* This read path can handle encrypted data */
static int f2fs_submit_page_read(struct inode *inode, struct page *page,
				 block_t blkaddr, int op_flags, bool for_write)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct bio *bio;

	bio = f2fs_grab_read_bio(inode, blkaddr, 1, op_flags,
					page->index, for_write);
	if (IS_ERR(bio))
		return PTR_ERR(bio);

	/* wait for GCed page writeback via META_MAPPING */
	f2fs_wait_on_block_writeback(inode, blkaddr);

	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
		bio_put(bio);
		return -EFAULT;
	}
	ClearPageError(page);
	inc_page_count(sbi, F2FS_RD_DATA);
	f2fs_update_iostat(sbi, FS_DATA_READ_IO, F2FS_BLKSIZE);
	__submit_bio(sbi, bio, DATA);
	return 0;
}

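/* Store dn->data_blkaddr in the block address slot of the node page. */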
static void __set_data_blkaddr(struct dnode_of_data *dn)
{
	struct f2fs_node *rn = F2FS_NODE(dn->node_page);
	__le32 *addr_array;
	int base = 0;

	if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode))
		base = get_extra_isize(dn->inode);

	/* Get physical address of data block */
	addr_array = blkaddr_in_node(rn);
	addr_array[base + dn->ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
}

/*
 * Lock ordering for the change of data block address:
 * ->data_page
 *  ->node_page
 *    update block addresses in the node page
 */
void f2fs_set_data_blkaddr(struct dnode_of_data *dn)
{
	f2fs_wait_on_page_writeback(dn->node_page, NODE, true, true);
	__set_data_blkaddr(dn);
	if (set_page_dirty(dn->node_page))
		dn->node_changed = true;
}

void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr)
{
	dn->data_blkaddr = blkaddr;
	f2fs_set_data_blkaddr(dn);
	f2fs_update_extent_cache(dn);
}

/* dn->ofs_in_node will be returned with up-to-date last block pointer */
int f2fs_reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	int err;

	if (!count)
		return 0;

	if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
		return -EPERM;
	if (unlikely((err = inc_valid_block_count(sbi, dn->inode, &count))))
		return err;

	trace_f2fs_reserve_new_blocks(dn->inode, dn->nid,
						dn->ofs_in_node, count);

	f2fs_wait_on_page_writeback(dn->node_page, NODE, true, true);

	for (; count > 0; dn->ofs_in_node++) {
		block_t blkaddr = f2fs_data_blkaddr(dn);

		if (blkaddr == NULL_ADDR) {
			dn->data_blkaddr = NEW_ADDR;
			__set_data_blkaddr(dn);
			count--;
		}
	}

	if (set_page_dirty(dn->node_page))
		dn->node_changed = true;
	return 0;
}

/* Should keep dn->ofs_in_node unchanged */
int f2fs_reserve_new_block(struct dnode_of_data *dn)
{
	unsigned int ofs_in_node = dn->ofs_in_node;
	int ret;

	ret = f2fs_reserve_new_blocks(dn, 1);
	dn->ofs_in_node = ofs_in_node;
	return ret;
}

int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
{
	bool need_put = dn->inode_page ? false : true;
	int err;

	err = f2fs_get_dnode_of_data(dn, index, ALLOC_NODE);
	if (err)
		return err;

	if (dn->data_blkaddr == NULL_ADDR)
		err = f2fs_reserve_new_block(dn);
	if (err || need_put)
		f2fs_put_dnode(dn);
	return err;
}

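/*
 * Resolve the block address for @index, consulting the extent cache first
 * and reserving a new block only on a cache miss.
 */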
Fan Li759af1c2015-08-05 15:52:16 +08001148int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index)
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001149{
Chao Yu94afd6d2021-08-04 10:23:48 +08001150 struct extent_info ei = {0, };
Fan Li759af1c2015-08-05 15:52:16 +08001151 struct inode *inode = dn->inode;
Chao Yu028a41e2015-03-19 19:26:02 +08001152
Fan Li759af1c2015-08-05 15:52:16 +08001153 if (f2fs_lookup_extent_cache(inode, index, &ei)) {
1154 dn->data_blkaddr = ei.blk + index - ei.fofs;
1155 return 0;
Chao Yu429511c2015-02-05 17:54:31 +08001156 }
1157
Fan Li759af1c2015-08-05 15:52:16 +08001158 return f2fs_reserve_block(dn, index);
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001159}
1160
Chao Yu4d57b862018-05-30 00:20:41 +08001161struct page *f2fs_get_read_data_page(struct inode *inode, pgoff_t index,
Mike Christie04d328d2016-06-05 14:31:55 -05001162 int op_flags, bool for_write)
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001163{
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001164 struct address_space *mapping = inode->i_mapping;
1165 struct dnode_of_data dn;
1166 struct page *page;
Chao Yu94afd6d2021-08-04 10:23:48 +08001167 struct extent_info ei = {0, };
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001168 int err;
Jaegeuk Kim4375a332015-04-23 12:04:33 -07001169
Jaegeuk Kima56c7c62015-10-09 15:11:38 -07001170 page = f2fs_grab_cache_page(mapping, index, for_write);
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001171 if (!page)
1172 return ERR_PTR(-ENOMEM);
1173
Chao Yucb3bc9e2015-02-05 18:03:40 +08001174 if (f2fs_lookup_extent_cache(inode, index, &ei)) {
1175 dn.data_blkaddr = ei.blk + index - ei.fofs;
Chao Yu93770ab2019-04-15 15:26:32 +08001176 if (!f2fs_is_valid_blkaddr(F2FS_I_SB(inode), dn.data_blkaddr,
1177 DATA_GENERIC_ENHANCE_READ)) {
Chao Yu10f966b2019-06-20 11:36:14 +08001178 err = -EFSCORRUPTED;
Chao Yu93770ab2019-04-15 15:26:32 +08001179 goto put_err;
1180 }
Chao Yucb3bc9e2015-02-05 18:03:40 +08001181 goto got_it;
1182 }
1183
Jaegeuk Kim650495d2013-05-13 08:38:35 +09001184 set_new_dnode(&dn, inode, NULL, NULL, 0);
Chao Yu4d57b862018-05-30 00:20:41 +08001185 err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
Jaegeuk Kim86531d62015-07-15 13:08:21 -07001186 if (err)
1187 goto put_err;
Jaegeuk Kim650495d2013-05-13 08:38:35 +09001188 f2fs_put_dnode(&dn);
1189
Jaegeuk Kim6bacf522013-12-06 15:00:58 +09001190 if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
Jaegeuk Kim86531d62015-07-15 13:08:21 -07001191 err = -ENOENT;
1192 goto put_err;
Jaegeuk Kim650495d2013-05-13 08:38:35 +09001193 }
Chao Yu93770ab2019-04-15 15:26:32 +08001194 if (dn.data_blkaddr != NEW_ADDR &&
1195 !f2fs_is_valid_blkaddr(F2FS_I_SB(inode),
1196 dn.data_blkaddr,
1197 DATA_GENERIC_ENHANCE)) {
Chao Yu10f966b2019-06-20 11:36:14 +08001198 err = -EFSCORRUPTED;
Chao Yu93770ab2019-04-15 15:26:32 +08001199 goto put_err;
1200 }
Chao Yucb3bc9e2015-02-05 18:03:40 +08001201got_it:
Jaegeuk Kim43f3eae2015-04-30 17:00:33 -07001202 if (PageUptodate(page)) {
1203 unlock_page(page);
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001204 return page;
Jaegeuk Kim43f3eae2015-04-30 17:00:33 -07001205 }
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001206
Jaegeuk Kimd59ff4d2013-08-20 19:13:07 +09001207 /*
1208 * A new dentry page is allocated but not able to be written, since its
1209 * new inode page couldn't be allocated due to -ENOSPC.
1210 * In such the case, its blkaddr can be remained as NEW_ADDR.
Chao Yu4d57b862018-05-30 00:20:41 +08001211 * see, f2fs_add_link -> f2fs_get_new_data_page ->
1212 * f2fs_init_inode_metadata.
Jaegeuk Kimd59ff4d2013-08-20 19:13:07 +09001213 */
1214 if (dn.data_blkaddr == NEW_ADDR) {
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001215 zero_user_segment(page, 0, PAGE_SIZE);
Jaegeuk Kim237c0792016-06-30 18:49:15 -07001216 if (!PageUptodate(page))
1217 SetPageUptodate(page);
Jaegeuk Kim43f3eae2015-04-30 17:00:33 -07001218 unlock_page(page);
Jaegeuk Kimd59ff4d2013-08-20 19:13:07 +09001219 return page;
1220 }
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001221
Jia Yangb7973092020-07-01 10:27:40 +08001222 err = f2fs_submit_page_read(inode, page, dn.data_blkaddr,
1223 op_flags, for_write);
Jaegeuk Kim393ff912013-03-08 21:29:23 +09001224 if (err)
Jaegeuk Kim86531d62015-07-15 13:08:21 -07001225 goto put_err;
Jaegeuk Kim43f3eae2015-04-30 17:00:33 -07001226 return page;
Jaegeuk Kim86531d62015-07-15 13:08:21 -07001227
1228put_err:
1229 f2fs_put_page(page, 1);
1230 return ERR_PTR(err);
Jaegeuk Kim43f3eae2015-04-30 17:00:33 -07001231}
Jaegeuk Kim393ff912013-03-08 21:29:23 +09001232
Chao Yu4d57b862018-05-30 00:20:41 +08001233struct page *f2fs_find_data_page(struct inode *inode, pgoff_t index)
Jaegeuk Kim43f3eae2015-04-30 17:00:33 -07001234{
1235 struct address_space *mapping = inode->i_mapping;
1236 struct page *page;
1237
1238 page = find_get_page(mapping, index);
1239 if (page && PageUptodate(page))
1240 return page;
1241 f2fs_put_page(page, 0);
1242
Chao Yu4d57b862018-05-30 00:20:41 +08001243 page = f2fs_get_read_data_page(inode, index, 0, false);
Jaegeuk Kim43f3eae2015-04-30 17:00:33 -07001244 if (IS_ERR(page))
1245 return page;
1246
1247 if (PageUptodate(page))
1248 return page;
1249
1250 wait_on_page_locked(page);
1251 if (unlikely(!PageUptodate(page))) {
1252 f2fs_put_page(page, 0);
1253 return ERR_PTR(-EIO);
1254 }
1255 return page;
1256}
1257
1258/*
1259 * If it tries to access a hole, return an error.
1260 * Because, the callers, functions in dir.c and GC, should be able to know
1261 * whether this page exists or not.
1262 */
Chao Yu4d57b862018-05-30 00:20:41 +08001263struct page *f2fs_get_lock_data_page(struct inode *inode, pgoff_t index,
Jaegeuk Kima56c7c62015-10-09 15:11:38 -07001264 bool for_write)
Jaegeuk Kim43f3eae2015-04-30 17:00:33 -07001265{
1266 struct address_space *mapping = inode->i_mapping;
1267 struct page *page;
1268repeat:
Chao Yu4d57b862018-05-30 00:20:41 +08001269 page = f2fs_get_read_data_page(inode, index, 0, for_write);
Jaegeuk Kim43f3eae2015-04-30 17:00:33 -07001270 if (IS_ERR(page))
1271 return page;
1272
1273 /* wait for read completion */
Jaegeuk Kim393ff912013-03-08 21:29:23 +09001274 lock_page(page);
Jaegeuk Kim6bacf522013-12-06 15:00:58 +09001275 if (unlikely(page->mapping != mapping)) {
Jaegeuk Kimafcb7ca02013-04-26 11:55:17 +09001276 f2fs_put_page(page, 1);
1277 goto repeat;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001278 }
Chao Yu1563ac72016-07-03 22:05:12 +08001279 if (unlikely(!PageUptodate(page))) {
1280 f2fs_put_page(page, 1);
1281 return ERR_PTR(-EIO);
1282 }
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001283 return page;
1284}
1285
Jaegeuk Kim0a8165d2012-11-29 13:28:09 +09001286/*
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001287 * Caller ensures that this data page is never allocated.
1288 * A new zero-filled data page is allocated in the page cache.
Jaegeuk Kim39936832012-11-22 16:21:29 +09001289 *
Chao Yu4f4124d2013-12-21 18:02:14 +08001290 * Also, caller should grab and release a rwsem by calling f2fs_lock_op() and
1291 * f2fs_unlock_op().
Chao Yu470f00e2015-07-14 18:14:06 +08001292 * Note that ipage is set only by make_empty_dir, and if any error occurs,
1293 * ipage should be released by this function.
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001294 */
Chao Yu4d57b862018-05-30 00:20:41 +08001295struct page *f2fs_get_new_data_page(struct inode *inode,
Jaegeuk Kima8865372013-12-27 17:04:17 +09001296 struct page *ipage, pgoff_t index, bool new_i_size)
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001297{
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001298 struct address_space *mapping = inode->i_mapping;
1299 struct page *page;
1300 struct dnode_of_data dn;
1301 int err;
Jaegeuk Kim76121182016-01-01 22:03:47 -08001302
Jaegeuk Kima56c7c62015-10-09 15:11:38 -07001303 page = f2fs_grab_cache_page(mapping, index, true);
Chao Yu470f00e2015-07-14 18:14:06 +08001304 if (!page) {
1305 /*
1306 * before exiting, we should make sure ipage will be released
1307 * if any error occurs.
1308 */
1309 f2fs_put_page(ipage, 1);
Jaegeuk Kim01f28612015-04-29 11:18:42 -07001310 return ERR_PTR(-ENOMEM);
Chao Yu470f00e2015-07-14 18:14:06 +08001311 }
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001312
Jaegeuk Kima8865372013-12-27 17:04:17 +09001313 set_new_dnode(&dn, inode, ipage, NULL, 0);
Huajun Lib6009652013-11-10 23:13:18 +08001314 err = f2fs_reserve_block(&dn, index);
Jaegeuk Kim01f28612015-04-29 11:18:42 -07001315 if (err) {
1316 f2fs_put_page(page, 1);
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001317 return ERR_PTR(err);
Jaegeuk Kima8865372013-12-27 17:04:17 +09001318 }
Jaegeuk Kim01f28612015-04-29 11:18:42 -07001319 if (!ipage)
1320 f2fs_put_dnode(&dn);
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001321
1322 if (PageUptodate(page))
Jaegeuk Kim01f28612015-04-29 11:18:42 -07001323 goto got_it;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001324
1325 if (dn.data_blkaddr == NEW_ADDR) {
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001326 zero_user_segment(page, 0, PAGE_SIZE);
Jaegeuk Kim237c0792016-06-30 18:49:15 -07001327 if (!PageUptodate(page))
1328 SetPageUptodate(page);
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001329 } else {
Jaegeuk Kim4375a332015-04-23 12:04:33 -07001330 f2fs_put_page(page, 1);
Jaegeuk Kima8865372013-12-27 17:04:17 +09001331
Jaegeuk Kim76121182016-01-01 22:03:47 -08001332 /* if ipage exists, blkaddr should be NEW_ADDR */
1333 f2fs_bug_on(F2FS_I_SB(inode), ipage);
Chao Yu4d57b862018-05-30 00:20:41 +08001334 page = f2fs_get_lock_data_page(inode, index, true);
Jaegeuk Kim4375a332015-04-23 12:04:33 -07001335 if (IS_ERR(page))
Jaegeuk Kim76121182016-01-01 22:03:47 -08001336 return page;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001337 }
Jaegeuk Kim01f28612015-04-29 11:18:42 -07001338got_it:
Chao Yu9edcdab2015-09-11 14:43:52 +08001339 if (new_i_size && i_size_read(inode) <
Jaegeuk Kimee6d1822016-05-20 16:32:49 -07001340 ((loff_t)(index + 1) << PAGE_SHIFT))
Jaegeuk Kimfc9581c2016-05-20 09:22:03 -07001341 f2fs_i_size_write(inode, ((loff_t)(index + 1) << PAGE_SHIFT));
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001342 return page;
1343}
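
/*
 * Illustrative call site, as a sketch (the real caller lives in dir.c's
 * make_empty_dir): the locked ipage is handed over, so on failure it is
 * released in here and the caller must not touch it again:
 *
 *	dentry_page = f2fs_get_new_data_page(inode, ipage, 0, true);
 *	if (IS_ERR(dentry_page))
 *		return PTR_ERR(dentry_page);
 */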
1344
Hyunchul Leed5097be2017-11-28 09:23:00 +09001345static int __allocate_data_block(struct dnode_of_data *dn, int seg_type)
Jaegeuk Kimbfad7c22013-12-16 19:04:05 +09001346{
Jaegeuk Kim40813632014-09-02 15:31:18 -07001347 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
Jaegeuk Kimbfad7c22013-12-16 19:04:05 +09001348 struct f2fs_summary sum;
Jaegeuk Kimbfad7c22013-12-16 19:04:05 +09001349 struct node_info ni;
Chao Yu6aa58d82018-08-14 22:37:25 +08001350 block_t old_blkaddr;
Chao Yu46008c62016-05-09 19:56:30 +08001351 blkcnt_t count = 1;
Chao Yu0abd6752017-07-09 00:13:07 +08001352 int err;
Jaegeuk Kimbfad7c22013-12-16 19:04:05 +09001353
Jaegeuk Kim91942322016-05-20 10:13:22 -07001354 if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
Jaegeuk Kimbfad7c22013-12-16 19:04:05 +09001355 return -EPERM;
Chao Yudf6136e2015-03-23 10:33:37 +08001356
Chao Yu77357302018-07-17 00:02:17 +08001357 err = f2fs_get_node_info(sbi, dn->nid, &ni);
1358 if (err)
1359 return err;
1360
Chao Yua2ced1c2020-02-14 17:44:10 +08001361 dn->data_blkaddr = f2fs_data_blkaddr(dn);
Chao Yuf847c692018-09-27 18:34:52 +08001362 if (dn->data_blkaddr != NULL_ADDR)
Chao Yudf6136e2015-03-23 10:33:37 +08001363 goto alloc;
1364
Chao Yu0abd6752017-07-09 00:13:07 +08001365 if (unlikely((err = inc_valid_block_count(sbi, dn->inode, &count))))
1366 return err;
Jaegeuk Kimbfad7c22013-12-16 19:04:05 +09001367
Chao Yudf6136e2015-03-23 10:33:37 +08001368alloc:
Jaegeuk Kimbfad7c22013-12-16 19:04:05 +09001369 set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);
Chao Yu6aa58d82018-08-14 22:37:25 +08001370 old_blkaddr = dn->data_blkaddr;
1371 f2fs_allocate_data_block(sbi, NULL, old_blkaddr, &dn->data_blkaddr,
Chao Yu093749e2020-08-04 21:14:49 +08001372 &sum, seg_type, NULL);
Chao Yu6ce19af2021-05-20 19:51:50 +08001373 if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO) {
Chao Yu6aa58d82018-08-14 22:37:25 +08001374 invalidate_mapping_pages(META_MAPPING(sbi),
1375 old_blkaddr, old_blkaddr);
Chao Yu6ce19af2021-05-20 19:51:50 +08001376 f2fs_invalidate_compress_page(sbi, old_blkaddr);
1377 }
Chao Yu86f35dc2019-08-28 17:33:35 +08001378 f2fs_update_data_blkaddr(dn, dn->data_blkaddr);
Jaegeuk Kimbfad7c22013-12-16 19:04:05 +09001379
Jaegeuk Kim0a4daae2018-09-19 15:28:40 -07001380 /*
1381 * i_size will be updated by direct_IO. Otherwise, we'll get stale
1382 * data from the unwritten block via dio_read.
1383 */
Jaegeuk Kimbfad7c22013-12-16 19:04:05 +09001384 return 0;
1385}
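
/*
 * In short: __allocate_data_block() charges one block of quota for a
 * NULL_ADDR slot, asks the segment manager for a new block address of
 * seg_type, and invalidates any stale META_MAPPING page for the old
 * address so readers cannot see obsolete data.
 */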
1386
Jaegeuk Kima7de6082016-11-11 16:31:56 -08001387int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from)
Jaegeuk Kim59b802e2015-02-09 12:09:53 -08001388{
Jaegeuk Kimb439b102016-02-03 13:09:09 -08001389 struct inode *inode = file_inode(iocb->ki_filp);
Chao Yu5b8db7f2016-01-26 15:38:29 +08001390 struct f2fs_map_blocks map;
Chao Yud6d478a12018-01-03 17:30:19 +08001391 int flag;
Jaegeuk Kima7de6082016-11-11 16:31:56 -08001392 int err = 0;
Chao Yud6d478a12018-01-03 17:30:19 +08001393 bool direct_io = iocb->ki_flags & IOCB_DIRECT;
Jaegeuk Kim59b802e2015-02-09 12:09:53 -08001394
Jaegeuk Kim0080c502016-05-07 08:52:57 -07001395 map.m_lblk = F2FS_BLK_ALIGN(iocb->ki_pos);
Chao Yudfd02e42016-08-20 15:12:01 +08001396 map.m_len = F2FS_BYTES_TO_BLK(iocb->ki_pos + iov_iter_count(from));
1397 if (map.m_len > map.m_lblk)
1398 map.m_len -= map.m_lblk;
1399 else
1400 map.m_len = 0;
1401
Chao Yuda859852016-01-26 15:42:58 +08001402 map.m_next_pgofs = NULL;
Chao Yuc4020b22018-01-11 14:42:30 +08001403 map.m_next_extent = NULL;
Hyunchul Leed5097be2017-11-28 09:23:00 +09001404 map.m_seg_type = NO_CHECK_TYPE;
Chao Yuf9d6d052018-11-13 14:33:45 +08001405 map.m_may_create = true;
Jaegeuk Kim59b802e2015-02-09 12:09:53 -08001406
Chao Yud6d478a12018-01-03 17:30:19 +08001407 if (direct_io) {
Chao Yu4d57b862018-05-30 00:20:41 +08001408 map.m_seg_type = f2fs_rw_hint_to_seg_type(iocb->ki_hint);
Chao Yuf847c692018-09-27 18:34:52 +08001409 flag = f2fs_force_buffered_io(inode, iocb, from) ?
Chao Yud6d478a12018-01-03 17:30:19 +08001410 F2FS_GET_BLOCK_PRE_AIO :
1411 F2FS_GET_BLOCK_PRE_DIO;
1412 goto map_blocks;
Hyunchul Leed5097be2017-11-28 09:23:00 +09001413 }
Chao Yuf2470372017-07-19 00:19:05 +08001414 if (iocb->ki_pos + iov_iter_count(from) > MAX_INLINE_DATA(inode)) {
Jaegeuk Kima7de6082016-11-11 16:31:56 -08001415 err = f2fs_convert_inline_inode(inode);
1416 if (err)
1417 return err;
Jaegeuk Kim24b84912016-02-03 13:49:44 -08001418 }
Chao Yud6d478a12018-01-03 17:30:19 +08001419 if (f2fs_has_inline_data(inode))
Sheng Yong250066452017-11-22 18:23:39 +08001420 return err;
Chao Yud6d478a12018-01-03 17:30:19 +08001421
1422 flag = F2FS_GET_BLOCK_PRE_AIO;
1423
1424map_blocks:
1425 err = f2fs_map_blocks(inode, &map, 1, flag);
1426 if (map.m_len > 0 && err == -ENOSPC) {
1427 if (!direct_io)
1428 set_inode_flag(inode, FI_NO_PREALLOC);
1429 err = 0;
Sheng Yong250066452017-11-22 18:23:39 +08001430 }
Jaegeuk Kima7de6082016-11-11 16:31:56 -08001431 return err;
Jaegeuk Kim59b802e2015-02-09 12:09:53 -08001432}
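
/*
 * Summary of the preallocation policy above: direct I/O maps blocks
 * eagerly (F2FS_GET_BLOCK_PRE_DIO), buffered writes only reserve them
 * (F2FS_GET_BLOCK_PRE_AIO), and -ENOSPC here is tolerated by setting
 * FI_NO_PREALLOC so the buffered path falls back to per-page allocation.
 */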
1433
Chao Yu0ef81832020-06-18 14:36:22 +08001434void f2fs_do_map_lock(struct f2fs_sb_info *sbi, int flag, bool lock)
Yunlei He59c90812017-03-13 20:22:18 +08001435{
1436 if (flag == F2FS_GET_BLOCK_PRE_AIO) {
1437 if (lock)
1438 down_read(&sbi->node_change);
1439 else
1440 up_read(&sbi->node_change);
1441 } else {
1442 if (lock)
1443 f2fs_lock_op(sbi);
1444 else
1445 f2fs_unlock_op(sbi);
1446 }
1447}
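
/*
 * Calls must be paired with the same flag, e.g. (a sketch):
 *
 *	f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true);
 *	...modify dnode blocks...
 *	f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
 *
 * PRE_AIO only read-locks sbi->node_change; every other flag takes the
 * heavier f2fs_lock_op()/f2fs_unlock_op() checkpoint lock.
 */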
1448
Jaegeuk Kim0a8165d2012-11-29 13:28:09 +09001449/*
Chao Yu7a88ddb2020-02-27 19:30:05 +08001450 * f2fs_map_blocks() tries to find or build a mapping relationship that
1451 * maps contiguous logical blocks to physical blocks, and returns such
1452 * info via the f2fs_map_blocks structure.
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001453 */
Chao Yud323d002015-10-27 09:53:45 +08001454int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
Chao Yue2b4e2b2015-08-19 19:11:19 +08001455 int create, int flag)
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001456{
Jaegeuk Kim003a3e12015-04-06 19:55:34 -07001457 unsigned int maxblocks = map->m_len;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001458 struct dnode_of_data dn;
Chao Yuf9811702015-09-21 20:17:52 +08001459 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
Chao Yuf9d6d052018-11-13 14:33:45 +08001460 int mode = map->m_may_create ? ALLOC_NODE : LOOKUP_NODE;
Chao Yu46008c62016-05-09 19:56:30 +08001461 pgoff_t pgofs, end_offset, end;
Jaegeuk Kimbfad7c22013-12-16 19:04:05 +09001462 int err = 0, ofs = 1;
Chao Yu46008c62016-05-09 19:56:30 +08001463 unsigned int ofs_in_node, last_ofs_in_node;
1464 blkcnt_t prealloc;
Chao Yu94afd6d2021-08-04 10:23:48 +08001465 struct extent_info ei = {0, };
Fan Li7df3a432015-12-17 13:20:59 +08001466 block_t blkaddr;
Chao Yuc4020b22018-01-11 14:42:30 +08001467 unsigned int start_pgofs;
Chao Yu71f2c822021-09-01 14:39:20 +08001468 int bidx = 0;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001469
Chao Yudfd02e42016-08-20 15:12:01 +08001470 if (!maxblocks)
1471 return 0;
1472
Chao Yu71f2c822021-09-01 14:39:20 +08001473 map->m_bdev = inode->i_sb->s_bdev;
1474 map->m_multidev_dio =
1475 f2fs_allow_multi_device_dio(F2FS_I_SB(inode), flag);
1476
Jaegeuk Kim003a3e12015-04-06 19:55:34 -07001477 map->m_len = 0;
1478 map->m_flags = 0;
1479
1480 /* it only supports block size == page size */
1481 pgofs = (pgoff_t)map->m_lblk;
Chao Yu46008c62016-05-09 19:56:30 +08001482 end = pgofs + maxblocks;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001483
Jaegeuk Kim24b84912016-02-03 13:49:44 -08001484 if (!create && f2fs_lookup_extent_cache(inode, pgofs, &ei)) {
Chao Yub0332a02020-02-14 17:44:12 +08001485 if (f2fs_lfs_mode(sbi) && flag == F2FS_GET_BLOCK_DIO &&
Jia Zhuf4f0b672018-11-20 04:29:35 +08001486 map->m_may_create)
1487 goto next_dnode;
1488
Jaegeuk Kim003a3e12015-04-06 19:55:34 -07001489 map->m_pblk = ei.blk + pgofs - ei.fofs;
1490 map->m_len = min((pgoff_t)maxblocks, ei.fofs + ei.len - pgofs);
1491 map->m_flags = F2FS_MAP_MAPPED;
Chao Yuc4020b22018-01-11 14:42:30 +08001492 if (map->m_next_extent)
1493 *map->m_next_extent = pgofs + map->m_len;
Sahitya Tummala1e78e8b2018-10-10 10:56:22 +05301494
1495 /* for hardware encryption, but also to avoid a potential issue in the future */
1496 if (flag == F2FS_GET_BLOCK_DIO)
1497 f2fs_wait_on_block_writeback_range(inode,
1498 map->m_pblk, map->m_len);
Chao Yu71f2c822021-09-01 14:39:20 +08001499
1500 if (map->m_multidev_dio) {
1501 block_t blk_addr = map->m_pblk;
1502
1503 bidx = f2fs_target_device_index(sbi, map->m_pblk);
1504
1505 map->m_bdev = FDEV(bidx).bdev;
1506 map->m_pblk -= FDEV(bidx).start_blk;
1507 map->m_len = min(map->m_len,
1508 FDEV(bidx).end_blk + 1 - map->m_pblk);
1509
1510 if (map->m_may_create)
1511 f2fs_update_device_state(sbi, inode->i_ino,
1512 blk_addr, map->m_len);
1513 }
Jaegeuk Kimbfad7c22013-12-16 19:04:05 +09001514 goto out;
Chao Yua2e7d1b2015-02-05 17:50:30 +08001515 }
Jaegeuk Kimbfad7c22013-12-16 19:04:05 +09001516
Chao Yu4fe71e82016-01-26 15:37:38 +08001517next_dnode:
Chao Yuf9d6d052018-11-13 14:33:45 +08001518 if (map->m_may_create)
Chao Yu0ef81832020-06-18 14:36:22 +08001519 f2fs_do_map_lock(sbi, flag, true);
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001520
1521 /* When reading holes, we need its node page */
1522 set_new_dnode(&dn, inode, NULL, NULL, 0);
Chao Yu4d57b862018-05-30 00:20:41 +08001523 err = f2fs_get_dnode_of_data(&dn, pgofs, mode);
Jaegeuk Kim1ec79082013-12-26 16:55:22 +09001524 if (err) {
Chao Yu43473f92016-05-05 19:13:02 +08001525 if (flag == F2FS_GET_BLOCK_BMAP)
1526 map->m_pblk = 0;
Chao Yuadf9ea82021-08-26 10:03:15 +08001527
Chao Yuda859852016-01-26 15:42:58 +08001528 if (err == -ENOENT) {
Chao Yuadf9ea82021-08-26 10:03:15 +08001529 /*
1530 * There is one exceptional case: read_node_page()
1531 * may return -ENOENT because the filesystem has been
1532 * shut down or hit cp_error, so force the error
1533 * number to EIO in that case.
1534 */
1535 if (map->m_may_create &&
1536 (is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN) ||
1537 f2fs_cp_error(sbi))) {
1538 err = -EIO;
1539 goto unlock_out;
1540 }
1541
Jaegeuk Kimbfad7c22013-12-16 19:04:05 +09001542 err = 0;
Chao Yuda859852016-01-26 15:42:58 +08001543 if (map->m_next_pgofs)
1544 *map->m_next_pgofs =
Chao Yu4d57b862018-05-30 00:20:41 +08001545 f2fs_get_next_page_offset(&dn, pgofs);
Chao Yuc4020b22018-01-11 14:42:30 +08001546 if (map->m_next_extent)
1547 *map->m_next_extent =
Chao Yu4d57b862018-05-30 00:20:41 +08001548 f2fs_get_next_page_offset(&dn, pgofs);
Chao Yuda859852016-01-26 15:42:58 +08001549 }
Jaegeuk Kimbfad7c22013-12-16 19:04:05 +09001550 goto unlock_out;
Namjae Jeon848753a2013-04-23 16:38:02 +09001551 }
Chao Yu973163f2015-09-18 16:51:51 +08001552
Chao Yuc4020b22018-01-11 14:42:30 +08001553 start_pgofs = pgofs;
Chao Yu46008c62016-05-09 19:56:30 +08001554 prealloc = 0;
Arnd Bergmann230436b32016-11-02 14:52:15 +01001555 last_ofs_in_node = ofs_in_node = dn.ofs_in_node;
Chao Yu81ca7352016-01-26 15:39:35 +08001556 end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
Jaegeuk Kimbfad7c22013-12-16 19:04:05 +09001557
Chao Yu4fe71e82016-01-26 15:37:38 +08001558next_block:
Chao Yua2ced1c2020-02-14 17:44:10 +08001559 blkaddr = f2fs_data_blkaddr(&dn);
Chao Yu973163f2015-09-18 16:51:51 +08001560
Chao Yuc9b60782018-08-01 19:13:44 +08001561 if (__is_valid_data_blkaddr(blkaddr) &&
Chao Yu93770ab2019-04-15 15:26:32 +08001562 !f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE)) {
Chao Yu10f966b2019-06-20 11:36:14 +08001563 err = -EFSCORRUPTED;
Chao Yuc9b60782018-08-01 19:13:44 +08001564 goto sync_out;
1565 }
1566
Chao Yu93770ab2019-04-15 15:26:32 +08001567 if (__is_valid_data_blkaddr(blkaddr)) {
Chao Yuf847c692018-09-27 18:34:52 +08001568 /* use out-of-place update for direct IO under LFS mode */
Chao Yub0332a02020-02-14 17:44:12 +08001569 if (f2fs_lfs_mode(sbi) && flag == F2FS_GET_BLOCK_DIO &&
Chao Yuf9d6d052018-11-13 14:33:45 +08001570 map->m_may_create) {
Chao Yuf847c692018-09-27 18:34:52 +08001571 err = __allocate_data_block(&dn, map->m_seg_type);
Chao Yu05e3600612019-08-28 17:33:36 +08001572 if (err)
1573 goto sync_out;
1574 blkaddr = dn.data_blkaddr;
1575 set_inode_flag(inode, FI_APPEND_WRITE);
Chao Yuf847c692018-09-27 18:34:52 +08001576 }
1577 } else {
Fan Li7df3a432015-12-17 13:20:59 +08001578 if (create) {
1579 if (unlikely(f2fs_cp_error(sbi))) {
1580 err = -EIO;
1581 goto sync_out;
Chao Yu973163f2015-09-18 16:51:51 +08001582 }
Jaegeuk Kim24b84912016-02-03 13:49:44 -08001583 if (flag == F2FS_GET_BLOCK_PRE_AIO) {
Chao Yu46008c62016-05-09 19:56:30 +08001584 if (blkaddr == NULL_ADDR) {
1585 prealloc++;
1586 last_ofs_in_node = dn.ofs_in_node;
1587 }
Jaegeuk Kim24b84912016-02-03 13:49:44 -08001588 } else {
Jaegeuk Kim0a4daae2018-09-19 15:28:40 -07001589 WARN_ON(flag != F2FS_GET_BLOCK_PRE_DIO &&
1590 flag != F2FS_GET_BLOCK_DIO);
Hyunchul Leed5097be2017-11-28 09:23:00 +09001591 err = __allocate_data_block(&dn,
1592 map->m_seg_type);
Chao Yu6f2d8ed2016-10-11 22:57:03 +08001593 if (!err)
Jaegeuk Kim91942322016-05-20 10:13:22 -07001594 set_inode_flag(inode, FI_APPEND_WRITE);
Jaegeuk Kim24b84912016-02-03 13:49:44 -08001595 }
Fan Li7df3a432015-12-17 13:20:59 +08001596 if (err)
1597 goto sync_out;
Kinglong Mee3f2be042017-02-23 19:55:05 +08001598 map->m_flags |= F2FS_MAP_NEW;
Fan Li7df3a432015-12-17 13:20:59 +08001599 blkaddr = dn.data_blkaddr;
1600 } else {
Chao Yubbe1da72021-08-06 08:02:50 +08001601 if (f2fs_compressed_file(inode) &&
1602 f2fs_sanity_check_cluster(&dn) &&
1603 (flag != F2FS_GET_BLOCK_FIEMAP ||
1604 IS_ENABLED(CONFIG_F2FS_CHECK_FS))) {
1605 err = -EFSCORRUPTED;
1606 goto sync_out;
1607 }
Chao Yu43473f92016-05-05 19:13:02 +08001608 if (flag == F2FS_GET_BLOCK_BMAP) {
1609 map->m_pblk = 0;
1610 goto sync_out;
1611 }
Chao Yuc4020b22018-01-11 14:42:30 +08001612 if (flag == F2FS_GET_BLOCK_PRECACHE)
1613 goto sync_out;
Chao Yuda859852016-01-26 15:42:58 +08001614 if (flag == F2FS_GET_BLOCK_FIEMAP &&
1615 blkaddr == NULL_ADDR) {
1616 if (map->m_next_pgofs)
1617 *map->m_next_pgofs = pgofs + 1;
Fan Li7df3a432015-12-17 13:20:59 +08001618 goto sync_out;
Jaegeuk Kimbfad7c22013-12-16 19:04:05 +09001619 }
Chao Yuf3d98e72018-01-10 18:18:52 +08001620 if (flag != F2FS_GET_BLOCK_FIEMAP) {
1621 /* for defragment case */
1622 if (map->m_next_pgofs)
1623 *map->m_next_pgofs = pgofs + 1;
Jaegeuk Kimbfad7c22013-12-16 19:04:05 +09001624 goto sync_out;
Chao Yuf3d98e72018-01-10 18:18:52 +08001625 }
Jaegeuk Kimbfad7c22013-12-16 19:04:05 +09001626 }
1627 }
Fan Li7df3a432015-12-17 13:20:59 +08001628
Chao Yu46008c62016-05-09 19:56:30 +08001629 if (flag == F2FS_GET_BLOCK_PRE_AIO)
1630 goto skip;
1631
Chao Yu71f2c822021-09-01 14:39:20 +08001632 if (map->m_multidev_dio)
1633 bidx = f2fs_target_device_index(sbi, blkaddr);
1634
Chao Yu4fe71e82016-01-26 15:37:38 +08001635 if (map->m_len == 0) {
1636 /* preallocated unwritten block should be mapped for fiemap. */
1637 if (blkaddr == NEW_ADDR)
1638 map->m_flags |= F2FS_MAP_UNWRITTEN;
1639 map->m_flags |= F2FS_MAP_MAPPED;
1640
1641 map->m_pblk = blkaddr;
1642 map->m_len = 1;
Chao Yu71f2c822021-09-01 14:39:20 +08001643
1644 if (map->m_multidev_dio)
1645 map->m_bdev = FDEV(bidx).bdev;
Chao Yu4fe71e82016-01-26 15:37:38 +08001646 } else if ((map->m_pblk != NEW_ADDR &&
Fan Li7df3a432015-12-17 13:20:59 +08001647 blkaddr == (map->m_pblk + ofs)) ||
Jaegeuk Kimb439b102016-02-03 13:09:09 -08001648 (map->m_pblk == NEW_ADDR && blkaddr == NEW_ADDR) ||
Chao Yu46008c62016-05-09 19:56:30 +08001649 flag == F2FS_GET_BLOCK_PRE_DIO) {
Chao Yu71f2c822021-09-01 14:39:20 +08001650 if (map->m_multidev_dio && map->m_bdev != FDEV(bidx).bdev)
1651 goto sync_out;
Fan Li7df3a432015-12-17 13:20:59 +08001652 ofs++;
Fan Li7df3a432015-12-17 13:20:59 +08001653 map->m_len++;
Chao Yu4fe71e82016-01-26 15:37:38 +08001654 } else {
1655 goto sync_out;
1656 }
1657
Chao Yu46008c62016-05-09 19:56:30 +08001658skip:
Chao Yu4fe71e82016-01-26 15:37:38 +08001659 dn.ofs_in_node++;
1660 pgofs++;
1661
Chao Yu46008c62016-05-09 19:56:30 +08001662 /* preallocate blocks in batch for one dnode page */
1663 if (flag == F2FS_GET_BLOCK_PRE_AIO &&
1664 (pgofs == end || dn.ofs_in_node == end_offset)) {
Chao Yu4fe71e82016-01-26 15:37:38 +08001665
Chao Yu46008c62016-05-09 19:56:30 +08001666 dn.ofs_in_node = ofs_in_node;
Chao Yu4d57b862018-05-30 00:20:41 +08001667 err = f2fs_reserve_new_blocks(&dn, prealloc);
Chao Yu46008c62016-05-09 19:56:30 +08001668 if (err)
1669 goto sync_out;
Chao Yu4fe71e82016-01-26 15:37:38 +08001670
Chao Yu46008c62016-05-09 19:56:30 +08001671 map->m_len += dn.ofs_in_node - ofs_in_node;
1672 if (prealloc && dn.ofs_in_node != last_ofs_in_node + 1) {
1673 err = -ENOSPC;
1674 goto sync_out;
Chao Yu4fe71e82016-01-26 15:37:38 +08001675 }
Chao Yu46008c62016-05-09 19:56:30 +08001676 dn.ofs_in_node = end_offset;
Fan Li7df3a432015-12-17 13:20:59 +08001677 }
1678
Chao Yu46008c62016-05-09 19:56:30 +08001679 if (pgofs >= end)
1680 goto sync_out;
1681 else if (dn.ofs_in_node < end_offset)
1682 goto next_block;
1683
Chao Yuc4020b22018-01-11 14:42:30 +08001684 if (flag == F2FS_GET_BLOCK_PRECACHE) {
1685 if (map->m_flags & F2FS_MAP_MAPPED) {
1686 unsigned int ofs = start_pgofs - map->m_lblk;
1687
1688 f2fs_update_extent_cache_range(&dn,
1689 start_pgofs, map->m_pblk + ofs,
1690 map->m_len - ofs);
1691 }
1692 }
1693
Chao Yu46008c62016-05-09 19:56:30 +08001694 f2fs_put_dnode(&dn);
1695
Chao Yuf9d6d052018-11-13 14:33:45 +08001696 if (map->m_may_create) {
Chao Yu0ef81832020-06-18 14:36:22 +08001697 f2fs_do_map_lock(sbi, flag, false);
Chao Yu6f2d8ed2016-10-11 22:57:03 +08001698 f2fs_balance_fs(sbi, dn.node_changed);
Chao Yu46008c62016-05-09 19:56:30 +08001699 }
Chao Yu46008c62016-05-09 19:56:30 +08001700 goto next_dnode;
1701
Jaegeuk Kimbfad7c22013-12-16 19:04:05 +09001702sync_out:
Sahitya Tummala1e78e8b2018-10-10 10:56:22 +05301703
Chao Yu71f2c822021-09-01 14:39:20 +08001704 if (flag == F2FS_GET_BLOCK_DIO && map->m_flags & F2FS_MAP_MAPPED) {
1705 /*
1706 * for hardware encryption, but also to avoid a potential
1707 * issue in the future
1708 */
Sahitya Tummala1e78e8b2018-10-10 10:56:22 +05301709 f2fs_wait_on_block_writeback_range(inode,
1710 map->m_pblk, map->m_len);
Hyeong-Jun Kime3b49ea2021-11-02 16:10:02 +09001711 invalidate_mapping_pages(META_MAPPING(sbi),
1712 map->m_pblk, map->m_pblk);
Sahitya Tummala1e78e8b2018-10-10 10:56:22 +05301713
Chao Yu71f2c822021-09-01 14:39:20 +08001714 if (map->m_multidev_dio) {
1715 block_t blk_addr = map->m_pblk;
1716
1717 bidx = f2fs_target_device_index(sbi, map->m_pblk);
1718
1719 map->m_bdev = FDEV(bidx).bdev;
1720 map->m_pblk -= FDEV(bidx).start_blk;
1721
1722 if (map->m_may_create)
1723 f2fs_update_device_state(sbi, inode->i_ino,
1724 blk_addr, map->m_len);
1725
1726 f2fs_bug_on(sbi, blk_addr + map->m_len >
1727 FDEV(bidx).end_blk + 1);
1728 }
1729 }
1730
Chao Yuc4020b22018-01-11 14:42:30 +08001731 if (flag == F2FS_GET_BLOCK_PRECACHE) {
1732 if (map->m_flags & F2FS_MAP_MAPPED) {
1733 unsigned int ofs = start_pgofs - map->m_lblk;
1734
1735 f2fs_update_extent_cache_range(&dn,
1736 start_pgofs, map->m_pblk + ofs,
1737 map->m_len - ofs);
1738 }
1739 if (map->m_next_extent)
1740 *map->m_next_extent = pgofs + 1;
1741 }
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001742 f2fs_put_dnode(&dn);
Jaegeuk Kimbfad7c22013-12-16 19:04:05 +09001743unlock_out:
Chao Yuf9d6d052018-11-13 14:33:45 +08001744 if (map->m_may_create) {
Chao Yu0ef81832020-06-18 14:36:22 +08001745 f2fs_do_map_lock(sbi, flag, false);
Chao Yu6f2d8ed2016-10-11 22:57:03 +08001746 f2fs_balance_fs(sbi, dn.node_changed);
Jaegeuk Kim2a340762015-12-22 13:23:35 -08001747 }
Jaegeuk Kimbfad7c22013-12-16 19:04:05 +09001748out:
Chao Yu71f2c822021-09-01 14:39:20 +08001749 trace_f2fs_map_blocks(inode, map, create, flag, err);
Jaegeuk Kimbfad7c22013-12-16 19:04:05 +09001750 return err;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001751}
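
/*
 * A minimal lookup-only caller, as a sketch (idx is a placeholder):
 *
 *	struct f2fs_map_blocks map = {
 *		.m_lblk = idx, .m_len = 1, .m_seg_type = NO_CHECK_TYPE,
 *	};
 *
 *	err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
 *	if (!err && (map.m_flags & F2FS_MAP_MAPPED))
 *		blkaddr = map.m_pblk;	// physical address of idx
 */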
1752
Hyunchul Leeb91050a2018-03-08 19:34:38 +09001753bool f2fs_overwrite_io(struct inode *inode, loff_t pos, size_t len)
1754{
1755 struct f2fs_map_blocks map;
1756 block_t last_lblk;
1757 int err;
1758
1759 if (pos + len > i_size_read(inode))
1760 return false;
1761
1762 map.m_lblk = F2FS_BYTES_TO_BLK(pos);
1763 map.m_next_pgofs = NULL;
1764 map.m_next_extent = NULL;
1765 map.m_seg_type = NO_CHECK_TYPE;
Jia Zhuf4f0b672018-11-20 04:29:35 +08001766 map.m_may_create = false;
Hyunchul Leeb91050a2018-03-08 19:34:38 +09001767 last_lblk = F2FS_BLK_ALIGN(pos + len);
1768
1769 while (map.m_lblk < last_lblk) {
1770 map.m_len = last_lblk - map.m_lblk;
1771 err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
1772 if (err || map.m_len == 0)
1773 return false;
1774 map.m_lblk += map.m_len;
1775 }
1776 return true;
1777}
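
/*
 * f2fs_overwrite_io() answers "is [pos, pos + len) fully mapped?": the
 * lookup-only f2fs_map_blocks() call stops at the first hole, making the
 * loop return false, so direct writers can use it to skip preallocation
 * when every target block already exists.
 */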
1778
Jaegeuk Kim43b9d4b2020-11-24 15:04:27 -08001779static inline u64 bytes_to_blks(struct inode *inode, u64 bytes)
1780{
1781 return (bytes >> inode->i_blkbits);
1782}
1783
1784static inline u64 blks_to_bytes(struct inode *inode, u64 blks)
1785{
1786 return (blks << inode->i_blkbits);
1787}
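
/*
 * With the default 4KB block size, i_blkbits == 12, so for instance
 * bytes_to_blks(inode, 8192) == 2 and blks_to_bytes(inode, 3) == 12288.
 */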
1788
Jaegeuk Kim003a3e12015-04-06 19:55:34 -07001789static int __get_data_block(struct inode *inode, sector_t iblock,
Chao Yuda859852016-01-26 15:42:58 +08001790 struct buffer_head *bh, int create, int flag,
Chao Yuf9d6d052018-11-13 14:33:45 +08001791 pgoff_t *next_pgofs, int seg_type, bool may_write)
Jaegeuk Kim003a3e12015-04-06 19:55:34 -07001792{
1793 struct f2fs_map_blocks map;
Jaegeuk Kima7de6082016-11-11 16:31:56 -08001794 int err;
Jaegeuk Kim003a3e12015-04-06 19:55:34 -07001795
1796 map.m_lblk = iblock;
Jaegeuk Kim43b9d4b2020-11-24 15:04:27 -08001797 map.m_len = bytes_to_blks(inode, bh->b_size);
Chao Yuda859852016-01-26 15:42:58 +08001798 map.m_next_pgofs = next_pgofs;
Chao Yuc4020b22018-01-11 14:42:30 +08001799 map.m_next_extent = NULL;
Hyunchul Leed5097be2017-11-28 09:23:00 +09001800 map.m_seg_type = seg_type;
Chao Yuf9d6d052018-11-13 14:33:45 +08001801 map.m_may_create = may_write;
Jaegeuk Kim003a3e12015-04-06 19:55:34 -07001802
Jaegeuk Kima7de6082016-11-11 16:31:56 -08001803 err = f2fs_map_blocks(inode, &map, create, flag);
1804 if (!err) {
Jaegeuk Kim003a3e12015-04-06 19:55:34 -07001805 map_bh(bh, inode->i_sb, map.m_pblk);
1806 bh->b_state = (bh->b_state & ~F2FS_MAP_FLAGS) | map.m_flags;
Jaegeuk Kim43b9d4b2020-11-24 15:04:27 -08001807 bh->b_size = blks_to_bytes(inode, map.m_len);
Chao Yu71f2c822021-09-01 14:39:20 +08001808
1809 if (map.m_multidev_dio)
1810 bh->b_bdev = map.m_bdev;
Jaegeuk Kim003a3e12015-04-06 19:55:34 -07001811 }
Jaegeuk Kima7de6082016-11-11 16:31:56 -08001812 return err;
Jaegeuk Kim003a3e12015-04-06 19:55:34 -07001813}
1814
Chao Yuf9d6d052018-11-13 14:33:45 +08001815static int get_data_block_dio_write(struct inode *inode, sector_t iblock,
1816 struct buffer_head *bh_result, int create)
1817{
1818 return __get_data_block(inode, iblock, bh_result, create,
1819 F2FS_GET_BLOCK_DIO, NULL,
1820 f2fs_rw_hint_to_seg_type(inode->i_write_hint),
huangjianan@oppo.comebc29b622021-02-27 20:02:29 +08001821 true);
Jaegeuk Kimccfb3002014-06-13 13:02:11 +09001822}
1823
Chao Yue2b4e2b2015-08-19 19:11:19 +08001824static int get_data_block_dio(struct inode *inode, sector_t iblock,
Jaegeuk Kimccfb3002014-06-13 13:02:11 +09001825 struct buffer_head *bh_result, int create)
1826{
Chao Yue2b4e2b2015-08-19 19:11:19 +08001827 return __get_data_block(inode, iblock, bh_result, create,
Chao Yuf9d6d052018-11-13 14:33:45 +08001828 F2FS_GET_BLOCK_DIO, NULL,
1829 f2fs_rw_hint_to_seg_type(inode->i_write_hint),
1830 false);
Chao Yue2b4e2b2015-08-19 19:11:19 +08001831}
1832
Chao Yu442a9db2018-01-11 14:39:57 +08001833static int f2fs_xattr_fiemap(struct inode *inode,
1834 struct fiemap_extent_info *fieinfo)
1835{
1836 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1837 struct page *page;
1838 struct node_info ni;
1839 __u64 phys = 0, len;
1840 __u32 flags;
1841 nid_t xnid = F2FS_I(inode)->i_xattr_nid;
1842 int err = 0;
1843
1844 if (f2fs_has_inline_xattr(inode)) {
1845 int offset;
1846
1847 page = f2fs_grab_cache_page(NODE_MAPPING(sbi),
1848 inode->i_ino, false);
1849 if (!page)
1850 return -ENOMEM;
1851
Chao Yu77357302018-07-17 00:02:17 +08001852 err = f2fs_get_node_info(sbi, inode->i_ino, &ni);
1853 if (err) {
1854 f2fs_put_page(page, 1);
1855 return err;
1856 }
Chao Yu442a9db2018-01-11 14:39:57 +08001857
Jaegeuk Kim6cbfcab2020-11-24 14:55:47 -08001858 phys = blks_to_bytes(inode, ni.blk_addr);
Chao Yu442a9db2018-01-11 14:39:57 +08001859 offset = offsetof(struct f2fs_inode, i_addr) +
1860 sizeof(__le32) * (DEF_ADDRS_PER_INODE -
Chao Yub323fd22018-01-17 16:31:36 +08001861 get_inline_xattr_addrs(inode));
Chao Yu442a9db2018-01-11 14:39:57 +08001862
1863 phys += offset;
1864 len = inline_xattr_size(inode);
1865
1866 f2fs_put_page(page, 1);
1867
1868 flags = FIEMAP_EXTENT_DATA_INLINE | FIEMAP_EXTENT_NOT_ALIGNED;
1869
1870 if (!xnid)
1871 flags |= FIEMAP_EXTENT_LAST;
1872
1873 err = fiemap_fill_next_extent(fieinfo, 0, phys, len, flags);
Chao Yudd5a09b2020-06-29 20:13:13 +08001874 trace_f2fs_fiemap(inode, 0, phys, len, flags, err);
Chao Yu442a9db2018-01-11 14:39:57 +08001875 if (err || err == 1)
1876 return err;
1877 }
1878
1879 if (xnid) {
1880 page = f2fs_grab_cache_page(NODE_MAPPING(sbi), xnid, false);
1881 if (!page)
1882 return -ENOMEM;
1883
Chao Yu77357302018-07-17 00:02:17 +08001884 err = f2fs_get_node_info(sbi, xnid, &ni);
1885 if (err) {
1886 f2fs_put_page(page, 1);
1887 return err;
1888 }
Chao Yu442a9db2018-01-11 14:39:57 +08001889
Jaegeuk Kim6cbfcab2020-11-24 14:55:47 -08001890 phys = blks_to_bytes(inode, ni.blk_addr);
Chao Yu442a9db2018-01-11 14:39:57 +08001891 len = inode->i_sb->s_blocksize;
1892
1893 f2fs_put_page(page, 1);
1894
1895 flags = FIEMAP_EXTENT_LAST;
1896 }
1897
Chao Yudd5a09b2020-06-29 20:13:13 +08001898 if (phys) {
Chao Yu442a9db2018-01-11 14:39:57 +08001899 err = fiemap_fill_next_extent(fieinfo, 0, phys, len, flags);
Chao Yudd5a09b2020-06-29 20:13:13 +08001900 trace_f2fs_fiemap(inode, 0, phys, len, flags, err);
1901 }
Chao Yu442a9db2018-01-11 14:39:57 +08001902
1903 return (err < 0 ? err : 0);
1904}
1905
Chao Yubf38fba2020-03-28 17:40:40 +08001906static loff_t max_inode_blocks(struct inode *inode)
1907{
1908 loff_t result = ADDRS_PER_INODE(inode);
1909 loff_t leaf_count = ADDRS_PER_BLOCK(inode);
1910
1911 /* two direct node blocks */
1912 result += (leaf_count * 2);
1913
1914 /* two indirect node blocks */
1915 leaf_count *= NIDS_PER_BLOCK;
1916 result += (leaf_count * 2);
1917
1918 /* one double indirect node block */
1919 leaf_count *= NIDS_PER_BLOCK;
1920 result += leaf_count;
1921
1922 return result;
1923}
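
/*
 * Worked example (assuming 4KB blocks and the default inline xattr
 * layout, i.e. ADDRS_PER_INODE == 923 and ADDRS_PER_BLOCK ==
 * NIDS_PER_BLOCK == 1018): 923 + 2*1018 + 2*1018^2 + 1018^3, roughly
 * 1.06 billion blocks, or about 3.9TiB of addressable file data.
 */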
1924
Jaegeuk Kim9ab701342014-06-08 04:30:14 +09001925int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
1926 u64 start, u64 len)
1927{
Jaegeuk Kimb876f4c92020-11-24 15:19:10 -08001928 struct f2fs_map_blocks map;
Jaegeuk Kim7f63eb72015-05-08 19:30:32 -07001929 sector_t start_blk, last_blk;
Chao Yuda859852016-01-26 15:42:58 +08001930 pgoff_t next_pgofs;
Jaegeuk Kim7f63eb72015-05-08 19:30:32 -07001931 u64 logical = 0, phys = 0, size = 0;
1932 u32 flags = 0;
Jaegeuk Kim7f63eb72015-05-08 19:30:32 -07001933 int ret = 0;
Daeho Jeong093f0ba2021-07-25 21:18:19 -07001934 bool compr_cluster = false, compr_appended;
Chao Yubf38fba2020-03-28 17:40:40 +08001935 unsigned int cluster_size = F2FS_I(inode)->i_cluster_size;
Daeho Jeong093f0ba2021-07-25 21:18:19 -07001936 unsigned int count_in_cluster = 0;
Chengguang Xu0bb20452021-03-09 13:21:18 +08001937 loff_t maxbytes;
Jaegeuk Kim7f63eb72015-05-08 19:30:32 -07001938
Chao Yuc4020b22018-01-11 14:42:30 +08001939 if (fieinfo->fi_flags & FIEMAP_FLAG_CACHE) {
1940 ret = f2fs_precache_extents(inode);
1941 if (ret)
1942 return ret;
1943 }
1944
Christoph Hellwig45dd0522020-05-23 09:30:14 +02001945 ret = fiemap_prep(inode, fieinfo, start, &len, FIEMAP_FLAG_XATTR);
Jaegeuk Kim7f63eb72015-05-08 19:30:32 -07001946 if (ret)
1947 return ret;
1948
Chao Yuf1b43d42018-01-11 14:37:35 +08001949 inode_lock(inode);
1950
Chengguang Xu0bb20452021-03-09 13:21:18 +08001951 maxbytes = max_file_blocks(inode) << F2FS_BLKSIZE_BITS;
1952 if (start > maxbytes) {
1953 ret = -EFBIG;
1954 goto out;
1955 }
1956
1957 if (len > maxbytes || (maxbytes - len) < start)
1958 len = maxbytes - start;
1959
Chao Yu442a9db2018-01-11 14:39:57 +08001960 if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) {
1961 ret = f2fs_xattr_fiemap(inode, fieinfo);
1962 goto out;
1963 }
1964
Chao Yu7975f342019-07-22 18:03:50 +08001965 if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode)) {
Jaegeuk Kim67f8cf32015-10-15 11:34:49 -07001966 ret = f2fs_inline_data_fiemap(inode, fieinfo, start, len);
1967 if (ret != -EAGAIN)
Chao Yuf1b43d42018-01-11 14:37:35 +08001968 goto out;
Jaegeuk Kim67f8cf32015-10-15 11:34:49 -07001969 }
1970
Jaegeuk Kim6cbfcab2020-11-24 14:55:47 -08001971 if (bytes_to_blks(inode, len) == 0)
1972 len = blks_to_bytes(inode, 1);
Jaegeuk Kim7f63eb72015-05-08 19:30:32 -07001973
Jaegeuk Kim6cbfcab2020-11-24 14:55:47 -08001974 start_blk = bytes_to_blks(inode, start);
1975 last_blk = bytes_to_blks(inode, start + len - 1);
Fan Li9a950d52015-12-26 18:07:41 +08001976
Jaegeuk Kim7f63eb72015-05-08 19:30:32 -07001977next:
Jaegeuk Kimb876f4c92020-11-24 15:19:10 -08001978 memset(&map, 0, sizeof(map));
1979 map.m_lblk = start_blk;
1980 map.m_len = bytes_to_blks(inode, len);
1981 map.m_next_pgofs = &next_pgofs;
1982 map.m_seg_type = NO_CHECK_TYPE;
Jaegeuk Kim7f63eb72015-05-08 19:30:32 -07001983
Daeho Jeong093f0ba2021-07-25 21:18:19 -07001984 if (compr_cluster) {
1985 map.m_lblk += 1;
1986 map.m_len = cluster_size - count_in_cluster;
1987 }
Chao Yubf38fba2020-03-28 17:40:40 +08001988
Jaegeuk Kimb876f4c92020-11-24 15:19:10 -08001989 ret = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_FIEMAP);
Jaegeuk Kim7f63eb72015-05-08 19:30:32 -07001990 if (ret)
1991 goto out;
1992
1993 /* HOLE */
Daeho Jeong093f0ba2021-07-25 21:18:19 -07001994 if (!compr_cluster && !(map.m_flags & F2FS_MAP_FLAGS)) {
Chao Yuda859852016-01-26 15:42:58 +08001995 start_blk = next_pgofs;
Chao Yu58736fa2016-10-11 22:57:04 +08001996
Jaegeuk Kim6cbfcab2020-11-24 14:55:47 -08001997 if (blks_to_bytes(inode, start_blk) < blks_to_bytes(inode,
Chao Yubf38fba2020-03-28 17:40:40 +08001998 max_inode_blocks(inode)))
Fan Li9a950d52015-12-26 18:07:41 +08001999 goto prep_next;
Chao Yu58736fa2016-10-11 22:57:04 +08002000
Fan Li9a950d52015-12-26 18:07:41 +08002001 flags |= FIEMAP_EXTENT_LAST;
Jaegeuk Kim7f63eb72015-05-08 19:30:32 -07002002 }
Fan Li9a950d52015-12-26 18:07:41 +08002003
Daeho Jeong093f0ba2021-07-25 21:18:19 -07002004 compr_appended = false;
2005 /* In the case of a compressed cluster, append this to the last extent */
2006 if (compr_cluster && ((map.m_flags & F2FS_MAP_UNWRITTEN) ||
2007 !(map.m_flags & F2FS_MAP_FLAGS))) {
2008 compr_appended = true;
2009 goto skip_fill;
2010 }
2011
Chao Yuda5af122016-01-08 20:19:27 +08002012 if (size) {
Chao Yu0953fe82020-12-14 17:20:57 +08002013 flags |= FIEMAP_EXTENT_MERGED;
Chandan Rajendra62230e0d2018-12-12 15:20:11 +05302014 if (IS_ENCRYPTED(inode))
Chao Yuda5af122016-01-08 20:19:27 +08002015 flags |= FIEMAP_EXTENT_DATA_ENCRYPTED;
2016
Fan Li9a950d52015-12-26 18:07:41 +08002017 ret = fiemap_fill_next_extent(fieinfo, logical,
2018 phys, size, flags);
Chao Yudd5a09b2020-06-29 20:13:13 +08002019 trace_f2fs_fiemap(inode, logical, phys, size, flags, ret);
Chao Yubf38fba2020-03-28 17:40:40 +08002020 if (ret)
2021 goto out;
2022 size = 0;
Chao Yuda5af122016-01-08 20:19:27 +08002023 }
Fan Li9a950d52015-12-26 18:07:41 +08002024
Chao Yubf38fba2020-03-28 17:40:40 +08002025 if (start_blk > last_blk)
Fan Li9a950d52015-12-26 18:07:41 +08002026 goto out;
2027
Daeho Jeong093f0ba2021-07-25 21:18:19 -07002028skip_fill:
Jaegeuk Kimb876f4c92020-11-24 15:19:10 -08002029 if (map.m_pblk == COMPRESS_ADDR) {
Chao Yubf38fba2020-03-28 17:40:40 +08002030 compr_cluster = true;
Daeho Jeong093f0ba2021-07-25 21:18:19 -07002031 count_in_cluster = 1;
2032 } else if (compr_appended) {
2033 unsigned int appended_blks = cluster_size -
2034 count_in_cluster + 1;
2035 size += blks_to_bytes(inode, appended_blks);
2036 start_blk += appended_blks;
2037 compr_cluster = false;
2038 } else {
2039 logical = blks_to_bytes(inode, start_blk);
2040 phys = __is_valid_data_blkaddr(map.m_pblk) ?
2041 blks_to_bytes(inode, map.m_pblk) : 0;
2042 size = blks_to_bytes(inode, map.m_len);
2043 flags = 0;
2044
2045 if (compr_cluster) {
2046 flags = FIEMAP_EXTENT_ENCODED;
2047 count_in_cluster += map.m_len;
2048 if (count_in_cluster == cluster_size) {
2049 compr_cluster = false;
2050 size += blks_to_bytes(inode, 1);
2051 }
2052 } else if (map.m_flags & F2FS_MAP_UNWRITTEN) {
2053 flags = FIEMAP_EXTENT_UNWRITTEN;
2054 }
2055
2056 start_blk += bytes_to_blks(inode, size);
Chao Yubf38fba2020-03-28 17:40:40 +08002057 }
2058
Fan Li9a950d52015-12-26 18:07:41 +08002059prep_next:
Jaegeuk Kim7f63eb72015-05-08 19:30:32 -07002060 cond_resched();
2061 if (fatal_signal_pending(current))
2062 ret = -EINTR;
2063 else
2064 goto next;
2065out:
2066 if (ret == 1)
2067 ret = 0;
2068
Al Viro59551022016-01-22 15:40:57 -05002069 inode_unlock(inode);
Jaegeuk Kim7f63eb72015-05-08 19:30:32 -07002070 return ret;
Jaegeuk Kim9ab701342014-06-08 04:30:14 +09002071}
2072
Eric Biggers95ae2512019-07-22 09:26:24 -07002073static inline loff_t f2fs_readpage_limit(struct inode *inode)
2074{
2075 if (IS_ENABLED(CONFIG_FS_VERITY) &&
2076 (IS_VERITY(inode) || f2fs_verity_in_progress(inode)))
2077 return inode->i_sb->s_maxbytes;
2078
2079 return i_size_read(inode);
2080}
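
/*
 * Rationale: fs-verity stores the Merkle tree past i_size, so while
 * verity is enabled (or still being built) reads must be allowed beyond
 * the visible file size, bounded only by s_maxbytes.
 */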
2081
Chao Yu2df0ab02019-03-25 21:07:30 +08002082static int f2fs_read_single_page(struct inode *inode, struct page *page,
2083 unsigned nr_pages,
2084 struct f2fs_map_blocks *map,
2085 struct bio **bio_ret,
2086 sector_t *last_block_in_bio,
2087 bool is_readahead)
2088{
2089 struct bio *bio = *bio_ret;
Jaegeuk Kim43b9d4b2020-11-24 15:04:27 -08002090 const unsigned blocksize = blks_to_bytes(inode, 1);
Chao Yu2df0ab02019-03-25 21:07:30 +08002091 sector_t block_in_file;
2092 sector_t last_block;
2093 sector_t last_block_in_file;
2094 sector_t block_nr;
2095 int ret = 0;
2096
Jaegeuk Kim4969c062019-07-01 19:15:29 -07002097 block_in_file = (sector_t)page_index(page);
Chao Yu2df0ab02019-03-25 21:07:30 +08002098 last_block = block_in_file + nr_pages;
Jaegeuk Kim43b9d4b2020-11-24 15:04:27 -08002099 last_block_in_file = bytes_to_blks(inode,
2100 f2fs_readpage_limit(inode) + blocksize - 1);
Chao Yu2df0ab02019-03-25 21:07:30 +08002101 if (last_block > last_block_in_file)
2102 last_block = last_block_in_file;
2103
2104 /* just zero out the page which is beyond EOF */
2105 if (block_in_file >= last_block)
2106 goto zero_out;
2107 /*
2108 * Map blocks using the previous result first.
2109 */
2110 if ((map->m_flags & F2FS_MAP_MAPPED) &&
2111 block_in_file > map->m_lblk &&
2112 block_in_file < (map->m_lblk + map->m_len))
2113 goto got_it;
2114
2115 /*
2116 * Then do more f2fs_map_blocks() calls until we are
2117 * done with this page.
2118 */
2119 map->m_lblk = block_in_file;
2120 map->m_len = last_block - block_in_file;
2121
2122 ret = f2fs_map_blocks(inode, map, 0, F2FS_GET_BLOCK_DEFAULT);
2123 if (ret)
2124 goto out;
2125got_it:
2126 if ((map->m_flags & F2FS_MAP_MAPPED)) {
2127 block_nr = map->m_pblk + block_in_file - map->m_lblk;
2128 SetPageMappedToDisk(page);
2129
Jaegeuk Kim4969c062019-07-01 19:15:29 -07002130 if (!PageUptodate(page) && (!PageSwapCache(page) &&
2131 !cleancache_get_page(page))) {
Chao Yu2df0ab02019-03-25 21:07:30 +08002132 SetPageUptodate(page);
2133 goto confused;
2134 }
2135
2136 if (!f2fs_is_valid_blkaddr(F2FS_I_SB(inode), block_nr,
Chao Yu93770ab2019-04-15 15:26:32 +08002137 DATA_GENERIC_ENHANCE_READ)) {
Chao Yu10f966b2019-06-20 11:36:14 +08002138 ret = -EFSCORRUPTED;
Chao Yu2df0ab02019-03-25 21:07:30 +08002139 goto out;
2140 }
2141 } else {
2142zero_out:
2143 zero_user_segment(page, 0, PAGE_SIZE);
Eric Biggers95ae2512019-07-22 09:26:24 -07002144 if (f2fs_need_verity(inode, page->index) &&
2145 !fsverity_verify_page(page)) {
2146 ret = -EIO;
2147 goto out;
2148 }
Chao Yu2df0ab02019-03-25 21:07:30 +08002149 if (!PageUptodate(page))
2150 SetPageUptodate(page);
2151 unlock_page(page);
2152 goto out;
2153 }
2154
2155 /*
2156 * This page will go to BIO. Do we need to send this
2157 * BIO off first?
2158 */
Satya Tangirala27aacd22020-07-02 01:56:06 +00002159 if (bio && (!page_is_mergeable(F2FS_I_SB(inode), bio,
2160 *last_block_in_bio, block_nr) ||
2161 !f2fs_crypt_mergeable_bio(bio, inode, page->index, NULL))) {
Chao Yu2df0ab02019-03-25 21:07:30 +08002162submit_and_realloc:
2163 __submit_bio(F2FS_I_SB(inode), bio, DATA);
2164 bio = NULL;
2165 }
2166 if (bio == NULL) {
2167 bio = f2fs_grab_read_bio(inode, block_nr, nr_pages,
Chao Yu06837282020-02-18 18:21:35 +08002168 is_readahead ? REQ_RAHEAD : 0, page->index,
Eric Biggers7f59b272021-01-04 22:33:02 -08002169 false);
Chao Yu2df0ab02019-03-25 21:07:30 +08002170 if (IS_ERR(bio)) {
2171 ret = PTR_ERR(bio);
2172 bio = NULL;
2173 goto out;
2174 }
2175 }
2176
2177 /*
2178 * If the page is under writeback, we need to wait for
2179 * its completion to see the correct decrypted data.
2180 */
2181 f2fs_wait_on_block_writeback(inode, block_nr);
2182
2183 if (bio_add_page(bio, page, blocksize, 0) < blocksize)
2184 goto submit_and_realloc;
2185
2186 inc_page_count(F2FS_I_SB(inode), F2FS_RD_DATA);
Chao Yu8b83ac82020-04-16 18:16:56 +08002187 f2fs_update_iostat(F2FS_I_SB(inode), FS_DATA_READ_IO, F2FS_BLKSIZE);
Chao Yu2df0ab02019-03-25 21:07:30 +08002188 ClearPageError(page);
2189 *last_block_in_bio = block_nr;
2190 goto out;
2191confused:
2192 if (bio) {
2193 __submit_bio(F2FS_I_SB(inode), bio, DATA);
2194 bio = NULL;
2195 }
2196 unlock_page(page);
2197out:
2198 *bio_ret = bio;
2199 return ret;
2200}
2201
Chao Yu4c8ff702019-11-01 18:07:14 +08002202#ifdef CONFIG_F2FS_FS_COMPRESSION
2203int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
2204 unsigned nr_pages, sector_t *last_block_in_bio,
Chao Yu06837282020-02-18 18:21:35 +08002205 bool is_readahead, bool for_write)
Chao Yu4c8ff702019-11-01 18:07:14 +08002206{
2207 struct dnode_of_data dn;
2208 struct inode *inode = cc->inode;
2209 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2210 struct bio *bio = *bio_ret;
2211 unsigned int start_idx = cc->cluster_idx << cc->log_cluster_size;
2212 sector_t last_block_in_file;
Jaegeuk Kim43b9d4b2020-11-24 15:04:27 -08002213 const unsigned blocksize = blks_to_bytes(inode, 1);
Chao Yu4c8ff702019-11-01 18:07:14 +08002214 struct decompress_io_ctx *dic = NULL;
Chao Yu94afd6d2021-08-04 10:23:48 +08002215 struct extent_info ei = {0, };
2216 bool from_dnode = true;
Chao Yu4c8ff702019-11-01 18:07:14 +08002217 int i;
2218 int ret = 0;
2219
2220 f2fs_bug_on(sbi, f2fs_cluster_is_empty(cc));
2221
Jaegeuk Kim43b9d4b2020-11-24 15:04:27 -08002222 last_block_in_file = bytes_to_blks(inode,
2223 f2fs_readpage_limit(inode) + blocksize - 1);
Chao Yu4c8ff702019-11-01 18:07:14 +08002224
2225 /* get rid of pages beyond EOF */
2226 for (i = 0; i < cc->cluster_size; i++) {
2227 struct page *page = cc->rpages[i];
2228
2229 if (!page)
2230 continue;
2231 if ((sector_t)page->index >= last_block_in_file) {
2232 zero_user_segment(page, 0, PAGE_SIZE);
2233 if (!PageUptodate(page))
2234 SetPageUptodate(page);
2235 } else if (!PageUptodate(page)) {
2236 continue;
2237 }
2238 unlock_page(page);
Jaegeuk Kim9605f752021-08-30 13:30:45 -07002239 if (for_write)
2240 put_page(page);
Chao Yu4c8ff702019-11-01 18:07:14 +08002241 cc->rpages[i] = NULL;
2242 cc->nr_rpages--;
2243 }
2244
2245 /* we are done since all pages are beyond EOF */
2246 if (f2fs_cluster_is_empty(cc))
2247 goto out;
2248
Chao Yu94afd6d2021-08-04 10:23:48 +08002249 if (f2fs_lookup_extent_cache(inode, start_idx, &ei))
2250 from_dnode = false;
2251
2252 if (!from_dnode)
2253 goto skip_reading_dnode;
2254
Chao Yu4c8ff702019-11-01 18:07:14 +08002255 set_new_dnode(&dn, inode, NULL, NULL, 0);
2256 ret = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
2257 if (ret)
2258 goto out;
2259
Chao Yua86d27d2020-07-29 21:21:35 +08002260 f2fs_bug_on(sbi, dn.data_blkaddr != COMPRESS_ADDR);
Chao Yu4c8ff702019-11-01 18:07:14 +08002261
Chao Yu94afd6d2021-08-04 10:23:48 +08002262skip_reading_dnode:
Chao Yu4c8ff702019-11-01 18:07:14 +08002263 for (i = 1; i < cc->cluster_size; i++) {
2264 block_t blkaddr;
2265
Chao Yu94afd6d2021-08-04 10:23:48 +08002266 blkaddr = from_dnode ? data_blkaddr(dn.inode, dn.node_page,
2267 dn.ofs_in_node + i) :
2268 ei.blk + i - 1;
Chao Yu4c8ff702019-11-01 18:07:14 +08002269
2270 if (!__is_valid_data_blkaddr(blkaddr))
2271 break;
2272
2273 if (!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC)) {
2274 ret = -EFAULT;
2275 goto out_put_dnode;
2276 }
2277 cc->nr_cpages++;
Chao Yu94afd6d2021-08-04 10:23:48 +08002278
2279 if (!from_dnode && i >= ei.c_len)
2280 break;
Chao Yu4c8ff702019-11-01 18:07:14 +08002281 }
2282
2283 /* nothing to decompress */
2284 if (cc->nr_cpages == 0) {
2285 ret = 0;
2286 goto out_put_dnode;
2287 }
2288
2289 dic = f2fs_alloc_dic(cc);
2290 if (IS_ERR(dic)) {
2291 ret = PTR_ERR(dic);
2292 goto out_put_dnode;
2293 }
2294
Chao Yu6ce19af2021-05-20 19:51:50 +08002295 for (i = 0; i < cc->nr_cpages; i++) {
Chao Yu4c8ff702019-11-01 18:07:14 +08002296 struct page *page = dic->cpages[i];
2297 block_t blkaddr;
Eric Biggers7f59b272021-01-04 22:33:02 -08002298 struct bio_post_read_ctx *ctx;
Chao Yu4c8ff702019-11-01 18:07:14 +08002299
Chao Yu94afd6d2021-08-04 10:23:48 +08002300 blkaddr = from_dnode ? data_blkaddr(dn.inode, dn.node_page,
2301 dn.ofs_in_node + i + 1) :
2302 ei.blk + i;
Chao Yu4c8ff702019-11-01 18:07:14 +08002303
Chao Yu6ce19af2021-05-20 19:51:50 +08002304 f2fs_wait_on_block_writeback(inode, blkaddr);
2305
2306 if (f2fs_load_compressed_page(sbi, page, blkaddr)) {
2307 if (atomic_dec_and_test(&dic->remaining_pages))
2308 f2fs_decompress_cluster(dic);
2309 continue;
2310 }
2311
Satya Tangirala27aacd22020-07-02 01:56:06 +00002312 if (bio && (!page_is_mergeable(sbi, bio,
2313 *last_block_in_bio, blkaddr) ||
2314 !f2fs_crypt_mergeable_bio(bio, inode, page->index, NULL))) {
Chao Yu4c8ff702019-11-01 18:07:14 +08002315submit_and_realloc:
2316 __submit_bio(sbi, bio, DATA);
2317 bio = NULL;
2318 }
2319
2320 if (!bio) {
2321 bio = f2fs_grab_read_bio(inode, blkaddr, nr_pages,
2322 is_readahead ? REQ_RAHEAD : 0,
Eric Biggers7f59b272021-01-04 22:33:02 -08002323 page->index, for_write);
Chao Yu4c8ff702019-11-01 18:07:14 +08002324 if (IS_ERR(bio)) {
2325 ret = PTR_ERR(bio);
Eric Biggers7f59b272021-01-04 22:33:02 -08002326 f2fs_decompress_end_io(dic, ret);
Chao Yu4c8ff702019-11-01 18:07:14 +08002327 f2fs_put_dnode(&dn);
Chao Yuf3494342020-04-23 17:57:33 +08002328 *bio_ret = NULL;
Chao Yu4c8ff702019-11-01 18:07:14 +08002329 return ret;
2330 }
2331 }
2332
Chao Yu4c8ff702019-11-01 18:07:14 +08002333 if (bio_add_page(bio, page, blocksize, 0) < blocksize)
2334 goto submit_and_realloc;
2335
Daeho Jeonga4b68172021-08-20 15:29:09 -07002336 ctx = get_post_read_ctx(bio);
Eric Biggers7f59b272021-01-04 22:33:02 -08002337 ctx->enabled_steps |= STEP_DECOMPRESS;
2338 refcount_inc(&dic->refcnt);
Chao Yu03382f12020-04-21 19:36:21 +08002339
Chao Yu4c8ff702019-11-01 18:07:14 +08002340 inc_page_count(sbi, F2FS_RD_DATA);
Chao Yu8b83ac82020-04-16 18:16:56 +08002341 f2fs_update_iostat(sbi, FS_DATA_READ_IO, F2FS_BLKSIZE);
Chao Yu9c122382020-04-23 18:03:06 +08002342 f2fs_update_iostat(sbi, FS_CDATA_READ_IO, F2FS_BLKSIZE);
Chao Yu4c8ff702019-11-01 18:07:14 +08002343 ClearPageError(page);
2344 *last_block_in_bio = blkaddr;
2345 }
2346
Chao Yu94afd6d2021-08-04 10:23:48 +08002347 if (from_dnode)
2348 f2fs_put_dnode(&dn);
Chao Yu4c8ff702019-11-01 18:07:14 +08002349
2350 *bio_ret = bio;
2351 return 0;
2352
2353out_put_dnode:
Chao Yu94afd6d2021-08-04 10:23:48 +08002354 if (from_dnode)
2355 f2fs_put_dnode(&dn);
Chao Yu4c8ff702019-11-01 18:07:14 +08002356out:
Eric Biggers7f59b272021-01-04 22:33:02 -08002357 for (i = 0; i < cc->cluster_size; i++) {
2358 if (cc->rpages[i]) {
2359 ClearPageUptodate(cc->rpages[i]);
2360 ClearPageError(cc->rpages[i]);
2361 unlock_page(cc->rpages[i]);
2362 }
2363 }
Chao Yu4c8ff702019-11-01 18:07:14 +08002364 *bio_ret = bio;
2365 return ret;
2366}
2367#endif
2368
Jaegeuk Kimf1e88662015-04-09 11:20:42 -07002369/*
2370 * This function was originally taken from fs/mpage.c, and customized for f2fs.
2371 * The major change is that block_size == page_size in f2fs by default.
2372 */
Matthew Wilcox (Oracle)e20a7692020-06-01 21:47:27 -07002373static int f2fs_mpage_readpages(struct inode *inode,
Matthew Wilcox (Oracle)23323192020-06-01 21:47:23 -07002374 struct readahead_control *rac, struct page *page)
Jaegeuk Kimf1e88662015-04-09 11:20:42 -07002375{
2376 struct bio *bio = NULL;
Jaegeuk Kimf1e88662015-04-09 11:20:42 -07002377 sector_t last_block_in_bio = 0;
Jaegeuk Kimf1e88662015-04-09 11:20:42 -07002378 struct f2fs_map_blocks map;
Chao Yu4c8ff702019-11-01 18:07:14 +08002379#ifdef CONFIG_F2FS_FS_COMPRESSION
2380 struct compress_ctx cc = {
2381 .inode = inode,
2382 .log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
2383 .cluster_size = F2FS_I(inode)->i_cluster_size,
2384 .cluster_idx = NULL_CLUSTER,
2385 .rpages = NULL,
2386 .cpages = NULL,
2387 .nr_rpages = 0,
2388 .nr_cpages = 0,
2389 };
Fengnan Changa2649312021-08-12 19:36:41 +08002390 pgoff_t nc_cluster_idx = NULL_CLUSTER;
Chao Yu4c8ff702019-11-01 18:07:14 +08002391#endif
Matthew Wilcox (Oracle)23323192020-06-01 21:47:23 -07002392 unsigned nr_pages = rac ? readahead_count(rac) : 1;
Chao Yu4c8ff702019-11-01 18:07:14 +08002393 unsigned max_nr_pages = nr_pages;
Chao Yu2df0ab02019-03-25 21:07:30 +08002394 int ret = 0;
Jaegeuk Kimf1e88662015-04-09 11:20:42 -07002395
2396 map.m_pblk = 0;
2397 map.m_lblk = 0;
2398 map.m_len = 0;
2399 map.m_flags = 0;
Chao Yuda859852016-01-26 15:42:58 +08002400 map.m_next_pgofs = NULL;
Chao Yuc4020b22018-01-11 14:42:30 +08002401 map.m_next_extent = NULL;
Hyunchul Leed5097be2017-11-28 09:23:00 +09002402 map.m_seg_type = NO_CHECK_TYPE;
Chao Yuf9d6d052018-11-13 14:33:45 +08002403 map.m_may_create = false;
Jaegeuk Kimf1e88662015-04-09 11:20:42 -07002404
LiFan736c0a72017-11-25 11:46:18 +08002405 for (; nr_pages; nr_pages--) {
Matthew Wilcox (Oracle)23323192020-06-01 21:47:23 -07002406 if (rac) {
2407 page = readahead_page(rac);
Kinglong Meea83d50b2017-03-13 16:35:13 +08002408 prefetchw(&page->flags);
Jaegeuk Kimf1e88662015-04-09 11:20:42 -07002409 }
2410
Chao Yu4c8ff702019-11-01 18:07:14 +08002411#ifdef CONFIG_F2FS_FS_COMPRESSION
2412 if (f2fs_compressed_file(inode)) {
2413 /* there are remaining compressed pages, submit them */
2414 if (!f2fs_cluster_can_merge_page(&cc, page->index)) {
2415 ret = f2fs_read_multi_pages(&cc, &bio,
2416 max_nr_pages,
2417 &last_block_in_bio,
Matthew Wilcox (Oracle)23323192020-06-01 21:47:23 -07002418 rac != NULL, false);
Chao Yu8bfbfb02021-05-10 17:30:32 +08002419 f2fs_destroy_compress_ctx(&cc, false);
Chao Yu4c8ff702019-11-01 18:07:14 +08002420 if (ret)
2421 goto set_error_page;
2422 }
Fengnan Changa2649312021-08-12 19:36:41 +08002423 if (cc.cluster_idx == NULL_CLUSTER) {
2424 if (nc_cluster_idx ==
2425 page->index >> cc.log_cluster_size) {
2426 goto read_single_page;
2427 }
Chao Yu4c8ff702019-11-01 18:07:14 +08002428
Fengnan Changa2649312021-08-12 19:36:41 +08002429 ret = f2fs_is_compressed_cluster(inode, page->index);
2430 if (ret < 0)
2431 goto set_error_page;
2432 else if (!ret) {
2433 nc_cluster_idx =
2434 page->index >> cc.log_cluster_size;
2435 goto read_single_page;
2436 }
2437
2438 nc_cluster_idx = NULL_CLUSTER;
2439 }
Chao Yu4c8ff702019-11-01 18:07:14 +08002440 ret = f2fs_init_compress_ctx(&cc);
2441 if (ret)
2442 goto set_error_page;
2443
2444 f2fs_compress_ctx_add_page(&cc, page);
2445
2446 goto next_page;
2447 }
2448read_single_page:
2449#endif
2450
2451 ret = f2fs_read_single_page(inode, page, max_nr_pages, &map,
Matthew Wilcox (Oracle)23323192020-06-01 21:47:23 -07002452 &bio, &last_block_in_bio, rac);
Chao Yu2df0ab02019-03-25 21:07:30 +08002453 if (ret) {
Chao Yu4c8ff702019-11-01 18:07:14 +08002454#ifdef CONFIG_F2FS_FS_COMPRESSION
2455set_error_page:
2456#endif
Chao Yu2df0ab02019-03-25 21:07:30 +08002457 SetPageError(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002458 zero_user_segment(page, 0, PAGE_SIZE);
Jaegeuk Kimf1e88662015-04-09 11:20:42 -07002459 unlock_page(page);
Jaegeuk Kimf1e88662015-04-09 11:20:42 -07002460 }
Matthew Wilcox (Oracle)23323192020-06-01 21:47:23 -07002461#ifdef CONFIG_F2FS_FS_COMPRESSION
Jaegeuk Kimf1e88662015-04-09 11:20:42 -07002462next_page:
Matthew Wilcox (Oracle)23323192020-06-01 21:47:23 -07002463#endif
2464 if (rac)
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002465 put_page(page);
Chao Yu4c8ff702019-11-01 18:07:14 +08002466
2467#ifdef CONFIG_F2FS_FS_COMPRESSION
2468 if (f2fs_compressed_file(inode)) {
2469 /* last page */
2470 if (nr_pages == 1 && !f2fs_cluster_is_empty(&cc)) {
2471 ret = f2fs_read_multi_pages(&cc, &bio,
2472 max_nr_pages,
2473 &last_block_in_bio,
Matthew Wilcox (Oracle)23323192020-06-01 21:47:23 -07002474 rac != NULL, false);
Chao Yu8bfbfb02021-05-10 17:30:32 +08002475 f2fs_destroy_compress_ctx(&cc, false);
Chao Yu4c8ff702019-11-01 18:07:14 +08002476 }
2477 }
2478#endif
Jaegeuk Kimf1e88662015-04-09 11:20:42 -07002479 }
Jaegeuk Kimf1e88662015-04-09 11:20:42 -07002480 if (bio)
Linus Torvalds4fc29c12016-07-27 10:36:31 -07002481 __submit_bio(F2FS_I_SB(inode), bio, DATA);
Matthew Wilcox (Oracle)23323192020-06-01 21:47:23 -07002482 return ret;
Jaegeuk Kimf1e88662015-04-09 11:20:42 -07002483}
2484
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09002485static int f2fs_read_data_page(struct file *file, struct page *page)
2486{
Jaegeuk Kim4969c062019-07-01 19:15:29 -07002487 struct inode *inode = page_file_mapping(page)->host;
Jaegeuk Kimb3d208f2014-10-23 19:48:09 -07002488 int ret = -EAGAIN;
Huajun Li9ffe0fb2013-11-10 23:13:20 +08002489
Chao Yuc20e89cd2014-05-06 16:53:08 +08002490 trace_f2fs_readpage(page, DATA);
2491
Chao Yu4c8ff702019-11-01 18:07:14 +08002492 if (!f2fs_is_compress_backend_ready(inode)) {
2493 unlock_page(page);
2494 return -EOPNOTSUPP;
2495 }
2496
arter97e1c42042014-08-06 23:22:50 +09002497 /* If the file has inline data, try to read it directly */
Huajun Li9ffe0fb2013-11-10 23:13:20 +08002498 if (f2fs_has_inline_data(inode))
2499 ret = f2fs_read_inline_data(inode, page);
Jaegeuk Kimb3d208f2014-10-23 19:48:09 -07002500 if (ret == -EAGAIN)
Matthew Wilcox (Oracle)e20a7692020-06-01 21:47:27 -07002501 ret = f2fs_mpage_readpages(inode, NULL, page);
Huajun Li9ffe0fb2013-11-10 23:13:20 +08002502 return ret;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09002503}
2504
Matthew Wilcox (Oracle)23323192020-06-01 21:47:23 -07002505static void f2fs_readahead(struct readahead_control *rac)
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09002506{
Matthew Wilcox (Oracle)23323192020-06-01 21:47:23 -07002507 struct inode *inode = rac->mapping->host;
Chao Yub8c29402015-10-12 17:02:26 +08002508
Matthew Wilcox (Oracle)23323192020-06-01 21:47:23 -07002509 trace_f2fs_readpages(inode, readahead_index(rac), readahead_count(rac));
Huajun Li9ffe0fb2013-11-10 23:13:20 +08002510
Chao Yu4c8ff702019-11-01 18:07:14 +08002511 if (!f2fs_is_compress_backend_ready(inode))
Matthew Wilcox (Oracle)23323192020-06-01 21:47:23 -07002512 return;
Chao Yu4c8ff702019-11-01 18:07:14 +08002513
Huajun Li9ffe0fb2013-11-10 23:13:20 +08002514 /* If the file has inline data, skip readpages */
2515 if (f2fs_has_inline_data(inode))
Matthew Wilcox (Oracle)23323192020-06-01 21:47:23 -07002516 return;
Huajun Li9ffe0fb2013-11-10 23:13:20 +08002517
Matthew Wilcox (Oracle)e20a7692020-06-01 21:47:27 -07002518 f2fs_mpage_readpages(inode, rac, NULL);
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09002519}
2520
Chao Yu4c8ff702019-11-01 18:07:14 +08002521int f2fs_encrypt_one_page(struct f2fs_io_info *fio)
Hou Pengyang7eab0c02017-04-25 12:45:12 +00002522{
2523 struct inode *inode = fio->page->mapping->host;
Chao Yu4c8ff702019-11-01 18:07:14 +08002524 struct page *mpage, *page;
Hou Pengyang7eab0c02017-04-25 12:45:12 +00002525 gfp_t gfp_flags = GFP_NOFS;
2526
Jaegeuk Kim19585932017-09-05 16:54:24 -07002527 if (!f2fs_encrypted_file(inode))
Hou Pengyang7eab0c02017-04-25 12:45:12 +00002528 return 0;
2529
Chao Yu4c8ff702019-11-01 18:07:14 +08002530 page = fio->compressed_page ? fio->compressed_page : fio->page;
2531
Eric Biggers6dbb1792018-04-18 11:09:48 -07002532 /* wait for GCed page writeback via META_MAPPING */
Jaegeuk Kim0ded69f2018-08-22 21:18:00 -07002533 f2fs_wait_on_block_writeback(inode, fio->old_blkaddr);
Hou Pengyang7eab0c02017-04-25 12:45:12 +00002534
Satya Tangirala27aacd22020-07-02 01:56:06 +00002535 if (fscrypt_inode_uses_inline_crypto(inode))
2536 return 0;
2537
Hou Pengyang7eab0c02017-04-25 12:45:12 +00002538retry_encrypt:
Chao Yu4c8ff702019-11-01 18:07:14 +08002539 fio->encrypted_page = fscrypt_encrypt_pagecache_blocks(page,
2540 PAGE_SIZE, 0, gfp_flags);
Chao Yu6aa58d82018-08-14 22:37:25 +08002541 if (IS_ERR(fio->encrypted_page)) {
2542 /* flush pending IOs and wait for a while in the ENOMEM case */
2543 if (PTR_ERR(fio->encrypted_page) == -ENOMEM) {
2544 f2fs_flush_merged_writes(fio->sbi);
Chao Yu5df7731f2020-02-17 17:45:44 +08002545 congestion_wait(BLK_RW_ASYNC, DEFAULT_IO_TIMEOUT);
Chao Yu6aa58d82018-08-14 22:37:25 +08002546 gfp_flags |= __GFP_NOFAIL;
2547 goto retry_encrypt;
2548 }
2549 return PTR_ERR(fio->encrypted_page);
Hou Pengyang7eab0c02017-04-25 12:45:12 +00002550 }
Chao Yu6aa58d82018-08-14 22:37:25 +08002551
2552 mpage = find_lock_page(META_MAPPING(fio->sbi), fio->old_blkaddr);
2553 if (mpage) {
2554 if (PageUptodate(mpage))
2555 memcpy(page_address(mpage),
2556 page_address(fio->encrypted_page), PAGE_SIZE);
2557 f2fs_put_page(mpage, 1);
2558 }
2559 return 0;
Hou Pengyang7eab0c02017-04-25 12:45:12 +00002560}
2561
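/*
 * Illustrative sketch (not f2fs code): the ENOMEM handling in
 * f2fs_encrypt_one_page() above flushes merged writes, backs off for
 * DEFAULT_IO_TIMEOUT, and retries the allocation with __GFP_NOFAIL.
 * A standalone user-space model of that shape; try_alloc(),
 * flush_pending() and short_wait() are hypothetical helpers.
 */
#if 0	/* illustrative only */
#include <stddef.h>

void *alloc_with_retry(size_t size)
{
	int may_fail = 1;

	for (;;) {
		void *p = try_alloc(size, may_fail);

		if (p)
			return p;
		flush_pending();	/* release memory pinned by queued I/O */
		short_wait();		/* back off before retrying */
		may_fail = 0;		/* next attempt is not allowed to fail */
	}
}
#endif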
Chao Yubb9e3bb8d2018-01-17 16:31:38 +08002562static inline bool check_inplace_update_policy(struct inode *inode,
2563 struct f2fs_io_info *fio)
2564{
2565 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2566 unsigned int policy = SM_I(sbi)->ipu_policy;
2567
2568 if (policy & (0x1 << F2FS_IPU_FORCE))
2569 return true;
Chao Yu4d57b862018-05-30 00:20:41 +08002570 if (policy & (0x1 << F2FS_IPU_SSR) && f2fs_need_SSR(sbi))
Chao Yubb9e3bb8d2018-01-17 16:31:38 +08002571 return true;
2572 if (policy & (0x1 << F2FS_IPU_UTIL) &&
2573 utilization(sbi) > SM_I(sbi)->min_ipu_util)
2574 return true;
Chao Yu4d57b862018-05-30 00:20:41 +08002575 if (policy & (0x1 << F2FS_IPU_SSR_UTIL) && f2fs_need_SSR(sbi) &&
Chao Yubb9e3bb8d2018-01-17 16:31:38 +08002576 utilization(sbi) > SM_I(sbi)->min_ipu_util)
2577 return true;
2578
2579 /*
2580	 * IPU for rewriting async pages
2581 */
2582 if (policy & (0x1 << F2FS_IPU_ASYNC) &&
2583 fio && fio->op == REQ_OP_WRITE &&
2584 !(fio->op_flags & REQ_SYNC) &&
Chandan Rajendra62230e0d2018-12-12 15:20:11 +05302585 !IS_ENCRYPTED(inode))
Chao Yubb9e3bb8d2018-01-17 16:31:38 +08002586 return true;
2587
2588 /* this is only set during fdatasync */
2589 if (policy & (0x1 << F2FS_IPU_FSYNC) &&
2590 is_inode_flag_set(inode, FI_NEED_IPU))
2591 return true;
2592
Daniel Rosenberg43549942018-08-20 19:21:43 -07002593 if (unlikely(fio && is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
2594 !f2fs_is_checkpointed_data(sbi, fio->old_blkaddr)))
2595 return true;
2596
Chao Yubb9e3bb8d2018-01-17 16:31:38 +08002597 return false;
2598}
2599
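/*
 * Illustrative sketch (not f2fs code): check_inplace_update_policy()
 * above treats SM_I(sbi)->ipu_policy as a bitmask and returns true on
 * the first matching rule. Lifted into a standalone user-space model
 * (enum names here are hypothetical), the core decision is:
 */
#if 0	/* illustrative only */
#include <stdbool.h>

enum { IPU_FORCE, IPU_SSR, IPU_UTIL, IPU_SSR_UTIL };

static bool ipu_allowed(unsigned int policy, bool need_ssr,
			unsigned int util, unsigned int min_util)
{
	if (policy & (1u << IPU_FORCE))
		return true;
	if ((policy & (1u << IPU_SSR)) && need_ssr)
		return true;
	if ((policy & (1u << IPU_UTIL)) && util > min_util)
		return true;
	if ((policy & (1u << IPU_SSR_UTIL)) && need_ssr && util > min_util)
		return true;
	return false;	/* async/fsync/CP-disabled rules would follow */
}
#endif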
Chao Yu4d57b862018-05-30 00:20:41 +08002600bool f2fs_should_update_inplace(struct inode *inode, struct f2fs_io_info *fio)
Chao Yubb9e3bb8d2018-01-17 16:31:38 +08002601{
Chao Yu859fca62021-05-26 14:29:27 +08002602 /* swap file is migrating in aligned write mode */
2603 if (is_inode_flag_set(inode, FI_ALIGNED_WRITE))
2604 return false;
2605
Chao Yubb9e3bb8d2018-01-17 16:31:38 +08002606 if (f2fs_is_pinned_file(inode))
2607 return true;
2608
2609	/* if this is a cold file, we should overwrite to avoid fragmentation */
2610 if (file_is_cold(inode))
2611 return true;
2612
2613 return check_inplace_update_policy(inode, fio);
2614}
2615
Chao Yu4d57b862018-05-30 00:20:41 +08002616bool f2fs_should_update_outplace(struct inode *inode, struct f2fs_io_info *fio)
Chao Yubb9e3bb8d2018-01-17 16:31:38 +08002617{
2618 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2619
Chao Yub0332a02020-02-14 17:44:12 +08002620 if (f2fs_lfs_mode(sbi))
Chao Yubb9e3bb8d2018-01-17 16:31:38 +08002621 return true;
2622 if (S_ISDIR(inode->i_mode))
2623 return true;
Chao Yuaf033b22018-09-20 20:05:00 +08002624 if (IS_NOQUOTA(inode))
2625 return true;
Chao Yubb9e3bb8d2018-01-17 16:31:38 +08002626 if (f2fs_is_atomic_file(inode))
2627 return true;
Jaegeuk Kim1ffc8f52021-07-14 16:14:02 -07002628 if (is_sbi_flag_set(sbi, SBI_NEED_FSCK))
2629 return true;
Chao Yu859fca62021-05-26 14:29:27 +08002630
2631 /* swap file is migrating in aligned write mode */
2632 if (is_inode_flag_set(inode, FI_ALIGNED_WRITE))
2633 return true;
2634
Chao Yubb9e3bb8d2018-01-17 16:31:38 +08002635 if (fio) {
Chao Yub763f3b2021-04-28 17:20:31 +08002636 if (page_private_gcing(fio->page))
Chao Yubb9e3bb8d2018-01-17 16:31:38 +08002637 return true;
Chao Yub763f3b2021-04-28 17:20:31 +08002638 if (page_private_dummy(fio->page))
Chao Yubb9e3bb8d2018-01-17 16:31:38 +08002639 return true;
Daniel Rosenberg43549942018-08-20 19:21:43 -07002640 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
2641 f2fs_is_checkpointed_data(sbi, fio->old_blkaddr)))
2642 return true;
Chao Yubb9e3bb8d2018-01-17 16:31:38 +08002643 }
2644 return false;
2645}
2646
Hou Pengyang7eab0c02017-04-25 12:45:12 +00002647static inline bool need_inplace_update(struct f2fs_io_info *fio)
2648{
2649 struct inode *inode = fio->page->mapping->host;
2650
Chao Yu4d57b862018-05-30 00:20:41 +08002651 if (f2fs_should_update_outplace(inode, fio))
Hou Pengyang7eab0c02017-04-25 12:45:12 +00002652 return false;
2653
Chao Yu4d57b862018-05-30 00:20:41 +08002654 return f2fs_should_update_inplace(inode, fio);
Hou Pengyang7eab0c02017-04-25 12:45:12 +00002655}
2656
Chao Yu4d57b862018-05-30 00:20:41 +08002657int f2fs_do_write_data_page(struct f2fs_io_info *fio)
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09002658{
Jaegeuk Kim05ca3632015-04-23 14:38:15 -07002659 struct page *page = fio->page;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09002660 struct inode *inode = page->mapping->host;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09002661 struct dnode_of_data dn;
Chao Yu94afd6d2021-08-04 10:23:48 +08002662 struct extent_info ei = {0, };
Chao Yu77357302018-07-17 00:02:17 +08002663 struct node_info ni;
Hou Pengyange959c8f2017-04-25 12:45:13 +00002664 bool ipu_force = false;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09002665 int err = 0;
2666
2667 set_new_dnode(&dn, inode, NULL, NULL, 0);
Hou Pengyange959c8f2017-04-25 12:45:13 +00002668 if (need_inplace_update(fio) &&
2669 f2fs_lookup_extent_cache(inode, page->index, &ei)) {
2670 fio->old_blkaddr = ei.blk + page->index - ei.fofs;
Jaegeuk Kima8177372017-04-24 15:20:16 -07002671
Chao Yuc9b60782018-08-01 19:13:44 +08002672 if (!f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr,
Chao Yu93770ab2019-04-15 15:26:32 +08002673 DATA_GENERIC_ENHANCE))
Chao Yu10f966b2019-06-20 11:36:14 +08002674 return -EFSCORRUPTED;
Chao Yuc9b60782018-08-01 19:13:44 +08002675
2676 ipu_force = true;
2677 fio->need_lock = LOCK_DONE;
2678 goto got_it;
Hou Pengyange959c8f2017-04-25 12:45:13 +00002679 }
Hou Pengyang279d6df2017-04-27 00:17:21 +08002680
Jaegeuk Kimd29460e2017-06-21 17:52:39 -07002681	/* avoid deadlock between page->lock and f2fs_lock_op */
2682 if (fio->need_lock == LOCK_REQ && !f2fs_trylock_op(fio->sbi))
2683 return -EAGAIN;
Hou Pengyang279d6df2017-04-27 00:17:21 +08002684
Chao Yu4d57b862018-05-30 00:20:41 +08002685 err = f2fs_get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09002686 if (err)
Hou Pengyang279d6df2017-04-27 00:17:21 +08002687 goto out;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09002688
Chao Yu28bc1062016-02-06 14:40:34 +08002689 fio->old_blkaddr = dn.data_blkaddr;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09002690
2691 /* This page is already truncated */
Chao Yu7a9d7542016-02-22 18:36:38 +08002692 if (fio->old_blkaddr == NULL_ADDR) {
Jaegeuk Kim2bca1e22015-02-25 19:25:01 -08002693 ClearPageUptodate(page);
Chao Yub763f3b2021-04-28 17:20:31 +08002694 clear_page_private_gcing(page);
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09002695 goto out_writepage;
Jaegeuk Kim2bca1e22015-02-25 19:25:01 -08002696 }
Hou Pengyange959c8f2017-04-25 12:45:13 +00002697got_it:
Chao Yuc9b60782018-08-01 19:13:44 +08002698 if (__is_valid_data_blkaddr(fio->old_blkaddr) &&
2699 !f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr,
Chao Yu93770ab2019-04-15 15:26:32 +08002700 DATA_GENERIC_ENHANCE)) {
Chao Yu10f966b2019-06-20 11:36:14 +08002701 err = -EFSCORRUPTED;
Chao Yuc9b60782018-08-01 19:13:44 +08002702 goto out_writepage;
2703 }
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09002704 /*
2705	 * If the current allocation needs SSR,
2706	 * it is better to do in-place writes for the updated data.
2707 */
Chao Yu93770ab2019-04-15 15:26:32 +08002708 if (ipu_force ||
2709 (__is_valid_data_blkaddr(fio->old_blkaddr) &&
Chao Yu7b525dd2018-05-23 22:25:08 +08002710 need_inplace_update(fio))) {
Chao Yu4c8ff702019-11-01 18:07:14 +08002711 err = f2fs_encrypt_one_page(fio);
Jaegeuk Kimcc156202017-05-12 13:51:34 -07002712 if (err)
2713 goto out_writepage;
2714
2715 set_page_writeback(page);
Jaegeuk Kim17c50032018-04-11 23:09:04 -07002716 ClearPageError(page);
Hou Pengyang279d6df2017-04-27 00:17:21 +08002717 f2fs_put_dnode(&dn);
Jaegeuk Kimcc156202017-05-12 13:51:34 -07002718 if (fio->need_lock == LOCK_REQ)
Hou Pengyang279d6df2017-04-27 00:17:21 +08002719 f2fs_unlock_op(fio->sbi);
Chao Yu4d57b862018-05-30 00:20:41 +08002720 err = f2fs_inplace_write_data(fio);
Chao Yu6492a332019-02-21 20:37:14 +08002721 if (err) {
Satya Tangirala27aacd22020-07-02 01:56:06 +00002722 if (fscrypt_inode_uses_fs_layer_crypto(inode))
Eric Biggersd2d07272019-05-20 09:29:39 -07002723 fscrypt_finalize_bounce_page(&fio->encrypted_page);
Chao Yu6492a332019-02-21 20:37:14 +08002724 if (PageWriteback(page))
2725 end_page_writeback(page);
Chao Yucd23ffa92019-04-15 15:30:53 +08002726 } else {
2727 set_inode_flag(inode, FI_UPDATE_WRITE);
Chao Yu6492a332019-02-21 20:37:14 +08002728 }
Hou Pengyang7eab0c02017-04-25 12:45:12 +00002729 trace_f2fs_do_write_data_page(fio->page, IPU);
Hou Pengyang279d6df2017-04-27 00:17:21 +08002730 return err;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09002731 }
Hou Pengyang279d6df2017-04-27 00:17:21 +08002732
Jaegeuk Kimcc156202017-05-12 13:51:34 -07002733 if (fio->need_lock == LOCK_RETRY) {
2734 if (!f2fs_trylock_op(fio->sbi)) {
2735 err = -EAGAIN;
2736 goto out_writepage;
2737 }
2738 fio->need_lock = LOCK_REQ;
2739 }
2740
Chao Yu77357302018-07-17 00:02:17 +08002741 err = f2fs_get_node_info(fio->sbi, dn.nid, &ni);
2742 if (err)
2743 goto out_writepage;
2744
2745 fio->version = ni.version;
2746
Chao Yu4c8ff702019-11-01 18:07:14 +08002747 err = f2fs_encrypt_one_page(fio);
Jaegeuk Kimcc156202017-05-12 13:51:34 -07002748 if (err)
2749 goto out_writepage;
2750
2751 set_page_writeback(page);
Jaegeuk Kim17c50032018-04-11 23:09:04 -07002752 ClearPageError(page);
Jaegeuk Kimcc156202017-05-12 13:51:34 -07002753
Chao Yu4c8ff702019-11-01 18:07:14 +08002754 if (fio->compr_blocks && fio->old_blkaddr == COMPRESS_ADDR)
2755 f2fs_i_compr_blocks_update(inode, fio->compr_blocks - 1, false);
2756
Hou Pengyang279d6df2017-04-27 00:17:21 +08002757 /* LFS mode write path */
Chao Yu4d57b862018-05-30 00:20:41 +08002758 f2fs_outplace_write_data(&dn, fio);
Hou Pengyang279d6df2017-04-27 00:17:21 +08002759 trace_f2fs_do_write_data_page(page, OPU);
2760 set_inode_flag(inode, FI_APPEND_WRITE);
2761 if (page->index == 0)
2762 set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09002763out_writepage:
2764 f2fs_put_dnode(&dn);
Hou Pengyang279d6df2017-04-27 00:17:21 +08002765out:
Jaegeuk Kimcc156202017-05-12 13:51:34 -07002766 if (fio->need_lock == LOCK_REQ)
Hou Pengyang279d6df2017-04-27 00:17:21 +08002767 f2fs_unlock_op(fio->sbi);
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09002768 return err;
2769}
2770
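/*
 * Illustrative sketch (not f2fs code): f2fs_do_write_data_page() above
 * never blocks on f2fs_lock_op(); both LOCK_REQ and LOCK_RETRY use
 * f2fs_trylock_op() and surface -EAGAIN so the caller decides how to
 * retry, while LOCK_DONE means locking was already handled. A user-space
 * model with a pthread mutex standing in for the op lock:
 */
#if 0	/* illustrative only; compile standalone with -lpthread */
#include <errno.h>
#include <pthread.h>

enum need_lock { LOCK_REQ, LOCK_DONE, LOCK_RETRY };

static int do_write(pthread_mutex_t *op, enum need_lock mode)
{
	if (mode != LOCK_DONE) {
		if (pthread_mutex_trylock(op))
			return -EAGAIN;	/* surface contention to the caller */
		mode = LOCK_REQ;	/* remember to unlock on the way out */
	}
	/* ... issue the page write here ... */
	if (mode == LOCK_REQ)
		pthread_mutex_unlock(op);
	return 0;
}
#endif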
Chao Yu4c8ff702019-11-01 18:07:14 +08002771int f2fs_write_single_data_page(struct page *page, int *submitted,
Chao Yu8648de22019-02-19 16:15:29 +08002772 struct bio **bio,
2773 sector_t *last_block,
Chao Yub0af6d42017-08-02 23:21:48 +08002774 struct writeback_control *wbc,
Chao Yu4c8ff702019-11-01 18:07:14 +08002775 enum iostat_type io_type,
Chao Yu3afae092021-01-11 17:42:53 +08002776 int compr_blocks,
2777 bool allow_balance)
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09002778{
2779 struct inode *inode = page->mapping->host;
Jaegeuk Kim40813632014-09-02 15:31:18 -07002780 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09002781 loff_t i_size = i_size_read(inode);
Chao Yu4c8ff702019-11-01 18:07:14 +08002782 const pgoff_t end_index = ((unsigned long long)i_size)
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002783 >> PAGE_SHIFT;
Chao Yu1f0d5c92019-11-07 17:29:00 +08002784 loff_t psize = (loff_t)(page->index + 1) << PAGE_SHIFT;
Huajun Li9ffe0fb2013-11-10 23:13:20 +08002785 unsigned offset = 0;
Jaegeuk Kim39936832012-11-22 16:21:29 +09002786 bool need_balance_fs = false;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09002787 int err = 0;
Jaegeuk Kim458e6192013-12-11 13:54:01 +09002788 struct f2fs_io_info fio = {
Jaegeuk Kim05ca3632015-04-23 14:38:15 -07002789 .sbi = sbi,
Chao Yu39d787b2017-09-29 13:59:38 +08002790 .ino = inode->i_ino,
Jaegeuk Kim458e6192013-12-11 13:54:01 +09002791 .type = DATA,
Mike Christie04d328d2016-06-05 14:31:55 -05002792 .op = REQ_OP_WRITE,
Jens Axboe76372412016-11-01 10:00:38 -06002793 .op_flags = wbc_to_write_flags(wbc),
Hou Pengyange959c8f2017-04-25 12:45:13 +00002794 .old_blkaddr = NULL_ADDR,
Jaegeuk Kim05ca3632015-04-23 14:38:15 -07002795 .page = page,
Jaegeuk Kim4375a332015-04-23 12:04:33 -07002796 .encrypted_page = NULL,
Jaegeuk Kimd68f7352017-02-03 17:44:04 -08002797 .submitted = false,
Chao Yu4c8ff702019-11-01 18:07:14 +08002798 .compr_blocks = compr_blocks,
Jaegeuk Kimcc156202017-05-12 13:51:34 -07002799 .need_lock = LOCK_RETRY,
Chao Yub0af6d42017-08-02 23:21:48 +08002800 .io_type = io_type,
Yufen Yu578c6472018-01-09 19:33:39 +08002801 .io_wbc = wbc,
Chao Yu8648de22019-02-19 16:15:29 +08002802 .bio = bio,
2803 .last_block = last_block,
Jaegeuk Kim458e6192013-12-11 13:54:01 +09002804 };
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09002805
Chao Yuecda0de2014-05-06 16:48:26 +08002806 trace_f2fs_writepage(page, DATA);
2807
Chao Yudb198ae2018-01-18 17:29:10 +08002808	/* we should bypass data pages to let the kworker jobs proceed */
2809 if (unlikely(f2fs_cp_error(sbi))) {
2810 mapping_set_error(page->mapping, -EIO);
Chao Yu1174abf2018-05-28 16:59:26 +08002811 /*
2812	 * don't drop any dirty dentry pages, to keep the latest
2813	 * directory structure.
2814 */
2815 if (S_ISDIR(inode->i_mode))
2816 goto redirty_out;
Chao Yudb198ae2018-01-18 17:29:10 +08002817 goto out;
2818 }
2819
Chao Yu0771fcc2017-06-29 23:20:45 +08002820 if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
2821 goto redirty_out;
2822
Chao Yu4c8ff702019-11-01 18:07:14 +08002823 if (page->index < end_index ||
2824 f2fs_verity_in_progress(inode) ||
2825 compr_blocks)
Jaegeuk Kim39936832012-11-22 16:21:29 +09002826 goto write;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09002827
2828 /*
2829	 * If the offset is beyond the end of the file,
2830	 * this page does not need to be written to disk.
2831 */
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002832 offset = i_size & (PAGE_SIZE - 1);
Jaegeuk Kim76f60262014-04-15 16:04:15 +09002833 if ((page->index >= end_index + 1) || !offset)
Jaegeuk Kim39936832012-11-22 16:21:29 +09002834 goto out;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09002835
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002836 zero_user_segment(page, offset, PAGE_SIZE);
Jaegeuk Kim39936832012-11-22 16:21:29 +09002837write:
Jaegeuk Kim1e843712014-12-09 06:08:59 -08002838 if (f2fs_is_drop_cache(inode))
2839 goto out;
Jaegeuk Kime6e5f562016-04-14 16:48:52 -07002840	/* we should not write the 0'th page, which holds the journal header */
2841 if (f2fs_is_volatile_file(inode) && (!page->index ||
2842 (!wbc->for_reclaim &&
Chao Yu4d57b862018-05-30 00:20:41 +08002843 f2fs_available_free_memory(sbi, BASE_CHECK))))
Jaegeuk Kim1e843712014-12-09 06:08:59 -08002844 goto redirty_out;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09002845
Jaegeuk Kim435cbab2020-04-09 10:25:21 -07002846 /* Dentry/quota blocks are controlled by checkpoint */
2847 if (S_ISDIR(inode->i_mode) || IS_NOQUOTA(inode)) {
Chao Yu79963d92020-06-18 14:36:23 +08002848 /*
2849	 * We need to wait for node_write to avoid block allocation during
2850	 * checkpoint. This can only happen for quota writes, which can
2851	 * cause the discard race condition below.
2852 */
2853 if (IS_NOQUOTA(inode))
2854 down_read(&sbi->node_write);
2855
Jaegeuk Kimcc156202017-05-12 13:51:34 -07002856 fio.need_lock = LOCK_DONE;
Chao Yu4d57b862018-05-30 00:20:41 +08002857 err = f2fs_do_write_data_page(&fio);
Chao Yu79963d92020-06-18 14:36:23 +08002858
2859 if (IS_NOQUOTA(inode))
2860 up_read(&sbi->node_write);
2861
Jaegeuk Kimb230e6c2016-05-29 21:18:23 -07002862 goto done;
2863 }
2864
Jaegeuk Kim8618b882014-02-17 19:29:27 +09002865 if (!wbc->for_reclaim)
2866 need_balance_fs = true;
Jaegeuk Kim7f3037a2016-09-01 12:02:51 -07002867 else if (has_not_enough_free_secs(sbi, 0, 0))
Jaegeuk Kim39936832012-11-22 16:21:29 +09002868 goto redirty_out;
Jaegeuk Kimef095d12017-03-24 20:05:13 -04002869 else
2870 set_inode_flag(inode, FI_HOT_DATA);
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09002871
Jaegeuk Kimb3d208f2014-10-23 19:48:09 -07002872 err = -EAGAIN;
Yunlei Hedd7b2332017-02-23 20:31:20 +08002873 if (f2fs_has_inline_data(inode)) {
Jaegeuk Kimb3d208f2014-10-23 19:48:09 -07002874 err = f2fs_write_inline_data(inode, page);
Yunlei Hedd7b2332017-02-23 20:31:20 +08002875 if (!err)
2876 goto out;
2877 }
Hou Pengyang279d6df2017-04-27 00:17:21 +08002878
Jaegeuk Kimcc156202017-05-12 13:51:34 -07002879 if (err == -EAGAIN) {
Chao Yu4d57b862018-05-30 00:20:41 +08002880 err = f2fs_do_write_data_page(&fio);
Jaegeuk Kimcc156202017-05-12 13:51:34 -07002881 if (err == -EAGAIN) {
2882 fio.need_lock = LOCK_REQ;
Chao Yu4d57b862018-05-30 00:20:41 +08002883 err = f2fs_do_write_data_page(&fio);
Jaegeuk Kimcc156202017-05-12 13:51:34 -07002884 }
2885 }
Chao Yua0d00fa2017-10-09 17:55:19 +08002886
Chao Yueb449792018-01-17 16:31:37 +08002887 if (err) {
2888 file_set_keep_isize(inode);
2889 } else {
Chao Yuc10c9822020-02-27 19:30:03 +08002890 spin_lock(&F2FS_I(inode)->i_size_lock);
Chao Yueb449792018-01-17 16:31:37 +08002891 if (F2FS_I(inode)->last_disk_size < psize)
2892 F2FS_I(inode)->last_disk_size = psize;
Chao Yuc10c9822020-02-27 19:30:03 +08002893 spin_unlock(&F2FS_I(inode)->i_size_lock);
Chao Yueb449792018-01-17 16:31:37 +08002894 }
Hou Pengyang279d6df2017-04-27 00:17:21 +08002895
Jaegeuk Kim8618b882014-02-17 19:29:27 +09002896done:
2897 if (err && err != -ENOENT)
2898 goto redirty_out;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09002899
Jaegeuk Kim39936832012-11-22 16:21:29 +09002900out:
Jaegeuk Kima7ffdbe2014-09-12 15:53:45 -07002901 inode_dec_dirty_pages(inode);
Chao Yu2baf0782018-07-27 18:15:16 +08002902 if (err) {
Jaegeuk Kim2bca1e22015-02-25 19:25:01 -08002903 ClearPageUptodate(page);
Chao Yub763f3b2021-04-28 17:20:31 +08002904 clear_page_private_gcing(page);
Chao Yu2baf0782018-07-27 18:15:16 +08002905 }
Chao Yu0c3a5792016-01-18 18:28:11 +08002906
2907 if (wbc->for_reclaim) {
Chao Yubab475c2018-09-27 23:41:16 +08002908 f2fs_submit_merged_write_cond(sbi, NULL, page, 0, DATA);
Jaegeuk Kimef095d12017-03-24 20:05:13 -04002909 clear_inode_flag(inode, FI_HOT_DATA);
Chao Yu4d57b862018-05-30 00:20:41 +08002910 f2fs_remove_dirty_inode(inode);
Jaegeuk Kimd68f7352017-02-03 17:44:04 -08002911 submitted = NULL;
Chao Yueb7e8132015-11-10 18:45:07 +08002912 }
Chao Yu0c3a5792016-01-18 18:28:11 +08002913 unlock_page(page);
Chao Yu186857c2019-04-02 18:52:19 +08002914 if (!S_ISDIR(inode->i_mode) && !IS_NOQUOTA(inode) &&
Chao Yu3afae092021-01-11 17:42:53 +08002915 !F2FS_I(inode)->cp_task && allow_balance)
Jaegeuk Kima7881892017-04-20 13:51:57 -07002916 f2fs_balance_fs(sbi, need_balance_fs);
Chao Yu0c3a5792016-01-18 18:28:11 +08002917
Jaegeuk Kimd68f7352017-02-03 17:44:04 -08002918 if (unlikely(f2fs_cp_error(sbi))) {
Jaegeuk Kimb9109b02017-05-10 11:28:38 -07002919 f2fs_submit_merged_write(sbi, DATA);
Chao Yu0b20fce2019-09-30 18:53:25 +08002920 f2fs_submit_merged_ipu_write(sbi, bio, NULL);
Jaegeuk Kimd68f7352017-02-03 17:44:04 -08002921 submitted = NULL;
2922 }
2923
2924 if (submitted)
Chao Yu4c8ff702019-11-01 18:07:14 +08002925 *submitted = fio.submitted ? 1 : 0;
Chao Yu0c3a5792016-01-18 18:28:11 +08002926
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09002927 return 0;
2928
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09002929redirty_out:
Jaegeuk Kim76f60262014-04-15 16:04:15 +09002930 redirty_page_for_writepage(wbc, page);
Jaegeuk Kim5b19d282018-05-03 23:26:02 -07002931 /*
2932	 * pageout() in MM translates EAGAIN, so it calls handle_write_error()
2933	 * -> mapping_set_error() -> set_bit(AS_EIO, ...).
2934	 * file_write_and_wait_range() will then see the EIO error, which is
2935	 * critical to fsync()'s return value after an atomic_write failure.
2936 */
2937 if (!err || wbc->for_reclaim)
Chao Yu0002b612016-11-28 19:13:43 -08002938 return AOP_WRITEPAGE_ACTIVATE;
Jaegeuk Kimb230e6c2016-05-29 21:18:23 -07002939 unlock_page(page);
2940 return err;
Namjae Jeonfa9150a2013-01-15 16:45:24 +09002941}
2942
Jaegeuk Kimf566bae2017-02-03 17:18:00 -08002943static int f2fs_write_data_page(struct page *page,
2944 struct writeback_control *wbc)
2945{
Chao Yu4c8ff702019-11-01 18:07:14 +08002946#ifdef CONFIG_F2FS_FS_COMPRESSION
2947 struct inode *inode = page->mapping->host;
2948
2949 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
2950 goto out;
2951
2952 if (f2fs_compressed_file(inode)) {
2953 if (f2fs_is_compressed_cluster(inode, page->index)) {
2954 redirty_page_for_writepage(wbc, page);
2955 return AOP_WRITEPAGE_ACTIVATE;
2956 }
2957 }
2958out:
2959#endif
2960
2961 return f2fs_write_single_data_page(page, NULL, NULL, NULL,
Chao Yu3afae092021-01-11 17:42:53 +08002962 wbc, FS_DATA_IO, 0, true);
Jaegeuk Kimf566bae2017-02-03 17:18:00 -08002963}
2964
Chao Yu8f46dca2015-07-14 18:56:10 +08002965/*
2966 * This function was copied from write_cache_pages in mm/page-writeback.c.
2967 * The major change is that the write step for cold data pages is handled
2968 * separately from warm/hot data pages.
2969 */
2970static int f2fs_write_cache_pages(struct address_space *mapping,
Chao Yub0af6d42017-08-02 23:21:48 +08002971 struct writeback_control *wbc,
2972 enum iostat_type io_type)
Chao Yu8f46dca2015-07-14 18:56:10 +08002973{
2974 int ret = 0;
Chao Yu4c8ff702019-11-01 18:07:14 +08002975 int done = 0, retry = 0;
Chao Yu8f46dca2015-07-14 18:56:10 +08002976 struct pagevec pvec;
Chao Yuc29fd0c2018-06-04 23:20:36 +08002977 struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
Chao Yu8648de22019-02-19 16:15:29 +08002978 struct bio *bio = NULL;
2979 sector_t last_block;
Chao Yu4c8ff702019-11-01 18:07:14 +08002980#ifdef CONFIG_F2FS_FS_COMPRESSION
2981 struct inode *inode = mapping->host;
2982 struct compress_ctx cc = {
2983 .inode = inode,
2984 .log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
2985 .cluster_size = F2FS_I(inode)->i_cluster_size,
2986 .cluster_idx = NULL_CLUSTER,
2987 .rpages = NULL,
2988 .nr_rpages = 0,
2989 .cpages = NULL,
2990 .rbuf = NULL,
2991 .cbuf = NULL,
2992 .rlen = PAGE_SIZE * F2FS_I(inode)->i_cluster_size,
2993 .private = NULL,
2994 };
2995#endif
Chao Yu8f46dca2015-07-14 18:56:10 +08002996 int nr_pages;
Chao Yu8f46dca2015-07-14 18:56:10 +08002997 pgoff_t index;
2998 pgoff_t end; /* Inclusive */
2999 pgoff_t done_index;
Chao Yu8f46dca2015-07-14 18:56:10 +08003000 int range_whole = 0;
Matthew Wilcox10bbd232017-12-05 17:30:38 -05003001 xa_mark_t tag;
Chao Yubab475c2018-09-27 23:41:16 +08003002 int nwritten = 0;
Chao Yu4c8ff702019-11-01 18:07:14 +08003003 int submitted = 0;
3004 int i;
Chao Yu8f46dca2015-07-14 18:56:10 +08003005
Mel Gorman86679822017-11-15 17:37:52 -08003006 pagevec_init(&pvec);
Jaegeuk Kim46ae9572016-05-25 20:57:16 -07003007
Jaegeuk Kimef095d12017-03-24 20:05:13 -04003008 if (get_dirty_pages(mapping->host) <=
3009 SM_I(F2FS_M_SB(mapping))->min_hot_blocks)
3010 set_inode_flag(mapping->host, FI_HOT_DATA);
3011 else
3012 clear_inode_flag(mapping->host, FI_HOT_DATA);
3013
Chao Yu8f46dca2015-07-14 18:56:10 +08003014 if (wbc->range_cyclic) {
Jason Yan4df7a75f2020-06-15 16:51:32 +08003015 index = mapping->writeback_index; /* prev offset */
Chao Yu8f46dca2015-07-14 18:56:10 +08003016 end = -1;
3017 } else {
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003018 index = wbc->range_start >> PAGE_SHIFT;
3019 end = wbc->range_end >> PAGE_SHIFT;
Chao Yu8f46dca2015-07-14 18:56:10 +08003020 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
3021 range_whole = 1;
Chao Yu8f46dca2015-07-14 18:56:10 +08003022 }
3023 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
3024 tag = PAGECACHE_TAG_TOWRITE;
3025 else
3026 tag = PAGECACHE_TAG_DIRTY;
3027retry:
Chao Yu4c8ff702019-11-01 18:07:14 +08003028 retry = 0;
Chao Yu8f46dca2015-07-14 18:56:10 +08003029 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
3030 tag_pages_for_writeback(mapping, index, end);
3031 done_index = index;
Chao Yu4c8ff702019-11-01 18:07:14 +08003032 while (!done && !retry && (index <= end)) {
Jan Kara69c4f352017-11-15 17:34:48 -08003033 nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
Jan Kara67fd7072017-11-15 17:35:19 -08003034 tag);
Chao Yu8f46dca2015-07-14 18:56:10 +08003035 if (nr_pages == 0)
3036 break;
3037
3038 for (i = 0; i < nr_pages; i++) {
3039 struct page *page = pvec.pages[i];
Chao Yu4c8ff702019-11-01 18:07:14 +08003040 bool need_readd;
3041readd:
3042 need_readd = false;
3043#ifdef CONFIG_F2FS_FS_COMPRESSION
3044 if (f2fs_compressed_file(inode)) {
Fengnan Changb368cc52021-10-22 20:08:00 -07003045 void *fsdata = NULL;
3046 struct page *pagep;
3047 int ret2;
3048
Chao Yu4c8ff702019-11-01 18:07:14 +08003049 ret = f2fs_init_compress_ctx(&cc);
3050 if (ret) {
3051 done = 1;
3052 break;
3053 }
Chao Yu8f46dca2015-07-14 18:56:10 +08003054
Chao Yu4c8ff702019-11-01 18:07:14 +08003055 if (!f2fs_cluster_can_merge_page(&cc,
3056 page->index)) {
3057 ret = f2fs_write_multi_pages(&cc,
3058 &submitted, wbc, io_type);
3059 if (!ret)
3060 need_readd = true;
3061 goto result;
3062 }
3063
3064 if (unlikely(f2fs_cp_error(sbi)))
3065 goto lock_page;
3066
Fengnan Changb368cc52021-10-22 20:08:00 -07003067 if (!f2fs_cluster_is_empty(&cc))
3068 goto lock_page;
Chao Yu4c8ff702019-11-01 18:07:14 +08003069
Fengnan Changb368cc52021-10-22 20:08:00 -07003070 ret2 = f2fs_prepare_compress_overwrite(
Chao Yu4c8ff702019-11-01 18:07:14 +08003071 inode, &pagep,
3072 page->index, &fsdata);
Fengnan Changb368cc52021-10-22 20:08:00 -07003073 if (ret2 < 0) {
3074 ret = ret2;
3075 done = 1;
3076 break;
3077 } else if (ret2 &&
3078 (!f2fs_compress_write_end(inode,
3079 fsdata, page->index, 1) ||
3080 !f2fs_all_cluster_page_loaded(&cc,
3081 &pvec, i, nr_pages))) {
3082 retry = 1;
3083 break;
Chao Yu4c8ff702019-11-01 18:07:14 +08003084 }
3085 }
3086#endif
Chao Yuf8de4332018-05-23 22:25:09 +08003087			/* give priority to WB_SYNC threads */
Chao Yuc29fd0c2018-06-04 23:20:36 +08003088 if (atomic_read(&sbi->wb_sync_req[DATA]) &&
Chao Yuf8de4332018-05-23 22:25:09 +08003089 wbc->sync_mode == WB_SYNC_NONE) {
3090 done = 1;
3091 break;
3092 }
Chao Yu4c8ff702019-11-01 18:07:14 +08003093#ifdef CONFIG_F2FS_FS_COMPRESSION
3094lock_page:
3095#endif
Chao Yu8f46dca2015-07-14 18:56:10 +08003096 done_index = page->index;
Jaegeuk Kimd29460e2017-06-21 17:52:39 -07003097retry_write:
Chao Yu8f46dca2015-07-14 18:56:10 +08003098 lock_page(page);
3099
3100 if (unlikely(page->mapping != mapping)) {
3101continue_unlock:
3102 unlock_page(page);
3103 continue;
3104 }
3105
3106 if (!PageDirty(page)) {
3107 /* someone wrote it for us */
3108 goto continue_unlock;
3109 }
3110
Chao Yu8f46dca2015-07-14 18:56:10 +08003111 if (PageWriteback(page)) {
Chao Yu0b20fce2019-09-30 18:53:25 +08003112 if (wbc->sync_mode != WB_SYNC_NONE)
Jaegeuk Kimfec1d652016-01-20 23:43:51 +08003113 f2fs_wait_on_page_writeback(page,
Chao Yubae0ee72018-12-25 17:43:42 +08003114 DATA, true, true);
Chao Yu0b20fce2019-09-30 18:53:25 +08003115 else
Chao Yu8f46dca2015-07-14 18:56:10 +08003116 goto continue_unlock;
3117 }
3118
Chao Yu8f46dca2015-07-14 18:56:10 +08003119 if (!clear_page_dirty_for_io(page))
3120 goto continue_unlock;
3121
Chao Yu4c8ff702019-11-01 18:07:14 +08003122#ifdef CONFIG_F2FS_FS_COMPRESSION
3123 if (f2fs_compressed_file(inode)) {
3124 get_page(page);
3125 f2fs_compress_ctx_add_page(&cc, page);
3126 continue;
3127 }
3128#endif
3129 ret = f2fs_write_single_data_page(page, &submitted,
Chao Yu3afae092021-01-11 17:42:53 +08003130 &bio, &last_block, wbc, io_type,
3131 0, true);
Chao Yu4c8ff702019-11-01 18:07:14 +08003132 if (ret == AOP_WRITEPAGE_ACTIVATE)
3133 unlock_page(page);
3134#ifdef CONFIG_F2FS_FS_COMPRESSION
3135result:
3136#endif
3137 nwritten += submitted;
3138 wbc->nr_to_write -= submitted;
3139
Chao Yu8f46dca2015-07-14 18:56:10 +08003140 if (unlikely(ret)) {
Chao Yu0002b612016-11-28 19:13:43 -08003141 /*
3142 * keep nr_to_write, since vfs uses this to
3143 * get # of written pages.
3144 */
3145 if (ret == AOP_WRITEPAGE_ACTIVATE) {
Chao Yu0002b612016-11-28 19:13:43 -08003146 ret = 0;
Chao Yu4c8ff702019-11-01 18:07:14 +08003147 goto next;
Jaegeuk Kimd29460e2017-06-21 17:52:39 -07003148 } else if (ret == -EAGAIN) {
3149 ret = 0;
3150 if (wbc->sync_mode == WB_SYNC_ALL) {
3151 cond_resched();
3152 congestion_wait(BLK_RW_ASYNC,
Chao Yu5df7731f2020-02-17 17:45:44 +08003153 DEFAULT_IO_TIMEOUT);
Jaegeuk Kimd29460e2017-06-21 17:52:39 -07003154 goto retry_write;
3155 }
Chao Yu4c8ff702019-11-01 18:07:14 +08003156 goto next;
Chao Yu0002b612016-11-28 19:13:43 -08003157 }
Jaegeuk Kimb230e6c2016-05-29 21:18:23 -07003158 done_index = page->index + 1;
3159 done = 1;
3160 break;
Chao Yu8f46dca2015-07-14 18:56:10 +08003161 }
3162
Chao Yu4c8ff702019-11-01 18:07:14 +08003163 if (wbc->nr_to_write <= 0 &&
Jaegeuk Kim687de7f2017-03-28 18:07:38 -07003164 wbc->sync_mode == WB_SYNC_NONE) {
Chao Yu8f46dca2015-07-14 18:56:10 +08003165 done = 1;
3166 break;
3167 }
Chao Yu4c8ff702019-11-01 18:07:14 +08003168next:
3169 if (need_readd)
3170 goto readd;
Chao Yu8f46dca2015-07-14 18:56:10 +08003171 }
3172 pagevec_release(&pvec);
3173 cond_resched();
3174 }
Chao Yu4c8ff702019-11-01 18:07:14 +08003175#ifdef CONFIG_F2FS_FS_COMPRESSION
3176	/* flush remaining pages in the compress cluster */
3177 if (f2fs_compressed_file(inode) && !f2fs_cluster_is_empty(&cc)) {
3178 ret = f2fs_write_multi_pages(&cc, &submitted, wbc, io_type);
3179 nwritten += submitted;
3180 wbc->nr_to_write -= submitted;
3181 if (ret) {
3182 done = 1;
3183 retry = 0;
3184 }
3185 }
Jaegeuk Kimadfc6942020-09-23 00:54:50 -07003186 if (f2fs_compressed_file(inode))
Chao Yu8bfbfb02021-05-10 17:30:32 +08003187 f2fs_destroy_compress_ctx(&cc, false);
Chao Yu4c8ff702019-11-01 18:07:14 +08003188#endif
Sahitya Tummalae78790f2020-06-02 18:11:47 +05303189 if (retry) {
Chao Yu8f46dca2015-07-14 18:56:10 +08003190 index = 0;
Sahitya Tummalae78790f2020-06-02 18:11:47 +05303191 end = -1;
Chao Yu8f46dca2015-07-14 18:56:10 +08003192 goto retry;
3193 }
Sahitya Tummalae78790f2020-06-02 18:11:47 +05303194 if (wbc->range_cyclic && !done)
3195 done_index = 0;
Chao Yu8f46dca2015-07-14 18:56:10 +08003196 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
3197 mapping->writeback_index = done_index;
3198
Chao Yubab475c2018-09-27 23:41:16 +08003199 if (nwritten)
Jaegeuk Kimb9109b02017-05-10 11:28:38 -07003200 f2fs_submit_merged_write_cond(F2FS_M_SB(mapping), mapping->host,
Chao Yubab475c2018-09-27 23:41:16 +08003201 NULL, 0, DATA);
Chao Yu8648de22019-02-19 16:15:29 +08003202 /* submit cached bio of IPU write */
3203 if (bio)
Chao Yu0b20fce2019-09-30 18:53:25 +08003204 f2fs_submit_merged_ipu_write(sbi, &bio, NULL);
Chao Yu6ca56ca2016-09-29 18:50:11 +08003205
Chao Yu8f46dca2015-07-14 18:56:10 +08003206 return ret;
3207}
3208
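/*
 * Illustrative sketch (not f2fs code): the walk above keeps the shape of
 * write_cache_pages(). For WB_SYNC_ALL, dirty pages are tagged TOWRITE
 * first so pages dirtied during the walk are not lost, and when the
 * compression path asks for a retry the whole range is walked again from
 * index 0. Simplified model; tag_range_towrite(), write_if_dirty() and
 * need_retry() are hypothetical helpers.
 */
#if 0	/* illustrative only */
static void walk_pages(unsigned long start, int sync_all)
{
	unsigned long index = start;
	unsigned long end = (unsigned long)-1;
	int retried = 0;

retry:
	if (sync_all)
		tag_range_towrite(index, end);
	for (; index <= end; index++)
		write_if_dirty(index);
	if (need_retry() && !retried) {	/* e.g. a partial compress cluster */
		retried = 1;
		index = 0;
		goto retry;
	}
}
#endif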
Jaegeuk Kim853137c2018-08-09 17:53:34 -07003209static inline bool __should_serialize_io(struct inode *inode,
3210 struct writeback_control *wbc)
3211{
Chao Yu040d2bb2019-05-20 17:36:59 +08003212	/* to avoid deadlock in the data flush path */
3213 if (F2FS_I(inode)->cp_task)
3214 return false;
Chao Yub13f67ff2020-03-19 19:57:57 +08003215
3216 if (!S_ISREG(inode->i_mode))
3217 return false;
3218 if (IS_NOQUOTA(inode))
3219 return false;
3220
Daeho Jeong602a16d2020-12-01 13:08:02 +09003221 if (f2fs_need_compress_data(inode))
Chao Yub13f67ff2020-03-19 19:57:57 +08003222 return true;
Jaegeuk Kim853137c2018-08-09 17:53:34 -07003223 if (wbc->sync_mode != WB_SYNC_ALL)
3224 return true;
3225 if (get_dirty_pages(inode) >= SM_I(F2FS_I_SB(inode))->min_seq_blocks)
3226 return true;
3227 return false;
3228}
3229
Chao Yufc99fe22018-05-30 00:20:39 +08003230static int __f2fs_write_data_pages(struct address_space *mapping,
Chao Yub0af6d42017-08-02 23:21:48 +08003231 struct writeback_control *wbc,
3232 enum iostat_type io_type)
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09003233{
3234 struct inode *inode = mapping->host;
Jaegeuk Kim40813632014-09-02 15:31:18 -07003235 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
Jaegeuk Kim9dfa1ba2016-07-13 19:33:19 -07003236 struct blk_plug plug;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09003237 int ret;
Jaegeuk Kim853137c2018-08-09 17:53:34 -07003238 bool locked = false;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09003239
P J Pcfb185a2013-04-03 11:38:00 +09003240	/* deal with chardevs and other special files */
3241 if (!mapping->a_ops->writepage)
3242 return 0;
3243
Chao Yu6a290542015-07-17 18:02:39 +08003244 /* skip writing if there is no dirty page in this inode */
3245 if (!get_dirty_pages(inode) && wbc->sync_mode == WB_SYNC_NONE)
3246 return 0;
3247
Chao Yu0771fcc2017-06-29 23:20:45 +08003248 /* during POR, we don't need to trigger writepage at all. */
3249 if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
3250 goto skip_write;
3251
Chao Yuaf033b22018-09-20 20:05:00 +08003252 if ((S_ISDIR(inode->i_mode) || IS_NOQUOTA(inode)) &&
3253 wbc->sync_mode == WB_SYNC_NONE &&
Jaegeuk Kima1257022015-10-08 10:40:07 -07003254 get_dirty_pages(inode) < nr_pages_to_skip(sbi, DATA) &&
Chao Yu4d57b862018-05-30 00:20:41 +08003255 f2fs_available_free_memory(sbi, DIRTY_DENTS))
Jaegeuk Kima1257022015-10-08 10:40:07 -07003256 goto skip_write;
3257
Chao Yud323d002015-10-27 09:53:45 +08003258	/* skip writing during file defragmentation */
Jaegeuk Kim91942322016-05-20 10:13:22 -07003259 if (is_inode_flag_set(inode, FI_DO_DEFRAG))
Chao Yud323d002015-10-27 09:53:45 +08003260 goto skip_write;
3261
Yunlei Hed31c7c32016-02-04 16:14:00 +08003262 trace_f2fs_writepages(mapping->host, wbc, DATA);
3263
Jaegeuk Kim687de7f2017-03-28 18:07:38 -07003264	/* to avoid splitting IOs due to mixed WB_SYNC_ALL and WB_SYNC_NONE */
3265 if (wbc->sync_mode == WB_SYNC_ALL)
Chao Yuc29fd0c2018-06-04 23:20:36 +08003266 atomic_inc(&sbi->wb_sync_req[DATA]);
3267 else if (atomic_read(&sbi->wb_sync_req[DATA]))
Jaegeuk Kim687de7f2017-03-28 18:07:38 -07003268 goto skip_write;
3269
Jaegeuk Kim853137c2018-08-09 17:53:34 -07003270 if (__should_serialize_io(inode, wbc)) {
3271 mutex_lock(&sbi->writepages);
3272 locked = true;
3273 }
3274
Jaegeuk Kim9dfa1ba2016-07-13 19:33:19 -07003275 blk_start_plug(&plug);
Chao Yub0af6d42017-08-02 23:21:48 +08003276 ret = f2fs_write_cache_pages(mapping, wbc, io_type);
Jaegeuk Kim9dfa1ba2016-07-13 19:33:19 -07003277 blk_finish_plug(&plug);
Jaegeuk Kim687de7f2017-03-28 18:07:38 -07003278
Jaegeuk Kim853137c2018-08-09 17:53:34 -07003279 if (locked)
3280 mutex_unlock(&sbi->writepages);
3281
Jaegeuk Kim687de7f2017-03-28 18:07:38 -07003282 if (wbc->sync_mode == WB_SYNC_ALL)
Chao Yuc29fd0c2018-06-04 23:20:36 +08003283 atomic_dec(&sbi->wb_sync_req[DATA]);
Jaegeuk Kim28ea6162016-05-25 17:17:56 -07003284 /*
3285	 * if some pages were truncated, we cannot guarantee that
3286	 * mapping->host will detect the pending bios.
3287 */
Jaegeuk Kim458e6192013-12-11 13:54:01 +09003288
Chao Yu4d57b862018-05-30 00:20:41 +08003289 f2fs_remove_dirty_inode(inode);
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09003290 return ret;
Jaegeuk Kimd3baf952014-03-18 13:43:05 +09003291
3292skip_write:
Jaegeuk Kima7ffdbe2014-09-12 15:53:45 -07003293 wbc->pages_skipped += get_dirty_pages(inode);
Yunlei Hed31c7c32016-02-04 16:14:00 +08003294 trace_f2fs_writepages(mapping->host, wbc, DATA);
Jaegeuk Kimd3baf952014-03-18 13:43:05 +09003295 return 0;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09003296}
3297
Chao Yub0af6d42017-08-02 23:21:48 +08003298static int f2fs_write_data_pages(struct address_space *mapping,
3299 struct writeback_control *wbc)
3300{
3301 struct inode *inode = mapping->host;
3302
3303 return __f2fs_write_data_pages(mapping, wbc,
3304 F2FS_I(inode)->cp_task == current ?
3305 FS_CP_DATA_IO : FS_DATA_IO);
3306}
3307
Eric Biggers3e679dc2021-07-16 09:39:11 -05003308static void f2fs_write_failed(struct inode *inode, loff_t to)
Chao Yu3aab8f82014-07-02 13:25:04 +08003309{
Jaegeuk Kim819d9152015-12-28 13:48:11 -08003310 loff_t i_size = i_size_read(inode);
Chao Yu3aab8f82014-07-02 13:25:04 +08003311
Jaegeuk Kim3f188c22019-12-03 18:54:29 -08003312 if (IS_NOQUOTA(inode))
3313 return;
3314
Eric Biggers95ae2512019-07-22 09:26:24 -07003315 /* In the fs-verity case, f2fs_end_enable_verity() does the truncate */
3316 if (to > i_size && !f2fs_verity_in_progress(inode)) {
Chao Yua33c1502018-08-05 23:04:25 +08003317 down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
Linus Torvalds6abaa832021-09-04 10:48:47 -07003318 filemap_invalidate_lock(inode->i_mapping);
Chao Yua33c1502018-08-05 23:04:25 +08003319
Jaegeuk Kim819d9152015-12-28 13:48:11 -08003320 truncate_pagecache(inode, i_size);
Jaegeuk Kim3f188c22019-12-03 18:54:29 -08003321 f2fs_truncate_blocks(inode, i_size, true);
Chao Yua33c1502018-08-05 23:04:25 +08003322
Linus Torvalds6abaa832021-09-04 10:48:47 -07003323 filemap_invalidate_unlock(inode->i_mapping);
Jaegeuk Kim6f8d4452018-07-25 12:11:56 +09003324 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
Chao Yu3aab8f82014-07-02 13:25:04 +08003325 }
3326}
3327
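/*
 * Illustrative sketch (not f2fs code): f2fs_write_failed() above rolls an
 * extending write that failed back to the old i_size: with the GC rwsem
 * and the invalidate lock held, it drops the pagecache past EOF and then
 * frees the just-allocated blocks. Shape only; the lock/truncate helpers
 * are hypothetical.
 */
#if 0	/* illustrative only */
static void write_failed(unsigned long long to, unsigned long long i_size)
{
	if (to <= i_size)
		return;				/* nothing was extended */
	lock_gc_and_mapping();			/* keep GC and faults out */
	truncate_cache_to(i_size);		/* drop pages past old EOF */
	truncate_blocks_to(i_size);		/* free preallocated blocks */
	unlock_gc_and_mapping();
}
#endif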
Jaegeuk Kim2aadac02015-12-23 11:55:18 -08003328static int prepare_write_begin(struct f2fs_sb_info *sbi,
3329 struct page *page, loff_t pos, unsigned len,
3330 block_t *blk_addr, bool *node_changed)
3331{
3332 struct inode *inode = page->mapping->host;
3333 pgoff_t index = page->index;
3334 struct dnode_of_data dn;
3335 struct page *ipage;
Jaegeuk Kimb4d07a32015-12-23 13:48:58 -08003336 bool locked = false;
Chao Yu94afd6d2021-08-04 10:23:48 +08003337 struct extent_info ei = {0, };
Jaegeuk Kim2aadac02015-12-23 11:55:18 -08003338 int err = 0;
Sheng Yong2866fb12018-11-14 19:34:28 +08003339 int flag;
Jaegeuk Kim2aadac02015-12-23 11:55:18 -08003340
Jaegeuk Kim24b84912016-02-03 13:49:44 -08003341 /*
3342 * we already allocated all the blocks, so we don't need to get
3343 * the block addresses when there is no need to fill the page.
3344 */
Jaegeuk Kimdc91de72017-01-13 13:12:29 -08003345 if (!f2fs_has_inline_data(inode) && len == PAGE_SIZE &&
Eric Biggers95ae2512019-07-22 09:26:24 -07003346 !is_inode_flag_set(inode, FI_NO_PREALLOC) &&
3347 !f2fs_verity_in_progress(inode))
Jaegeuk Kim24b84912016-02-03 13:49:44 -08003348 return 0;
3349
Sheng Yong2866fb12018-11-14 19:34:28 +08003350	/* f2fs_lock_op avoids a race between checkpoint writes and convert_inline_page */
3351 if (f2fs_has_inline_data(inode) && pos + len > MAX_INLINE_DATA(inode))
3352 flag = F2FS_GET_BLOCK_DEFAULT;
3353 else
3354 flag = F2FS_GET_BLOCK_PRE_AIO;
3355
Jaegeuk Kimb4d07a32015-12-23 13:48:58 -08003356 if (f2fs_has_inline_data(inode) ||
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003357 (pos & PAGE_MASK) >= i_size_read(inode)) {
Chao Yu0ef81832020-06-18 14:36:22 +08003358 f2fs_do_map_lock(sbi, flag, true);
Jaegeuk Kimb4d07a32015-12-23 13:48:58 -08003359 locked = true;
3360 }
Chao Yu4c8ff702019-11-01 18:07:14 +08003361
Jaegeuk Kimb4d07a32015-12-23 13:48:58 -08003362restart:
Jaegeuk Kim2aadac02015-12-23 11:55:18 -08003363 /* check inline_data */
Chao Yu4d57b862018-05-30 00:20:41 +08003364 ipage = f2fs_get_node_page(sbi, inode->i_ino);
Jaegeuk Kim2aadac02015-12-23 11:55:18 -08003365 if (IS_ERR(ipage)) {
3366 err = PTR_ERR(ipage);
3367 goto unlock_out;
3368 }
3369
3370 set_new_dnode(&dn, inode, ipage, ipage, 0);
3371
3372 if (f2fs_has_inline_data(inode)) {
Chao Yuf2470372017-07-19 00:19:05 +08003373 if (pos + len <= MAX_INLINE_DATA(inode)) {
Chao Yu4d57b862018-05-30 00:20:41 +08003374 f2fs_do_read_inline_data(page, ipage);
Jaegeuk Kim91942322016-05-20 10:13:22 -07003375 set_inode_flag(inode, FI_DATA_EXIST);
Chao Yuab470362016-05-11 19:48:44 +08003376 if (inode->i_nlink)
Chao Yub763f3b2021-04-28 17:20:31 +08003377 set_page_private_inline(ipage);
Jaegeuk Kim2aadac02015-12-23 11:55:18 -08003378 } else {
3379 err = f2fs_convert_inline_page(&dn, page);
3380 if (err)
Jaegeuk Kimb4d07a32015-12-23 13:48:58 -08003381 goto out;
3382 if (dn.data_blkaddr == NULL_ADDR)
3383 err = f2fs_get_block(&dn, index);
3384 }
3385 } else if (locked) {
3386 err = f2fs_get_block(&dn, index);
3387 } else {
3388 if (f2fs_lookup_extent_cache(inode, index, &ei)) {
3389 dn.data_blkaddr = ei.blk + index - ei.fofs;
3390 } else {
Jaegeuk Kimb4d07a32015-12-23 13:48:58 -08003391 /* hole case */
Chao Yu4d57b862018-05-30 00:20:41 +08003392 err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
Jaegeuk Kim4da7bf52016-04-06 11:27:03 -07003393 if (err || dn.data_blkaddr == NULL_ADDR) {
Jaegeuk Kimb4d07a32015-12-23 13:48:58 -08003394 f2fs_put_dnode(&dn);
Chao Yu0ef81832020-06-18 14:36:22 +08003395 f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO,
Yunlei He59c90812017-03-13 20:22:18 +08003396 true);
Sheng Yong2866fb12018-11-14 19:34:28 +08003397 WARN_ON(flag != F2FS_GET_BLOCK_PRE_AIO);
Jaegeuk Kimb4d07a32015-12-23 13:48:58 -08003398 locked = true;
3399 goto restart;
3400 }
Jaegeuk Kim2aadac02015-12-23 11:55:18 -08003401 }
3402 }
Jaegeuk Kimb4d07a32015-12-23 13:48:58 -08003403
Jaegeuk Kim2aadac02015-12-23 11:55:18 -08003404 /* convert_inline_page can make node_changed */
3405 *blk_addr = dn.data_blkaddr;
3406 *node_changed = dn.node_changed;
Jaegeuk Kimb4d07a32015-12-23 13:48:58 -08003407out:
Jaegeuk Kim2aadac02015-12-23 11:55:18 -08003408 f2fs_put_dnode(&dn);
3409unlock_out:
Jaegeuk Kimb4d07a32015-12-23 13:48:58 -08003410 if (locked)
Chao Yu0ef81832020-06-18 14:36:22 +08003411 f2fs_do_map_lock(sbi, flag, false);
Jaegeuk Kim2aadac02015-12-23 11:55:18 -08003412 return err;
3413}
3414
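/*
 * Illustrative sketch (not f2fs code): prepare_write_begin() above first
 * tries a lockless extent-cache lookup and only on a miss (hole) takes
 * the heavier map lock and restarts, so the answer cannot race with a
 * concurrent allocation. The generic optimistic-then-locked shape, with
 * hypothetical helpers:
 */
#if 0	/* illustrative only */
static unsigned long lookup_block(unsigned long index)
{
	unsigned long blk;
	int locked = 0;

restart:
	if (fast_lookup(index, &blk))		/* lockless cache hit */
		goto out;
	if (!locked) {
		take_map_lock();		/* serialize vs. allocation */
		locked = 1;
		goto restart;			/* redo the lookup safely */
	}
	blk = slow_lookup_or_alloc(index);	/* authoritative answer */
out:
	if (locked)
		drop_map_lock();
	return blk;
}
#endif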
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09003415static int f2fs_write_begin(struct file *file, struct address_space *mapping,
3416 loff_t pos, unsigned len, unsigned flags,
3417 struct page **pagep, void **fsdata)
3418{
3419 struct inode *inode = mapping->host;
Jaegeuk Kim40813632014-09-02 15:31:18 -07003420 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
Jaegeuk Kim86531d62015-07-15 13:08:21 -07003421 struct page *page = NULL;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003422 pgoff_t index = ((unsigned long long) pos) >> PAGE_SHIFT;
Chao Yua2e2e762018-01-15 17:16:46 +08003423 bool need_balance = false, drop_atomic = false;
Jaegeuk Kim2aadac02015-12-23 11:55:18 -08003424 block_t blkaddr = NULL_ADDR;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09003425 int err = 0;
3426
Chao Yu62aed042014-05-06 16:46:04 +08003427 trace_f2fs_write_begin(inode, pos, len, flags);
3428
Chao Yu00e09c02019-08-23 17:58:36 +08003429 if (!f2fs_is_checkpoint_ready(sbi)) {
3430 err = -ENOSPC;
Daniel Rosenberg43549942018-08-20 19:21:43 -07003431 goto fail;
Chao Yu00e09c02019-08-23 17:58:36 +08003432 }
Daniel Rosenberg43549942018-08-20 19:21:43 -07003433
Jaegeuk Kim455e3a52018-07-27 18:15:11 +09003434 if ((f2fs_is_atomic_file(inode) &&
3435 !f2fs_available_free_memory(sbi, INMEM_PAGES)) ||
3436 is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST)) {
Jaegeuk Kim57864ae2017-10-18 19:05:57 -07003437 err = -ENOMEM;
Chao Yua2e2e762018-01-15 17:16:46 +08003438 drop_atomic = true;
Jaegeuk Kim57864ae2017-10-18 19:05:57 -07003439 goto fail;
3440 }
3441
Jaegeuk Kim5f727392014-11-25 10:59:45 -08003442 /*
3443 * We should check this at this moment to avoid deadlock on inode page
3444 * and #0 page. The locking rule for inline_data conversion should be:
3445 * lock_page(page #0) -> lock_page(inode_page)
3446 */
3447 if (index != 0) {
3448 err = f2fs_convert_inline_inode(inode);
3449 if (err)
3450 goto fail;
3451 }
Chao Yu4c8ff702019-11-01 18:07:14 +08003452
3453#ifdef CONFIG_F2FS_FS_COMPRESSION
3454 if (f2fs_compressed_file(inode)) {
3455 int ret;
3456
3457 *fsdata = NULL;
3458
Fengnan Chang7eab7a62021-06-22 19:50:59 +08003459 if (len == PAGE_SIZE)
3460 goto repeat;
3461
Chao Yu4c8ff702019-11-01 18:07:14 +08003462 ret = f2fs_prepare_compress_overwrite(inode, pagep,
3463 index, fsdata);
3464 if (ret < 0) {
3465 err = ret;
3466 goto fail;
3467 } else if (ret) {
3468 return 0;
3469 }
3470 }
3471#endif
3472
Jaegeuk Kimafcb7ca02013-04-26 11:55:17 +09003473repeat:
Jaegeuk Kim86d54792017-02-17 09:55:55 -08003474 /*
3475 * Do not use grab_cache_page_write_begin() to avoid deadlock due to
3476	 * wait_for_stable_page. We will wait for that below with our IO control.
3477 */
Chao Yu01eccef2017-10-28 16:52:30 +08003478 page = f2fs_pagecache_get_page(mapping, index,
Jaegeuk Kim86d54792017-02-17 09:55:55 -08003479 FGP_LOCK | FGP_WRITE | FGP_CREAT, GFP_NOFS);
Chao Yu3aab8f82014-07-02 13:25:04 +08003480 if (!page) {
3481 err = -ENOMEM;
3482 goto fail;
3483 }
Jaegeuk Kimd5f66992014-04-30 09:22:45 +09003484
Chao Yu4c8ff702019-11-01 18:07:14 +08003485 /* TODO: cluster can be compressed due to race with .writepage */
3486
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09003487 *pagep = page;
3488
Jaegeuk Kim2aadac02015-12-23 11:55:18 -08003489 err = prepare_write_begin(sbi, page, pos, len,
3490 &blkaddr, &need_balance);
Jaegeuk Kim9ba69cf2014-10-17 20:33:55 -07003491 if (err)
Jaegeuk Kim2aadac02015-12-23 11:55:18 -08003492 goto fail;
Jaegeuk Kim9ba69cf2014-10-17 20:33:55 -07003493
Chao Yuaf033b22018-09-20 20:05:00 +08003494 if (need_balance && !IS_NOQUOTA(inode) &&
3495 has_not_enough_free_secs(sbi, 0, 0)) {
Jaegeuk Kim2a340762015-12-22 13:23:35 -08003496 unlock_page(page);
Jaegeuk Kim2c4db1a2016-01-07 14:15:04 -08003497 f2fs_balance_fs(sbi, true);
Jaegeuk Kim2a340762015-12-22 13:23:35 -08003498 lock_page(page);
3499 if (page->mapping != mapping) {
3500 /* The page got truncated from under us */
3501 f2fs_put_page(page, 1);
3502 goto repeat;
3503 }
3504 }
3505
Chao Yubae0ee72018-12-25 17:43:42 +08003506 f2fs_wait_on_page_writeback(page, DATA, false, true);
Jaegeuk Kimb3d208f2014-10-23 19:48:09 -07003507
Jaegeuk Kim649d7df2016-09-06 11:02:03 -07003508 if (len == PAGE_SIZE || PageUptodate(page))
3509 return 0;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09003510
Eric Biggers95ae2512019-07-22 09:26:24 -07003511 if (!(pos & (PAGE_SIZE - 1)) && (pos + len) >= i_size_read(inode) &&
3512 !f2fs_verity_in_progress(inode)) {
Yunlei He746e2402016-12-20 11:11:35 +08003513 zero_user_segment(page, len, PAGE_SIZE);
3514 return 0;
3515 }
3516
Jaegeuk Kim2aadac02015-12-23 11:55:18 -08003517 if (blkaddr == NEW_ADDR) {
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003518 zero_user_segment(page, 0, PAGE_SIZE);
Jaegeuk Kim649d7df2016-09-06 11:02:03 -07003519 SetPageUptodate(page);
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09003520 } else {
Chao Yu93770ab2019-04-15 15:26:32 +08003521 if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
3522 DATA_GENERIC_ENHANCE_READ)) {
Chao Yu10f966b2019-06-20 11:36:14 +08003523 err = -EFSCORRUPTED;
Chao Yu93770ab2019-04-15 15:26:32 +08003524 goto fail;
3525 }
Jia Yangb7973092020-07-01 10:27:40 +08003526 err = f2fs_submit_page_read(inode, page, blkaddr, 0, true);
Jaegeuk Kim13ba41e2017-09-06 21:04:44 -07003527 if (err)
Chao Yu3aab8f82014-07-02 13:25:04 +08003528 goto fail;
Chao Yud54c7952014-03-29 15:30:40 +08003529
Jaegeuk Kim393ff912013-03-08 21:29:23 +09003530 lock_page(page);
Jaegeuk Kim6bacf522013-12-06 15:00:58 +09003531 if (unlikely(page->mapping != mapping)) {
Jaegeuk Kimafcb7ca02013-04-26 11:55:17 +09003532 f2fs_put_page(page, 1);
3533 goto repeat;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09003534 }
Chao Yu1563ac72016-07-03 22:05:12 +08003535 if (unlikely(!PageUptodate(page))) {
3536 err = -EIO;
3537 goto fail;
Jaegeuk Kim4375a332015-04-23 12:04:33 -07003538 }
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09003539 }
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09003540 return 0;
Jaegeuk Kim9ba69cf2014-10-17 20:33:55 -07003541
Chao Yu3aab8f82014-07-02 13:25:04 +08003542fail:
Jaegeuk Kim86531d62015-07-15 13:08:21 -07003543 f2fs_put_page(page, 1);
Eric Biggers3e679dc2021-07-16 09:39:11 -05003544 f2fs_write_failed(inode, pos + len);
Chao Yua2e2e762018-01-15 17:16:46 +08003545 if (drop_atomic)
Chao Yu4d57b862018-05-30 00:20:41 +08003546 f2fs_drop_inmem_pages_all(sbi, false);
Chao Yu3aab8f82014-07-02 13:25:04 +08003547 return err;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09003548}
3549
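/*
 * Illustrative sketch (not f2fs code): the comment in f2fs_write_begin()
 * above states the inline-data lock ordering rule, page #0 before the
 * inode page. Deadlock is avoided because every path takes the two locks
 * in the same fixed order, as in this user-space model:
 */
#if 0	/* illustrative only; compile standalone with -lpthread */
#include <pthread.h>

static pthread_mutex_t page0_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t inode_page_lock = PTHREAD_MUTEX_INITIALIZER;

static void convert_inline(void)
{
	pthread_mutex_lock(&page0_lock);	/* always first */
	pthread_mutex_lock(&inode_page_lock);	/* always second */
	/* ... move inline data out to page #0 ... */
	pthread_mutex_unlock(&inode_page_lock);
	pthread_mutex_unlock(&page0_lock);
}
#endif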
Jaegeuk Kima1dd3c12013-06-27 13:04:08 +09003550static int f2fs_write_end(struct file *file,
3551 struct address_space *mapping,
3552 loff_t pos, unsigned len, unsigned copied,
3553 struct page *page, void *fsdata)
3554{
3555 struct inode *inode = page->mapping->host;
3556
Chao Yudfb2bf32014-05-06 16:47:23 +08003557 trace_f2fs_write_end(inode, pos, len, copied);
3558
Jaegeuk Kim649d7df2016-09-06 11:02:03 -07003559 /*
3560	 * This should come from len == PAGE_SIZE, and we expect copied
3561	 * to be PAGE_SIZE. Otherwise, we treat it as zero copied and
3562	 * let generic_perform_write() try to copy the data again via copied=0.
3563 */
3564 if (!PageUptodate(page)) {
Yunlei He746e2402016-12-20 11:11:35 +08003565 if (unlikely(copied != len))
Jaegeuk Kim649d7df2016-09-06 11:02:03 -07003566 copied = 0;
3567 else
3568 SetPageUptodate(page);
3569 }
Chao Yu4c8ff702019-11-01 18:07:14 +08003570
3571#ifdef CONFIG_F2FS_FS_COMPRESSION
3572 /* overwrite compressed file */
3573 if (f2fs_compressed_file(inode) && fsdata) {
3574 f2fs_compress_write_end(inode, fsdata, page->index, copied);
3575 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
Chao Yu944dd22e2020-07-24 18:21:36 +08003576
3577 if (pos + copied > i_size_read(inode) &&
3578 !f2fs_verity_in_progress(inode))
3579 f2fs_i_size_write(inode, pos + copied);
Chao Yu4c8ff702019-11-01 18:07:14 +08003580 return copied;
3581 }
3582#endif
3583
Jaegeuk Kim649d7df2016-09-06 11:02:03 -07003584 if (!copied)
3585 goto unlock_out;
3586
Jaegeuk Kim34ba94b2014-10-09 13:19:53 -07003587 set_page_dirty(page);
Jaegeuk Kima1dd3c12013-06-27 13:04:08 +09003588
Eric Biggers95ae2512019-07-22 09:26:24 -07003589 if (pos + copied > i_size_read(inode) &&
3590 !f2fs_verity_in_progress(inode))
Jaegeuk Kimfc9581c2016-05-20 09:22:03 -07003591 f2fs_i_size_write(inode, pos + copied);
Jaegeuk Kim649d7df2016-09-06 11:02:03 -07003592unlock_out:
Chao Yu3024c9a2016-08-06 21:09:41 +08003593 f2fs_put_page(page, 1);
Jaegeuk Kimd0239e12016-01-08 16:57:48 -08003594 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
Jaegeuk Kima1dd3c12013-06-27 13:04:08 +09003595 return copied;
3596}
3597
Omar Sandoval6f673762015-03-16 04:33:52 -07003598static int check_direct_IO(struct inode *inode, struct iov_iter *iter,
3599 loff_t offset)
Jaegeuk Kim944fcfc2013-12-26 20:15:09 +09003600{
Jaegeuk Kim8a56dd92018-06-29 18:55:12 -07003601 unsigned i_blkbits = READ_ONCE(inode->i_blkbits);
3602 unsigned blkbits = i_blkbits;
3603 unsigned blocksize_mask = (1 << blkbits) - 1;
3604 unsigned long align = offset | iov_iter_alignment(iter);
3605 struct block_device *bdev = inode->i_sb->s_bdev;
Jaegeuk Kim944fcfc2013-12-26 20:15:09 +09003606
Gabriel Krisman Bertazi20d0a102020-08-19 16:07:31 -04003607 if (iov_iter_rw(iter) == READ && offset >= i_size_read(inode))
3608 return 1;
3609
Jaegeuk Kim8a56dd92018-06-29 18:55:12 -07003610 if (align & blocksize_mask) {
3611 if (bdev)
3612 blkbits = blksize_bits(bdev_logical_block_size(bdev));
3613 blocksize_mask = (1 << blkbits) - 1;
3614 if (align & blocksize_mask)
3615 return -EINVAL;
3616 return 1;
3617 }
Jaegeuk Kim944fcfc2013-12-26 20:15:09 +09003618 return 0;
3619}
3620
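/*
 * Worked example for check_direct_IO() above, assuming a 4KiB filesystem
 * block (blocksize_mask 0xfff), a 512-byte logical block device
 * (mask 0x1ff), and that iov_iter_alignment() contributes no extra bits:
 *
 *   offset 0x3000: 0x3000 & 0xfff == 0      -> 0, direct I/O proceeds
 *   offset 0x3200: 0x3200 & 0xfff == 0x200  -> retry with device mask;
 *                  0x3200 & 0x1ff == 0      -> 1, fall back to buffered
 *   offset 0x3201: 0x3201 & 0x1ff == 0x001  -> -EINVAL
 *
 * A READ entirely past i_size also returns 1 (the buffered path handles it).
 */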
Chao Yu02b16d02018-11-12 00:46:46 +08003621static void f2fs_dio_end_io(struct bio *bio)
3622{
3623 struct f2fs_private_dio *dio = bio->bi_private;
3624
3625 dec_page_count(F2FS_I_SB(dio->inode),
3626 dio->write ? F2FS_DIO_WRITE : F2FS_DIO_READ);
3627
3628 bio->bi_private = dio->orig_private;
3629 bio->bi_end_io = dio->orig_end_io;
3630
Chao Yuc8eb7022020-09-14 16:47:00 +08003631 kfree(dio);
Chao Yu02b16d02018-11-12 00:46:46 +08003632
3633 bio_endio(bio);
3634}
3635
3636static void f2fs_dio_submit_bio(struct bio *bio, struct inode *inode,
3637 loff_t file_offset)
3638{
3639 struct f2fs_private_dio *dio;
3640 bool write = (bio_op(bio) == REQ_OP_WRITE);
Chao Yu02b16d02018-11-12 00:46:46 +08003641
3642 dio = f2fs_kzalloc(F2FS_I_SB(inode),
3643 sizeof(struct f2fs_private_dio), GFP_NOFS);
YueHaibing8e114032019-01-04 01:38:29 +00003644 if (!dio)
Chao Yu02b16d02018-11-12 00:46:46 +08003645 goto out;
Chao Yu02b16d02018-11-12 00:46:46 +08003646
3647 dio->inode = inode;
3648 dio->orig_end_io = bio->bi_end_io;
3649 dio->orig_private = bio->bi_private;
3650 dio->write = write;
3651
3652 bio->bi_end_io = f2fs_dio_end_io;
3653 bio->bi_private = dio;
3654
3655 inc_page_count(F2FS_I_SB(inode),
3656 write ? F2FS_DIO_WRITE : F2FS_DIO_READ);
3657
3658 submit_bio(bio);
3659 return;
3660out:
3661 bio->bi_status = BLK_STS_IOERR;
3662 bio_endio(bio);
3663}
3664
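/*
 * Illustrative sketch (not f2fs code): f2fs_dio_submit_bio() above uses
 * the completion-hook pattern, saving the bio's original end_io/private,
 * substituting its own, and restoring both before chaining on completion.
 * A standalone model with a fake bio type:
 */
#if 0	/* illustrative only */
#include <stdlib.h>

struct fake_bio {
	void (*end_io)(struct fake_bio *bio);
	void *private;
};

struct hook {
	void (*orig_end_io)(struct fake_bio *bio);
	void *orig_private;
	/* ... per-I/O accounting state lives here ... */
};

static void hook_end_io(struct fake_bio *bio)
{
	struct hook *h = bio->private;

	/* account for the completion, then restore and chain */
	bio->private = h->orig_private;
	bio->end_io = h->orig_end_io;
	free(h);
	bio->end_io(bio);
}
#endif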
Christoph Hellwigc8b8e322016-04-07 08:51:58 -07003665static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09003666{
Jaegeuk Kimb439b102016-02-03 13:09:09 -08003667 struct address_space *mapping = iocb->ki_filp->f_mapping;
Chao Yu3aab8f82014-07-02 13:25:04 +08003668 struct inode *inode = mapping->host;
Hyunchul Lee0cdd3192018-01-31 11:36:57 +09003669 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
Chao Yuf847c692018-09-27 18:34:52 +08003670 struct f2fs_inode_info *fi = F2FS_I(inode);
Chao Yu3aab8f82014-07-02 13:25:04 +08003671 size_t count = iov_iter_count(iter);
Christoph Hellwigc8b8e322016-04-07 08:51:58 -07003672 loff_t offset = iocb->ki_pos;
Chao Yu82e0a5a2016-07-13 09:18:29 +08003673 int rw = iov_iter_rw(iter);
Chao Yu3aab8f82014-07-02 13:25:04 +08003674 int err;
Hyunchul Lee0cdd3192018-01-31 11:36:57 +09003675 enum rw_hint hint = iocb->ki_hint;
Chao Yu63189b72018-03-08 14:22:56 +08003676 int whint_mode = F2FS_OPTION(sbi).whint_mode;
Chao Yuf847c692018-09-27 18:34:52 +08003677 bool do_opu;
Jaegeuk Kim944fcfc2013-12-26 20:15:09 +09003678
Jaegeuk Kimb439b102016-02-03 13:09:09 -08003679 err = check_direct_IO(inode, iter, offset);
Jaegeuk Kimb9d777b2015-12-22 11:09:35 -08003680 if (err)
Jaegeuk Kim8a56dd92018-06-29 18:55:12 -07003681 return err < 0 ? err : 0;
Huajun Li9ffe0fb2013-11-10 23:13:20 +08003682
Chao Yuf847c692018-09-27 18:34:52 +08003683 if (f2fs_force_buffered_io(inode, iocb, iter))
Jaegeuk Kim36abef42016-06-03 19:29:38 -07003684 return 0;
Jaegeuk Kimfcc85a42015-04-21 20:39:58 -07003685
Eric Biggers6de86872021-07-16 09:39:12 -05003686 do_opu = rw == WRITE && f2fs_lfs_mode(sbi);
Chao Yuf847c692018-09-27 18:34:52 +08003687
Jaegeuk Kim5302fb02016-07-22 15:25:47 -07003688 trace_f2fs_direct_IO_enter(inode, offset, count, rw);
Chao Yu70407fa2014-07-31 21:11:22 +08003689
Hyunchul Lee0cdd3192018-01-31 11:36:57 +09003690 if (rw == WRITE && whint_mode == WHINT_MODE_OFF)
3691 iocb->ki_hint = WRITE_LIFE_NOT_SET;
3692
Chao Yuf847c692018-09-27 18:34:52 +08003693 if (iocb->ki_flags & IOCB_NOWAIT) {
3694 if (!down_read_trylock(&fi->i_gc_rwsem[rw])) {
Hyunchul Leeb91050a2018-03-08 19:34:38 +09003695 iocb->ki_hint = hint;
3696 err = -EAGAIN;
3697 goto out;
3698 }
Chao Yuf847c692018-09-27 18:34:52 +08003699 if (do_opu && !down_read_trylock(&fi->i_gc_rwsem[READ])) {
3700 up_read(&fi->i_gc_rwsem[rw]);
3701 iocb->ki_hint = hint;
3702 err = -EAGAIN;
3703 goto out;
3704 }
3705 } else {
3706 down_read(&fi->i_gc_rwsem[rw]);
3707 if (do_opu)
3708 down_read(&fi->i_gc_rwsem[READ]);
Hyunchul Leeb91050a2018-03-08 19:34:38 +09003709 }
3710
Chao Yu02b16d02018-11-12 00:46:46 +08003711 err = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev,
Chao Yuf9d6d052018-11-13 14:33:45 +08003712 iter, rw == WRITE ? get_data_block_dio_write :
3713 get_data_block_dio, NULL, f2fs_dio_submit_bio,
DongDongJuad8d6a02020-03-20 15:01:32 +09003714 rw == WRITE ? DIO_LOCKING | DIO_SKIP_HOLES :
3715 DIO_SKIP_HOLES);
Chao Yuf847c692018-09-27 18:34:52 +08003716
3717 if (do_opu)
3718 up_read(&fi->i_gc_rwsem[READ]);
3719
3720 up_read(&fi->i_gc_rwsem[rw]);
Chao Yu82e0a5a2016-07-13 09:18:29 +08003721
3722 if (rw == WRITE) {
Hyunchul Lee0cdd3192018-01-31 11:36:57 +09003723 if (whint_mode == WHINT_MODE_OFF)
3724 iocb->ki_hint = hint;
Chao Yub0af6d42017-08-02 23:21:48 +08003725 if (err > 0) {
3726 f2fs_update_iostat(F2FS_I_SB(inode), APP_DIRECT_IO,
3727 err);
Chao Yuf847c692018-09-27 18:34:52 +08003728 if (!do_opu)
3729 set_inode_flag(inode, FI_UPDATE_WRITE);
Jack Qiu335cac82020-08-31 09:58:02 +08003730 } else if (err == -EIOCBQUEUED) {
3731 f2fs_update_iostat(F2FS_I_SB(inode), APP_DIRECT_IO,
3732 count - iov_iter_count(iter));
Chao Yub0af6d42017-08-02 23:21:48 +08003733 } else if (err < 0) {
Eric Biggers3e679dc2021-07-16 09:39:11 -05003734 f2fs_write_failed(inode, offset + count);
Chao Yub0af6d42017-08-02 23:21:48 +08003735 }
Chao Yu8b83ac82020-04-16 18:16:56 +08003736 } else {
3737 if (err > 0)
3738 f2fs_update_iostat(sbi, APP_DIRECT_READ_IO, err);
Jack Qiu335cac82020-08-31 09:58:02 +08003739 else if (err == -EIOCBQUEUED)
3740 f2fs_update_iostat(F2FS_I_SB(inode), APP_DIRECT_READ_IO,
3741 count - iov_iter_count(iter));
Jaegeuk Kim6bfc4912016-04-18 17:07:44 -04003742 }
Chao Yu70407fa2014-07-31 21:11:22 +08003743
Hyunchul Leeb91050a2018-03-08 19:34:38 +09003744out:
Jaegeuk Kim5302fb02016-07-22 15:25:47 -07003745 trace_f2fs_direct_IO_exit(inode, offset, count, rw, err);
Chao Yu70407fa2014-07-31 21:11:22 +08003746
Chao Yu3aab8f82014-07-02 13:25:04 +08003747 return err;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09003748}
3749
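/*
 * Illustrative sketch (not f2fs code): the IOCB_NOWAIT handling above
 * follows the usual convention that a non-blocking caller gets -EAGAIN
 * instead of sleeping on i_gc_rwsem. User-space analogue with a pthread
 * rwlock standing in for the GC semaphore:
 */
#if 0	/* illustrative only; compile standalone with -lpthread */
#include <errno.h>
#include <pthread.h>

static int begin_dio(pthread_rwlock_t *gc_lock, int nowait)
{
	if (nowait) {
		if (pthread_rwlock_tryrdlock(gc_lock))
			return -EAGAIN;	/* caller retries without NOWAIT */
	} else {
		pthread_rwlock_rdlock(gc_lock);	/* may sleep */
	}
	return 0;	/* caller unlocks once the I/O is submitted */
}
#endif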
Chao Yu487261f2015-02-05 17:44:29 +08003750void f2fs_invalidate_page(struct page *page, unsigned int offset,
3751 unsigned int length)
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09003752{
3753 struct inode *inode = page->mapping->host;
Chao Yu487261f2015-02-05 17:44:29 +08003754 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
Jaegeuk Kima7ffdbe2014-09-12 15:53:45 -07003755
Chao Yu487261f2015-02-05 17:44:29 +08003756 if (inode->i_ino >= F2FS_ROOT_INO(sbi) &&
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003757 (offset % PAGE_SIZE || length != PAGE_SIZE))
Jaegeuk Kima7ffdbe2014-09-12 15:53:45 -07003758 return;
3759
Chao Yu487261f2015-02-05 17:44:29 +08003760 if (PageDirty(page)) {
Chao Yu933439c2016-10-11 22:57:01 +08003761 if (inode->i_ino == F2FS_META_INO(sbi)) {
Chao Yu487261f2015-02-05 17:44:29 +08003762 dec_page_count(sbi, F2FS_DIRTY_META);
Chao Yu933439c2016-10-11 22:57:01 +08003763 } else if (inode->i_ino == F2FS_NODE_INO(sbi)) {
Chao Yu487261f2015-02-05 17:44:29 +08003764 dec_page_count(sbi, F2FS_DIRTY_NODES);
Chao Yu933439c2016-10-11 22:57:01 +08003765 } else {
Chao Yu487261f2015-02-05 17:44:29 +08003766 inode_dec_dirty_pages(inode);
Chao Yu4d57b862018-05-30 00:20:41 +08003767 f2fs_remove_dirty_inode(inode);
Chao Yu933439c2016-10-11 22:57:01 +08003768 }
Chao Yu487261f2015-02-05 17:44:29 +08003769 }
Chao Yudecd36b2015-08-07 18:42:09 +08003770
Chao Yub763f3b2021-04-28 17:20:31 +08003771 clear_page_private_gcing(page);
Chao Yu2baf0782018-07-27 18:15:16 +08003772
Chao Yu6ce19af2021-05-20 19:51:50 +08003773 if (test_opt(sbi, COMPRESS_CACHE)) {
3774 if (f2fs_compressed_file(inode))
3775 f2fs_invalidate_compress_pages(sbi, inode->i_ino);
3776 if (inode->i_ino == F2FS_COMPRESS_INO(sbi))
3777 clear_page_private_data(page);
3778 }
3779
Chao Yub763f3b2021-04-28 17:20:31 +08003780 if (page_private_atomic(page))
Chao Yu4d57b862018-05-30 00:20:41 +08003781 return f2fs_drop_inmem_page(inode, page);
Chao Yudecd36b2015-08-07 18:42:09 +08003782
Chao Yub763f3b2021-04-28 17:20:31 +08003783 detach_page_private(page);
3784 set_page_private(page, 0);
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09003785}
3786
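/*
 * ->releasepage: report whether the page's private state can be freed.
 * Dirty and atomic-written pages must keep it; everything else gets
 * detached here.
 */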
Chao Yu487261f2015-02-05 17:44:29 +08003787int f2fs_release_page(struct page *page, gfp_t wait)
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09003788{
Jaegeuk Kimf68daee2015-01-30 11:39:08 -08003789	/* If this is a dirty page, keep PagePrivate */
3790 if (PageDirty(page))
3791 return 0;
3792
Chao Yudecd36b2015-08-07 18:42:09 +08003793	/* This is an atomic-written page, keep it Private */
Chao Yub763f3b2021-04-28 17:20:31 +08003794 if (page_private_atomic(page))
Chao Yudecd36b2015-08-07 18:42:09 +08003795 return 0;
3796
Chao Yu6ce19af2021-05-20 19:51:50 +08003797 if (test_opt(F2FS_P_SB(page), COMPRESS_CACHE)) {
3798 struct f2fs_sb_info *sbi = F2FS_P_SB(page);
3799 struct inode *inode = page->mapping->host;
3800
3801 if (f2fs_compressed_file(inode))
3802 f2fs_invalidate_compress_pages(sbi, inode->i_ino);
3803 if (inode->i_ino == F2FS_COMPRESS_INO(sbi))
3804 clear_page_private_data(page);
3805 }
3806
Chao Yub763f3b2021-04-28 17:20:31 +08003807 clear_page_private_gcing(page);
3808
3809 detach_page_private(page);
3810 set_page_private(page, 0);
Jaegeuk Kimc3850aa2013-03-14 09:24:32 +09003811 return 1;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09003812}
3813
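/*
 * ->set_page_dirty: atomic-write pages are tracked on the inmem list
 * instead of the normal dirty accounting; everything else goes through
 * __set_page_dirty_nobuffers().
 */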
3814static int f2fs_set_data_page_dirty(struct page *page)
3815{
Jaegeuk Kim4969c062019-07-01 19:15:29 -07003816 struct inode *inode = page_file_mapping(page)->host;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09003817
Jaegeuk Kim26c6b882013-10-24 17:53:29 +09003818 trace_f2fs_set_page_dirty(page, DATA);
3819
Jaegeuk Kim237c0792016-06-30 18:49:15 -07003820 if (!PageUptodate(page))
3821 SetPageUptodate(page);
Jaegeuk Kim4969c062019-07-01 19:15:29 -07003822 if (PageSwapCache(page))
3823 return __set_page_dirty_nobuffers(page);
Jaegeuk Kim34ba94b2014-10-09 13:19:53 -07003824
Chao Yu5fe45742017-01-07 18:50:26 +08003825 if (f2fs_is_atomic_file(inode) && !f2fs_is_commit_atomic_write(inode)) {
Chao Yub763f3b2021-04-28 17:20:31 +08003826 if (!page_private_atomic(page)) {
Chao Yu4d57b862018-05-30 00:20:41 +08003827 f2fs_register_inmem_page(inode, page);
Chao Yudecd36b2015-08-07 18:42:09 +08003828 return 1;
3829 }
3830		/*
3831		 * This page has already been registered;
3832		 * just return here.
3833		 */
3834 return 0;
Jaegeuk Kim34ba94b2014-10-09 13:19:53 -07003835 }
3836
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09003837 if (!PageDirty(page)) {
Jaegeuk Kimb87078a2018-04-20 19:29:52 -07003838 __set_page_dirty_nobuffers(page);
Chao Yu4d57b862018-05-30 00:20:41 +08003839 f2fs_update_dirty_page(inode, page);
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09003840 return 1;
3841 }
3842 return 0;
3843}
3844
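/*
 * Map one logical block inside a compression cluster to its raw block
 * address; returns 0 for holes and for clusters stored compressed
 * (COMPRESS_ADDR), where no 1:1 physical block exists.
 */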
3846static sector_t f2fs_bmap_compress(struct inode *inode, sector_t block)
3847{
3848#ifdef CONFIG_F2FS_FS_COMPRESSION
3849 struct dnode_of_data dn;
3850 sector_t start_idx, blknr = 0;
3851 int ret;
3852
3853 start_idx = round_down(block, F2FS_I(inode)->i_cluster_size);
3854
3855 set_new_dnode(&dn, inode, NULL, NULL, 0);
3856 ret = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
3857 if (ret)
3858 return 0;
3859
3860 if (dn.data_blkaddr != COMPRESS_ADDR) {
3861 dn.ofs_in_node += block - start_idx;
3862 blknr = f2fs_data_blkaddr(&dn);
3863 if (!__is_valid_data_blkaddr(blknr))
3864 blknr = 0;
3865 }
3866
3867 f2fs_put_dnode(&dn);
Chao Yuc1c63382020-03-30 17:13:29 +08003868 return blknr;
3869#else
Chao Yu250e84d2020-06-28 20:29:38 +08003870 return 0;
Chao Yuc1c63382020-03-30 17:13:29 +08003871#endif
3872}
3873
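/*
 * ->bmap: translate a logical block number into a physical one.
 * Dirty pages are flushed first so the returned mapping is stable.
 */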
Jaegeuk Kimc01e54b2013-01-17 20:30:23 +09003875static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
3876{
Chao Yu454ae7e2014-04-22 13:34:01 +08003877 struct inode *inode = mapping->host;
Chao Yub79b0a32020-06-29 20:13:12 +08003878 sector_t blknr = 0;
Chao Yu454ae7e2014-04-22 13:34:01 +08003879
Jaegeuk Kim1d373a02015-10-19 10:29:51 -07003880 if (f2fs_has_inline_data(inode))
Chao Yub79b0a32020-06-29 20:13:12 +08003881 goto out;
Jaegeuk Kim1d373a02015-10-19 10:29:51 -07003882
3883	/* write back dirty pages so that all blocks get allocated on disk */
3884 if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
3885 filemap_write_and_wait(mapping);
3886
Daeho Jeong4eda1682020-08-31 09:24:01 +09003887	/* the block number must be smaller than max_file_blocks() */
Chengguang Xu6d1451b2021-01-13 13:21:54 +08003888 if (unlikely(block >= max_file_blocks(inode)))
Daeho Jeong4eda1682020-08-31 09:24:01 +09003889 goto out;
Chao Yuc1c63382020-03-30 17:13:29 +08003890
Daeho Jeong4eda1682020-08-31 09:24:01 +09003891 if (f2fs_compressed_file(inode)) {
3892 blknr = f2fs_bmap_compress(inode, block);
3893 } else {
Jaegeuk Kimb876f4c92020-11-24 15:19:10 -08003894 struct f2fs_map_blocks map;
3895
3896 memset(&map, 0, sizeof(map));
3897 map.m_lblk = block;
3898 map.m_len = 1;
3899 map.m_next_pgofs = NULL;
3900 map.m_seg_type = NO_CHECK_TYPE;
3901
3902 if (!f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_BMAP))
3903 blknr = map.m_pblk;
Daeho Jeong4eda1682020-08-31 09:24:01 +09003904 }
Chao Yub79b0a32020-06-29 20:13:12 +08003905out:
3906 trace_f2fs_bmap(inode, block, blknr);
3907 return blknr;
Chao Yu429511c2015-02-05 17:54:31 +08003908}
3909
Weichao Guo5b7a4872016-09-20 05:03:27 +08003910#ifdef CONFIG_MIGRATION
3911#include <linux/migrate.h>
3912
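/*
 * Migrate a data page to @newpage, carrying the private flags across;
 * an atomic-written page also needs its inmem_pages entry repointed
 * while holding inmem_lock.
 */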
3913int f2fs_migrate_page(struct address_space *mapping,
3914 struct page *newpage, struct page *page, enum migrate_mode mode)
3915{
3916 int rc, extra_count;
3917 struct f2fs_inode_info *fi = F2FS_I(mapping->host);
Chao Yub763f3b2021-04-28 17:20:31 +08003918 bool atomic_written = page_private_atomic(page);
Weichao Guo5b7a4872016-09-20 05:03:27 +08003919
3920 BUG_ON(PageWriteback(page));
3921
3922	/* migrating an atomic-written page is safe with inmem_lock held */
Jaegeuk Kimff1048e2017-07-06 14:46:01 -07003923 if (atomic_written) {
3924 if (mode != MIGRATE_SYNC)
3925 return -EBUSY;
3926 if (!mutex_trylock(&fi->inmem_lock))
3927 return -EAGAIN;
3928 }
Weichao Guo5b7a4872016-09-20 05:03:27 +08003929
Chao Yu240a5912019-03-06 17:30:59 +08003930	/* one extra reference is held for an atomic-written page */
3931 extra_count = atomic_written ? 1 : 0;
Weichao Guo5b7a4872016-09-20 05:03:27 +08003932 rc = migrate_page_move_mapping(mapping, newpage,
Keith Busch37109692019-07-18 15:58:46 -07003933 page, extra_count);
Weichao Guo5b7a4872016-09-20 05:03:27 +08003934 if (rc != MIGRATEPAGE_SUCCESS) {
3935 if (atomic_written)
3936 mutex_unlock(&fi->inmem_lock);
3937 return rc;
3938 }
3939
3940 if (atomic_written) {
3941 struct inmem_pages *cur;
Yi Zhuang5f029c02021-04-06 09:47:35 +08003942
Weichao Guo5b7a4872016-09-20 05:03:27 +08003943 list_for_each_entry(cur, &fi->inmem_pages, list)
3944 if (cur->page == page) {
3945 cur->page = newpage;
3946 break;
3947 }
3948 mutex_unlock(&fi->inmem_lock);
3949 put_page(page);
3950 get_page(newpage);
3951 }
3952
Jaegeuk Kimc9ebd3d2021-07-04 22:11:25 -07003953	/* guarantee the new page starts with no stale private field */
3954 set_page_private(newpage, 0);
Chao Yu240a5912019-03-06 17:30:59 +08003955 if (PagePrivate(page)) {
Chao Yub763f3b2021-04-28 17:20:31 +08003956 set_page_private(newpage, page_private(page));
3957 SetPagePrivate(newpage);
3958 get_page(newpage);
3959
3960 set_page_private(page, 0);
3961 ClearPagePrivate(page);
3962 put_page(page);
Chao Yu240a5912019-03-06 17:30:59 +08003963 }
Weichao Guo5b7a4872016-09-20 05:03:27 +08003964
Jérôme Glisse2916ecc2017-09-08 16:12:06 -07003965 if (mode != MIGRATE_SYNC_NO_COPY)
3966 migrate_page_copy(newpage, page);
3967 else
3968 migrate_page_states(newpage, page);
Weichao Guo5b7a4872016-09-20 05:03:27 +08003969
3970 return MIGRATEPAGE_SUCCESS;
3971}
3972#endif
3973
Jaegeuk Kim4969c062019-07-01 19:15:29 -07003974#ifdef CONFIG_SWAP
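/*
 * Rewrite @blkcnt blocks starting at @start_blk through the pinned
 * CURSEG_COLD_DATA_PINNED allocator so the data becomes section-aligned
 * on disk; used below to fix up unaligned swapfile extents.
 */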
Chao Yu859fca62021-05-26 14:29:27 +08003975static int f2fs_migrate_blocks(struct inode *inode, block_t start_blk,
3976 unsigned int blkcnt)
3977{
3978 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3979 unsigned int blkofs;
3980 unsigned int blk_per_sec = BLKS_PER_SEC(sbi);
3981 unsigned int secidx = start_blk / blk_per_sec;
3982 unsigned int end_sec = secidx + blkcnt / blk_per_sec;
3983 int ret = 0;
3984
3985 down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
Jan Karaedc6d012021-04-13 18:10:37 +02003986 filemap_invalidate_lock(inode->i_mapping);
Chao Yu859fca62021-05-26 14:29:27 +08003987
3988 set_inode_flag(inode, FI_ALIGNED_WRITE);
3989
3990 for (; secidx < end_sec; secidx++) {
3991 down_write(&sbi->pin_sem);
3992
3993 f2fs_lock_op(sbi);
3994 f2fs_allocate_new_section(sbi, CURSEG_COLD_DATA_PINNED, false);
3995 f2fs_unlock_op(sbi);
3996
3997 set_inode_flag(inode, FI_DO_DEFRAG);
3998
3999 for (blkofs = 0; blkofs < blk_per_sec; blkofs++) {
4000 struct page *page;
4001 unsigned int blkidx = secidx * blk_per_sec + blkofs;
4002
4003 page = f2fs_get_lock_data_page(inode, blkidx, true);
4004 if (IS_ERR(page)) {
4005 up_write(&sbi->pin_sem);
4006 ret = PTR_ERR(page);
4007 goto done;
4008 }
4009
4010 set_page_dirty(page);
4011 f2fs_put_page(page, 1);
4012 }
4013
4014 clear_inode_flag(inode, FI_DO_DEFRAG);
4015
4016 ret = filemap_fdatawrite(inode->i_mapping);
4017
4018 up_write(&sbi->pin_sem);
4019
4020 if (ret)
4021 break;
4022 }
4023
4024done:
4025 clear_inode_flag(inode, FI_DO_DEFRAG);
4026 clear_inode_flag(inode, FI_ALIGNED_WRITE);
4027
Jan Karaedc6d012021-04-13 18:10:37 +02004028 filemap_invalidate_unlock(inode->i_mapping);
Chao Yu859fca62021-05-26 14:29:27 +08004029 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
4030
4031 return ret;
4032}
4033
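/*
 * Walk the whole swapfile, mapping its blocks and registering each
 * physically contiguous run with add_swap_extent().  Extents that are
 * not section-aligned are migrated in place and remapped.
 */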
Chao Yu0b8fc002021-05-26 14:29:26 +08004034static int check_swap_activate(struct swap_info_struct *sis,
Chao Yuaf4b6b82020-10-12 17:06:05 +08004035 struct file *swap_file, sector_t *span)
4036{
4037 struct address_space *mapping = swap_file->f_mapping;
4038 struct inode *inode = mapping->host;
huangjianan@oppo.com36e4d952021-03-01 12:58:44 +08004039 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
Chao Yuaf4b6b82020-10-12 17:06:05 +08004040 sector_t cur_lblock;
4041 sector_t last_lblock;
4042 sector_t pblock;
4043 sector_t lowest_pblock = -1;
4044 sector_t highest_pblock = 0;
4045 int nr_extents = 0;
4046 unsigned long nr_pblocks;
Chao Yu859fca62021-05-26 14:29:27 +08004047 unsigned int blks_per_sec = BLKS_PER_SEC(sbi);
4048 unsigned int sec_blks_mask = BLKS_PER_SEC(sbi) - 1;
Jaegeuk Kimca298242021-05-11 14:38:47 -07004049 unsigned int not_aligned = 0;
huangjianan@oppo.com36e4d952021-03-01 12:58:44 +08004050 int ret = 0;
Chao Yuaf4b6b82020-10-12 17:06:05 +08004051
4052 /*
4053 * Map all the blocks into the extent list. This code doesn't try
4054 * to be very smart.
4055 */
4056 cur_lblock = 0;
Jaegeuk Kim6cbfcab2020-11-24 14:55:47 -08004057 last_lblock = bytes_to_blks(inode, i_size_read(inode));
Chao Yuaf4b6b82020-10-12 17:06:05 +08004058
huangjianan@oppo.com1da66102021-02-27 20:02:30 +08004059 while (cur_lblock < last_lblock && cur_lblock < sis->max) {
Jaegeuk Kimb876f4c92020-11-24 15:19:10 -08004060 struct f2fs_map_blocks map;
Chao Yu859fca62021-05-26 14:29:27 +08004061retry:
Chao Yuaf4b6b82020-10-12 17:06:05 +08004062 cond_resched();
4063
Jaegeuk Kimb876f4c92020-11-24 15:19:10 -08004064 memset(&map, 0, sizeof(map));
4065 map.m_lblk = cur_lblock;
huangjianan@oppo.com36e4d952021-03-01 12:58:44 +08004066 map.m_len = last_lblock - cur_lblock;
4067 map.m_next_pgofs = NULL;
4068 map.m_next_extent = NULL;
Jaegeuk Kimb876f4c92020-11-24 15:19:10 -08004069 map.m_seg_type = NO_CHECK_TYPE;
huangjianan@oppo.com36e4d952021-03-01 12:58:44 +08004070 map.m_may_create = false;
Chao Yuaf4b6b82020-10-12 17:06:05 +08004071
Jaegeuk Kimb876f4c92020-11-24 15:19:10 -08004072 ret = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_FIEMAP);
Chao Yuaf4b6b82020-10-12 17:06:05 +08004073 if (ret)
huangjianan@oppo.com36e4d952021-03-01 12:58:44 +08004074 goto out;
Chao Yuaf4b6b82020-10-12 17:06:05 +08004075
4076 /* hole */
huangjianan@oppo.com36e4d952021-03-01 12:58:44 +08004077 if (!(map.m_flags & F2FS_MAP_FLAGS)) {
Joe Perches833dcd32021-05-26 13:05:36 -07004078 f2fs_err(sbi, "Swapfile has holes");
Jaegeuk Kimf3951832021-05-12 07:38:00 -07004079 ret = -EINVAL;
huangjianan@oppo.com36e4d952021-03-01 12:58:44 +08004080 goto out;
4081 }
Chao Yuaf4b6b82020-10-12 17:06:05 +08004082
Jaegeuk Kimb876f4c92020-11-24 15:19:10 -08004083 pblock = map.m_pblk;
4084 nr_pblocks = map.m_len;
Chao Yuaf4b6b82020-10-12 17:06:05 +08004085
Chao Yu859fca62021-05-26 14:29:27 +08004086 if ((pblock - SM_I(sbi)->main_blkaddr) & sec_blks_mask ||
4087 nr_pblocks & sec_blks_mask) {
Jaegeuk Kimca298242021-05-11 14:38:47 -07004088 not_aligned++;
huangjianan@oppo.com36e4d952021-03-01 12:58:44 +08004089
Chao Yu859fca62021-05-26 14:29:27 +08004090 nr_pblocks = roundup(nr_pblocks, blks_per_sec);
4091 if (cur_lblock + nr_pblocks > sis->max)
4092 nr_pblocks -= blks_per_sec;
4093
4094 if (!nr_pblocks) {
4095 /* this extent is last one */
4096 nr_pblocks = map.m_len;
4097 f2fs_warn(sbi, "Swapfile: last extent is not aligned to section");
4098 goto next;
4099 }
4100
4101 ret = f2fs_migrate_blocks(inode, cur_lblock,
4102 nr_pblocks);
4103 if (ret)
4104 goto out;
4105 goto retry;
4106 }
4107next:
Chao Yuaf4b6b82020-10-12 17:06:05 +08004108 if (cur_lblock + nr_pblocks >= sis->max)
4109 nr_pblocks = sis->max - cur_lblock;
4110
4111 if (cur_lblock) { /* exclude the header page */
4112 if (pblock < lowest_pblock)
4113 lowest_pblock = pblock;
4114 if (pblock + nr_pblocks - 1 > highest_pblock)
4115 highest_pblock = pblock + nr_pblocks - 1;
4116 }
4117
4118		/*
4119		 * Add this physically contiguous run of blocks as one swap extent.
4120		 */
4121 ret = add_swap_extent(sis, cur_lblock, nr_pblocks, pblock);
4122 if (ret < 0)
4123 goto out;
4124 nr_extents += ret;
4125 cur_lblock += nr_pblocks;
4126 }
4127 ret = nr_extents;
4128 *span = 1 + highest_pblock - lowest_pblock;
4129 if (cur_lblock == 0)
4130 cur_lblock = 1; /* force Empty message */
4131 sis->max = cur_lblock;
4132 sis->pages = cur_lblock - 1;
4133 sis->highest_bit = cur_lblock - 1;
4134out:
Chao Yu859fca62021-05-26 14:29:27 +08004135 if (not_aligned)
4136		f2fs_warn(sbi, "Swapfile (%u) is not aligned to section: 1) creat(), 2) ioctl(F2FS_IOC_SET_PIN_FILE), 3) fallocate(%u * N)",
4137 not_aligned, blks_per_sec * F2FS_BLKSIZE);
Chao Yuaf4b6b82020-10-12 17:06:05 +08004138 return ret;
Chao Yuaf4b6b82020-10-12 17:06:05 +08004139}
4140
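/*
 * ->swap_activate: refuse swapfiles that f2fs cannot service safely
 * (non-regular file, read-only fs, LFS mode, compressed file), convert
 * away inline data, then build the extent list and pin the file so GC
 * leaves its blocks alone.
 */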
Jaegeuk Kim4969c062019-07-01 19:15:29 -07004141static int f2fs_swap_activate(struct swap_info_struct *sis, struct file *file,
4142 sector_t *span)
4143{
4144 struct inode *inode = file_inode(file);
4145 int ret;
4146
4147 if (!S_ISREG(inode->i_mode))
4148 return -EINVAL;
4149
4150 if (f2fs_readonly(F2FS_I_SB(inode)->sb))
4151 return -EROFS;
4152
Shin'ichiro Kawasakid927ccf2021-05-10 20:24:44 +09004153 if (f2fs_lfs_mode(F2FS_I_SB(inode))) {
4154 f2fs_err(F2FS_I_SB(inode),
4155 "Swapfile not supported in LFS mode");
4156 return -EINVAL;
4157 }
4158
Jaegeuk Kim4969c062019-07-01 19:15:29 -07004159 ret = f2fs_convert_inline_inode(inode);
4160 if (ret)
4161 return ret;
4162
Daeho Jeong78134d02020-09-08 11:44:11 +09004163 if (!f2fs_disable_compressed_file(inode))
Chao Yu4c8ff702019-11-01 18:07:14 +08004164 return -EINVAL;
4165
Chao Yu0b979f12020-12-26 18:07:41 +08004166 f2fs_precache_extents(inode);
4167
Chao Yu3e5e4792019-12-27 18:44:56 +08004168 ret = check_swap_activate(sis, file, span);
4169 if (ret < 0)
Jaegeuk Kim4969c062019-07-01 19:15:29 -07004170 return ret;
4171
4172 set_inode_flag(inode, FI_PIN_FILE);
Jaegeuk Kim4969c062019-07-01 19:15:29 -07004173 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
Chao Yu3e5e4792019-12-27 18:44:56 +08004174 return ret;
Jaegeuk Kim4969c062019-07-01 19:15:29 -07004175}
4176
4177static void f2fs_swap_deactivate(struct file *file)
4178{
4179 struct inode *inode = file_inode(file);
4180
4181 clear_inode_flag(inode, FI_PIN_FILE);
4182}
4183#else
4184static int f2fs_swap_activate(struct swap_info_struct *sis, struct file *file,
4185 sector_t *span)
4186{
4187 return -EOPNOTSUPP;
4188}
4189
4190static void f2fs_swap_deactivate(struct file *file)
4191{
4192}
4193#endif
4194
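/* address_space operations for regular data blocks */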
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09004195const struct address_space_operations f2fs_dblock_aops = {
4196 .readpage = f2fs_read_data_page,
Matthew Wilcox (Oracle)23323192020-06-01 21:47:23 -07004197 .readahead = f2fs_readahead,
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09004198 .writepage = f2fs_write_data_page,
4199 .writepages = f2fs_write_data_pages,
4200 .write_begin = f2fs_write_begin,
Jaegeuk Kima1dd3c12013-06-27 13:04:08 +09004201 .write_end = f2fs_write_end,
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09004202 .set_page_dirty = f2fs_set_data_page_dirty,
Chao Yu487261f2015-02-05 17:44:29 +08004203 .invalidatepage = f2fs_invalidate_page,
4204 .releasepage = f2fs_release_page,
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09004205 .direct_IO = f2fs_direct_IO,
Jaegeuk Kimc01e54b2013-01-17 20:30:23 +09004206 .bmap = f2fs_bmap,
Jaegeuk Kim4969c062019-07-01 19:15:29 -07004207 .swap_activate = f2fs_swap_activate,
4208 .swap_deactivate = f2fs_swap_deactivate,
Weichao Guo5b7a4872016-09-20 05:03:27 +08004209#ifdef CONFIG_MIGRATION
4210 .migratepage = f2fs_migrate_page,
4211#endif
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09004212};
Eric Biggers6dbb1792018-04-18 11:09:48 -07004213
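/*
 * Clear only the PAGECACHE_TAG_DIRTY tag in the mapping's xarray; the
 * page itself keeps its dirty bit.
 */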
Matthew Wilcox5ec2d992017-12-04 20:25:25 -05004214void f2fs_clear_page_cache_dirty_tag(struct page *page)
Chao Yuaec2f722018-05-26 18:03:35 +08004215{
4216 struct address_space *mapping = page_mapping(page);
4217 unsigned long flags;
4218
4219 xa_lock_irqsave(&mapping->i_pages, flags);
Matthew Wilcox5ec2d992017-12-04 20:25:25 -05004220 __xa_clear_mark(&mapping->i_pages, page_index(page),
Chao Yuaec2f722018-05-26 18:03:35 +08004221 PAGECACHE_TAG_DIRTY);
4222 xa_unlock_irqrestore(&mapping->i_pages, flags);
4223}
4224
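/* Create the slab cache and mempool backing bio post-read contexts. */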
Eric Biggers6dbb1792018-04-18 11:09:48 -07004225int __init f2fs_init_post_read_processing(void)
4226{
Eric Biggers95ae2512019-07-22 09:26:24 -07004227 bio_post_read_ctx_cache =
4228 kmem_cache_create("f2fs_bio_post_read_ctx",
4229 sizeof(struct bio_post_read_ctx), 0, 0, NULL);
Eric Biggers6dbb1792018-04-18 11:09:48 -07004230 if (!bio_post_read_ctx_cache)
4231 goto fail;
4232 bio_post_read_ctx_pool =
4233 mempool_create_slab_pool(NUM_PREALLOC_POST_READ_CTXS,
4234 bio_post_read_ctx_cache);
4235 if (!bio_post_read_ctx_pool)
4236 goto fail_free_cache;
4237 return 0;
4238
4239fail_free_cache:
4240 kmem_cache_destroy(bio_post_read_ctx_cache);
4241fail:
4242 return -ENOMEM;
4243}
4244
Chao Yu0b20fce2019-09-30 18:53:25 +08004245void f2fs_destroy_post_read_processing(void)
Eric Biggers6dbb1792018-04-18 11:09:48 -07004246{
4247 mempool_destroy(bio_post_read_ctx_pool);
4248 kmem_cache_destroy(bio_post_read_ctx_cache);
4249}
Chao Yu0b20fce2019-09-30 18:53:25 +08004250
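/*
 * The post-read workqueue is only needed when reads may require
 * post-processing: decryption, fs-verity verification or decompression.
 */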
Chao Yu4c8ff702019-11-01 18:07:14 +08004251int f2fs_init_post_read_wq(struct f2fs_sb_info *sbi)
4252{
4253 if (!f2fs_sb_has_encrypt(sbi) &&
4254 !f2fs_sb_has_verity(sbi) &&
4255 !f2fs_sb_has_compression(sbi))
4256 return 0;
4257
4258 sbi->post_read_wq = alloc_workqueue("f2fs_post_read_wq",
4259 WQ_UNBOUND | WQ_HIGHPRI,
4260 num_online_cpus());
4261 if (!sbi->post_read_wq)
4262 return -ENOMEM;
4263 return 0;
4264}
4265
4266void f2fs_destroy_post_read_wq(struct f2fs_sb_info *sbi)
4267{
4268 if (sbi->post_read_wq)
4269 destroy_workqueue(sbi->post_read_wq);
4270}
4271
Chao Yu0b20fce2019-09-30 18:53:25 +08004272int __init f2fs_init_bio_entry_cache(void)
4273{
Chao Yu98510002020-02-17 17:46:20 +08004274 bio_entry_slab = f2fs_kmem_cache_create("f2fs_bio_entry_slab",
Chao Yu0b20fce2019-09-30 18:53:25 +08004275 sizeof(struct bio_entry));
4276 if (!bio_entry_slab)
4277 return -ENOMEM;
4278 return 0;
4279}
4280
Chao Yuf5438052019-12-04 09:52:58 +08004281void f2fs_destroy_bio_entry_cache(void)
Chao Yu0b20fce2019-09-30 18:53:25 +08004282{
4283 kmem_cache_destroy(bio_entry_slab);
4284}