// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/data.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/sched/mm.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/blk-crypto.h>
#include <linux/swap.h>
#include <linux/prefetch.h>
#include <linux/uio.h>
#include <linux/sched/signal.h>
#include <linux/fiemap.h>
#include <linux/iomap.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "iostat.h"
#include <trace/events/f2fs.h>

#define NUM_PREALLOC_POST_READ_CTXS	128

static struct kmem_cache *bio_post_read_ctx_cache;
static struct kmem_cache *bio_entry_slab;
static mempool_t *bio_post_read_ctx_pool;
static struct bio_set f2fs_bioset;

#define F2FS_BIO_POOL_SIZE	NR_CURSEG_TYPE

int __init f2fs_init_bioset(void)
{
	if (bioset_init(&f2fs_bioset, F2FS_BIO_POOL_SIZE,
					0, BIOSET_NEED_BVECS))
		return -ENOMEM;
	return 0;
}

void f2fs_destroy_bioset(void)
{
	bioset_exit(&f2fs_bioset);
}

static bool __is_cp_guaranteed(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode;
	struct f2fs_sb_info *sbi;

	if (!mapping)
		return false;

	inode = mapping->host;
	sbi = F2FS_I_SB(inode);

	if (inode->i_ino == F2FS_META_INO(sbi) ||
			inode->i_ino == F2FS_NODE_INO(sbi) ||
			S_ISDIR(inode->i_mode))
		return true;

	if (f2fs_is_compressed_page(page))
		return false;
	if ((S_ISREG(inode->i_mode) && IS_NOQUOTA(inode)) ||
			page_private_gcing(page))
		return true;
	return false;
}

static enum count_type __read_io_type(struct page *page)
{
	struct address_space *mapping = page_file_mapping(page);

	if (mapping) {
		struct inode *inode = mapping->host;
		struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

		if (inode->i_ino == F2FS_META_INO(sbi))
			return F2FS_RD_META;

		if (inode->i_ino == F2FS_NODE_INO(sbi))
			return F2FS_RD_NODE;
	}
	return F2FS_RD_DATA;
}

/* postprocessing steps for read bios */
enum bio_post_read_step {
#ifdef CONFIG_FS_ENCRYPTION
	STEP_DECRYPT	= 1 << 0,
#else
	STEP_DECRYPT	= 0,	/* compile out the decryption-related code */
#endif
#ifdef CONFIG_F2FS_FS_COMPRESSION
	STEP_DECOMPRESS	= 1 << 1,
#else
	STEP_DECOMPRESS	= 0,	/* compile out the decompression-related code */
#endif
#ifdef CONFIG_FS_VERITY
	STEP_VERITY	= 1 << 2,
#else
	STEP_VERITY	= 0,	/* compile out the verity-related code */
#endif
};

struct bio_post_read_ctx {
	struct bio *bio;
	struct f2fs_sb_info *sbi;
	struct work_struct work;
	unsigned int enabled_steps;
	block_t fs_blkaddr;
};
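
/*
 * Illustrative note (editor's example, not part of the original source):
 * enabled_steps is a bitmask of the steps above. A read from an encrypted,
 * verity-protected file carries STEP_DECRYPT | STEP_VERITY, while a read
 * from a plaintext compressed file carries STEP_DECOMPRESS alone. A bio
 * that needs none of these steps (and is not for a compressed file) is
 * given no bio_post_read_ctx at all; see f2fs_grab_read_bio() below.
 */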

static void f2fs_finish_read_bio(struct bio *bio, bool in_task)
{
	struct bio_vec *bv;
	struct bvec_iter_all iter_all;

	/*
	 * Update and unlock the bio's pagecache pages, and put the
	 * decompression context for any compressed pages.
	 */
	bio_for_each_segment_all(bv, bio, iter_all) {
		struct page *page = bv->bv_page;

		if (f2fs_is_compressed_page(page)) {
			if (bio->bi_status)
				f2fs_end_read_compressed_page(page, true, 0,
							in_task);
			f2fs_put_page_dic(page, in_task);
			continue;
		}

		/* PG_error was set if verity failed. */
		if (bio->bi_status || PageError(page)) {
			ClearPageUptodate(page);
			/* will re-read again later */
			ClearPageError(page);
		} else {
			SetPageUptodate(page);
		}
		dec_page_count(F2FS_P_SB(page), __read_io_type(page));
		unlock_page(page);
	}

	if (bio->bi_private)
		mempool_free(bio->bi_private, bio_post_read_ctx_pool);
	bio_put(bio);
}

static void f2fs_verify_bio(struct work_struct *work)
{
	struct bio_post_read_ctx *ctx =
		container_of(work, struct bio_post_read_ctx, work);
	struct bio *bio = ctx->bio;
	bool may_have_compressed_pages = (ctx->enabled_steps & STEP_DECOMPRESS);

	/*
	 * fsverity_verify_bio() may call readahead() again, and while verity
	 * will be disabled for this, decryption and/or decompression may still
	 * be needed, resulting in another bio_post_read_ctx being allocated.
	 * So to prevent deadlocks we need to release the current ctx to the
	 * mempool first. This assumes that verity is the last post-read step.
	 */
	mempool_free(ctx, bio_post_read_ctx_pool);
	bio->bi_private = NULL;

	/*
	 * Verify the bio's pages with fs-verity. Exclude compressed pages,
	 * as those were handled separately by f2fs_end_read_compressed_page().
	 */
	if (may_have_compressed_pages) {
		struct bio_vec *bv;
		struct bvec_iter_all iter_all;

		bio_for_each_segment_all(bv, bio, iter_all) {
			struct page *page = bv->bv_page;

			if (!f2fs_is_compressed_page(page) &&
			    !fsverity_verify_page(page))
				SetPageError(page);
		}
	} else {
		fsverity_verify_bio(bio);
	}

	f2fs_finish_read_bio(bio, true);
}

/*
 * If the bio's data needs to be verified with fs-verity, then enqueue the
 * verity work for the bio. Otherwise finish the bio now.
 *
 * Note that to avoid deadlocks, the verity work can't be done on the
 * decryption/decompression workqueue. This is because verifying the data pages
 * can involve reading verity metadata pages from the file, and these verity
 * metadata pages may be encrypted and/or compressed.
 */
static void f2fs_verify_and_finish_bio(struct bio *bio, bool in_task)
{
	struct bio_post_read_ctx *ctx = bio->bi_private;

	if (ctx && (ctx->enabled_steps & STEP_VERITY)) {
		INIT_WORK(&ctx->work, f2fs_verify_bio);
		fsverity_enqueue_verify_work(&ctx->work);
	} else {
		f2fs_finish_read_bio(bio, in_task);
	}
}

/*
 * Handle STEP_DECOMPRESS by decompressing any compressed clusters whose last
 * remaining page was read by @ctx->bio.
 *
 * Note that a bio may span clusters (even a mix of compressed and uncompressed
 * clusters) or be for just part of a cluster. STEP_DECOMPRESS just indicates
 * that the bio includes at least one compressed page. The actual decompression
 * is done on a per-cluster basis, not a per-bio basis.
 */
static void f2fs_handle_step_decompress(struct bio_post_read_ctx *ctx,
		bool in_task)
{
	struct bio_vec *bv;
	struct bvec_iter_all iter_all;
	bool all_compressed = true;
	block_t blkaddr = ctx->fs_blkaddr;

	bio_for_each_segment_all(bv, ctx->bio, iter_all) {
		struct page *page = bv->bv_page;

		if (f2fs_is_compressed_page(page))
			f2fs_end_read_compressed_page(page, false, blkaddr,
						in_task);
		else
			all_compressed = false;

		blkaddr++;
	}

	/*
	 * Optimization: if all the bio's pages are compressed, then scheduling
	 * the per-bio verity work is unnecessary, as verity will be fully
	 * handled at the compression cluster level.
	 */
	if (all_compressed)
		ctx->enabled_steps &= ~STEP_VERITY;
}

static void f2fs_post_read_work(struct work_struct *work)
{
	struct bio_post_read_ctx *ctx =
		container_of(work, struct bio_post_read_ctx, work);
	struct bio *bio = ctx->bio;

	if ((ctx->enabled_steps & STEP_DECRYPT) && !fscrypt_decrypt_bio(bio)) {
		f2fs_finish_read_bio(bio, true);
		return;
	}

	if (ctx->enabled_steps & STEP_DECOMPRESS)
		f2fs_handle_step_decompress(ctx, true);

	f2fs_verify_and_finish_bio(bio, true);
}

static void f2fs_read_end_io(struct bio *bio)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(bio_first_page_all(bio));
	struct bio_post_read_ctx *ctx;
	bool intask = in_task();

	iostat_update_and_unbind_ctx(bio, 0);
	ctx = bio->bi_private;

	if (time_to_inject(sbi, FAULT_READ_IO)) {
		f2fs_show_injection_info(sbi, FAULT_READ_IO);
		bio->bi_status = BLK_STS_IOERR;
	}

	if (bio->bi_status) {
		f2fs_finish_read_bio(bio, intask);
		return;
	}

	if (ctx) {
		unsigned int enabled_steps = ctx->enabled_steps &
					(STEP_DECRYPT | STEP_DECOMPRESS);

		/*
		 * If decompression is the only post-read step needed (i.e.
		 * no decryption), handle it inline here instead of queueing
		 * work, unless low-memory mode requires deferral.
		 */
		if (enabled_steps == STEP_DECOMPRESS &&
				!f2fs_low_mem_mode(sbi)) {
			f2fs_handle_step_decompress(ctx, intask);
		} else if (enabled_steps) {
			INIT_WORK(&ctx->work, f2fs_post_read_work);
			queue_work(ctx->sbi->post_read_wq, &ctx->work);
			return;
		}
	}

	f2fs_verify_and_finish_bio(bio, intask);
}

static void f2fs_write_end_io(struct bio *bio)
{
	struct f2fs_sb_info *sbi;
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	iostat_update_and_unbind_ctx(bio, 1);
	sbi = bio->bi_private;

	if (time_to_inject(sbi, FAULT_WRITE_IO)) {
		f2fs_show_injection_info(sbi, FAULT_WRITE_IO);
		bio->bi_status = BLK_STS_IOERR;
	}

	bio_for_each_segment_all(bvec, bio, iter_all) {
		struct page *page = bvec->bv_page;
		enum count_type type = WB_DATA_TYPE(page);

		if (page_private_dummy(page)) {
			clear_page_private_dummy(page);
			unlock_page(page);
			mempool_free(page, sbi->write_io_dummy);

			if (unlikely(bio->bi_status))
				f2fs_stop_checkpoint(sbi, true,
						STOP_CP_REASON_WRITE_FAIL);
			continue;
		}

		fscrypt_finalize_bounce_page(&page);

#ifdef CONFIG_F2FS_FS_COMPRESSION
		if (f2fs_is_compressed_page(page)) {
			f2fs_compress_write_end_io(bio, page);
			continue;
		}
#endif

		if (unlikely(bio->bi_status)) {
			mapping_set_error(page->mapping, -EIO);
			if (type == F2FS_WB_CP_DATA)
				f2fs_stop_checkpoint(sbi, true,
						STOP_CP_REASON_WRITE_FAIL);
		}

		f2fs_bug_on(sbi, page->mapping == NODE_MAPPING(sbi) &&
					page->index != nid_of_node(page));

		dec_page_count(sbi, type);
		if (f2fs_in_warm_node_list(sbi, page))
			f2fs_del_fsync_node_entry(sbi, page);
		clear_page_private_gcing(page);
		end_page_writeback(page);
	}
	if (!get_pages(sbi, F2FS_WB_CP_DATA) &&
				wq_has_sleeper(&sbi->cp_wait))
		wake_up(&sbi->cp_wait);

	bio_put(bio);
}

struct block_device *f2fs_target_device(struct f2fs_sb_info *sbi,
		block_t blk_addr, sector_t *sector)
{
	struct block_device *bdev = sbi->sb->s_bdev;
	int i;

	if (f2fs_is_multi_device(sbi)) {
		for (i = 0; i < sbi->s_ndevs; i++) {
			if (FDEV(i).start_blk <= blk_addr &&
			    FDEV(i).end_blk >= blk_addr) {
				blk_addr -= FDEV(i).start_blk;
				bdev = FDEV(i).bdev;
				break;
			}
		}
	}

	if (sector)
		*sector = SECTOR_FROM_BLOCK(blk_addr);
	return bdev;
}

int f2fs_target_device_index(struct f2fs_sb_info *sbi, block_t blkaddr)
{
	int i;

	if (!f2fs_is_multi_device(sbi))
		return 0;

	for (i = 0; i < sbi->s_ndevs; i++)
		if (FDEV(i).start_blk <= blkaddr && FDEV(i).end_blk >= blkaddr)
			return i;
	return 0;
}
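
/*
 * Worked example (editor's illustration with hypothetical layout, not from
 * the original source): with two devices where FDEV(0) spans blocks 0..999
 * and FDEV(1) spans blocks 1000..1999, f2fs_target_device(sbi, 1500, &sector)
 * returns FDEV(1).bdev and derives *sector from the device-relative block
 * 1500 - 1000 = 500, while f2fs_target_device_index(sbi, 1500) returns 1.
 */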

static blk_opf_t f2fs_io_flags(struct f2fs_io_info *fio)
{
	unsigned int temp_mask = (1 << NR_TEMP_TYPE) - 1;
	unsigned int fua_flag, meta_flag, io_flag;
	blk_opf_t op_flags = 0;

	if (fio->op != REQ_OP_WRITE)
		return 0;
	if (fio->type == DATA)
		io_flag = fio->sbi->data_io_flag;
	else if (fio->type == NODE)
		io_flag = fio->sbi->node_io_flag;
	else
		return 0;

	fua_flag = io_flag & temp_mask;
	meta_flag = (io_flag >> NR_TEMP_TYPE) & temp_mask;

	/*
	 * data/node io flag bits per temp:
	 *      REQ_META     |      REQ_FUA      |
	 *    5 |    4 |   3  |    2 |    1 |   0 |
	 * Cold | Warm | Hot  | Cold | Warm | Hot |
	 */
	if ((1 << fio->temp) & meta_flag)
		op_flags |= REQ_META;
	if ((1 << fio->temp) & fua_flag)
		op_flags |= REQ_FUA;
	return op_flags;
}
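
/*
 * Worked example (editor's illustration with a hypothetical value, not from
 * the original source): with NR_TEMP_TYPE == 3 and data_io_flag == 0b001001,
 * fua_flag == 0b001 and meta_flag == 0b001 per the bit layout above, so a
 * hot DATA write (fio->temp == HOT) gets REQ_META | REQ_FUA while warm and
 * cold writes get neither flag.
 */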

static struct bio *__bio_alloc(struct f2fs_io_info *fio, int npages)
{
	struct f2fs_sb_info *sbi = fio->sbi;
	struct block_device *bdev;
	sector_t sector;
	struct bio *bio;

	bdev = f2fs_target_device(sbi, fio->new_blkaddr, &sector);
	bio = bio_alloc_bioset(bdev, npages,
			fio->op | fio->op_flags | f2fs_io_flags(fio),
			GFP_NOIO, &f2fs_bioset);
	bio->bi_iter.bi_sector = sector;
	if (is_read_io(fio->op)) {
		bio->bi_end_io = f2fs_read_end_io;
		bio->bi_private = NULL;
	} else {
		bio->bi_end_io = f2fs_write_end_io;
		bio->bi_private = sbi;
	}
	iostat_alloc_and_bind_ctx(sbi, bio, NULL);

	if (fio->io_wbc)
		wbc_init_bio(fio->io_wbc, bio);

	return bio;
}

static void f2fs_set_bio_crypt_ctx(struct bio *bio, const struct inode *inode,
				  pgoff_t first_idx,
				  const struct f2fs_io_info *fio,
				  gfp_t gfp_mask)
{
	/*
	 * The f2fs garbage collector sets ->encrypted_page when it wants to
	 * read/write raw data without encryption.
	 */
	if (!fio || !fio->encrypted_page)
		fscrypt_set_bio_crypt_ctx(bio, inode, first_idx, gfp_mask);
}

static bool f2fs_crypt_mergeable_bio(struct bio *bio, const struct inode *inode,
				     pgoff_t next_idx,
				     const struct f2fs_io_info *fio)
{
	/*
	 * The f2fs garbage collector sets ->encrypted_page when it wants to
	 * read/write raw data without encryption.
	 */
	if (fio && fio->encrypted_page)
		return !bio_has_crypt_ctx(bio);

	return fscrypt_mergeable_bio(bio, inode, next_idx);
}

static inline void __submit_bio(struct f2fs_sb_info *sbi,
				struct bio *bio, enum page_type type)
{
	if (!is_read_io(bio_op(bio))) {
		unsigned int start;

		if (type != DATA && type != NODE)
			goto submit_io;

		if (f2fs_lfs_mode(sbi) && current->plug)
			blk_finish_plug(current->plug);

		if (!F2FS_IO_ALIGNED(sbi))
			goto submit_io;

		start = bio->bi_iter.bi_size >> F2FS_BLKSIZE_BITS;
		start %= F2FS_IO_SIZE(sbi);

		if (start == 0)
			goto submit_io;

		/* fill dummy pages */
		for (; start < F2FS_IO_SIZE(sbi); start++) {
			struct page *page =
				mempool_alloc(sbi->write_io_dummy,
					      GFP_NOIO | __GFP_NOFAIL);
			f2fs_bug_on(sbi, !page);

			lock_page(page);

			zero_user_segment(page, 0, PAGE_SIZE);
			set_page_private_dummy(page);

			if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE)
				f2fs_bug_on(sbi, 1);
		}
		/*
		 * In the NODE case, we lose next block address chain. So, we
		 * need to do checkpoint in f2fs_sync_file.
		 */
		if (type == NODE)
			set_sbi_flag(sbi, SBI_NEED_CP);
	}
submit_io:
	if (is_read_io(bio_op(bio)))
		trace_f2fs_submit_read_bio(sbi->sb, type, bio);
	else
		trace_f2fs_submit_write_bio(sbi->sb, type, bio);

	iostat_update_submit_ctx(bio, type);
	submit_bio(bio);
}
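
/*
 * Worked example (editor's illustration with hypothetical geometry, not from
 * the original source): with IO alignment enabled and F2FS_IO_SIZE(sbi) == 8
 * blocks, a DATA/NODE write bio currently holding 13 blocks gives
 * start == 13 % 8 == 5, so the loop above appends 3 zeroed dummy pages
 * before submission to keep the device-visible write a multiple of the
 * configured IO size.
 */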

void f2fs_submit_bio(struct f2fs_sb_info *sbi,
				struct bio *bio, enum page_type type)
{
	__submit_bio(sbi, bio, type);
}

static void __submit_merged_bio(struct f2fs_bio_info *io)
{
	struct f2fs_io_info *fio = &io->fio;

	if (!io->bio)
		return;

	if (is_read_io(fio->op))
		trace_f2fs_prepare_read_bio(io->sbi->sb, fio->type, io->bio);
	else
		trace_f2fs_prepare_write_bio(io->sbi->sb, fio->type, io->bio);

	__submit_bio(io->sbi, io->bio, fio->type);
	io->bio = NULL;
}

static bool __has_merged_page(struct bio *bio, struct inode *inode,
						struct page *page, nid_t ino)
{
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	if (!bio)
		return false;

	if (!inode && !page && !ino)
		return true;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		struct page *target = bvec->bv_page;

		if (fscrypt_is_bounce_page(target)) {
			target = fscrypt_pagecache_page(target);
			if (IS_ERR(target))
				continue;
		}
		if (f2fs_is_compressed_page(target)) {
			target = f2fs_compress_control_page(target);
			if (IS_ERR(target))
				continue;
		}

		if (inode && inode == target->mapping->host)
			return true;
		if (page && page == target)
			return true;
		if (ino && ino == ino_of_node(target))
			return true;
	}

	return false;
}

int f2fs_init_write_merge_io(struct f2fs_sb_info *sbi)
{
	int i;

	for (i = 0; i < NR_PAGE_TYPE; i++) {
		int n = (i == META) ? 1 : NR_TEMP_TYPE;
		int j;

		sbi->write_io[i] = f2fs_kmalloc(sbi,
				array_size(n, sizeof(struct f2fs_bio_info)),
				GFP_KERNEL);
		if (!sbi->write_io[i])
			return -ENOMEM;

		for (j = HOT; j < n; j++) {
			init_f2fs_rwsem(&sbi->write_io[i][j].io_rwsem);
			sbi->write_io[i][j].sbi = sbi;
			sbi->write_io[i][j].bio = NULL;
			spin_lock_init(&sbi->write_io[i][j].io_lock);
			INIT_LIST_HEAD(&sbi->write_io[i][j].io_list);
			INIT_LIST_HEAD(&sbi->write_io[i][j].bio_list);
			init_f2fs_rwsem(&sbi->write_io[i][j].bio_list_lock);
		}
	}

	return 0;
}

static void __f2fs_submit_merged_write(struct f2fs_sb_info *sbi,
				enum page_type type, enum temp_type temp)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(type);
	struct f2fs_bio_info *io = sbi->write_io[btype] + temp;

	f2fs_down_write(&io->io_rwsem);

	/* change META to META_FLUSH in the checkpoint procedure */
	if (type >= META_FLUSH) {
		io->fio.type = META_FLUSH;
		io->bio->bi_opf |= REQ_META | REQ_PRIO | REQ_SYNC;
		if (!test_opt(sbi, NOBARRIER))
			io->bio->bi_opf |= REQ_PREFLUSH | REQ_FUA;
	}
	__submit_merged_bio(io);
	f2fs_up_write(&io->io_rwsem);
}

static void __submit_merged_write_cond(struct f2fs_sb_info *sbi,
				struct inode *inode, struct page *page,
				nid_t ino, enum page_type type, bool force)
{
	enum temp_type temp;
	bool ret = true;

	for (temp = HOT; temp < NR_TEMP_TYPE; temp++) {
		if (!force) {
			enum page_type btype = PAGE_TYPE_OF_BIO(type);
			struct f2fs_bio_info *io = sbi->write_io[btype] + temp;

			f2fs_down_read(&io->io_rwsem);
			ret = __has_merged_page(io->bio, inode, page, ino);
			f2fs_up_read(&io->io_rwsem);
		}
		if (ret)
			__f2fs_submit_merged_write(sbi, type, temp);

		/* TODO: use HOT temp only for meta pages now. */
		if (type >= META)
			break;
	}
}

void f2fs_submit_merged_write(struct f2fs_sb_info *sbi, enum page_type type)
{
	__submit_merged_write_cond(sbi, NULL, NULL, 0, type, true);
}

void f2fs_submit_merged_write_cond(struct f2fs_sb_info *sbi,
				struct inode *inode, struct page *page,
				nid_t ino, enum page_type type)
{
	__submit_merged_write_cond(sbi, inode, page, ino, type, false);
}

void f2fs_flush_merged_writes(struct f2fs_sb_info *sbi)
{
	f2fs_submit_merged_write(sbi, DATA);
	f2fs_submit_merged_write(sbi, NODE);
	f2fs_submit_merged_write(sbi, META);
}

/*
 * Fill the locked page with data located in the block address.
 * A caller needs to unlock the page on failure.
 */
int f2fs_submit_page_bio(struct f2fs_io_info *fio)
{
	struct bio *bio;
	struct page *page = fio->encrypted_page ?
			fio->encrypted_page : fio->page;

	if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr,
			fio->is_por ? META_POR : (__is_meta_io(fio) ?
			META_GENERIC : DATA_GENERIC_ENHANCE))) {
		f2fs_handle_error(fio->sbi, ERROR_INVALID_BLKADDR);
		return -EFSCORRUPTED;
	}

	trace_f2fs_submit_page_bio(page, fio);

	/* Allocate a new bio */
	bio = __bio_alloc(fio, 1);

	f2fs_set_bio_crypt_ctx(bio, fio->page->mapping->host,
			       fio->page->index, fio, GFP_NOIO);

	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
		bio_put(bio);
		return -EFAULT;
	}

	if (fio->io_wbc && !is_read_io(fio->op))
		wbc_account_cgroup_owner(fio->io_wbc, page, PAGE_SIZE);

	inc_page_count(fio->sbi, is_read_io(fio->op) ?
			__read_io_type(page) : WB_DATA_TYPE(fio->page));

	__submit_bio(fio->sbi, bio, fio->type);
	return 0;
}

static bool page_is_mergeable(struct f2fs_sb_info *sbi, struct bio *bio,
				block_t last_blkaddr, block_t cur_blkaddr)
{
	if (unlikely(sbi->max_io_bytes &&
			bio->bi_iter.bi_size >= sbi->max_io_bytes))
		return false;
	if (last_blkaddr + 1 != cur_blkaddr)
		return false;
	return bio->bi_bdev == f2fs_target_device(sbi, cur_blkaddr, NULL);
}

static bool io_type_is_mergeable(struct f2fs_bio_info *io,
						struct f2fs_io_info *fio)
{
	if (io->fio.op != fio->op)
		return false;
	return io->fio.op_flags == fio->op_flags;
}

static bool io_is_mergeable(struct f2fs_sb_info *sbi, struct bio *bio,
					struct f2fs_bio_info *io,
					struct f2fs_io_info *fio,
					block_t last_blkaddr,
					block_t cur_blkaddr)
{
	if (F2FS_IO_ALIGNED(sbi) && (fio->type == DATA || fio->type == NODE)) {
		unsigned int filled_blocks =
				F2FS_BYTES_TO_BLK(bio->bi_iter.bi_size);
		unsigned int io_size = F2FS_IO_SIZE(sbi);
		unsigned int left_vecs = bio->bi_max_vecs - bio->bi_vcnt;

		/* IO in the bio is aligned and not enough vector space is left */
		if (!(filled_blocks % io_size) && left_vecs < io_size)
			return false;
	}
	if (!page_is_mergeable(sbi, bio, last_blkaddr, cur_blkaddr))
		return false;
	return io_type_is_mergeable(io, fio);
}
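
/*
 * Illustrative summary (editor's note, not part of the original source):
 * taken together, the helpers above let a page merge into a pending write
 * bio only if the bio has not reached sbi->max_io_bytes, the new block is
 * physically contiguous (last_blkaddr + 1 == cur_blkaddr), both blocks map
 * to the same backing device, and the pending bio uses the same op and
 * op_flags as the incoming request.
 */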

static void add_bio_entry(struct f2fs_sb_info *sbi, struct bio *bio,
				struct page *page, enum temp_type temp)
{
	struct f2fs_bio_info *io = sbi->write_io[DATA] + temp;
	struct bio_entry *be;

	be = f2fs_kmem_cache_alloc(bio_entry_slab, GFP_NOFS, true, NULL);
	be->bio = bio;
	bio_get(bio);

	if (bio_add_page(bio, page, PAGE_SIZE, 0) != PAGE_SIZE)
		f2fs_bug_on(sbi, 1);

	f2fs_down_write(&io->bio_list_lock);
	list_add_tail(&be->list, &io->bio_list);
	f2fs_up_write(&io->bio_list_lock);
}

static void del_bio_entry(struct bio_entry *be)
{
	list_del(&be->list);
	kmem_cache_free(bio_entry_slab, be);
}

static int add_ipu_page(struct f2fs_io_info *fio, struct bio **bio,
							struct page *page)
{
	struct f2fs_sb_info *sbi = fio->sbi;
	enum temp_type temp;
	bool found = false;
	int ret = -EAGAIN;

	for (temp = HOT; temp < NR_TEMP_TYPE && !found; temp++) {
		struct f2fs_bio_info *io = sbi->write_io[DATA] + temp;
		struct list_head *head = &io->bio_list;
		struct bio_entry *be;

		f2fs_down_write(&io->bio_list_lock);
		list_for_each_entry(be, head, list) {
			if (be->bio != *bio)
				continue;

			found = true;

			f2fs_bug_on(sbi, !page_is_mergeable(sbi, *bio,
							    *fio->last_block,
							    fio->new_blkaddr));
			if (f2fs_crypt_mergeable_bio(*bio,
					fio->page->mapping->host,
					fio->page->index, fio) &&
			    bio_add_page(*bio, page, PAGE_SIZE, 0) ==
					PAGE_SIZE) {
				ret = 0;
				break;
			}

			/* page can't be merged into bio; submit the bio */
			del_bio_entry(be);
			__submit_bio(sbi, *bio, DATA);
			break;
		}
		f2fs_up_write(&io->bio_list_lock);
	}

	if (ret) {
		bio_put(*bio);
		*bio = NULL;
	}

	return ret;
}

void f2fs_submit_merged_ipu_write(struct f2fs_sb_info *sbi,
					struct bio **bio, struct page *page)
{
	enum temp_type temp;
	bool found = false;
	struct bio *target = bio ? *bio : NULL;

	for (temp = HOT; temp < NR_TEMP_TYPE && !found; temp++) {
		struct f2fs_bio_info *io = sbi->write_io[DATA] + temp;
		struct list_head *head = &io->bio_list;
		struct bio_entry *be;

		if (list_empty(head))
			continue;

		f2fs_down_read(&io->bio_list_lock);
		list_for_each_entry(be, head, list) {
			if (target)
				found = (target == be->bio);
			else
				found = __has_merged_page(be->bio, NULL,
								page, 0);
			if (found)
				break;
		}
		f2fs_up_read(&io->bio_list_lock);

		if (!found)
			continue;

		found = false;

		f2fs_down_write(&io->bio_list_lock);
		list_for_each_entry(be, head, list) {
			if (target)
				found = (target == be->bio);
			else
				found = __has_merged_page(be->bio, NULL,
								page, 0);
			if (found) {
				target = be->bio;
				del_bio_entry(be);
				break;
			}
		}
		f2fs_up_write(&io->bio_list_lock);
	}

	if (found)
		__submit_bio(sbi, target, DATA);
	if (bio && *bio) {
		bio_put(*bio);
		*bio = NULL;
	}
}

int f2fs_merge_page_bio(struct f2fs_io_info *fio)
{
	struct bio *bio = *fio->bio;
	struct page *page = fio->encrypted_page ?
			fio->encrypted_page : fio->page;

	if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr,
			__is_meta_io(fio) ? META_GENERIC : DATA_GENERIC)) {
		f2fs_handle_error(fio->sbi, ERROR_INVALID_BLKADDR);
		return -EFSCORRUPTED;
	}

	trace_f2fs_submit_page_bio(page, fio);

	if (bio && !page_is_mergeable(fio->sbi, bio, *fio->last_block,
						fio->new_blkaddr))
		f2fs_submit_merged_ipu_write(fio->sbi, &bio, NULL);
alloc_new:
	if (!bio) {
		bio = __bio_alloc(fio, BIO_MAX_VECS);
		f2fs_set_bio_crypt_ctx(bio, fio->page->mapping->host,
				       fio->page->index, fio, GFP_NOIO);

		add_bio_entry(fio->sbi, bio, page, fio->temp);
	} else {
		if (add_ipu_page(fio, &bio, page))
			goto alloc_new;
	}

	if (fio->io_wbc)
		wbc_account_cgroup_owner(fio->io_wbc, page, PAGE_SIZE);

	inc_page_count(fio->sbi, WB_DATA_TYPE(page));

	*fio->last_block = fio->new_blkaddr;
	*fio->bio = bio;

	return 0;
}

void f2fs_submit_page_write(struct f2fs_io_info *fio)
{
	struct f2fs_sb_info *sbi = fio->sbi;
	enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
	struct f2fs_bio_info *io = sbi->write_io[btype] + fio->temp;
	struct page *bio_page;

	f2fs_bug_on(sbi, is_read_io(fio->op));

	f2fs_down_write(&io->io_rwsem);
next:
	if (fio->in_list) {
		spin_lock(&io->io_lock);
		if (list_empty(&io->io_list)) {
			spin_unlock(&io->io_lock);
			goto out;
		}
		fio = list_first_entry(&io->io_list,
						struct f2fs_io_info, list);
		list_del(&fio->list);
		spin_unlock(&io->io_lock);
	}

	verify_fio_blkaddr(fio);

	if (fio->encrypted_page)
		bio_page = fio->encrypted_page;
	else if (fio->compressed_page)
		bio_page = fio->compressed_page;
	else
		bio_page = fio->page;

	/* set submitted = true as a return value */
	fio->submitted = true;

	inc_page_count(sbi, WB_DATA_TYPE(bio_page));

	if (io->bio &&
	    (!io_is_mergeable(sbi, io->bio, io, fio, io->last_block_in_bio,
			      fio->new_blkaddr) ||
	     !f2fs_crypt_mergeable_bio(io->bio, fio->page->mapping->host,
				       bio_page->index, fio)))
		__submit_merged_bio(io);
alloc_new:
	if (io->bio == NULL) {
		if (F2FS_IO_ALIGNED(sbi) &&
				(fio->type == DATA || fio->type == NODE) &&
				fio->new_blkaddr & F2FS_IO_SIZE_MASK(sbi)) {
			dec_page_count(sbi, WB_DATA_TYPE(bio_page));
			fio->retry = true;
			goto skip;
		}
		io->bio = __bio_alloc(fio, BIO_MAX_VECS);
		f2fs_set_bio_crypt_ctx(io->bio, fio->page->mapping->host,
				       bio_page->index, fio, GFP_NOIO);
		io->fio = *fio;
	}

	if (bio_add_page(io->bio, bio_page, PAGE_SIZE, 0) < PAGE_SIZE) {
		__submit_merged_bio(io);
		goto alloc_new;
	}

	if (fio->io_wbc)
		wbc_account_cgroup_owner(fio->io_wbc, bio_page, PAGE_SIZE);

	io->last_block_in_bio = fio->new_blkaddr;

	trace_f2fs_submit_page_write(fio->page, fio);
skip:
	if (fio->in_list)
		goto next;
out:
	if (is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN) ||
				!f2fs_is_checkpoint_ready(sbi))
		__submit_merged_bio(io);
	f2fs_up_write(&io->io_rwsem);
}

static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr,
				      unsigned nr_pages, blk_opf_t op_flag,
				      pgoff_t first_idx, bool for_write)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct bio *bio;
	struct bio_post_read_ctx *ctx = NULL;
	unsigned int post_read_steps = 0;
	sector_t sector;
	struct block_device *bdev = f2fs_target_device(sbi, blkaddr, &sector);

	bio = bio_alloc_bioset(bdev, bio_max_segs(nr_pages),
			       REQ_OP_READ | op_flag,
			       for_write ? GFP_NOIO : GFP_KERNEL, &f2fs_bioset);
	if (!bio)
		return ERR_PTR(-ENOMEM);
	bio->bi_iter.bi_sector = sector;
	f2fs_set_bio_crypt_ctx(bio, inode, first_idx, NULL, GFP_NOFS);
	bio->bi_end_io = f2fs_read_end_io;

	if (fscrypt_inode_uses_fs_layer_crypto(inode))
		post_read_steps |= STEP_DECRYPT;

	if (f2fs_need_verity(inode, first_idx))
		post_read_steps |= STEP_VERITY;

	/*
	 * STEP_DECOMPRESS is handled specially, since a compressed file might
	 * contain both compressed and uncompressed clusters. We'll allocate a
	 * bio_post_read_ctx if the file is compressed, but the caller is
	 * responsible for enabling STEP_DECOMPRESS if it's actually needed.
	 */

	if (post_read_steps || f2fs_compressed_file(inode)) {
		/* Due to the mempool, this never fails. */
		ctx = mempool_alloc(bio_post_read_ctx_pool, GFP_NOFS);
		ctx->bio = bio;
		ctx->sbi = sbi;
		ctx->enabled_steps = post_read_steps;
		ctx->fs_blkaddr = blkaddr;
		bio->bi_private = ctx;
	}
	iostat_alloc_and_bind_ctx(sbi, bio, ctx);

	return bio;
}

/* This can handle encryption stuff */
static int f2fs_submit_page_read(struct inode *inode, struct page *page,
				 block_t blkaddr, blk_opf_t op_flags,
				 bool for_write)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct bio *bio;

	bio = f2fs_grab_read_bio(inode, blkaddr, 1, op_flags,
					page->index, for_write);
	if (IS_ERR(bio))
		return PTR_ERR(bio);

	/* wait for GCed page writeback via META_MAPPING */
	f2fs_wait_on_block_writeback(inode, blkaddr);

	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
		bio_put(bio);
		return -EFAULT;
	}
	ClearPageError(page);
	inc_page_count(sbi, F2FS_RD_DATA);
	f2fs_update_iostat(sbi, NULL, FS_DATA_READ_IO, F2FS_BLKSIZE);
	__submit_bio(sbi, bio, DATA);
	return 0;
}

static void __set_data_blkaddr(struct dnode_of_data *dn)
{
	struct f2fs_node *rn = F2FS_NODE(dn->node_page);
	__le32 *addr_array;
	int base = 0;

	if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode))
		base = get_extra_isize(dn->inode);

	/* Get physical address of data block */
	addr_array = blkaddr_in_node(rn);
	addr_array[base + dn->ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
}

/*
 * Lock ordering for the change of data block address:
 * ->data_page
 *  ->node_page
 *    update block addresses in the node page
 */
void f2fs_set_data_blkaddr(struct dnode_of_data *dn)
{
	f2fs_wait_on_page_writeback(dn->node_page, NODE, true, true);
	__set_data_blkaddr(dn);
	if (set_page_dirty(dn->node_page))
		dn->node_changed = true;
}

void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr)
{
	dn->data_blkaddr = blkaddr;
	f2fs_set_data_blkaddr(dn);
	f2fs_update_extent_cache(dn);
}

/* dn->ofs_in_node will be returned with up-to-date last block pointer */
int f2fs_reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	int err;

	if (!count)
		return 0;

	if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
		return -EPERM;
	if (unlikely((err = inc_valid_block_count(sbi, dn->inode, &count))))
		return err;

	trace_f2fs_reserve_new_blocks(dn->inode, dn->nid,
						dn->ofs_in_node, count);

	f2fs_wait_on_page_writeback(dn->node_page, NODE, true, true);

	for (; count > 0; dn->ofs_in_node++) {
		block_t blkaddr = f2fs_data_blkaddr(dn);

		if (blkaddr == NULL_ADDR) {
			dn->data_blkaddr = NEW_ADDR;
			__set_data_blkaddr(dn);
			count--;
		}
	}

	if (set_page_dirty(dn->node_page))
		dn->node_changed = true;
	return 0;
}

/* Should keep dn->ofs_in_node unchanged */
int f2fs_reserve_new_block(struct dnode_of_data *dn)
{
	unsigned int ofs_in_node = dn->ofs_in_node;
	int ret;

	ret = f2fs_reserve_new_blocks(dn, 1);
	dn->ofs_in_node = ofs_in_node;
	return ret;
}

int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
{
	bool need_put = dn->inode_page ? false : true;
	int err;

	err = f2fs_get_dnode_of_data(dn, index, ALLOC_NODE);
	if (err)
		return err;

	if (dn->data_blkaddr == NULL_ADDR)
		err = f2fs_reserve_new_block(dn);
	if (err || need_put)
		f2fs_put_dnode(dn);
	return err;
}

int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index)
{
	struct extent_info ei = {0, };
	struct inode *inode = dn->inode;

	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
		dn->data_blkaddr = ei.blk + index - ei.fofs;
		return 0;
	}

	return f2fs_reserve_block(dn, index);
}
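
/*
 * Worked example (editor's illustration with a hypothetical extent, not from
 * the original source): if the extent cache holds {fofs = 100, blk = 5000,
 * len = 8}, then f2fs_get_block() for index 103 resolves dn->data_blkaddr
 * to 5000 + 103 - 100 = 5003 without reading the node page; on a cache
 * miss it falls back to f2fs_reserve_block().
 */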

struct page *f2fs_get_read_data_page(struct inode *inode, pgoff_t index,
				     blk_opf_t op_flags, bool for_write)
{
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	struct extent_info ei = {0, };
	int err;

	page = f2fs_grab_cache_page(mapping, index, for_write);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
		dn.data_blkaddr = ei.blk + index - ei.fofs;
		if (!f2fs_is_valid_blkaddr(F2FS_I_SB(inode), dn.data_blkaddr,
						DATA_GENERIC_ENHANCE_READ)) {
			err = -EFSCORRUPTED;
			f2fs_handle_error(F2FS_I_SB(inode),
						ERROR_INVALID_BLKADDR);
			goto put_err;
		}
		goto got_it;
	}

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err)
		goto put_err;
	f2fs_put_dnode(&dn);

	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
		err = -ENOENT;
		goto put_err;
	}
	if (dn.data_blkaddr != NEW_ADDR &&
			!f2fs_is_valid_blkaddr(F2FS_I_SB(inode),
						dn.data_blkaddr,
						DATA_GENERIC_ENHANCE)) {
		err = -EFSCORRUPTED;
		f2fs_handle_error(F2FS_I_SB(inode),
					ERROR_INVALID_BLKADDR);
		goto put_err;
	}
got_it:
	if (PageUptodate(page)) {
		unlock_page(page);
		return page;
	}

Jaegeuk Kimd59ff4d2013-08-20 19:13:07 +09001258 /*
1259 * A new dentry page is allocated but not able to be written, since its
1260 * new inode page couldn't be allocated due to -ENOSPC.
1261	 * In such a case, its blkaddr can remain NEW_ADDR.
Chao Yu4d57b862018-05-30 00:20:41 +08001262 * see, f2fs_add_link -> f2fs_get_new_data_page ->
1263 * f2fs_init_inode_metadata.
Jaegeuk Kimd59ff4d2013-08-20 19:13:07 +09001264 */
1265 if (dn.data_blkaddr == NEW_ADDR) {
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001266 zero_user_segment(page, 0, PAGE_SIZE);
Jaegeuk Kim237c0792016-06-30 18:49:15 -07001267 if (!PageUptodate(page))
1268 SetPageUptodate(page);
Jaegeuk Kim43f3eae2015-04-30 17:00:33 -07001269 unlock_page(page);
Jaegeuk Kimd59ff4d2013-08-20 19:13:07 +09001270 return page;
1271 }
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001272
Jia Yangb7973092020-07-01 10:27:40 +08001273 err = f2fs_submit_page_read(inode, page, dn.data_blkaddr,
1274 op_flags, for_write);
Jaegeuk Kim393ff912013-03-08 21:29:23 +09001275 if (err)
Jaegeuk Kim86531d62015-07-15 13:08:21 -07001276 goto put_err;
Jaegeuk Kim43f3eae2015-04-30 17:00:33 -07001277 return page;
Jaegeuk Kim86531d62015-07-15 13:08:21 -07001278
1279put_err:
1280 f2fs_put_page(page, 1);
1281 return ERR_PTR(err);
Jaegeuk Kim43f3eae2015-04-30 17:00:33 -07001282}
Jaegeuk Kim393ff912013-03-08 21:29:23 +09001283
Chao Yu4d57b862018-05-30 00:20:41 +08001284struct page *f2fs_find_data_page(struct inode *inode, pgoff_t index)
Jaegeuk Kim43f3eae2015-04-30 17:00:33 -07001285{
1286 struct address_space *mapping = inode->i_mapping;
1287 struct page *page;
1288
1289 page = find_get_page(mapping, index);
1290 if (page && PageUptodate(page))
1291 return page;
1292 f2fs_put_page(page, 0);
1293
Chao Yu4d57b862018-05-30 00:20:41 +08001294 page = f2fs_get_read_data_page(inode, index, 0, false);
Jaegeuk Kim43f3eae2015-04-30 17:00:33 -07001295 if (IS_ERR(page))
1296 return page;
1297
1298 if (PageUptodate(page))
1299 return page;
1300
1301 wait_on_page_locked(page);
1302 if (unlikely(!PageUptodate(page))) {
1303 f2fs_put_page(page, 0);
1304 return ERR_PTR(-EIO);
1305 }
1306 return page;
1307}
1308
1309/*
1310 * If it tries to access a hole, return an error.
1311 * Because the callers (functions in dir.c and GC) should be able to know
1312 * whether this page exists or not.
1313 */
Chao Yu4d57b862018-05-30 00:20:41 +08001314struct page *f2fs_get_lock_data_page(struct inode *inode, pgoff_t index,
Jaegeuk Kima56c7c62015-10-09 15:11:38 -07001315 bool for_write)
Jaegeuk Kim43f3eae2015-04-30 17:00:33 -07001316{
1317 struct address_space *mapping = inode->i_mapping;
1318 struct page *page;
1319repeat:
Chao Yu4d57b862018-05-30 00:20:41 +08001320 page = f2fs_get_read_data_page(inode, index, 0, for_write);
Jaegeuk Kim43f3eae2015-04-30 17:00:33 -07001321 if (IS_ERR(page))
1322 return page;
1323
1324 /* wait for read completion */
Jaegeuk Kim393ff912013-03-08 21:29:23 +09001325 lock_page(page);
Jaegeuk Kim6bacf522013-12-06 15:00:58 +09001326 if (unlikely(page->mapping != mapping)) {
Jaegeuk Kimafcb7ca02013-04-26 11:55:17 +09001327 f2fs_put_page(page, 1);
1328 goto repeat;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001329 }
Chao Yu1563ac72016-07-03 22:05:12 +08001330 if (unlikely(!PageUptodate(page))) {
1331 f2fs_put_page(page, 1);
1332 return ERR_PTR(-EIO);
1333 }
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001334 return page;
1335}
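
/*
 * Editor's note (illustrative, not original text): the three lookup
 * helpers above differ in what they guarantee to the caller:
 *
 *   f2fs_find_data_page()     - waits for any read and returns an
 *                               unlocked, uptodate page.
 *   f2fs_get_read_data_page() - may return with the read bio still in
 *                               flight (page stays locked until it ends).
 *   f2fs_get_lock_data_page() - waits and returns the page locked and
 *                               uptodate.
 *
 * A typical directory-style caller, assuming 'dir' and 'n' exist:
 *
 *	page = f2fs_get_lock_data_page(dir, n, false);
 *	if (IS_ERR(page))
 *		return PTR_ERR(page);
 *	... use page contents ...
 *	f2fs_put_page(page, 1);		release reference and unlock
 */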
1336
Jaegeuk Kim0a8165d2012-11-29 13:28:09 +09001337/*
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001338 * Caller ensures that this data page is never allocated.
1339 * A new zero-filled data page is allocated in the page cache.
Jaegeuk Kim39936832012-11-22 16:21:29 +09001340 *
Chao Yu4f4124d2013-12-21 18:02:14 +08001341 * Also, caller should grab and release a rwsem by calling f2fs_lock_op() and
1342 * f2fs_unlock_op().
Chao Yu470f00e2015-07-14 18:14:06 +08001343 * Note that ipage is set only by make_empty_dir, and if any error occurs,
1344 * ipage should be released by this function.
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001345 */
Chao Yu4d57b862018-05-30 00:20:41 +08001346struct page *f2fs_get_new_data_page(struct inode *inode,
Jaegeuk Kima8865372013-12-27 17:04:17 +09001347 struct page *ipage, pgoff_t index, bool new_i_size)
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001348{
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001349 struct address_space *mapping = inode->i_mapping;
1350 struct page *page;
1351 struct dnode_of_data dn;
1352 int err;
Jaegeuk Kim76121182016-01-01 22:03:47 -08001353
Jaegeuk Kima56c7c62015-10-09 15:11:38 -07001354 page = f2fs_grab_cache_page(mapping, index, true);
Chao Yu470f00e2015-07-14 18:14:06 +08001355 if (!page) {
1356 /*
1357 * before exiting, we should make sure ipage will be released
1358		 * if any error occurs.
1359 */
1360 f2fs_put_page(ipage, 1);
Jaegeuk Kim01f28612015-04-29 11:18:42 -07001361 return ERR_PTR(-ENOMEM);
Chao Yu470f00e2015-07-14 18:14:06 +08001362 }
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001363
Jaegeuk Kima8865372013-12-27 17:04:17 +09001364 set_new_dnode(&dn, inode, ipage, NULL, 0);
Huajun Lib6009652013-11-10 23:13:18 +08001365 err = f2fs_reserve_block(&dn, index);
Jaegeuk Kim01f28612015-04-29 11:18:42 -07001366 if (err) {
1367 f2fs_put_page(page, 1);
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001368 return ERR_PTR(err);
Jaegeuk Kima8865372013-12-27 17:04:17 +09001369 }
Jaegeuk Kim01f28612015-04-29 11:18:42 -07001370 if (!ipage)
1371 f2fs_put_dnode(&dn);
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001372
1373 if (PageUptodate(page))
Jaegeuk Kim01f28612015-04-29 11:18:42 -07001374 goto got_it;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001375
1376 if (dn.data_blkaddr == NEW_ADDR) {
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001377 zero_user_segment(page, 0, PAGE_SIZE);
Jaegeuk Kim237c0792016-06-30 18:49:15 -07001378 if (!PageUptodate(page))
1379 SetPageUptodate(page);
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001380 } else {
Jaegeuk Kim4375a332015-04-23 12:04:33 -07001381 f2fs_put_page(page, 1);
Jaegeuk Kima8865372013-12-27 17:04:17 +09001382
Jaegeuk Kim76121182016-01-01 22:03:47 -08001383 /* if ipage exists, blkaddr should be NEW_ADDR */
1384 f2fs_bug_on(F2FS_I_SB(inode), ipage);
Chao Yu4d57b862018-05-30 00:20:41 +08001385 page = f2fs_get_lock_data_page(inode, index, true);
Jaegeuk Kim4375a332015-04-23 12:04:33 -07001386 if (IS_ERR(page))
Jaegeuk Kim76121182016-01-01 22:03:47 -08001387 return page;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001388 }
Jaegeuk Kim01f28612015-04-29 11:18:42 -07001389got_it:
Chao Yu9edcdab2015-09-11 14:43:52 +08001390 if (new_i_size && i_size_read(inode) <
Jaegeuk Kimee6d1822016-05-20 16:32:49 -07001391 ((loff_t)(index + 1) << PAGE_SHIFT))
Jaegeuk Kimfc9581c2016-05-20 09:22:03 -07001392 f2fs_i_size_write(inode, ((loff_t)(index + 1) << PAGE_SHIFT));
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001393 return page;
1394}
1395
Hyunchul Leed5097be2017-11-28 09:23:00 +09001396static int __allocate_data_block(struct dnode_of_data *dn, int seg_type)
Jaegeuk Kimbfad7c22013-12-16 19:04:05 +09001397{
Jaegeuk Kim40813632014-09-02 15:31:18 -07001398 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
Jaegeuk Kimbfad7c22013-12-16 19:04:05 +09001399 struct f2fs_summary sum;
Jaegeuk Kimbfad7c22013-12-16 19:04:05 +09001400 struct node_info ni;
Chao Yu6aa58d82018-08-14 22:37:25 +08001401 block_t old_blkaddr;
Chao Yu46008c62016-05-09 19:56:30 +08001402 blkcnt_t count = 1;
Chao Yu0abd6752017-07-09 00:13:07 +08001403 int err;
Jaegeuk Kimbfad7c22013-12-16 19:04:05 +09001404
Jaegeuk Kim91942322016-05-20 10:13:22 -07001405 if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
Jaegeuk Kimbfad7c22013-12-16 19:04:05 +09001406 return -EPERM;
Chao Yudf6136e2015-03-23 10:33:37 +08001407
Jaegeuk Kima9419b62021-12-13 14:16:32 -08001408 err = f2fs_get_node_info(sbi, dn->nid, &ni, false);
Chao Yu77357302018-07-17 00:02:17 +08001409 if (err)
1410 return err;
1411
Chao Yua2ced1c2020-02-14 17:44:10 +08001412 dn->data_blkaddr = f2fs_data_blkaddr(dn);
Chao Yuf847c692018-09-27 18:34:52 +08001413 if (dn->data_blkaddr != NULL_ADDR)
Chao Yudf6136e2015-03-23 10:33:37 +08001414 goto alloc;
1415
Chao Yu0abd6752017-07-09 00:13:07 +08001416 if (unlikely((err = inc_valid_block_count(sbi, dn->inode, &count))))
1417 return err;
Jaegeuk Kimbfad7c22013-12-16 19:04:05 +09001418
Chao Yudf6136e2015-03-23 10:33:37 +08001419alloc:
Jaegeuk Kimbfad7c22013-12-16 19:04:05 +09001420 set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);
Chao Yu6aa58d82018-08-14 22:37:25 +08001421 old_blkaddr = dn->data_blkaddr;
1422 f2fs_allocate_data_block(sbi, NULL, old_blkaddr, &dn->data_blkaddr,
Chao Yu093749e2020-08-04 21:14:49 +08001423 &sum, seg_type, NULL);
Chao Yu6ce19af2021-05-20 19:51:50 +08001424 if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO) {
Chao Yu6aa58d82018-08-14 22:37:25 +08001425 invalidate_mapping_pages(META_MAPPING(sbi),
1426 old_blkaddr, old_blkaddr);
Chao Yu6ce19af2021-05-20 19:51:50 +08001427 f2fs_invalidate_compress_page(sbi, old_blkaddr);
1428 }
Chao Yu86f35dc2019-08-28 17:33:35 +08001429 f2fs_update_data_blkaddr(dn, dn->data_blkaddr);
Jaegeuk Kimbfad7c22013-12-16 19:04:05 +09001430 return 0;
1431}
1432
Chao Yu0ef81832020-06-18 14:36:22 +08001433void f2fs_do_map_lock(struct f2fs_sb_info *sbi, int flag, bool lock)
Yunlei He59c90812017-03-13 20:22:18 +08001434{
1435 if (flag == F2FS_GET_BLOCK_PRE_AIO) {
1436 if (lock)
Tim Murraye4544b62022-01-07 12:48:44 -08001437 f2fs_down_read(&sbi->node_change);
Yunlei He59c90812017-03-13 20:22:18 +08001438 else
Tim Murraye4544b62022-01-07 12:48:44 -08001439 f2fs_up_read(&sbi->node_change);
Yunlei He59c90812017-03-13 20:22:18 +08001440 } else {
1441 if (lock)
1442 f2fs_lock_op(sbi);
1443 else
1444 f2fs_unlock_op(sbi);
1445 }
1446}
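
/*
 * Editor's note (sketch, not original): f2fs_do_map_lock() pairs the
 * lighter sbi->node_change read lock for F2FS_GET_BLOCK_PRE_AIO with the
 * full f2fs_lock_op()/f2fs_unlock_op() for every other flag, so the
 * mapping paths always bracket their dnode updates the same way:
 *
 *	f2fs_do_map_lock(sbi, flag, true);	 lock
 *	... reserve/allocate blocks, touch dnodes ...
 *	f2fs_do_map_lock(sbi, flag, false);	 unlock
 */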
1447
Jaegeuk Kim0a8165d2012-11-29 13:28:09 +09001448/*
Chao Yu7a88ddb2020-02-27 19:30:05 +08001449 * f2fs_map_blocks() tries to find or build the mapping relationship which
1450 * maps contiguous logical blocks to physical blocks, and returns such
1451 * info via the f2fs_map_blocks structure.
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001452 */
Chao Yud323d002015-10-27 09:53:45 +08001453int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
Chao Yue2b4e2b2015-08-19 19:11:19 +08001454 int create, int flag)
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001455{
Jaegeuk Kim003a3e12015-04-06 19:55:34 -07001456 unsigned int maxblocks = map->m_len;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001457 struct dnode_of_data dn;
Chao Yuf9811702015-09-21 20:17:52 +08001458 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
Chao Yuf9d6d052018-11-13 14:33:45 +08001459 int mode = map->m_may_create ? ALLOC_NODE : LOOKUP_NODE;
Chao Yu46008c62016-05-09 19:56:30 +08001460 pgoff_t pgofs, end_offset, end;
Jaegeuk Kimbfad7c22013-12-16 19:04:05 +09001461 int err = 0, ofs = 1;
Chao Yu46008c62016-05-09 19:56:30 +08001462 unsigned int ofs_in_node, last_ofs_in_node;
1463 blkcnt_t prealloc;
Chao Yu94afd6d2021-08-04 10:23:48 +08001464 struct extent_info ei = {0, };
Fan Li7df3a432015-12-17 13:20:59 +08001465 block_t blkaddr;
Chao Yuc4020b22018-01-11 14:42:30 +08001466 unsigned int start_pgofs;
Chao Yu71f2c822021-09-01 14:39:20 +08001467 int bidx = 0;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001468
Chao Yudfd02e42016-08-20 15:12:01 +08001469 if (!maxblocks)
1470 return 0;
1471
Chao Yu71f2c822021-09-01 14:39:20 +08001472 map->m_bdev = inode->i_sb->s_bdev;
1473 map->m_multidev_dio =
1474 f2fs_allow_multi_device_dio(F2FS_I_SB(inode), flag);
1475
Jaegeuk Kim003a3e12015-04-06 19:55:34 -07001476 map->m_len = 0;
1477 map->m_flags = 0;
1478
1479 /* it only supports block size == page size */
1480 pgofs = (pgoff_t)map->m_lblk;
Chao Yu46008c62016-05-09 19:56:30 +08001481 end = pgofs + maxblocks;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001482
Jaegeuk Kim24b84912016-02-03 13:49:44 -08001483 if (!create && f2fs_lookup_extent_cache(inode, pgofs, &ei)) {
Chao Yub0332a02020-02-14 17:44:12 +08001484 if (f2fs_lfs_mode(sbi) && flag == F2FS_GET_BLOCK_DIO &&
Jia Zhuf4f0b672018-11-20 04:29:35 +08001485 map->m_may_create)
1486 goto next_dnode;
1487
Jaegeuk Kim003a3e12015-04-06 19:55:34 -07001488 map->m_pblk = ei.blk + pgofs - ei.fofs;
1489 map->m_len = min((pgoff_t)maxblocks, ei.fofs + ei.len - pgofs);
1490 map->m_flags = F2FS_MAP_MAPPED;
Chao Yuc4020b22018-01-11 14:42:30 +08001491 if (map->m_next_extent)
1492 *map->m_next_extent = pgofs + map->m_len;
Sahitya Tummala1e78e8b2018-10-10 10:56:22 +05301493
1494		/* for hardware encryption, wait here to avoid potential future issues */
1495 if (flag == F2FS_GET_BLOCK_DIO)
1496 f2fs_wait_on_block_writeback_range(inode,
1497 map->m_pblk, map->m_len);
Chao Yu71f2c822021-09-01 14:39:20 +08001498
1499 if (map->m_multidev_dio) {
1500 block_t blk_addr = map->m_pblk;
1501
1502 bidx = f2fs_target_device_index(sbi, map->m_pblk);
1503
1504 map->m_bdev = FDEV(bidx).bdev;
1505 map->m_pblk -= FDEV(bidx).start_blk;
1506 map->m_len = min(map->m_len,
1507 FDEV(bidx).end_blk + 1 - map->m_pblk);
1508
1509 if (map->m_may_create)
1510 f2fs_update_device_state(sbi, inode->i_ino,
1511 blk_addr, map->m_len);
1512 }
Jaegeuk Kimbfad7c22013-12-16 19:04:05 +09001513 goto out;
Chao Yua2e7d1b2015-02-05 17:50:30 +08001514 }
Jaegeuk Kimbfad7c22013-12-16 19:04:05 +09001515
Chao Yu4fe71e82016-01-26 15:37:38 +08001516next_dnode:
Chao Yuf9d6d052018-11-13 14:33:45 +08001517 if (map->m_may_create)
Chao Yu0ef81832020-06-18 14:36:22 +08001518 f2fs_do_map_lock(sbi, flag, true);
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001519
1520 /* When reading holes, we need its node page */
1521 set_new_dnode(&dn, inode, NULL, NULL, 0);
Chao Yu4d57b862018-05-30 00:20:41 +08001522 err = f2fs_get_dnode_of_data(&dn, pgofs, mode);
Jaegeuk Kim1ec79082013-12-26 16:55:22 +09001523 if (err) {
Chao Yu43473f92016-05-05 19:13:02 +08001524 if (flag == F2FS_GET_BLOCK_BMAP)
1525 map->m_pblk = 0;
Chao Yuadf9ea82021-08-26 10:03:15 +08001526
Chao Yuda859852016-01-26 15:42:58 +08001527 if (err == -ENOENT) {
Chao Yuadf9ea82021-08-26 10:03:15 +08001528 /*
1529			 * There is one exceptional case in which read_node_page()
1530			 * may return -ENOENT because the filesystem has been
1531			 * shut down or hit cp_error, so force the error
1532			 * number to EIO in such a case.
1533 */
1534 if (map->m_may_create &&
1535 (is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN) ||
1536 f2fs_cp_error(sbi))) {
1537 err = -EIO;
1538 goto unlock_out;
1539 }
1540
Jaegeuk Kimbfad7c22013-12-16 19:04:05 +09001541 err = 0;
Chao Yuda859852016-01-26 15:42:58 +08001542 if (map->m_next_pgofs)
1543 *map->m_next_pgofs =
Chao Yu4d57b862018-05-30 00:20:41 +08001544 f2fs_get_next_page_offset(&dn, pgofs);
Chao Yuc4020b22018-01-11 14:42:30 +08001545 if (map->m_next_extent)
1546 *map->m_next_extent =
Chao Yu4d57b862018-05-30 00:20:41 +08001547 f2fs_get_next_page_offset(&dn, pgofs);
Chao Yuda859852016-01-26 15:42:58 +08001548 }
Jaegeuk Kimbfad7c22013-12-16 19:04:05 +09001549 goto unlock_out;
Namjae Jeon848753a2013-04-23 16:38:02 +09001550 }
Chao Yu973163f2015-09-18 16:51:51 +08001551
Chao Yuc4020b22018-01-11 14:42:30 +08001552 start_pgofs = pgofs;
Chao Yu46008c62016-05-09 19:56:30 +08001553 prealloc = 0;
Arnd Bergmann230436b32016-11-02 14:52:15 +01001554 last_ofs_in_node = ofs_in_node = dn.ofs_in_node;
Chao Yu81ca7352016-01-26 15:39:35 +08001555 end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
Jaegeuk Kimbfad7c22013-12-16 19:04:05 +09001556
Chao Yu4fe71e82016-01-26 15:37:38 +08001557next_block:
Chao Yua2ced1c2020-02-14 17:44:10 +08001558 blkaddr = f2fs_data_blkaddr(&dn);
Chao Yu973163f2015-09-18 16:51:51 +08001559
Chao Yuc9b60782018-08-01 19:13:44 +08001560 if (__is_valid_data_blkaddr(blkaddr) &&
Chao Yu93770ab2019-04-15 15:26:32 +08001561 !f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE)) {
Chao Yu10f966b2019-06-20 11:36:14 +08001562 err = -EFSCORRUPTED;
Chao Yu95fa90c2022-09-28 23:38:54 +08001563 f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR);
Chao Yuc9b60782018-08-01 19:13:44 +08001564 goto sync_out;
1565 }
1566
Chao Yu93770ab2019-04-15 15:26:32 +08001567 if (__is_valid_data_blkaddr(blkaddr)) {
Chao Yuf847c692018-09-27 18:34:52 +08001568		/* use out-of-place update for direct IO under LFS mode */
Chao Yub0332a02020-02-14 17:44:12 +08001569 if (f2fs_lfs_mode(sbi) && flag == F2FS_GET_BLOCK_DIO &&
Chao Yuf9d6d052018-11-13 14:33:45 +08001570 map->m_may_create) {
Chao Yuf847c692018-09-27 18:34:52 +08001571 err = __allocate_data_block(&dn, map->m_seg_type);
Chao Yu05e3600612019-08-28 17:33:36 +08001572 if (err)
1573 goto sync_out;
1574 blkaddr = dn.data_blkaddr;
1575 set_inode_flag(inode, FI_APPEND_WRITE);
Chao Yuf847c692018-09-27 18:34:52 +08001576 }
1577 } else {
Fan Li7df3a432015-12-17 13:20:59 +08001578 if (create) {
1579 if (unlikely(f2fs_cp_error(sbi))) {
1580 err = -EIO;
1581 goto sync_out;
Chao Yu973163f2015-09-18 16:51:51 +08001582 }
Jaegeuk Kim24b84912016-02-03 13:49:44 -08001583 if (flag == F2FS_GET_BLOCK_PRE_AIO) {
Chao Yu46008c62016-05-09 19:56:30 +08001584 if (blkaddr == NULL_ADDR) {
1585 prealloc++;
1586 last_ofs_in_node = dn.ofs_in_node;
1587 }
Jaegeuk Kim24b84912016-02-03 13:49:44 -08001588 } else {
Jaegeuk Kim0a4daae2018-09-19 15:28:40 -07001589 WARN_ON(flag != F2FS_GET_BLOCK_PRE_DIO &&
1590 flag != F2FS_GET_BLOCK_DIO);
Hyunchul Leed5097be2017-11-28 09:23:00 +09001591 err = __allocate_data_block(&dn,
1592 map->m_seg_type);
Jaegeuk Kimd4dd19e2021-11-12 14:31:16 -08001593 if (!err) {
1594 if (flag == F2FS_GET_BLOCK_PRE_DIO)
1595 file_need_truncate(inode);
Jaegeuk Kim91942322016-05-20 10:13:22 -07001596 set_inode_flag(inode, FI_APPEND_WRITE);
Jaegeuk Kimd4dd19e2021-11-12 14:31:16 -08001597 }
Jaegeuk Kim24b84912016-02-03 13:49:44 -08001598 }
Fan Li7df3a432015-12-17 13:20:59 +08001599 if (err)
1600 goto sync_out;
Kinglong Mee3f2be042017-02-23 19:55:05 +08001601 map->m_flags |= F2FS_MAP_NEW;
Fan Li7df3a432015-12-17 13:20:59 +08001602 blkaddr = dn.data_blkaddr;
1603 } else {
Chao Yubbe1da72021-08-06 08:02:50 +08001604 if (f2fs_compressed_file(inode) &&
1605 f2fs_sanity_check_cluster(&dn) &&
1606 (flag != F2FS_GET_BLOCK_FIEMAP ||
1607 IS_ENABLED(CONFIG_F2FS_CHECK_FS))) {
1608 err = -EFSCORRUPTED;
Chao Yu95fa90c2022-09-28 23:38:54 +08001609 f2fs_handle_error(sbi,
1610 ERROR_CORRUPTED_CLUSTER);
Chao Yubbe1da72021-08-06 08:02:50 +08001611 goto sync_out;
1612 }
Chao Yu43473f92016-05-05 19:13:02 +08001613 if (flag == F2FS_GET_BLOCK_BMAP) {
1614 map->m_pblk = 0;
1615 goto sync_out;
1616 }
Chao Yuc4020b22018-01-11 14:42:30 +08001617 if (flag == F2FS_GET_BLOCK_PRECACHE)
1618 goto sync_out;
Chao Yuda859852016-01-26 15:42:58 +08001619 if (flag == F2FS_GET_BLOCK_FIEMAP &&
1620 blkaddr == NULL_ADDR) {
1621 if (map->m_next_pgofs)
1622 *map->m_next_pgofs = pgofs + 1;
Fan Li7df3a432015-12-17 13:20:59 +08001623 goto sync_out;
Jaegeuk Kimbfad7c22013-12-16 19:04:05 +09001624 }
Chao Yuf3d98e72018-01-10 18:18:52 +08001625 if (flag != F2FS_GET_BLOCK_FIEMAP) {
1626 /* for defragment case */
1627 if (map->m_next_pgofs)
1628 *map->m_next_pgofs = pgofs + 1;
Jaegeuk Kimbfad7c22013-12-16 19:04:05 +09001629 goto sync_out;
Chao Yuf3d98e72018-01-10 18:18:52 +08001630 }
Jaegeuk Kimbfad7c22013-12-16 19:04:05 +09001631 }
1632 }
Fan Li7df3a432015-12-17 13:20:59 +08001633
Chao Yu46008c62016-05-09 19:56:30 +08001634 if (flag == F2FS_GET_BLOCK_PRE_AIO)
1635 goto skip;
1636
Chao Yu71f2c822021-09-01 14:39:20 +08001637 if (map->m_multidev_dio)
1638 bidx = f2fs_target_device_index(sbi, blkaddr);
1639
Chao Yu4fe71e82016-01-26 15:37:38 +08001640 if (map->m_len == 0) {
1641 /* preallocated unwritten block should be mapped for fiemap. */
1642 if (blkaddr == NEW_ADDR)
1643 map->m_flags |= F2FS_MAP_UNWRITTEN;
1644 map->m_flags |= F2FS_MAP_MAPPED;
1645
1646 map->m_pblk = blkaddr;
1647 map->m_len = 1;
Chao Yu71f2c822021-09-01 14:39:20 +08001648
1649 if (map->m_multidev_dio)
1650 map->m_bdev = FDEV(bidx).bdev;
Chao Yu4fe71e82016-01-26 15:37:38 +08001651 } else if ((map->m_pblk != NEW_ADDR &&
Fan Li7df3a432015-12-17 13:20:59 +08001652 blkaddr == (map->m_pblk + ofs)) ||
Jaegeuk Kimb439b102016-02-03 13:09:09 -08001653 (map->m_pblk == NEW_ADDR && blkaddr == NEW_ADDR) ||
Chao Yu46008c62016-05-09 19:56:30 +08001654 flag == F2FS_GET_BLOCK_PRE_DIO) {
Chao Yu71f2c822021-09-01 14:39:20 +08001655 if (map->m_multidev_dio && map->m_bdev != FDEV(bidx).bdev)
1656 goto sync_out;
Fan Li7df3a432015-12-17 13:20:59 +08001657 ofs++;
Fan Li7df3a432015-12-17 13:20:59 +08001658 map->m_len++;
Chao Yu4fe71e82016-01-26 15:37:38 +08001659 } else {
1660 goto sync_out;
1661 }
1662
Chao Yu46008c62016-05-09 19:56:30 +08001663skip:
Chao Yu4fe71e82016-01-26 15:37:38 +08001664 dn.ofs_in_node++;
1665 pgofs++;
1666
Chao Yu46008c62016-05-09 19:56:30 +08001667 /* preallocate blocks in batch for one dnode page */
1668 if (flag == F2FS_GET_BLOCK_PRE_AIO &&
1669 (pgofs == end || dn.ofs_in_node == end_offset)) {
Chao Yu4fe71e82016-01-26 15:37:38 +08001670
Chao Yu46008c62016-05-09 19:56:30 +08001671 dn.ofs_in_node = ofs_in_node;
Chao Yu4d57b862018-05-30 00:20:41 +08001672 err = f2fs_reserve_new_blocks(&dn, prealloc);
Chao Yu46008c62016-05-09 19:56:30 +08001673 if (err)
1674 goto sync_out;
Chao Yu4fe71e82016-01-26 15:37:38 +08001675
Chao Yu46008c62016-05-09 19:56:30 +08001676 map->m_len += dn.ofs_in_node - ofs_in_node;
1677 if (prealloc && dn.ofs_in_node != last_ofs_in_node + 1) {
1678 err = -ENOSPC;
1679 goto sync_out;
Chao Yu4fe71e82016-01-26 15:37:38 +08001680 }
Chao Yu46008c62016-05-09 19:56:30 +08001681 dn.ofs_in_node = end_offset;
Fan Li7df3a432015-12-17 13:20:59 +08001682 }
1683
Chao Yu46008c62016-05-09 19:56:30 +08001684 if (pgofs >= end)
1685 goto sync_out;
1686 else if (dn.ofs_in_node < end_offset)
1687 goto next_block;
1688
Chao Yuc4020b22018-01-11 14:42:30 +08001689 if (flag == F2FS_GET_BLOCK_PRECACHE) {
1690 if (map->m_flags & F2FS_MAP_MAPPED) {
1691 unsigned int ofs = start_pgofs - map->m_lblk;
1692
1693 f2fs_update_extent_cache_range(&dn,
1694 start_pgofs, map->m_pblk + ofs,
1695 map->m_len - ofs);
1696 }
1697 }
1698
Chao Yu46008c62016-05-09 19:56:30 +08001699 f2fs_put_dnode(&dn);
1700
Chao Yuf9d6d052018-11-13 14:33:45 +08001701 if (map->m_may_create) {
Chao Yu0ef81832020-06-18 14:36:22 +08001702 f2fs_do_map_lock(sbi, flag, false);
Chao Yu6f2d8ed2016-10-11 22:57:03 +08001703 f2fs_balance_fs(sbi, dn.node_changed);
Chao Yu46008c62016-05-09 19:56:30 +08001704 }
Chao Yu46008c62016-05-09 19:56:30 +08001705 goto next_dnode;
1706
Jaegeuk Kimbfad7c22013-12-16 19:04:05 +09001707sync_out:
Sahitya Tummala1e78e8b2018-10-10 10:56:22 +05301708
Chao Yu71f2c822021-09-01 14:39:20 +08001709 if (flag == F2FS_GET_BLOCK_DIO && map->m_flags & F2FS_MAP_MAPPED) {
1710 /*
1711		 * for hardware encryption, wait here to avoid
1712		 * potential issues in the future
1713 */
Sahitya Tummala1e78e8b2018-10-10 10:56:22 +05301714 f2fs_wait_on_block_writeback_range(inode,
1715 map->m_pblk, map->m_len);
1716
Chao Yu71f2c822021-09-01 14:39:20 +08001717 if (map->m_multidev_dio) {
1718 block_t blk_addr = map->m_pblk;
1719
1720 bidx = f2fs_target_device_index(sbi, map->m_pblk);
1721
1722 map->m_bdev = FDEV(bidx).bdev;
1723 map->m_pblk -= FDEV(bidx).start_blk;
1724
1725 if (map->m_may_create)
1726 f2fs_update_device_state(sbi, inode->i_ino,
1727 blk_addr, map->m_len);
1728
1729 f2fs_bug_on(sbi, blk_addr + map->m_len >
1730 FDEV(bidx).end_blk + 1);
1731 }
1732 }
1733
Chao Yuc4020b22018-01-11 14:42:30 +08001734 if (flag == F2FS_GET_BLOCK_PRECACHE) {
1735 if (map->m_flags & F2FS_MAP_MAPPED) {
1736 unsigned int ofs = start_pgofs - map->m_lblk;
1737
1738 f2fs_update_extent_cache_range(&dn,
1739 start_pgofs, map->m_pblk + ofs,
1740 map->m_len - ofs);
1741 }
1742 if (map->m_next_extent)
1743 *map->m_next_extent = pgofs + 1;
1744 }
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001745 f2fs_put_dnode(&dn);
Jaegeuk Kimbfad7c22013-12-16 19:04:05 +09001746unlock_out:
Chao Yuf9d6d052018-11-13 14:33:45 +08001747 if (map->m_may_create) {
Chao Yu0ef81832020-06-18 14:36:22 +08001748 f2fs_do_map_lock(sbi, flag, false);
Chao Yu6f2d8ed2016-10-11 22:57:03 +08001749 f2fs_balance_fs(sbi, dn.node_changed);
Jaegeuk Kim2a340762015-12-22 13:23:35 -08001750 }
Jaegeuk Kimbfad7c22013-12-16 19:04:05 +09001751out:
Chao Yu71f2c822021-09-01 14:39:20 +08001752 trace_f2fs_map_blocks(inode, map, create, flag, err);
Jaegeuk Kimbfad7c22013-12-16 19:04:05 +09001753 return err;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09001754}
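
/*
 * Editor's sketch (not part of the original file; the helper name is
 * hypothetical): the common read-only calling convention for
 * f2fs_map_blocks(). m_lblk/m_len name the logical range; on return,
 * m_pblk/m_len/m_flags describe one mapped (or hole) extent.
 * f2fs_overwrite_io() just below is an in-tree caller of this pattern.
 */
static bool __maybe_unused example_block_is_mapped(struct inode *inode,
						   pgoff_t lblk)
{
	struct f2fs_map_blocks map = {};

	map.m_lblk = lblk;
	map.m_len = 1;
	map.m_next_pgofs = NULL;
	map.m_next_extent = NULL;
	map.m_seg_type = NO_CHECK_TYPE;
	map.m_may_create = false;

	if (f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT))
		return false;
	return map.m_flags & F2FS_MAP_MAPPED;
}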
1755
Hyunchul Leeb91050a2018-03-08 19:34:38 +09001756bool f2fs_overwrite_io(struct inode *inode, loff_t pos, size_t len)
1757{
1758 struct f2fs_map_blocks map;
1759 block_t last_lblk;
1760 int err;
1761
1762 if (pos + len > i_size_read(inode))
1763 return false;
1764
1765 map.m_lblk = F2FS_BYTES_TO_BLK(pos);
1766 map.m_next_pgofs = NULL;
1767 map.m_next_extent = NULL;
1768 map.m_seg_type = NO_CHECK_TYPE;
Jia Zhuf4f0b672018-11-20 04:29:35 +08001769 map.m_may_create = false;
Hyunchul Leeb91050a2018-03-08 19:34:38 +09001770 last_lblk = F2FS_BLK_ALIGN(pos + len);
1771
1772 while (map.m_lblk < last_lblk) {
1773 map.m_len = last_lblk - map.m_lblk;
1774 err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
1775 if (err || map.m_len == 0)
1776 return false;
1777 map.m_lblk += map.m_len;
1778 }
1779 return true;
1780}
1781
Jaegeuk Kim43b9d4b2020-11-24 15:04:27 -08001782static inline u64 bytes_to_blks(struct inode *inode, u64 bytes)
1783{
1784 return (bytes >> inode->i_blkbits);
1785}
1786
1787static inline u64 blks_to_bytes(struct inode *inode, u64 blks)
1788{
1789 return (blks << inode->i_blkbits);
1790}
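
/*
 * Editor's worked example (assuming the default 4KB block, so
 * i_blkbits == 12): bytes_to_blks(inode, 8192) == 2 and
 * blks_to_bytes(inode, 3) == 12288. The shifts intentionally truncate,
 * e.g. bytes_to_blks(inode, 4097) == 1.
 */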
1791
Chao Yu442a9db2018-01-11 14:39:57 +08001792static int f2fs_xattr_fiemap(struct inode *inode,
1793 struct fiemap_extent_info *fieinfo)
1794{
1795 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1796 struct page *page;
1797 struct node_info ni;
1798 __u64 phys = 0, len;
1799 __u32 flags;
1800 nid_t xnid = F2FS_I(inode)->i_xattr_nid;
1801 int err = 0;
1802
1803 if (f2fs_has_inline_xattr(inode)) {
1804 int offset;
1805
1806 page = f2fs_grab_cache_page(NODE_MAPPING(sbi),
1807 inode->i_ino, false);
1808 if (!page)
1809 return -ENOMEM;
1810
Jaegeuk Kima9419b62021-12-13 14:16:32 -08001811 err = f2fs_get_node_info(sbi, inode->i_ino, &ni, false);
Chao Yu77357302018-07-17 00:02:17 +08001812 if (err) {
1813 f2fs_put_page(page, 1);
1814 return err;
1815 }
Chao Yu442a9db2018-01-11 14:39:57 +08001816
Jaegeuk Kim6cbfcab2020-11-24 14:55:47 -08001817 phys = blks_to_bytes(inode, ni.blk_addr);
Chao Yu442a9db2018-01-11 14:39:57 +08001818 offset = offsetof(struct f2fs_inode, i_addr) +
1819 sizeof(__le32) * (DEF_ADDRS_PER_INODE -
Chao Yub323fd22018-01-17 16:31:36 +08001820 get_inline_xattr_addrs(inode));
Chao Yu442a9db2018-01-11 14:39:57 +08001821
1822 phys += offset;
1823 len = inline_xattr_size(inode);
1824
1825 f2fs_put_page(page, 1);
1826
1827 flags = FIEMAP_EXTENT_DATA_INLINE | FIEMAP_EXTENT_NOT_ALIGNED;
1828
1829 if (!xnid)
1830 flags |= FIEMAP_EXTENT_LAST;
1831
1832 err = fiemap_fill_next_extent(fieinfo, 0, phys, len, flags);
Chao Yudd5a09b2020-06-29 20:13:13 +08001833 trace_f2fs_fiemap(inode, 0, phys, len, flags, err);
Zhang Qilongca7efd72022-09-23 15:17:55 +08001834 if (err)
Chao Yu442a9db2018-01-11 14:39:57 +08001835 return err;
1836 }
1837
1838 if (xnid) {
1839 page = f2fs_grab_cache_page(NODE_MAPPING(sbi), xnid, false);
1840 if (!page)
1841 return -ENOMEM;
1842
Jaegeuk Kima9419b62021-12-13 14:16:32 -08001843 err = f2fs_get_node_info(sbi, xnid, &ni, false);
Chao Yu77357302018-07-17 00:02:17 +08001844 if (err) {
1845 f2fs_put_page(page, 1);
1846 return err;
1847 }
Chao Yu442a9db2018-01-11 14:39:57 +08001848
Jaegeuk Kim6cbfcab2020-11-24 14:55:47 -08001849 phys = blks_to_bytes(inode, ni.blk_addr);
Chao Yu442a9db2018-01-11 14:39:57 +08001850 len = inode->i_sb->s_blocksize;
1851
1852 f2fs_put_page(page, 1);
1853
1854 flags = FIEMAP_EXTENT_LAST;
1855 }
1856
Chao Yudd5a09b2020-06-29 20:13:13 +08001857 if (phys) {
Chao Yu442a9db2018-01-11 14:39:57 +08001858 err = fiemap_fill_next_extent(fieinfo, 0, phys, len, flags);
Chao Yudd5a09b2020-06-29 20:13:13 +08001859 trace_f2fs_fiemap(inode, 0, phys, len, flags, err);
1860 }
Chao Yu442a9db2018-01-11 14:39:57 +08001861
1862 return (err < 0 ? err : 0);
1863}
1864
Chao Yubf38fba2020-03-28 17:40:40 +08001865static loff_t max_inode_blocks(struct inode *inode)
1866{
1867 loff_t result = ADDRS_PER_INODE(inode);
1868 loff_t leaf_count = ADDRS_PER_BLOCK(inode);
1869
1870 /* two direct node blocks */
1871 result += (leaf_count * 2);
1872
1873 /* two indirect node blocks */
1874 leaf_count *= NIDS_PER_BLOCK;
1875 result += (leaf_count * 2);
1876
1877 /* one double indirect node block */
1878 leaf_count *= NIDS_PER_BLOCK;
1879 result += leaf_count;
1880
1881 return result;
1882}
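
/*
 * Editor's worked example (the exact constants depend on block size and
 * inline xattr space; these are the usual 4KB-block values): with about
 * 923 addresses in the inode, 1018 per direct node block and 1018 nids
 * per indirect node block, the sum above is
 *
 *	923 + 2*1018 + 2*1018^2 + 1018^3
 *	  = 923 + 2036 + 2072648 + 1054977832
 *	  = 1057053439 blocks, i.e. roughly 3.94TB per file.
 */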
1883
Jaegeuk Kim9ab701342014-06-08 04:30:14 +09001884int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
1885 u64 start, u64 len)
1886{
Jaegeuk Kimb876f4c92020-11-24 15:19:10 -08001887 struct f2fs_map_blocks map;
Jaegeuk Kim7f63eb72015-05-08 19:30:32 -07001888 sector_t start_blk, last_blk;
Chao Yuda859852016-01-26 15:42:58 +08001889 pgoff_t next_pgofs;
Jaegeuk Kim7f63eb72015-05-08 19:30:32 -07001890 u64 logical = 0, phys = 0, size = 0;
1891 u32 flags = 0;
Jaegeuk Kim7f63eb72015-05-08 19:30:32 -07001892 int ret = 0;
Daeho Jeong093f0ba2021-07-25 21:18:19 -07001893 bool compr_cluster = false, compr_appended;
Chao Yubf38fba2020-03-28 17:40:40 +08001894 unsigned int cluster_size = F2FS_I(inode)->i_cluster_size;
Daeho Jeong093f0ba2021-07-25 21:18:19 -07001895 unsigned int count_in_cluster = 0;
Chengguang Xu0bb20452021-03-09 13:21:18 +08001896 loff_t maxbytes;
Jaegeuk Kim7f63eb72015-05-08 19:30:32 -07001897
Chao Yuc4020b22018-01-11 14:42:30 +08001898 if (fieinfo->fi_flags & FIEMAP_FLAG_CACHE) {
1899 ret = f2fs_precache_extents(inode);
1900 if (ret)
1901 return ret;
1902 }
1903
Christoph Hellwig45dd0522020-05-23 09:30:14 +02001904 ret = fiemap_prep(inode, fieinfo, start, &len, FIEMAP_FLAG_XATTR);
Jaegeuk Kim7f63eb72015-05-08 19:30:32 -07001905 if (ret)
1906 return ret;
1907
Chao Yuf1b43d42018-01-11 14:37:35 +08001908 inode_lock(inode);
1909
Chengguang Xu0bb20452021-03-09 13:21:18 +08001910 maxbytes = max_file_blocks(inode) << F2FS_BLKSIZE_BITS;
1911 if (start > maxbytes) {
1912 ret = -EFBIG;
1913 goto out;
1914 }
1915
1916 if (len > maxbytes || (maxbytes - len) < start)
1917 len = maxbytes - start;
1918
Chao Yu442a9db2018-01-11 14:39:57 +08001919 if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) {
1920 ret = f2fs_xattr_fiemap(inode, fieinfo);
1921 goto out;
1922 }
1923
Chao Yu7975f342019-07-22 18:03:50 +08001924 if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode)) {
Jaegeuk Kim67f8cf32015-10-15 11:34:49 -07001925 ret = f2fs_inline_data_fiemap(inode, fieinfo, start, len);
1926 if (ret != -EAGAIN)
Chao Yuf1b43d42018-01-11 14:37:35 +08001927 goto out;
Jaegeuk Kim67f8cf32015-10-15 11:34:49 -07001928 }
1929
Jaegeuk Kim6cbfcab2020-11-24 14:55:47 -08001930 if (bytes_to_blks(inode, len) == 0)
1931 len = blks_to_bytes(inode, 1);
Jaegeuk Kim7f63eb72015-05-08 19:30:32 -07001932
Jaegeuk Kim6cbfcab2020-11-24 14:55:47 -08001933 start_blk = bytes_to_blks(inode, start);
1934 last_blk = bytes_to_blks(inode, start + len - 1);
Fan Li9a950d52015-12-26 18:07:41 +08001935
Jaegeuk Kim7f63eb72015-05-08 19:30:32 -07001936next:
Jaegeuk Kimb876f4c92020-11-24 15:19:10 -08001937 memset(&map, 0, sizeof(map));
1938 map.m_lblk = start_blk;
1939 map.m_len = bytes_to_blks(inode, len);
1940 map.m_next_pgofs = &next_pgofs;
1941 map.m_seg_type = NO_CHECK_TYPE;
Jaegeuk Kim7f63eb72015-05-08 19:30:32 -07001942
Daeho Jeong093f0ba2021-07-25 21:18:19 -07001943 if (compr_cluster) {
1944 map.m_lblk += 1;
1945 map.m_len = cluster_size - count_in_cluster;
1946 }
Chao Yubf38fba2020-03-28 17:40:40 +08001947
Jaegeuk Kimb876f4c92020-11-24 15:19:10 -08001948 ret = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_FIEMAP);
Jaegeuk Kim7f63eb72015-05-08 19:30:32 -07001949 if (ret)
1950 goto out;
1951
1952 /* HOLE */
Daeho Jeong093f0ba2021-07-25 21:18:19 -07001953 if (!compr_cluster && !(map.m_flags & F2FS_MAP_FLAGS)) {
Chao Yuda859852016-01-26 15:42:58 +08001954 start_blk = next_pgofs;
Chao Yu58736fa2016-10-11 22:57:04 +08001955
Jaegeuk Kim6cbfcab2020-11-24 14:55:47 -08001956 if (blks_to_bytes(inode, start_blk) < blks_to_bytes(inode,
Chao Yubf38fba2020-03-28 17:40:40 +08001957 max_inode_blocks(inode)))
Fan Li9a950d52015-12-26 18:07:41 +08001958 goto prep_next;
Chao Yu58736fa2016-10-11 22:57:04 +08001959
Fan Li9a950d52015-12-26 18:07:41 +08001960 flags |= FIEMAP_EXTENT_LAST;
Jaegeuk Kim7f63eb72015-05-08 19:30:32 -07001961 }
Fan Li9a950d52015-12-26 18:07:41 +08001962
Daeho Jeong093f0ba2021-07-25 21:18:19 -07001963 compr_appended = false;
1964	/* In the case of a compressed cluster, append this to the last extent */
1965 if (compr_cluster && ((map.m_flags & F2FS_MAP_UNWRITTEN) ||
1966 !(map.m_flags & F2FS_MAP_FLAGS))) {
1967 compr_appended = true;
1968 goto skip_fill;
1969 }
1970
Chao Yuda5af122016-01-08 20:19:27 +08001971 if (size) {
Chao Yu0953fe82020-12-14 17:20:57 +08001972 flags |= FIEMAP_EXTENT_MERGED;
Chandan Rajendra62230e0d2018-12-12 15:20:11 +05301973 if (IS_ENCRYPTED(inode))
Chao Yuda5af122016-01-08 20:19:27 +08001974 flags |= FIEMAP_EXTENT_DATA_ENCRYPTED;
1975
Fan Li9a950d52015-12-26 18:07:41 +08001976 ret = fiemap_fill_next_extent(fieinfo, logical,
1977 phys, size, flags);
Chao Yudd5a09b2020-06-29 20:13:13 +08001978 trace_f2fs_fiemap(inode, logical, phys, size, flags, ret);
Chao Yubf38fba2020-03-28 17:40:40 +08001979 if (ret)
1980 goto out;
1981 size = 0;
Chao Yuda5af122016-01-08 20:19:27 +08001982 }
Fan Li9a950d52015-12-26 18:07:41 +08001983
Chao Yubf38fba2020-03-28 17:40:40 +08001984 if (start_blk > last_blk)
Fan Li9a950d52015-12-26 18:07:41 +08001985 goto out;
1986
Daeho Jeong093f0ba2021-07-25 21:18:19 -07001987skip_fill:
Jaegeuk Kimb876f4c92020-11-24 15:19:10 -08001988 if (map.m_pblk == COMPRESS_ADDR) {
Chao Yubf38fba2020-03-28 17:40:40 +08001989 compr_cluster = true;
Daeho Jeong093f0ba2021-07-25 21:18:19 -07001990 count_in_cluster = 1;
1991 } else if (compr_appended) {
1992 unsigned int appended_blks = cluster_size -
1993 count_in_cluster + 1;
1994 size += blks_to_bytes(inode, appended_blks);
1995 start_blk += appended_blks;
1996 compr_cluster = false;
1997 } else {
1998 logical = blks_to_bytes(inode, start_blk);
1999 phys = __is_valid_data_blkaddr(map.m_pblk) ?
2000 blks_to_bytes(inode, map.m_pblk) : 0;
2001 size = blks_to_bytes(inode, map.m_len);
2002 flags = 0;
2003
2004 if (compr_cluster) {
2005 flags = FIEMAP_EXTENT_ENCODED;
2006 count_in_cluster += map.m_len;
2007 if (count_in_cluster == cluster_size) {
2008 compr_cluster = false;
2009 size += blks_to_bytes(inode, 1);
2010 }
2011 } else if (map.m_flags & F2FS_MAP_UNWRITTEN) {
2012 flags = FIEMAP_EXTENT_UNWRITTEN;
2013 }
2014
2015 start_blk += bytes_to_blks(inode, size);
Chao Yubf38fba2020-03-28 17:40:40 +08002016 }
2017
Fan Li9a950d52015-12-26 18:07:41 +08002018prep_next:
Jaegeuk Kim7f63eb72015-05-08 19:30:32 -07002019 cond_resched();
2020 if (fatal_signal_pending(current))
2021 ret = -EINTR;
2022 else
2023 goto next;
2024out:
2025 if (ret == 1)
2026 ret = 0;
2027
Al Viro59551022016-01-22 15:40:57 -05002028 inode_unlock(inode);
Jaegeuk Kim7f63eb72015-05-08 19:30:32 -07002029 return ret;
Jaegeuk Kim9ab701342014-06-08 04:30:14 +09002030}
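
/*
 * Editor's sketch of the userspace side (not part of this file): the
 * handler above services FS_IOC_FIEMAP, which a program can drive as
 *
 *	struct fiemap *fm = calloc(1, sizeof(*fm) +
 *				   32 * sizeof(struct fiemap_extent));
 *	fm->fm_length = FIEMAP_MAX_OFFSET;
 *	fm->fm_extent_count = 32;
 *	if (ioctl(fd, FS_IOC_FIEMAP, fm) == 0)
 *		... fm->fm_mapped_extents entries in fm->fm_extents[] ...
 *
 * assuming <linux/fiemap.h> and <linux/fs.h> are included and 'fd' is an
 * open file descriptor on an f2fs file.
 */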
2031
Eric Biggers95ae2512019-07-22 09:26:24 -07002032static inline loff_t f2fs_readpage_limit(struct inode *inode)
2033{
2034 if (IS_ENABLED(CONFIG_FS_VERITY) &&
2035 (IS_VERITY(inode) || f2fs_verity_in_progress(inode)))
2036 return inode->i_sb->s_maxbytes;
2037
2038 return i_size_read(inode);
2039}
2040
Chao Yu2df0ab02019-03-25 21:07:30 +08002041static int f2fs_read_single_page(struct inode *inode, struct page *page,
2042 unsigned nr_pages,
2043 struct f2fs_map_blocks *map,
2044 struct bio **bio_ret,
2045 sector_t *last_block_in_bio,
2046 bool is_readahead)
2047{
2048 struct bio *bio = *bio_ret;
Jaegeuk Kim43b9d4b2020-11-24 15:04:27 -08002049 const unsigned blocksize = blks_to_bytes(inode, 1);
Chao Yu2df0ab02019-03-25 21:07:30 +08002050 sector_t block_in_file;
2051 sector_t last_block;
2052 sector_t last_block_in_file;
2053 sector_t block_nr;
2054 int ret = 0;
2055
Jaegeuk Kim4969c062019-07-01 19:15:29 -07002056 block_in_file = (sector_t)page_index(page);
Chao Yu2df0ab02019-03-25 21:07:30 +08002057 last_block = block_in_file + nr_pages;
Jaegeuk Kim43b9d4b2020-11-24 15:04:27 -08002058 last_block_in_file = bytes_to_blks(inode,
2059 f2fs_readpage_limit(inode) + blocksize - 1);
Chao Yu2df0ab02019-03-25 21:07:30 +08002060 if (last_block > last_block_in_file)
2061 last_block = last_block_in_file;
2062
2063	/* just zero out the page which is beyond EOF */
2064 if (block_in_file >= last_block)
2065 goto zero_out;
2066 /*
2067 * Map blocks using the previous result first.
2068 */
2069 if ((map->m_flags & F2FS_MAP_MAPPED) &&
2070 block_in_file > map->m_lblk &&
2071 block_in_file < (map->m_lblk + map->m_len))
2072 goto got_it;
2073
2074 /*
2075 * Then do more f2fs_map_blocks() calls until we are
2076 * done with this page.
2077 */
2078 map->m_lblk = block_in_file;
2079 map->m_len = last_block - block_in_file;
2080
2081 ret = f2fs_map_blocks(inode, map, 0, F2FS_GET_BLOCK_DEFAULT);
2082 if (ret)
2083 goto out;
2084got_it:
2085 if ((map->m_flags & F2FS_MAP_MAPPED)) {
2086 block_nr = map->m_pblk + block_in_file - map->m_lblk;
2087 SetPageMappedToDisk(page);
2088
Chao Yu2df0ab02019-03-25 21:07:30 +08002089 if (!f2fs_is_valid_blkaddr(F2FS_I_SB(inode), block_nr,
Chao Yu93770ab2019-04-15 15:26:32 +08002090 DATA_GENERIC_ENHANCE_READ)) {
Chao Yu10f966b2019-06-20 11:36:14 +08002091 ret = -EFSCORRUPTED;
Chao Yu95fa90c2022-09-28 23:38:54 +08002092 f2fs_handle_error(F2FS_I_SB(inode),
2093 ERROR_INVALID_BLKADDR);
Chao Yu2df0ab02019-03-25 21:07:30 +08002094 goto out;
2095 }
2096 } else {
2097zero_out:
2098 zero_user_segment(page, 0, PAGE_SIZE);
Eric Biggers95ae2512019-07-22 09:26:24 -07002099 if (f2fs_need_verity(inode, page->index) &&
2100 !fsverity_verify_page(page)) {
2101 ret = -EIO;
2102 goto out;
2103 }
Chao Yu2df0ab02019-03-25 21:07:30 +08002104 if (!PageUptodate(page))
2105 SetPageUptodate(page);
2106 unlock_page(page);
2107 goto out;
2108 }
2109
2110 /*
2111 * This page will go to BIO. Do we need to send this
2112 * BIO off first?
2113 */
Satya Tangirala27aacd22020-07-02 01:56:06 +00002114 if (bio && (!page_is_mergeable(F2FS_I_SB(inode), bio,
2115 *last_block_in_bio, block_nr) ||
2116 !f2fs_crypt_mergeable_bio(bio, inode, page->index, NULL))) {
Chao Yu2df0ab02019-03-25 21:07:30 +08002117submit_and_realloc:
2118 __submit_bio(F2FS_I_SB(inode), bio, DATA);
2119 bio = NULL;
2120 }
2121 if (bio == NULL) {
2122 bio = f2fs_grab_read_bio(inode, block_nr, nr_pages,
Chao Yu06837282020-02-18 18:21:35 +08002123 is_readahead ? REQ_RAHEAD : 0, page->index,
Eric Biggers7f59b272021-01-04 22:33:02 -08002124 false);
Chao Yu2df0ab02019-03-25 21:07:30 +08002125 if (IS_ERR(bio)) {
2126 ret = PTR_ERR(bio);
2127 bio = NULL;
2128 goto out;
2129 }
2130 }
2131
2132 /*
2133 * If the page is under writeback, we need to wait for
2134 * its completion to see the correct decrypted data.
2135 */
2136 f2fs_wait_on_block_writeback(inode, block_nr);
2137
2138 if (bio_add_page(bio, page, blocksize, 0) < blocksize)
2139 goto submit_and_realloc;
2140
2141 inc_page_count(F2FS_I_SB(inode), F2FS_RD_DATA);
Chao Yu34a23522022-08-20 11:04:41 +08002142 f2fs_update_iostat(F2FS_I_SB(inode), NULL, FS_DATA_READ_IO,
2143 F2FS_BLKSIZE);
Chao Yu2df0ab02019-03-25 21:07:30 +08002144 ClearPageError(page);
2145 *last_block_in_bio = block_nr;
2146 goto out;
Chao Yu2df0ab02019-03-25 21:07:30 +08002147out:
2148 *bio_ret = bio;
2149 return ret;
2150}
2151
Chao Yu4c8ff702019-11-01 18:07:14 +08002152#ifdef CONFIG_F2FS_FS_COMPRESSION
2153int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
2154 unsigned nr_pages, sector_t *last_block_in_bio,
Chao Yu06837282020-02-18 18:21:35 +08002155 bool is_readahead, bool for_write)
Chao Yu4c8ff702019-11-01 18:07:14 +08002156{
2157 struct dnode_of_data dn;
2158 struct inode *inode = cc->inode;
2159 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2160 struct bio *bio = *bio_ret;
2161 unsigned int start_idx = cc->cluster_idx << cc->log_cluster_size;
2162 sector_t last_block_in_file;
Jaegeuk Kim43b9d4b2020-11-24 15:04:27 -08002163 const unsigned blocksize = blks_to_bytes(inode, 1);
Chao Yu4c8ff702019-11-01 18:07:14 +08002164 struct decompress_io_ctx *dic = NULL;
Chao Yu94afd6d2021-08-04 10:23:48 +08002165 struct extent_info ei = {0, };
2166 bool from_dnode = true;
Chao Yu4c8ff702019-11-01 18:07:14 +08002167 int i;
2168 int ret = 0;
2169
2170 f2fs_bug_on(sbi, f2fs_cluster_is_empty(cc));
2171
Jaegeuk Kim43b9d4b2020-11-24 15:04:27 -08002172 last_block_in_file = bytes_to_blks(inode,
2173 f2fs_readpage_limit(inode) + blocksize - 1);
Chao Yu4c8ff702019-11-01 18:07:14 +08002174
2175 /* get rid of pages beyond EOF */
2176 for (i = 0; i < cc->cluster_size; i++) {
2177 struct page *page = cc->rpages[i];
2178
2179 if (!page)
2180 continue;
2181 if ((sector_t)page->index >= last_block_in_file) {
2182 zero_user_segment(page, 0, PAGE_SIZE);
2183 if (!PageUptodate(page))
2184 SetPageUptodate(page);
2185 } else if (!PageUptodate(page)) {
2186 continue;
2187 }
2188 unlock_page(page);
Jaegeuk Kim9605f752021-08-30 13:30:45 -07002189 if (for_write)
2190 put_page(page);
Chao Yu4c8ff702019-11-01 18:07:14 +08002191 cc->rpages[i] = NULL;
2192 cc->nr_rpages--;
2193 }
2194
2195 /* we are done since all pages are beyond EOF */
2196 if (f2fs_cluster_is_empty(cc))
2197 goto out;
2198
Chao Yu94afd6d2021-08-04 10:23:48 +08002199 if (f2fs_lookup_extent_cache(inode, start_idx, &ei))
2200 from_dnode = false;
2201
2202 if (!from_dnode)
2203 goto skip_reading_dnode;
2204
Chao Yu4c8ff702019-11-01 18:07:14 +08002205 set_new_dnode(&dn, inode, NULL, NULL, 0);
2206 ret = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
2207 if (ret)
2208 goto out;
2209
Chao Yua86d27d2020-07-29 21:21:35 +08002210 f2fs_bug_on(sbi, dn.data_blkaddr != COMPRESS_ADDR);
Chao Yu4c8ff702019-11-01 18:07:14 +08002211
Chao Yu94afd6d2021-08-04 10:23:48 +08002212skip_reading_dnode:
Chao Yu4c8ff702019-11-01 18:07:14 +08002213 for (i = 1; i < cc->cluster_size; i++) {
2214 block_t blkaddr;
2215
Chao Yu94afd6d2021-08-04 10:23:48 +08002216 blkaddr = from_dnode ? data_blkaddr(dn.inode, dn.node_page,
2217 dn.ofs_in_node + i) :
2218 ei.blk + i - 1;
Chao Yu4c8ff702019-11-01 18:07:14 +08002219
2220 if (!__is_valid_data_blkaddr(blkaddr))
2221 break;
2222
2223 if (!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC)) {
2224 ret = -EFAULT;
2225 goto out_put_dnode;
2226 }
2227 cc->nr_cpages++;
Chao Yu94afd6d2021-08-04 10:23:48 +08002228
2229 if (!from_dnode && i >= ei.c_len)
2230 break;
Chao Yu4c8ff702019-11-01 18:07:14 +08002231 }
2232
2233 /* nothing to decompress */
2234 if (cc->nr_cpages == 0) {
2235 ret = 0;
2236 goto out_put_dnode;
2237 }
2238
2239 dic = f2fs_alloc_dic(cc);
2240 if (IS_ERR(dic)) {
2241 ret = PTR_ERR(dic);
2242 goto out_put_dnode;
2243 }
2244
Chao Yu6ce19af2021-05-20 19:51:50 +08002245 for (i = 0; i < cc->nr_cpages; i++) {
Chao Yu4c8ff702019-11-01 18:07:14 +08002246 struct page *page = dic->cpages[i];
2247 block_t blkaddr;
Eric Biggers7f59b272021-01-04 22:33:02 -08002248 struct bio_post_read_ctx *ctx;
Chao Yu4c8ff702019-11-01 18:07:14 +08002249
Chao Yu94afd6d2021-08-04 10:23:48 +08002250 blkaddr = from_dnode ? data_blkaddr(dn.inode, dn.node_page,
2251 dn.ofs_in_node + i + 1) :
2252 ei.blk + i;
Chao Yu4c8ff702019-11-01 18:07:14 +08002253
Chao Yu6ce19af2021-05-20 19:51:50 +08002254 f2fs_wait_on_block_writeback(inode, blkaddr);
2255
2256 if (f2fs_load_compressed_page(sbi, page, blkaddr)) {
2257 if (atomic_dec_and_test(&dic->remaining_pages))
Daeho Jeongbff139b2022-08-02 12:24:37 -07002258 f2fs_decompress_cluster(dic, true);
Chao Yu6ce19af2021-05-20 19:51:50 +08002259 continue;
2260 }
2261
Satya Tangirala27aacd22020-07-02 01:56:06 +00002262 if (bio && (!page_is_mergeable(sbi, bio,
2263 *last_block_in_bio, blkaddr) ||
2264 !f2fs_crypt_mergeable_bio(bio, inode, page->index, NULL))) {
Chao Yu4c8ff702019-11-01 18:07:14 +08002265submit_and_realloc:
2266 __submit_bio(sbi, bio, DATA);
2267 bio = NULL;
2268 }
2269
2270 if (!bio) {
2271 bio = f2fs_grab_read_bio(inode, blkaddr, nr_pages,
2272 is_readahead ? REQ_RAHEAD : 0,
Eric Biggers7f59b272021-01-04 22:33:02 -08002273 page->index, for_write);
Chao Yu4c8ff702019-11-01 18:07:14 +08002274 if (IS_ERR(bio)) {
2275 ret = PTR_ERR(bio);
Daeho Jeongbff139b2022-08-02 12:24:37 -07002276 f2fs_decompress_end_io(dic, ret, true);
Chao Yu4c8ff702019-11-01 18:07:14 +08002277 f2fs_put_dnode(&dn);
Chao Yuf3494342020-04-23 17:57:33 +08002278 *bio_ret = NULL;
Chao Yu4c8ff702019-11-01 18:07:14 +08002279 return ret;
2280 }
2281 }
2282
Chao Yu4c8ff702019-11-01 18:07:14 +08002283 if (bio_add_page(bio, page, blocksize, 0) < blocksize)
2284 goto submit_and_realloc;
2285
Daeho Jeonga4b68172021-08-20 15:29:09 -07002286 ctx = get_post_read_ctx(bio);
Eric Biggers7f59b272021-01-04 22:33:02 -08002287 ctx->enabled_steps |= STEP_DECOMPRESS;
2288 refcount_inc(&dic->refcnt);
Chao Yu03382f12020-04-21 19:36:21 +08002289
Chao Yu4c8ff702019-11-01 18:07:14 +08002290 inc_page_count(sbi, F2FS_RD_DATA);
Chao Yu34a23522022-08-20 11:04:41 +08002291 f2fs_update_iostat(sbi, inode, FS_DATA_READ_IO, F2FS_BLKSIZE);
Chao Yu4c8ff702019-11-01 18:07:14 +08002292 ClearPageError(page);
2293 *last_block_in_bio = blkaddr;
2294 }
2295
Chao Yu94afd6d2021-08-04 10:23:48 +08002296 if (from_dnode)
2297 f2fs_put_dnode(&dn);
Chao Yu4c8ff702019-11-01 18:07:14 +08002298
2299 *bio_ret = bio;
2300 return 0;
2301
2302out_put_dnode:
Chao Yu94afd6d2021-08-04 10:23:48 +08002303 if (from_dnode)
2304 f2fs_put_dnode(&dn);
Chao Yu4c8ff702019-11-01 18:07:14 +08002305out:
Eric Biggers7f59b272021-01-04 22:33:02 -08002306 for (i = 0; i < cc->cluster_size; i++) {
2307 if (cc->rpages[i]) {
2308 ClearPageUptodate(cc->rpages[i]);
2309 ClearPageError(cc->rpages[i]);
2310 unlock_page(cc->rpages[i]);
2311 }
2312 }
Chao Yu4c8ff702019-11-01 18:07:14 +08002313 *bio_ret = bio;
2314 return ret;
2315}
2316#endif
2317
Jaegeuk Kimf1e88662015-04-09 11:20:42 -07002318/*
2319 * This function was originally taken from fs/mpage.c, and customized for f2fs.
2320 * The major change is that block_size == page_size in f2fs by default.
2321 */
Matthew Wilcox (Oracle)e20a7692020-06-01 21:47:27 -07002322static int f2fs_mpage_readpages(struct inode *inode,
Matthew Wilcox (Oracle)23323192020-06-01 21:47:23 -07002323 struct readahead_control *rac, struct page *page)
Jaegeuk Kimf1e88662015-04-09 11:20:42 -07002324{
2325 struct bio *bio = NULL;
Jaegeuk Kimf1e88662015-04-09 11:20:42 -07002326 sector_t last_block_in_bio = 0;
Jaegeuk Kimf1e88662015-04-09 11:20:42 -07002327 struct f2fs_map_blocks map;
Chao Yu4c8ff702019-11-01 18:07:14 +08002328#ifdef CONFIG_F2FS_FS_COMPRESSION
2329 struct compress_ctx cc = {
2330 .inode = inode,
2331 .log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
2332 .cluster_size = F2FS_I(inode)->i_cluster_size,
2333 .cluster_idx = NULL_CLUSTER,
2334 .rpages = NULL,
2335 .cpages = NULL,
2336 .nr_rpages = 0,
2337 .nr_cpages = 0,
2338 };
Fengnan Changa2649312021-08-12 19:36:41 +08002339 pgoff_t nc_cluster_idx = NULL_CLUSTER;
Chao Yu4c8ff702019-11-01 18:07:14 +08002340#endif
Matthew Wilcox (Oracle)23323192020-06-01 21:47:23 -07002341 unsigned nr_pages = rac ? readahead_count(rac) : 1;
Chao Yu4c8ff702019-11-01 18:07:14 +08002342 unsigned max_nr_pages = nr_pages;
Chao Yu2df0ab02019-03-25 21:07:30 +08002343 int ret = 0;
Jaegeuk Kimf1e88662015-04-09 11:20:42 -07002344
2345 map.m_pblk = 0;
2346 map.m_lblk = 0;
2347 map.m_len = 0;
2348 map.m_flags = 0;
Chao Yuda859852016-01-26 15:42:58 +08002349 map.m_next_pgofs = NULL;
Chao Yuc4020b22018-01-11 14:42:30 +08002350 map.m_next_extent = NULL;
Hyunchul Leed5097be2017-11-28 09:23:00 +09002351 map.m_seg_type = NO_CHECK_TYPE;
Chao Yuf9d6d052018-11-13 14:33:45 +08002352 map.m_may_create = false;
Jaegeuk Kimf1e88662015-04-09 11:20:42 -07002353
LiFan736c0a72017-11-25 11:46:18 +08002354 for (; nr_pages; nr_pages--) {
Matthew Wilcox (Oracle)23323192020-06-01 21:47:23 -07002355 if (rac) {
2356 page = readahead_page(rac);
Kinglong Meea83d50b2017-03-13 16:35:13 +08002357 prefetchw(&page->flags);
Jaegeuk Kimf1e88662015-04-09 11:20:42 -07002358 }
2359
Chao Yu4c8ff702019-11-01 18:07:14 +08002360#ifdef CONFIG_F2FS_FS_COMPRESSION
2361 if (f2fs_compressed_file(inode)) {
2362			/* there are remaining compressed pages, submit them */
2363 if (!f2fs_cluster_can_merge_page(&cc, page->index)) {
2364 ret = f2fs_read_multi_pages(&cc, &bio,
2365 max_nr_pages,
2366 &last_block_in_bio,
Matthew Wilcox (Oracle)23323192020-06-01 21:47:23 -07002367 rac != NULL, false);
Chao Yu8bfbfb02021-05-10 17:30:32 +08002368 f2fs_destroy_compress_ctx(&cc, false);
Chao Yu4c8ff702019-11-01 18:07:14 +08002369 if (ret)
2370 goto set_error_page;
2371 }
Fengnan Changa2649312021-08-12 19:36:41 +08002372 if (cc.cluster_idx == NULL_CLUSTER) {
2373 if (nc_cluster_idx ==
2374 page->index >> cc.log_cluster_size) {
2375 goto read_single_page;
2376 }
Chao Yu4c8ff702019-11-01 18:07:14 +08002377
Fengnan Changa2649312021-08-12 19:36:41 +08002378 ret = f2fs_is_compressed_cluster(inode, page->index);
2379 if (ret < 0)
2380 goto set_error_page;
2381 else if (!ret) {
2382 nc_cluster_idx =
2383 page->index >> cc.log_cluster_size;
2384 goto read_single_page;
2385 }
2386
2387 nc_cluster_idx = NULL_CLUSTER;
2388 }
Chao Yu4c8ff702019-11-01 18:07:14 +08002389 ret = f2fs_init_compress_ctx(&cc);
2390 if (ret)
2391 goto set_error_page;
2392
2393 f2fs_compress_ctx_add_page(&cc, page);
2394
2395 goto next_page;
2396 }
2397read_single_page:
2398#endif
2399
2400 ret = f2fs_read_single_page(inode, page, max_nr_pages, &map,
Matthew Wilcox (Oracle)23323192020-06-01 21:47:23 -07002401 &bio, &last_block_in_bio, rac);
Chao Yu2df0ab02019-03-25 21:07:30 +08002402 if (ret) {
Chao Yu4c8ff702019-11-01 18:07:14 +08002403#ifdef CONFIG_F2FS_FS_COMPRESSION
2404set_error_page:
2405#endif
Chao Yu2df0ab02019-03-25 21:07:30 +08002406 SetPageError(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002407 zero_user_segment(page, 0, PAGE_SIZE);
Jaegeuk Kimf1e88662015-04-09 11:20:42 -07002408 unlock_page(page);
Jaegeuk Kimf1e88662015-04-09 11:20:42 -07002409 }
Matthew Wilcox (Oracle)23323192020-06-01 21:47:23 -07002410#ifdef CONFIG_F2FS_FS_COMPRESSION
Jaegeuk Kimf1e88662015-04-09 11:20:42 -07002411next_page:
Matthew Wilcox (Oracle)23323192020-06-01 21:47:23 -07002412#endif
2413 if (rac)
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002414 put_page(page);
Chao Yu4c8ff702019-11-01 18:07:14 +08002415
2416#ifdef CONFIG_F2FS_FS_COMPRESSION
2417 if (f2fs_compressed_file(inode)) {
2418 /* last page */
2419 if (nr_pages == 1 && !f2fs_cluster_is_empty(&cc)) {
2420 ret = f2fs_read_multi_pages(&cc, &bio,
2421 max_nr_pages,
2422 &last_block_in_bio,
Matthew Wilcox (Oracle)23323192020-06-01 21:47:23 -07002423 rac != NULL, false);
Chao Yu8bfbfb02021-05-10 17:30:32 +08002424 f2fs_destroy_compress_ctx(&cc, false);
Chao Yu4c8ff702019-11-01 18:07:14 +08002425 }
2426 }
2427#endif
Jaegeuk Kimf1e88662015-04-09 11:20:42 -07002428 }
Jaegeuk Kimf1e88662015-04-09 11:20:42 -07002429 if (bio)
Linus Torvalds4fc29c12016-07-27 10:36:31 -07002430 __submit_bio(F2FS_I_SB(inode), bio, DATA);
Matthew Wilcox (Oracle)23323192020-06-01 21:47:23 -07002431 return ret;
Jaegeuk Kimf1e88662015-04-09 11:20:42 -07002432}
2433
Matthew Wilcox (Oracle)be055842022-04-29 11:12:16 -04002434static int f2fs_read_data_folio(struct file *file, struct folio *folio)
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09002435{
Matthew Wilcox (Oracle)be055842022-04-29 11:12:16 -04002436 struct page *page = &folio->page;
Jaegeuk Kim4969c062019-07-01 19:15:29 -07002437 struct inode *inode = page_file_mapping(page)->host;
Jaegeuk Kimb3d208f2014-10-23 19:48:09 -07002438 int ret = -EAGAIN;
Huajun Li9ffe0fb2013-11-10 23:13:20 +08002439
Chao Yuc20e89cd2014-05-06 16:53:08 +08002440 trace_f2fs_readpage(page, DATA);
2441
Chao Yu4c8ff702019-11-01 18:07:14 +08002442 if (!f2fs_is_compress_backend_ready(inode)) {
2443 unlock_page(page);
2444 return -EOPNOTSUPP;
2445 }
2446
arter97e1c42042014-08-06 23:22:50 +09002447 /* If the file has inline data, try to read it directly */
Huajun Li9ffe0fb2013-11-10 23:13:20 +08002448 if (f2fs_has_inline_data(inode))
2449 ret = f2fs_read_inline_data(inode, page);
Jaegeuk Kimb3d208f2014-10-23 19:48:09 -07002450 if (ret == -EAGAIN)
Matthew Wilcox (Oracle)e20a7692020-06-01 21:47:27 -07002451 ret = f2fs_mpage_readpages(inode, NULL, page);
Huajun Li9ffe0fb2013-11-10 23:13:20 +08002452 return ret;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09002453}
2454
Matthew Wilcox (Oracle)23323192020-06-01 21:47:23 -07002455static void f2fs_readahead(struct readahead_control *rac)
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09002456{
Matthew Wilcox (Oracle)23323192020-06-01 21:47:23 -07002457 struct inode *inode = rac->mapping->host;
Chao Yub8c29402015-10-12 17:02:26 +08002458
Matthew Wilcox (Oracle)23323192020-06-01 21:47:23 -07002459 trace_f2fs_readpages(inode, readahead_index(rac), readahead_count(rac));
Huajun Li9ffe0fb2013-11-10 23:13:20 +08002460
Chao Yu4c8ff702019-11-01 18:07:14 +08002461 if (!f2fs_is_compress_backend_ready(inode))
Matthew Wilcox (Oracle)23323192020-06-01 21:47:23 -07002462 return;
Chao Yu4c8ff702019-11-01 18:07:14 +08002463
Matthew Wilcox (Oracle)704528d2022-03-23 21:29:04 -04002464 /* If the file has inline data, skip readahead */
Huajun Li9ffe0fb2013-11-10 23:13:20 +08002465 if (f2fs_has_inline_data(inode))
Matthew Wilcox (Oracle)23323192020-06-01 21:47:23 -07002466 return;
Huajun Li9ffe0fb2013-11-10 23:13:20 +08002467
Matthew Wilcox (Oracle)e20a7692020-06-01 21:47:27 -07002468 f2fs_mpage_readpages(inode, rac, NULL);
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09002469}
2470
Chao Yu4c8ff702019-11-01 18:07:14 +08002471int f2fs_encrypt_one_page(struct f2fs_io_info *fio)
Hou Pengyang7eab0c02017-04-25 12:45:12 +00002472{
2473 struct inode *inode = fio->page->mapping->host;
Chao Yu4c8ff702019-11-01 18:07:14 +08002474 struct page *mpage, *page;
Hou Pengyang7eab0c02017-04-25 12:45:12 +00002475 gfp_t gfp_flags = GFP_NOFS;
2476
Jaegeuk Kim19585932017-09-05 16:54:24 -07002477 if (!f2fs_encrypted_file(inode))
Hou Pengyang7eab0c02017-04-25 12:45:12 +00002478 return 0;
2479
Chao Yu4c8ff702019-11-01 18:07:14 +08002480 page = fio->compressed_page ? fio->compressed_page : fio->page;
2481
Eric Biggers6dbb1792018-04-18 11:09:48 -07002482 /* wait for GCed page writeback via META_MAPPING */
Jaegeuk Kim0ded69f2018-08-22 21:18:00 -07002483 f2fs_wait_on_block_writeback(inode, fio->old_blkaddr);
Hou Pengyang7eab0c02017-04-25 12:45:12 +00002484
Satya Tangirala27aacd22020-07-02 01:56:06 +00002485 if (fscrypt_inode_uses_inline_crypto(inode))
2486 return 0;
2487
Hou Pengyang7eab0c02017-04-25 12:45:12 +00002488retry_encrypt:
Chao Yu4c8ff702019-11-01 18:07:14 +08002489 fio->encrypted_page = fscrypt_encrypt_pagecache_blocks(page,
2490 PAGE_SIZE, 0, gfp_flags);
Chao Yu6aa58d82018-08-14 22:37:25 +08002491 if (IS_ERR(fio->encrypted_page)) {
2492 /* flush pending IOs and wait for a while in the ENOMEM case */
2493 if (PTR_ERR(fio->encrypted_page) == -ENOMEM) {
2494 f2fs_flush_merged_writes(fio->sbi);
NeilBrown40342472022-01-14 14:07:14 -08002495 memalloc_retry_wait(GFP_NOFS);
Chao Yu6aa58d82018-08-14 22:37:25 +08002496 gfp_flags |= __GFP_NOFAIL;
2497 goto retry_encrypt;
2498 }
2499 return PTR_ERR(fio->encrypted_page);
Hou Pengyang7eab0c02017-04-25 12:45:12 +00002500 }
Chao Yu6aa58d82018-08-14 22:37:25 +08002501
2502 mpage = find_lock_page(META_MAPPING(fio->sbi), fio->old_blkaddr);
2503 if (mpage) {
2504 if (PageUptodate(mpage))
2505 memcpy(page_address(mpage),
2506 page_address(fio->encrypted_page), PAGE_SIZE);
2507 f2fs_put_page(mpage, 1);
2508 }
2509 return 0;
Hou Pengyang7eab0c02017-04-25 12:45:12 +00002510}
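
/*
 * Editor's note (explanatory, not original text): the find_lock_page()
 * tail above keeps META_MAPPING coherent with GC. If garbage collection
 * has the old encrypted block cached at fio->old_blkaddr, the freshly
 * encrypted bytes are copied over it so a later GC move does not write
 * back stale ciphertext.
 */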
2511
Chao Yubb9e3bb8d2018-01-17 16:31:38 +08002512static inline bool check_inplace_update_policy(struct inode *inode,
2513 struct f2fs_io_info *fio)
2514{
2515 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2516 unsigned int policy = SM_I(sbi)->ipu_policy;
2517
Chao Yu1018a542022-02-04 15:19:46 +08002518 if (policy & (0x1 << F2FS_IPU_HONOR_OPU_WRITE) &&
2519 is_inode_flag_set(inode, FI_OPU_WRITE))
2520 return false;
Chao Yubb9e3bb8d2018-01-17 16:31:38 +08002521 if (policy & (0x1 << F2FS_IPU_FORCE))
2522 return true;
Chao Yu4d57b862018-05-30 00:20:41 +08002523 if (policy & (0x1 << F2FS_IPU_SSR) && f2fs_need_SSR(sbi))
Chao Yubb9e3bb8d2018-01-17 16:31:38 +08002524 return true;
2525 if (policy & (0x1 << F2FS_IPU_UTIL) &&
2526 utilization(sbi) > SM_I(sbi)->min_ipu_util)
2527 return true;
Chao Yu4d57b862018-05-30 00:20:41 +08002528 if (policy & (0x1 << F2FS_IPU_SSR_UTIL) && f2fs_need_SSR(sbi) &&
Chao Yubb9e3bb8d2018-01-17 16:31:38 +08002529 utilization(sbi) > SM_I(sbi)->min_ipu_util)
2530 return true;
2531
2532 /*
2533	 * use IPU when rewriting pages under async (non-sync) writeback
2534 */
2535 if (policy & (0x1 << F2FS_IPU_ASYNC) &&
2536 fio && fio->op == REQ_OP_WRITE &&
2537 !(fio->op_flags & REQ_SYNC) &&
Chandan Rajendra62230e0d2018-12-12 15:20:11 +05302538 !IS_ENCRYPTED(inode))
Chao Yubb9e3bb8d2018-01-17 16:31:38 +08002539 return true;
2540
2541 /* this is only set during fdatasync */
2542 if (policy & (0x1 << F2FS_IPU_FSYNC) &&
2543 is_inode_flag_set(inode, FI_NEED_IPU))
2544 return true;
2545
Daniel Rosenberg43549942018-08-20 19:21:43 -07002546 if (unlikely(fio && is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
2547 !f2fs_is_checkpointed_data(sbi, fio->old_blkaddr)))
2548 return true;
2549
Chao Yubb9e3bb8d2018-01-17 16:31:38 +08002550 return false;
2551}
2552
Chao Yu4d57b862018-05-30 00:20:41 +08002553bool f2fs_should_update_inplace(struct inode *inode, struct f2fs_io_info *fio)
Chao Yubb9e3bb8d2018-01-17 16:31:38 +08002554{
Chao Yu859fca62021-05-26 14:29:27 +08002555 /* swap file is migrating in aligned write mode */
2556 if (is_inode_flag_set(inode, FI_ALIGNED_WRITE))
2557 return false;
2558
Chao Yubb9e3bb8d2018-01-17 16:31:38 +08002559 if (f2fs_is_pinned_file(inode))
2560 return true;
2561
2562 /* if this is cold file, we should overwrite to avoid fragmentation */
Weichao Guof3b23c72022-09-07 10:38:48 +08002563 if (file_is_cold(inode) && !is_inode_flag_set(inode, FI_OPU_WRITE))
Chao Yubb9e3bb8d2018-01-17 16:31:38 +08002564 return true;
2565
2566 return check_inplace_update_policy(inode, fio);
2567}
2568
Chao Yu4d57b862018-05-30 00:20:41 +08002569bool f2fs_should_update_outplace(struct inode *inode, struct f2fs_io_info *fio)
Chao Yubb9e3bb8d2018-01-17 16:31:38 +08002570{
2571 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2572
Jaegeuk Kim19bdba52021-12-09 10:25:43 -08002573	/* The cases below were already checked when the pin flag was set. */
2574 if (f2fs_is_pinned_file(inode))
2575 return false;
2576 if (fio && is_sbi_flag_set(sbi, SBI_NEED_FSCK))
2577 return true;
Chao Yub0332a02020-02-14 17:44:12 +08002578 if (f2fs_lfs_mode(sbi))
Chao Yubb9e3bb8d2018-01-17 16:31:38 +08002579 return true;
2580 if (S_ISDIR(inode->i_mode))
2581 return true;
Chao Yuaf033b22018-09-20 20:05:00 +08002582 if (IS_NOQUOTA(inode))
2583 return true;
Chao Yubb9e3bb8d2018-01-17 16:31:38 +08002584 if (f2fs_is_atomic_file(inode))
2585 return true;
Chao Yu859fca62021-05-26 14:29:27 +08002586
2587 /* swap file is migrating in aligned write mode */
2588 if (is_inode_flag_set(inode, FI_ALIGNED_WRITE))
2589 return true;
2590
Chao Yu1018a542022-02-04 15:19:46 +08002591 if (is_inode_flag_set(inode, FI_OPU_WRITE))
2592 return true;
2593
Chao Yubb9e3bb8d2018-01-17 16:31:38 +08002594 if (fio) {
Chao Yub763f3b2021-04-28 17:20:31 +08002595 if (page_private_gcing(fio->page))
Chao Yubb9e3bb8d2018-01-17 16:31:38 +08002596 return true;
Chao Yub763f3b2021-04-28 17:20:31 +08002597 if (page_private_dummy(fio->page))
Chao Yubb9e3bb8d2018-01-17 16:31:38 +08002598 return true;
Daniel Rosenberg43549942018-08-20 19:21:43 -07002599 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
2600 f2fs_is_checkpointed_data(sbi, fio->old_blkaddr)))
2601 return true;
Chao Yubb9e3bb8d2018-01-17 16:31:38 +08002602 }
2603 return false;
2604}
2605
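/*
 * Out-of-place conditions take precedence; only when none of them applies
 * do we consult the in-place update policy.
 */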
Hou Pengyang7eab0c02017-04-25 12:45:12 +00002606static inline bool need_inplace_update(struct f2fs_io_info *fio)
2607{
2608 struct inode *inode = fio->page->mapping->host;
2609
Chao Yu4d57b862018-05-30 00:20:41 +08002610 if (f2fs_should_update_outplace(inode, fio))
Hou Pengyang7eab0c02017-04-25 12:45:12 +00002611 return false;
2612
Chao Yu4d57b862018-05-30 00:20:41 +08002613 return f2fs_should_update_inplace(inode, fio);
Hou Pengyang7eab0c02017-04-25 12:45:12 +00002614}
2615
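/*
 * Write one data page either in place (IPU) or out of place (OPU): resolve
 * the old block address via the extent cache or a dnode lookup, re-encrypt
 * the data if required, then either rewrite the same block or allocate a
 * new one and update the dnode.
 */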
Chao Yu4d57b862018-05-30 00:20:41 +08002616int f2fs_do_write_data_page(struct f2fs_io_info *fio)
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09002617{
Jaegeuk Kim05ca3632015-04-23 14:38:15 -07002618 struct page *page = fio->page;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09002619 struct inode *inode = page->mapping->host;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09002620 struct dnode_of_data dn;
Chao Yu94afd6d2021-08-04 10:23:48 +08002621 struct extent_info ei = {0, };
Chao Yu77357302018-07-17 00:02:17 +08002622 struct node_info ni;
Hou Pengyange959c8f2017-04-25 12:45:13 +00002623 bool ipu_force = false;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09002624 int err = 0;
2625
Daeho Jeong3db1de02022-04-28 11:18:09 -07002626 /* Use COW inode to make dnode_of_data for atomic write */
2627 if (f2fs_is_atomic_file(inode))
2628 set_new_dnode(&dn, F2FS_I(inode)->cow_inode, NULL, NULL, 0);
2629 else
2630 set_new_dnode(&dn, inode, NULL, NULL, 0);
2631
Hou Pengyange959c8f2017-04-25 12:45:13 +00002632 if (need_inplace_update(fio) &&
2633 f2fs_lookup_extent_cache(inode, page->index, &ei)) {
2634 fio->old_blkaddr = ei.blk + page->index - ei.fofs;
Jaegeuk Kima8177372017-04-24 15:20:16 -07002635
Chao Yuc9b60782018-08-01 19:13:44 +08002636 if (!f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr,
Chao Yu95fa90c2022-09-28 23:38:54 +08002637 DATA_GENERIC_ENHANCE)) {
2638 f2fs_handle_error(fio->sbi,
2639 ERROR_INVALID_BLKADDR);
Chao Yu10f966b2019-06-20 11:36:14 +08002640 return -EFSCORRUPTED;
Chao Yu95fa90c2022-09-28 23:38:54 +08002641 }
Chao Yuc9b60782018-08-01 19:13:44 +08002642
2643 ipu_force = true;
2644 fio->need_lock = LOCK_DONE;
2645 goto got_it;
Hou Pengyange959c8f2017-04-25 12:45:13 +00002646 }
Hou Pengyang279d6df2017-04-27 00:17:21 +08002647
Jaegeuk Kimd29460e2017-06-21 17:52:39 -07002648	/* avoid deadlock between page->lock and f2fs_lock_op */
2649 if (fio->need_lock == LOCK_REQ && !f2fs_trylock_op(fio->sbi))
2650 return -EAGAIN;
Hou Pengyang279d6df2017-04-27 00:17:21 +08002651
Chao Yu4d57b862018-05-30 00:20:41 +08002652 err = f2fs_get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09002653 if (err)
Hou Pengyang279d6df2017-04-27 00:17:21 +08002654 goto out;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09002655
Chao Yu28bc1062016-02-06 14:40:34 +08002656 fio->old_blkaddr = dn.data_blkaddr;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09002657
2658 /* This page is already truncated */
Chao Yu7a9d7542016-02-22 18:36:38 +08002659 if (fio->old_blkaddr == NULL_ADDR) {
Jaegeuk Kim2bca1e22015-02-25 19:25:01 -08002660 ClearPageUptodate(page);
Chao Yub763f3b2021-04-28 17:20:31 +08002661 clear_page_private_gcing(page);
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09002662 goto out_writepage;
Jaegeuk Kim2bca1e22015-02-25 19:25:01 -08002663 }
Hou Pengyange959c8f2017-04-25 12:45:13 +00002664got_it:
Chao Yuc9b60782018-08-01 19:13:44 +08002665 if (__is_valid_data_blkaddr(fio->old_blkaddr) &&
2666 !f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr,
Chao Yu93770ab2019-04-15 15:26:32 +08002667 DATA_GENERIC_ENHANCE)) {
Chao Yu10f966b2019-06-20 11:36:14 +08002668 err = -EFSCORRUPTED;
Chao Yu95fa90c2022-09-28 23:38:54 +08002669 f2fs_handle_error(fio->sbi, ERROR_INVALID_BLKADDR);
Chao Yuc9b60782018-08-01 19:13:44 +08002670 goto out_writepage;
2671 }
Daeho Jeong3db1de02022-04-28 11:18:09 -07002672
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09002673 /*
2674	 * If the current allocation needs SSR, it is better to
2675	 * write updated data in place.
2676 */
Chao Yu93770ab2019-04-15 15:26:32 +08002677 if (ipu_force ||
2678 (__is_valid_data_blkaddr(fio->old_blkaddr) &&
Chao Yu7b525dd2018-05-23 22:25:08 +08002679 need_inplace_update(fio))) {
Chao Yu4c8ff702019-11-01 18:07:14 +08002680 err = f2fs_encrypt_one_page(fio);
Jaegeuk Kimcc156202017-05-12 13:51:34 -07002681 if (err)
2682 goto out_writepage;
2683
2684 set_page_writeback(page);
Jaegeuk Kim17c50032018-04-11 23:09:04 -07002685 ClearPageError(page);
Hou Pengyang279d6df2017-04-27 00:17:21 +08002686 f2fs_put_dnode(&dn);
Jaegeuk Kimcc156202017-05-12 13:51:34 -07002687 if (fio->need_lock == LOCK_REQ)
Hou Pengyang279d6df2017-04-27 00:17:21 +08002688 f2fs_unlock_op(fio->sbi);
Chao Yu4d57b862018-05-30 00:20:41 +08002689 err = f2fs_inplace_write_data(fio);
Chao Yu6492a332019-02-21 20:37:14 +08002690 if (err) {
Satya Tangirala27aacd22020-07-02 01:56:06 +00002691 if (fscrypt_inode_uses_fs_layer_crypto(inode))
Eric Biggersd2d07272019-05-20 09:29:39 -07002692 fscrypt_finalize_bounce_page(&fio->encrypted_page);
Chao Yu6492a332019-02-21 20:37:14 +08002693 if (PageWriteback(page))
2694 end_page_writeback(page);
Chao Yucd23ffa92019-04-15 15:30:53 +08002695 } else {
2696 set_inode_flag(inode, FI_UPDATE_WRITE);
Chao Yu6492a332019-02-21 20:37:14 +08002697 }
Hou Pengyang7eab0c02017-04-25 12:45:12 +00002698 trace_f2fs_do_write_data_page(fio->page, IPU);
Hou Pengyang279d6df2017-04-27 00:17:21 +08002699 return err;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09002700 }
Hou Pengyang279d6df2017-04-27 00:17:21 +08002701
Jaegeuk Kimcc156202017-05-12 13:51:34 -07002702 if (fio->need_lock == LOCK_RETRY) {
2703 if (!f2fs_trylock_op(fio->sbi)) {
2704 err = -EAGAIN;
2705 goto out_writepage;
2706 }
2707 fio->need_lock = LOCK_REQ;
2708 }
2709
Jaegeuk Kima9419b62021-12-13 14:16:32 -08002710 err = f2fs_get_node_info(fio->sbi, dn.nid, &ni, false);
Chao Yu77357302018-07-17 00:02:17 +08002711 if (err)
2712 goto out_writepage;
2713
2714 fio->version = ni.version;
2715
Chao Yu4c8ff702019-11-01 18:07:14 +08002716 err = f2fs_encrypt_one_page(fio);
Jaegeuk Kimcc156202017-05-12 13:51:34 -07002717 if (err)
2718 goto out_writepage;
2719
2720 set_page_writeback(page);
Jaegeuk Kim17c50032018-04-11 23:09:04 -07002721 ClearPageError(page);
Jaegeuk Kimcc156202017-05-12 13:51:34 -07002722
Chao Yu4c8ff702019-11-01 18:07:14 +08002723 if (fio->compr_blocks && fio->old_blkaddr == COMPRESS_ADDR)
2724 f2fs_i_compr_blocks_update(inode, fio->compr_blocks - 1, false);
2725
Hou Pengyang279d6df2017-04-27 00:17:21 +08002726 /* LFS mode write path */
Chao Yu4d57b862018-05-30 00:20:41 +08002727 f2fs_outplace_write_data(&dn, fio);
Hou Pengyang279d6df2017-04-27 00:17:21 +08002728 trace_f2fs_do_write_data_page(page, OPU);
2729 set_inode_flag(inode, FI_APPEND_WRITE);
2730 if (page->index == 0)
2731 set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09002732out_writepage:
2733 f2fs_put_dnode(&dn);
Hou Pengyang279d6df2017-04-27 00:17:21 +08002734out:
Jaegeuk Kimcc156202017-05-12 13:51:34 -07002735 if (fio->need_lock == LOCK_REQ)
Hou Pengyang279d6df2017-04-27 00:17:21 +08002736 f2fs_unlock_op(fio->sbi);
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09002737 return err;
2738}
2739
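/*
 * Write back a single page. Returns 0 on success, AOP_WRITEPAGE_ACTIVATE
 * when the page should stay dirty, or a negative errno; *submitted reports
 * whether a bio was actually issued for the page.
 */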
Chao Yu4c8ff702019-11-01 18:07:14 +08002740int f2fs_write_single_data_page(struct page *page, int *submitted,
Chao Yu8648de22019-02-19 16:15:29 +08002741 struct bio **bio,
2742 sector_t *last_block,
Chao Yub0af6d42017-08-02 23:21:48 +08002743 struct writeback_control *wbc,
Chao Yu4c8ff702019-11-01 18:07:14 +08002744 enum iostat_type io_type,
Chao Yu3afae092021-01-11 17:42:53 +08002745 int compr_blocks,
2746 bool allow_balance)
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09002747{
2748 struct inode *inode = page->mapping->host;
Jaegeuk Kim40813632014-09-02 15:31:18 -07002749 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09002750 loff_t i_size = i_size_read(inode);
Chao Yu4c8ff702019-11-01 18:07:14 +08002751 const pgoff_t end_index = ((unsigned long long)i_size)
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002752 >> PAGE_SHIFT;
Chao Yu1f0d5c92019-11-07 17:29:00 +08002753 loff_t psize = (loff_t)(page->index + 1) << PAGE_SHIFT;
Huajun Li9ffe0fb2013-11-10 23:13:20 +08002754 unsigned offset = 0;
Jaegeuk Kim39936832012-11-22 16:21:29 +09002755 bool need_balance_fs = false;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09002756 int err = 0;
Jaegeuk Kim458e6192013-12-11 13:54:01 +09002757 struct f2fs_io_info fio = {
Jaegeuk Kim05ca3632015-04-23 14:38:15 -07002758 .sbi = sbi,
Chao Yu39d787b2017-09-29 13:59:38 +08002759 .ino = inode->i_ino,
Jaegeuk Kim458e6192013-12-11 13:54:01 +09002760 .type = DATA,
Mike Christie04d328d2016-06-05 14:31:55 -05002761 .op = REQ_OP_WRITE,
Jens Axboe76372412016-11-01 10:00:38 -06002762 .op_flags = wbc_to_write_flags(wbc),
Hou Pengyange959c8f2017-04-25 12:45:13 +00002763 .old_blkaddr = NULL_ADDR,
Jaegeuk Kim05ca3632015-04-23 14:38:15 -07002764 .page = page,
Jaegeuk Kim4375a332015-04-23 12:04:33 -07002765 .encrypted_page = NULL,
Jaegeuk Kimd68f7352017-02-03 17:44:04 -08002766 .submitted = false,
Chao Yu4c8ff702019-11-01 18:07:14 +08002767 .compr_blocks = compr_blocks,
Jaegeuk Kimcc156202017-05-12 13:51:34 -07002768 .need_lock = LOCK_RETRY,
Chao Yu0d5b9d82022-07-12 23:26:43 +08002769 .post_read = f2fs_post_read_required(inode),
Chao Yub0af6d42017-08-02 23:21:48 +08002770 .io_type = io_type,
Yufen Yu578c6472018-01-09 19:33:39 +08002771 .io_wbc = wbc,
Chao Yu8648de22019-02-19 16:15:29 +08002772 .bio = bio,
2773 .last_block = last_block,
Jaegeuk Kim458e6192013-12-11 13:54:01 +09002774 };
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09002775
Chao Yuecda0de2014-05-06 16:48:26 +08002776 trace_f2fs_writepage(page, DATA);
2777
Chao Yudb198ae2018-01-18 17:29:10 +08002778	/* bypass data pages so that kworker jobs can proceed */
2779 if (unlikely(f2fs_cp_error(sbi))) {
2780 mapping_set_error(page->mapping, -EIO);
Chao Yu1174abf2018-05-28 16:59:26 +08002781 /*
2782	 * don't drop any dirty dentry pages, so that the latest
2783	 * directory structure is kept.
2784 */
2785 if (S_ISDIR(inode->i_mode))
2786 goto redirty_out;
Chao Yudb198ae2018-01-18 17:29:10 +08002787 goto out;
2788 }
2789
Chao Yu0771fcc2017-06-29 23:20:45 +08002790 if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
2791 goto redirty_out;
2792
Chao Yu4c8ff702019-11-01 18:07:14 +08002793 if (page->index < end_index ||
2794 f2fs_verity_in_progress(inode) ||
2795 compr_blocks)
Jaegeuk Kim39936832012-11-22 16:21:29 +09002796 goto write;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09002797
2798 /*
2799	 * If the page lies wholly beyond the file size, it does
2800	 * not have to be written to disk.
2801 */
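	/*
	 * Example: with i_size == 5000 and PAGE_SIZE == 4096, end_index is 1;
	 * the page at index 1 keeps bytes 0..903 and is zeroed from offset
	 * 904, while pages at index 2 and beyond are skipped entirely.
	 */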
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002802 offset = i_size & (PAGE_SIZE - 1);
Jaegeuk Kim76f60262014-04-15 16:04:15 +09002803 if ((page->index >= end_index + 1) || !offset)
Jaegeuk Kim39936832012-11-22 16:21:29 +09002804 goto out;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09002805
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002806 zero_user_segment(page, offset, PAGE_SIZE);
Jaegeuk Kim39936832012-11-22 16:21:29 +09002807write:
Jaegeuk Kim1e843712014-12-09 06:08:59 -08002808 if (f2fs_is_drop_cache(inode))
2809 goto out;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09002810
Jaegeuk Kim435cbab2020-04-09 10:25:21 -07002811 /* Dentry/quota blocks are controlled by checkpoint */
2812 if (S_ISDIR(inode->i_mode) || IS_NOQUOTA(inode)) {
Chao Yu79963d92020-06-18 14:36:23 +08002813 /*
2814 * We need to wait for node_write to avoid block allocation during
2815	 * checkpoint. This can only happen for quota writes, which could
2816	 * otherwise trigger the discard race condition.
2817 */
2818 if (IS_NOQUOTA(inode))
Tim Murraye4544b62022-01-07 12:48:44 -08002819 f2fs_down_read(&sbi->node_write);
Chao Yu79963d92020-06-18 14:36:23 +08002820
Jaegeuk Kimcc156202017-05-12 13:51:34 -07002821 fio.need_lock = LOCK_DONE;
Chao Yu4d57b862018-05-30 00:20:41 +08002822 err = f2fs_do_write_data_page(&fio);
Chao Yu79963d92020-06-18 14:36:23 +08002823
2824 if (IS_NOQUOTA(inode))
Tim Murraye4544b62022-01-07 12:48:44 -08002825 f2fs_up_read(&sbi->node_write);
Chao Yu79963d92020-06-18 14:36:23 +08002826
Jaegeuk Kimb230e6c2016-05-29 21:18:23 -07002827 goto done;
2828 }
2829
Jaegeuk Kim8618b882014-02-17 19:29:27 +09002830 if (!wbc->for_reclaim)
2831 need_balance_fs = true;
Jaegeuk Kim7f3037a2016-09-01 12:02:51 -07002832 else if (has_not_enough_free_secs(sbi, 0, 0))
Jaegeuk Kim39936832012-11-22 16:21:29 +09002833 goto redirty_out;
Jaegeuk Kimef095d12017-03-24 20:05:13 -04002834 else
2835 set_inode_flag(inode, FI_HOT_DATA);
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09002836
Jaegeuk Kimb3d208f2014-10-23 19:48:09 -07002837 err = -EAGAIN;
Yunlei Hedd7b2332017-02-23 20:31:20 +08002838 if (f2fs_has_inline_data(inode)) {
Jaegeuk Kimb3d208f2014-10-23 19:48:09 -07002839 err = f2fs_write_inline_data(inode, page);
Yunlei Hedd7b2332017-02-23 20:31:20 +08002840 if (!err)
2841 goto out;
2842 }
Hou Pengyang279d6df2017-04-27 00:17:21 +08002843
Jaegeuk Kimcc156202017-05-12 13:51:34 -07002844 if (err == -EAGAIN) {
Chao Yu4d57b862018-05-30 00:20:41 +08002845 err = f2fs_do_write_data_page(&fio);
Jaegeuk Kimcc156202017-05-12 13:51:34 -07002846 if (err == -EAGAIN) {
2847 fio.need_lock = LOCK_REQ;
Chao Yu4d57b862018-05-30 00:20:41 +08002848 err = f2fs_do_write_data_page(&fio);
Jaegeuk Kimcc156202017-05-12 13:51:34 -07002849 }
2850 }
Chao Yua0d00fa2017-10-09 17:55:19 +08002851
Chao Yueb449792018-01-17 16:31:37 +08002852 if (err) {
2853 file_set_keep_isize(inode);
2854 } else {
Chao Yuc10c9822020-02-27 19:30:03 +08002855 spin_lock(&F2FS_I(inode)->i_size_lock);
Chao Yueb449792018-01-17 16:31:37 +08002856 if (F2FS_I(inode)->last_disk_size < psize)
2857 F2FS_I(inode)->last_disk_size = psize;
Chao Yuc10c9822020-02-27 19:30:03 +08002858 spin_unlock(&F2FS_I(inode)->i_size_lock);
Chao Yueb449792018-01-17 16:31:37 +08002859 }
Hou Pengyang279d6df2017-04-27 00:17:21 +08002860
Jaegeuk Kim8618b882014-02-17 19:29:27 +09002861done:
2862 if (err && err != -ENOENT)
2863 goto redirty_out;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09002864
Jaegeuk Kim39936832012-11-22 16:21:29 +09002865out:
Jaegeuk Kima7ffdbe2014-09-12 15:53:45 -07002866 inode_dec_dirty_pages(inode);
Chao Yu2baf0782018-07-27 18:15:16 +08002867 if (err) {
Jaegeuk Kim2bca1e22015-02-25 19:25:01 -08002868 ClearPageUptodate(page);
Chao Yub763f3b2021-04-28 17:20:31 +08002869 clear_page_private_gcing(page);
Chao Yu2baf0782018-07-27 18:15:16 +08002870 }
Chao Yu0c3a5792016-01-18 18:28:11 +08002871
2872 if (wbc->for_reclaim) {
Chao Yubab475c2018-09-27 23:41:16 +08002873 f2fs_submit_merged_write_cond(sbi, NULL, page, 0, DATA);
Jaegeuk Kimef095d12017-03-24 20:05:13 -04002874 clear_inode_flag(inode, FI_HOT_DATA);
Chao Yu4d57b862018-05-30 00:20:41 +08002875 f2fs_remove_dirty_inode(inode);
Jaegeuk Kimd68f7352017-02-03 17:44:04 -08002876 submitted = NULL;
Chao Yueb7e8132015-11-10 18:45:07 +08002877 }
Chao Yu0c3a5792016-01-18 18:28:11 +08002878 unlock_page(page);
Chao Yu186857c2019-04-02 18:52:19 +08002879 if (!S_ISDIR(inode->i_mode) && !IS_NOQUOTA(inode) &&
Chao Yud80afef2022-09-14 21:28:46 +08002880 !F2FS_I(inode)->wb_task && allow_balance)
Jaegeuk Kima7881892017-04-20 13:51:57 -07002881 f2fs_balance_fs(sbi, need_balance_fs);
Chao Yu0c3a5792016-01-18 18:28:11 +08002882
Jaegeuk Kimd68f7352017-02-03 17:44:04 -08002883 if (unlikely(f2fs_cp_error(sbi))) {
Jaegeuk Kimb9109b02017-05-10 11:28:38 -07002884 f2fs_submit_merged_write(sbi, DATA);
Chao Yu0b20fce2019-09-30 18:53:25 +08002885 f2fs_submit_merged_ipu_write(sbi, bio, NULL);
Jaegeuk Kimd68f7352017-02-03 17:44:04 -08002886 submitted = NULL;
2887 }
2888
2889 if (submitted)
Chao Yu4c8ff702019-11-01 18:07:14 +08002890 *submitted = fio.submitted ? 1 : 0;
Chao Yu0c3a5792016-01-18 18:28:11 +08002891
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09002892 return 0;
2893
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09002894redirty_out:
Jaegeuk Kim76f60262014-04-15 16:04:15 +09002895 redirty_page_for_writepage(wbc, page);
Jaegeuk Kim5b19d282018-05-03 23:26:02 -07002896 /*
2897	 * pageout() in MM translates EAGAIN and then calls handle_write_error()
2898	 * -> mapping_set_error() -> set_bit(AS_EIO, ...).
2899	 * file_write_and_wait_range() will then see the EIO error, which is
2900	 * critical for reporting atomic_write failure via fsync()'s return value.
2901 */
2902 if (!err || wbc->for_reclaim)
Chao Yu0002b612016-11-28 19:13:43 -08002903 return AOP_WRITEPAGE_ACTIVATE;
Jaegeuk Kimb230e6c2016-05-29 21:18:23 -07002904 unlock_page(page);
2905 return err;
Namjae Jeonfa9150a2013-01-15 16:45:24 +09002906}
2907
Jaegeuk Kimf566bae2017-02-03 17:18:00 -08002908static int f2fs_write_data_page(struct page *page,
2909 struct writeback_control *wbc)
2910{
Chao Yu4c8ff702019-11-01 18:07:14 +08002911#ifdef CONFIG_F2FS_FS_COMPRESSION
2912 struct inode *inode = page->mapping->host;
2913
2914 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
2915 goto out;
2916
2917 if (f2fs_compressed_file(inode)) {
2918 if (f2fs_is_compressed_cluster(inode, page->index)) {
2919 redirty_page_for_writepage(wbc, page);
2920 return AOP_WRITEPAGE_ACTIVATE;
2921 }
2922 }
2923out:
2924#endif
2925
2926 return f2fs_write_single_data_page(page, NULL, NULL, NULL,
Chao Yu3afae092021-01-11 17:42:53 +08002927 wbc, FS_DATA_IO, 0, true);
Jaegeuk Kimf566bae2017-02-03 17:18:00 -08002928}
2929
Chao Yu8f46dca2015-07-14 18:56:10 +08002930/*
2931 * This function was copied from write_cche_pages from mm/page-writeback.c.
2932 * The major change is making write step of cold data page separately from
2933 * warm/hot data page.
2934 */
2935static int f2fs_write_cache_pages(struct address_space *mapping,
Chao Yub0af6d42017-08-02 23:21:48 +08002936 struct writeback_control *wbc,
2937 enum iostat_type io_type)
Chao Yu8f46dca2015-07-14 18:56:10 +08002938{
2939 int ret = 0;
Chao Yu4c8ff702019-11-01 18:07:14 +08002940 int done = 0, retry = 0;
Fengnan Chang01fc4b92022-07-31 11:33:46 +08002941 struct page *pages[F2FS_ONSTACK_PAGES];
Chao Yuc29fd0c2018-06-04 23:20:36 +08002942 struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
Chao Yu8648de22019-02-19 16:15:29 +08002943 struct bio *bio = NULL;
2944 sector_t last_block;
Chao Yu4c8ff702019-11-01 18:07:14 +08002945#ifdef CONFIG_F2FS_FS_COMPRESSION
2946 struct inode *inode = mapping->host;
2947 struct compress_ctx cc = {
2948 .inode = inode,
2949 .log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
2950 .cluster_size = F2FS_I(inode)->i_cluster_size,
2951 .cluster_idx = NULL_CLUSTER,
2952 .rpages = NULL,
2953 .nr_rpages = 0,
2954 .cpages = NULL,
Fengnan Chang3271d7e2021-11-10 10:37:13 +08002955 .valid_nr_cpages = 0,
Chao Yu4c8ff702019-11-01 18:07:14 +08002956 .rbuf = NULL,
2957 .cbuf = NULL,
2958 .rlen = PAGE_SIZE * F2FS_I(inode)->i_cluster_size,
2959 .private = NULL,
2960 };
2961#endif
Chao Yu8f46dca2015-07-14 18:56:10 +08002962 int nr_pages;
Chao Yu8f46dca2015-07-14 18:56:10 +08002963 pgoff_t index;
2964 pgoff_t end; /* Inclusive */
2965 pgoff_t done_index;
Chao Yu8f46dca2015-07-14 18:56:10 +08002966 int range_whole = 0;
Matthew Wilcox10bbd232017-12-05 17:30:38 -05002967 xa_mark_t tag;
Chao Yubab475c2018-09-27 23:41:16 +08002968 int nwritten = 0;
Chao Yu4c8ff702019-11-01 18:07:14 +08002969 int submitted = 0;
2970 int i;
Chao Yu8f46dca2015-07-14 18:56:10 +08002971
Jaegeuk Kimef095d12017-03-24 20:05:13 -04002972 if (get_dirty_pages(mapping->host) <=
2973 SM_I(F2FS_M_SB(mapping))->min_hot_blocks)
2974 set_inode_flag(mapping->host, FI_HOT_DATA);
2975 else
2976 clear_inode_flag(mapping->host, FI_HOT_DATA);
2977
Chao Yu8f46dca2015-07-14 18:56:10 +08002978 if (wbc->range_cyclic) {
Jason Yan4df7a75f2020-06-15 16:51:32 +08002979 index = mapping->writeback_index; /* prev offset */
Chao Yu8f46dca2015-07-14 18:56:10 +08002980 end = -1;
2981 } else {
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002982 index = wbc->range_start >> PAGE_SHIFT;
2983 end = wbc->range_end >> PAGE_SHIFT;
Chao Yu8f46dca2015-07-14 18:56:10 +08002984 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
2985 range_whole = 1;
Chao Yu8f46dca2015-07-14 18:56:10 +08002986 }
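	/*
	 * For integrity writeback, walk pages tagged TOWRITE so that pages
	 * dirtied while we are writing are not visited again in this pass,
	 * which avoids livelock on a continuously dirtied file.
	 */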
2987 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
2988 tag = PAGECACHE_TAG_TOWRITE;
2989 else
2990 tag = PAGECACHE_TAG_DIRTY;
2991retry:
Chao Yu4c8ff702019-11-01 18:07:14 +08002992 retry = 0;
Chao Yu8f46dca2015-07-14 18:56:10 +08002993 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
2994 tag_pages_for_writeback(mapping, index, end);
2995 done_index = index;
Chao Yu4c8ff702019-11-01 18:07:14 +08002996 while (!done && !retry && (index <= end)) {
Fengnan Chang01fc4b92022-07-31 11:33:46 +08002997 nr_pages = find_get_pages_range_tag(mapping, &index, end,
2998 tag, F2FS_ONSTACK_PAGES, pages);
Chao Yu8f46dca2015-07-14 18:56:10 +08002999 if (nr_pages == 0)
3000 break;
3001
3002 for (i = 0; i < nr_pages; i++) {
Fengnan Chang01fc4b92022-07-31 11:33:46 +08003003 struct page *page = pages[i];
Chao Yu4c8ff702019-11-01 18:07:14 +08003004 bool need_readd;
3005readd:
3006 need_readd = false;
3007#ifdef CONFIG_F2FS_FS_COMPRESSION
3008 if (f2fs_compressed_file(inode)) {
Fengnan Changb368cc52021-10-22 20:08:00 -07003009 void *fsdata = NULL;
3010 struct page *pagep;
3011 int ret2;
3012
Chao Yu4c8ff702019-11-01 18:07:14 +08003013 ret = f2fs_init_compress_ctx(&cc);
3014 if (ret) {
3015 done = 1;
3016 break;
3017 }
Chao Yu8f46dca2015-07-14 18:56:10 +08003018
Chao Yu4c8ff702019-11-01 18:07:14 +08003019 if (!f2fs_cluster_can_merge_page(&cc,
3020 page->index)) {
3021 ret = f2fs_write_multi_pages(&cc,
3022 &submitted, wbc, io_type);
3023 if (!ret)
3024 need_readd = true;
3025 goto result;
3026 }
3027
3028 if (unlikely(f2fs_cp_error(sbi)))
3029 goto lock_page;
3030
Fengnan Changb368cc52021-10-22 20:08:00 -07003031 if (!f2fs_cluster_is_empty(&cc))
3032 goto lock_page;
Chao Yu4c8ff702019-11-01 18:07:14 +08003033
Fengnan Chang4f8219f2022-07-31 11:33:45 +08003034 if (f2fs_all_cluster_page_ready(&cc,
Fengnan Chang01fc4b92022-07-31 11:33:46 +08003035 pages, i, nr_pages, true))
Fengnan Chang4f8219f2022-07-31 11:33:45 +08003036 goto lock_page;
3037
Fengnan Changb368cc52021-10-22 20:08:00 -07003038 ret2 = f2fs_prepare_compress_overwrite(
Chao Yu4c8ff702019-11-01 18:07:14 +08003039 inode, &pagep,
3040 page->index, &fsdata);
Fengnan Changb368cc52021-10-22 20:08:00 -07003041 if (ret2 < 0) {
3042 ret = ret2;
3043 done = 1;
3044 break;
3045 } else if (ret2 &&
3046 (!f2fs_compress_write_end(inode,
3047 fsdata, page->index, 1) ||
Fengnan Chang4f8219f2022-07-31 11:33:45 +08003048 !f2fs_all_cluster_page_ready(&cc,
Fengnan Chang01fc4b92022-07-31 11:33:46 +08003049 pages, i, nr_pages, false))) {
Fengnan Changb368cc52021-10-22 20:08:00 -07003050 retry = 1;
3051 break;
Chao Yu4c8ff702019-11-01 18:07:14 +08003052 }
3053 }
3054#endif
Chao Yuf8de4332018-05-23 22:25:09 +08003055			/* give priority to WB_SYNC threads */
Chao Yuc29fd0c2018-06-04 23:20:36 +08003056 if (atomic_read(&sbi->wb_sync_req[DATA]) &&
Chao Yuf8de4332018-05-23 22:25:09 +08003057 wbc->sync_mode == WB_SYNC_NONE) {
3058 done = 1;
3059 break;
3060 }
Chao Yu4c8ff702019-11-01 18:07:14 +08003061#ifdef CONFIG_F2FS_FS_COMPRESSION
3062lock_page:
3063#endif
Chao Yu8f46dca2015-07-14 18:56:10 +08003064 done_index = page->index;
Jaegeuk Kimd29460e2017-06-21 17:52:39 -07003065retry_write:
Chao Yu8f46dca2015-07-14 18:56:10 +08003066 lock_page(page);
3067
3068 if (unlikely(page->mapping != mapping)) {
3069continue_unlock:
3070 unlock_page(page);
3071 continue;
3072 }
3073
3074 if (!PageDirty(page)) {
3075 /* someone wrote it for us */
3076 goto continue_unlock;
3077 }
3078
Chao Yu8f46dca2015-07-14 18:56:10 +08003079 if (PageWriteback(page)) {
Chao Yu0b20fce2019-09-30 18:53:25 +08003080 if (wbc->sync_mode != WB_SYNC_NONE)
Jaegeuk Kimfec1d652016-01-20 23:43:51 +08003081 f2fs_wait_on_page_writeback(page,
Chao Yubae0ee72018-12-25 17:43:42 +08003082 DATA, true, true);
Chao Yu0b20fce2019-09-30 18:53:25 +08003083 else
Chao Yu8f46dca2015-07-14 18:56:10 +08003084 goto continue_unlock;
3085 }
3086
Chao Yu8f46dca2015-07-14 18:56:10 +08003087 if (!clear_page_dirty_for_io(page))
3088 goto continue_unlock;
3089
Chao Yu4c8ff702019-11-01 18:07:14 +08003090#ifdef CONFIG_F2FS_FS_COMPRESSION
3091 if (f2fs_compressed_file(inode)) {
3092 get_page(page);
3093 f2fs_compress_ctx_add_page(&cc, page);
3094 continue;
3095 }
3096#endif
3097 ret = f2fs_write_single_data_page(page, &submitted,
Chao Yu3afae092021-01-11 17:42:53 +08003098 &bio, &last_block, wbc, io_type,
3099 0, true);
Chao Yu4c8ff702019-11-01 18:07:14 +08003100 if (ret == AOP_WRITEPAGE_ACTIVATE)
3101 unlock_page(page);
3102#ifdef CONFIG_F2FS_FS_COMPRESSION
3103result:
3104#endif
3105 nwritten += submitted;
3106 wbc->nr_to_write -= submitted;
3107
Chao Yu8f46dca2015-07-14 18:56:10 +08003108 if (unlikely(ret)) {
Chao Yu0002b612016-11-28 19:13:43 -08003109 /*
3110 * keep nr_to_write, since vfs uses this to
3111 * get # of written pages.
3112 */
3113 if (ret == AOP_WRITEPAGE_ACTIVATE) {
Chao Yu0002b612016-11-28 19:13:43 -08003114 ret = 0;
Chao Yu4c8ff702019-11-01 18:07:14 +08003115 goto next;
Jaegeuk Kimd29460e2017-06-21 17:52:39 -07003116 } else if (ret == -EAGAIN) {
3117 ret = 0;
3118 if (wbc->sync_mode == WB_SYNC_ALL) {
NeilBrowna64239d2022-03-22 14:39:13 -07003119 f2fs_io_schedule_timeout(
Chao Yu5df7731f2020-02-17 17:45:44 +08003120 DEFAULT_IO_TIMEOUT);
Jaegeuk Kimd29460e2017-06-21 17:52:39 -07003121 goto retry_write;
3122 }
Chao Yu4c8ff702019-11-01 18:07:14 +08003123 goto next;
Chao Yu0002b612016-11-28 19:13:43 -08003124 }
Jaegeuk Kimb230e6c2016-05-29 21:18:23 -07003125 done_index = page->index + 1;
3126 done = 1;
3127 break;
Chao Yu8f46dca2015-07-14 18:56:10 +08003128 }
3129
Chao Yu4c8ff702019-11-01 18:07:14 +08003130 if (wbc->nr_to_write <= 0 &&
Jaegeuk Kim687de7f2017-03-28 18:07:38 -07003131 wbc->sync_mode == WB_SYNC_NONE) {
Chao Yu8f46dca2015-07-14 18:56:10 +08003132 done = 1;
3133 break;
3134 }
Chao Yu4c8ff702019-11-01 18:07:14 +08003135next:
3136 if (need_readd)
3137 goto readd;
Chao Yu8f46dca2015-07-14 18:56:10 +08003138 }
Fengnan Chang01fc4b92022-07-31 11:33:46 +08003139 release_pages(pages, nr_pages);
Chao Yu8f46dca2015-07-14 18:56:10 +08003140 cond_resched();
3141 }
Chao Yu4c8ff702019-11-01 18:07:14 +08003142#ifdef CONFIG_F2FS_FS_COMPRESSION
3143	/* flush remaining pages in the compress cluster */
3144 if (f2fs_compressed_file(inode) && !f2fs_cluster_is_empty(&cc)) {
3145 ret = f2fs_write_multi_pages(&cc, &submitted, wbc, io_type);
3146 nwritten += submitted;
3147 wbc->nr_to_write -= submitted;
3148 if (ret) {
3149 done = 1;
3150 retry = 0;
3151 }
3152 }
Jaegeuk Kimadfc6942020-09-23 00:54:50 -07003153 if (f2fs_compressed_file(inode))
Chao Yu8bfbfb02021-05-10 17:30:32 +08003154 f2fs_destroy_compress_ctx(&cc, false);
Chao Yu4c8ff702019-11-01 18:07:14 +08003155#endif
Sahitya Tummalae78790f2020-06-02 18:11:47 +05303156 if (retry) {
Chao Yu8f46dca2015-07-14 18:56:10 +08003157 index = 0;
Sahitya Tummalae78790f2020-06-02 18:11:47 +05303158 end = -1;
Chao Yu8f46dca2015-07-14 18:56:10 +08003159 goto retry;
3160 }
Sahitya Tummalae78790f2020-06-02 18:11:47 +05303161 if (wbc->range_cyclic && !done)
3162 done_index = 0;
Chao Yu8f46dca2015-07-14 18:56:10 +08003163 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
3164 mapping->writeback_index = done_index;
3165
Chao Yubab475c2018-09-27 23:41:16 +08003166 if (nwritten)
Jaegeuk Kimb9109b02017-05-10 11:28:38 -07003167 f2fs_submit_merged_write_cond(F2FS_M_SB(mapping), mapping->host,
Chao Yubab475c2018-09-27 23:41:16 +08003168 NULL, 0, DATA);
Chao Yu8648de22019-02-19 16:15:29 +08003169 /* submit cached bio of IPU write */
3170 if (bio)
Chao Yu0b20fce2019-09-30 18:53:25 +08003171 f2fs_submit_merged_ipu_write(sbi, &bio, NULL);
Chao Yu6ca56ca2016-09-29 18:50:11 +08003172
Chao Yu8f46dca2015-07-14 18:56:10 +08003173 return ret;
3174}
3175
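/*
 * Serializing writeback under sbi->writepages trades parallelism for more
 * contiguous block allocation; it is applied to compressed data, to
 * non-integrity (non-WB_SYNC_ALL) writeback, and to inodes with many
 * dirty pages.
 */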
Jaegeuk Kim853137c2018-08-09 17:53:34 -07003176static inline bool __should_serialize_io(struct inode *inode,
3177 struct writeback_control *wbc)
3178{
Chao Yu040d2bb2019-05-20 17:36:59 +08003179	/* to avoid deadlock in the data flush path */
Chao Yud80afef2022-09-14 21:28:46 +08003180 if (F2FS_I(inode)->wb_task)
Chao Yu040d2bb2019-05-20 17:36:59 +08003181 return false;
Chao Yub13f67ff2020-03-19 19:57:57 +08003182
3183 if (!S_ISREG(inode->i_mode))
3184 return false;
3185 if (IS_NOQUOTA(inode))
3186 return false;
3187
Daeho Jeong602a16d2020-12-01 13:08:02 +09003188 if (f2fs_need_compress_data(inode))
Chao Yub13f67ff2020-03-19 19:57:57 +08003189 return true;
Jaegeuk Kim853137c2018-08-09 17:53:34 -07003190 if (wbc->sync_mode != WB_SYNC_ALL)
3191 return true;
3192 if (get_dirty_pages(inode) >= SM_I(F2FS_I_SB(inode))->min_seq_blocks)
3193 return true;
3194 return false;
3195}
3196
Chao Yufc99fe22018-05-30 00:20:39 +08003197static int __f2fs_write_data_pages(struct address_space *mapping,
Chao Yub0af6d42017-08-02 23:21:48 +08003198 struct writeback_control *wbc,
3199 enum iostat_type io_type)
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09003200{
3201 struct inode *inode = mapping->host;
Jaegeuk Kim40813632014-09-02 15:31:18 -07003202 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
Jaegeuk Kim9dfa1ba2016-07-13 19:33:19 -07003203 struct blk_plug plug;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09003204 int ret;
Jaegeuk Kim853137c2018-08-09 17:53:34 -07003205 bool locked = false;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09003206
P J Pcfb185a2013-04-03 11:38:00 +09003207	/* deal with chardevs and other special files */
3208 if (!mapping->a_ops->writepage)
3209 return 0;
3210
Chao Yu6a290542015-07-17 18:02:39 +08003211 /* skip writing if there is no dirty page in this inode */
3212 if (!get_dirty_pages(inode) && wbc->sync_mode == WB_SYNC_NONE)
3213 return 0;
3214
Chao Yu0771fcc2017-06-29 23:20:45 +08003215 /* during POR, we don't need to trigger writepage at all. */
3216 if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
3217 goto skip_write;
3218
Chao Yuaf033b22018-09-20 20:05:00 +08003219 if ((S_ISDIR(inode->i_mode) || IS_NOQUOTA(inode)) &&
3220 wbc->sync_mode == WB_SYNC_NONE &&
Jaegeuk Kima1257022015-10-08 10:40:07 -07003221 get_dirty_pages(inode) < nr_pages_to_skip(sbi, DATA) &&
Chao Yu4d57b862018-05-30 00:20:41 +08003222 f2fs_available_free_memory(sbi, DIRTY_DENTS))
Jaegeuk Kima1257022015-10-08 10:40:07 -07003223 goto skip_write;
3224
Chao Yu1018a542022-02-04 15:19:46 +08003225	/* skip writing during the file defragment preparation stage */
3226 if (is_inode_flag_set(inode, FI_SKIP_WRITES))
Chao Yud323d002015-10-27 09:53:45 +08003227 goto skip_write;
3228
Yunlei Hed31c7c32016-02-04 16:14:00 +08003229 trace_f2fs_writepages(mapping->host, wbc, DATA);
3230
Jaegeuk Kim687de7f2017-03-28 18:07:38 -07003231	/* to avoid splitting IOs due to mixed WB_SYNC_ALL and WB_SYNC_NONE */
3232 if (wbc->sync_mode == WB_SYNC_ALL)
Chao Yuc29fd0c2018-06-04 23:20:36 +08003233 atomic_inc(&sbi->wb_sync_req[DATA]);
Chao Yu34415092022-01-27 13:44:49 +08003234 else if (atomic_read(&sbi->wb_sync_req[DATA])) {
3235 /* to avoid potential deadlock */
3236 if (current->plug)
3237 blk_finish_plug(current->plug);
Jaegeuk Kim687de7f2017-03-28 18:07:38 -07003238 goto skip_write;
Chao Yu34415092022-01-27 13:44:49 +08003239 }
Jaegeuk Kim687de7f2017-03-28 18:07:38 -07003240
Jaegeuk Kim853137c2018-08-09 17:53:34 -07003241 if (__should_serialize_io(inode, wbc)) {
3242 mutex_lock(&sbi->writepages);
3243 locked = true;
3244 }
3245
Jaegeuk Kim9dfa1ba2016-07-13 19:33:19 -07003246 blk_start_plug(&plug);
Chao Yub0af6d42017-08-02 23:21:48 +08003247 ret = f2fs_write_cache_pages(mapping, wbc, io_type);
Jaegeuk Kim9dfa1ba2016-07-13 19:33:19 -07003248 blk_finish_plug(&plug);
Jaegeuk Kim687de7f2017-03-28 18:07:38 -07003249
Jaegeuk Kim853137c2018-08-09 17:53:34 -07003250 if (locked)
3251 mutex_unlock(&sbi->writepages);
3252
Jaegeuk Kim687de7f2017-03-28 18:07:38 -07003253 if (wbc->sync_mode == WB_SYNC_ALL)
Chao Yuc29fd0c2018-06-04 23:20:36 +08003254 atomic_dec(&sbi->wb_sync_req[DATA]);
Jaegeuk Kim28ea6162016-05-25 17:17:56 -07003255 /*
3256	 * if some pages were truncated, we cannot guarantee that mapping->host
3257	 * can detect pending bios.
3258 */
Jaegeuk Kim458e6192013-12-11 13:54:01 +09003259
Chao Yu4d57b862018-05-30 00:20:41 +08003260 f2fs_remove_dirty_inode(inode);
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09003261 return ret;
Jaegeuk Kimd3baf952014-03-18 13:43:05 +09003262
3263skip_write:
Jaegeuk Kima7ffdbe2014-09-12 15:53:45 -07003264 wbc->pages_skipped += get_dirty_pages(inode);
Yunlei Hed31c7c32016-02-04 16:14:00 +08003265 trace_f2fs_writepages(mapping->host, wbc, DATA);
Jaegeuk Kimd3baf952014-03-18 13:43:05 +09003266 return 0;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09003267}
3268
Chao Yub0af6d42017-08-02 23:21:48 +08003269static int f2fs_write_data_pages(struct address_space *mapping,
3270 struct writeback_control *wbc)
3271{
3272 struct inode *inode = mapping->host;
3273
3274 return __f2fs_write_data_pages(mapping, wbc,
3275 F2FS_I(inode)->cp_task == current ?
3276 FS_CP_DATA_IO : FS_DATA_IO);
3277}
3278
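/*
 * Undo speculative preallocation after a failed or short write: blocks
 * reserved beyond i_size are truncated away under i_gc_rwsem[WRITE] and
 * the mapping's invalidate lock.
 */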
Eric Biggersa1e09b02021-07-23 00:59:21 -07003279void f2fs_write_failed(struct inode *inode, loff_t to)
Chao Yu3aab8f82014-07-02 13:25:04 +08003280{
Jaegeuk Kim819d9152015-12-28 13:48:11 -08003281 loff_t i_size = i_size_read(inode);
Chao Yu3aab8f82014-07-02 13:25:04 +08003282
Jaegeuk Kim3f188c22019-12-03 18:54:29 -08003283 if (IS_NOQUOTA(inode))
3284 return;
3285
Eric Biggers95ae2512019-07-22 09:26:24 -07003286 /* In the fs-verity case, f2fs_end_enable_verity() does the truncate */
3287 if (to > i_size && !f2fs_verity_in_progress(inode)) {
Tim Murraye4544b62022-01-07 12:48:44 -08003288 f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
Linus Torvalds6abaa832021-09-04 10:48:47 -07003289 filemap_invalidate_lock(inode->i_mapping);
Chao Yua33c1502018-08-05 23:04:25 +08003290
Jaegeuk Kim819d9152015-12-28 13:48:11 -08003291 truncate_pagecache(inode, i_size);
Jaegeuk Kim3f188c22019-12-03 18:54:29 -08003292 f2fs_truncate_blocks(inode, i_size, true);
Chao Yua33c1502018-08-05 23:04:25 +08003293
Linus Torvalds6abaa832021-09-04 10:48:47 -07003294 filemap_invalidate_unlock(inode->i_mapping);
Tim Murraye4544b62022-01-07 12:48:44 -08003295 f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
Chao Yu3aab8f82014-07-02 13:25:04 +08003296 }
3297}
3298
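/*
 * Resolve or reserve the block backing the page being written: convert
 * inline data when the write no longer fits, consult the extent cache,
 * and fall back to a dnode lookup, taking the map lock whenever a new
 * block may need to be allocated.
 */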
Jaegeuk Kim2aadac02015-12-23 11:55:18 -08003299static int prepare_write_begin(struct f2fs_sb_info *sbi,
3300 struct page *page, loff_t pos, unsigned len,
3301 block_t *blk_addr, bool *node_changed)
3302{
3303 struct inode *inode = page->mapping->host;
3304 pgoff_t index = page->index;
3305 struct dnode_of_data dn;
3306 struct page *ipage;
Jaegeuk Kimb4d07a32015-12-23 13:48:58 -08003307 bool locked = false;
Chao Yu94afd6d2021-08-04 10:23:48 +08003308 struct extent_info ei = {0, };
Jaegeuk Kim2aadac02015-12-23 11:55:18 -08003309 int err = 0;
Sheng Yong2866fb12018-11-14 19:34:28 +08003310 int flag;
Jaegeuk Kim2aadac02015-12-23 11:55:18 -08003311
Jaegeuk Kim24b84912016-02-03 13:49:44 -08003312 /*
Eric Biggers3d697a42021-07-16 09:39:13 -05003313 * If a whole page is being written and we already preallocated all the
3314 * blocks, then there is no need to get a block address now.
Jaegeuk Kim24b84912016-02-03 13:49:44 -08003315 */
Eric Biggers3d697a42021-07-16 09:39:13 -05003316 if (len == PAGE_SIZE && is_inode_flag_set(inode, FI_PREALLOCATED_ALL))
Jaegeuk Kim24b84912016-02-03 13:49:44 -08003317 return 0;
3318
Sheng Yong2866fb12018-11-14 19:34:28 +08003319 /* f2fs_lock_op avoids race between write CP and convert_inline_page */
3320 if (f2fs_has_inline_data(inode) && pos + len > MAX_INLINE_DATA(inode))
3321 flag = F2FS_GET_BLOCK_DEFAULT;
3322 else
3323 flag = F2FS_GET_BLOCK_PRE_AIO;
3324
Jaegeuk Kimb4d07a32015-12-23 13:48:58 -08003325 if (f2fs_has_inline_data(inode) ||
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003326 (pos & PAGE_MASK) >= i_size_read(inode)) {
Chao Yu0ef81832020-06-18 14:36:22 +08003327 f2fs_do_map_lock(sbi, flag, true);
Jaegeuk Kimb4d07a32015-12-23 13:48:58 -08003328 locked = true;
3329 }
Chao Yu4c8ff702019-11-01 18:07:14 +08003330
Jaegeuk Kimb4d07a32015-12-23 13:48:58 -08003331restart:
Jaegeuk Kim2aadac02015-12-23 11:55:18 -08003332 /* check inline_data */
Chao Yu4d57b862018-05-30 00:20:41 +08003333 ipage = f2fs_get_node_page(sbi, inode->i_ino);
Jaegeuk Kim2aadac02015-12-23 11:55:18 -08003334 if (IS_ERR(ipage)) {
3335 err = PTR_ERR(ipage);
3336 goto unlock_out;
3337 }
3338
3339 set_new_dnode(&dn, inode, ipage, ipage, 0);
3340
3341 if (f2fs_has_inline_data(inode)) {
Chao Yuf2470372017-07-19 00:19:05 +08003342 if (pos + len <= MAX_INLINE_DATA(inode)) {
Chao Yu4d57b862018-05-30 00:20:41 +08003343 f2fs_do_read_inline_data(page, ipage);
Jaegeuk Kim91942322016-05-20 10:13:22 -07003344 set_inode_flag(inode, FI_DATA_EXIST);
Chao Yuab470362016-05-11 19:48:44 +08003345 if (inode->i_nlink)
Chao Yub763f3b2021-04-28 17:20:31 +08003346 set_page_private_inline(ipage);
Jaegeuk Kim2aadac02015-12-23 11:55:18 -08003347 } else {
3348 err = f2fs_convert_inline_page(&dn, page);
3349 if (err)
Jaegeuk Kimb4d07a32015-12-23 13:48:58 -08003350 goto out;
3351 if (dn.data_blkaddr == NULL_ADDR)
3352 err = f2fs_get_block(&dn, index);
3353 }
3354 } else if (locked) {
3355 err = f2fs_get_block(&dn, index);
3356 } else {
3357 if (f2fs_lookup_extent_cache(inode, index, &ei)) {
3358 dn.data_blkaddr = ei.blk + index - ei.fofs;
3359 } else {
Jaegeuk Kimb4d07a32015-12-23 13:48:58 -08003360 /* hole case */
Chao Yu4d57b862018-05-30 00:20:41 +08003361 err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
Jaegeuk Kim4da7bf52016-04-06 11:27:03 -07003362 if (err || dn.data_blkaddr == NULL_ADDR) {
Jaegeuk Kimb4d07a32015-12-23 13:48:58 -08003363 f2fs_put_dnode(&dn);
Chao Yu0ef81832020-06-18 14:36:22 +08003364 f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO,
Yunlei He59c90812017-03-13 20:22:18 +08003365 true);
Sheng Yong2866fb12018-11-14 19:34:28 +08003366 WARN_ON(flag != F2FS_GET_BLOCK_PRE_AIO);
Jaegeuk Kimb4d07a32015-12-23 13:48:58 -08003367 locked = true;
3368 goto restart;
3369 }
Jaegeuk Kim2aadac02015-12-23 11:55:18 -08003370 }
3371 }
Jaegeuk Kimb4d07a32015-12-23 13:48:58 -08003372
Jaegeuk Kim2aadac02015-12-23 11:55:18 -08003373 /* convert_inline_page can make node_changed */
3374 *blk_addr = dn.data_blkaddr;
3375 *node_changed = dn.node_changed;
Jaegeuk Kimb4d07a32015-12-23 13:48:58 -08003376out:
Jaegeuk Kim2aadac02015-12-23 11:55:18 -08003377 f2fs_put_dnode(&dn);
3378unlock_out:
Jaegeuk Kimb4d07a32015-12-23 13:48:58 -08003379 if (locked)
Chao Yu0ef81832020-06-18 14:36:22 +08003380 f2fs_do_map_lock(sbi, flag, false);
Jaegeuk Kim2aadac02015-12-23 11:55:18 -08003381 return err;
3382}
3383
Daeho Jeong3db1de02022-04-28 11:18:09 -07003384static int __find_data_block(struct inode *inode, pgoff_t index,
3385 block_t *blk_addr)
3386{
3387 struct dnode_of_data dn;
3388 struct page *ipage;
3389 struct extent_info ei = {0, };
3390 int err = 0;
3391
3392 ipage = f2fs_get_node_page(F2FS_I_SB(inode), inode->i_ino);
3393 if (IS_ERR(ipage))
3394 return PTR_ERR(ipage);
3395
3396 set_new_dnode(&dn, inode, ipage, ipage, 0);
3397
3398 if (f2fs_lookup_extent_cache(inode, index, &ei)) {
3399 dn.data_blkaddr = ei.blk + index - ei.fofs;
3400 } else {
3401 /* hole case */
3402 err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
3403 if (err) {
3404 dn.data_blkaddr = NULL_ADDR;
3405 err = 0;
3406 }
3407 }
3408 *blk_addr = dn.data_blkaddr;
3409 f2fs_put_dnode(&dn);
3410 return err;
3411}
3412
3413static int __reserve_data_block(struct inode *inode, pgoff_t index,
3414 block_t *blk_addr, bool *node_changed)
3415{
3416 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3417 struct dnode_of_data dn;
3418 struct page *ipage;
3419 int err = 0;
3420
3421 f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true);
3422
3423 ipage = f2fs_get_node_page(sbi, inode->i_ino);
3424 if (IS_ERR(ipage)) {
3425 err = PTR_ERR(ipage);
3426 goto unlock_out;
3427 }
3428 set_new_dnode(&dn, inode, ipage, ipage, 0);
3429
3430 err = f2fs_get_block(&dn, index);
3431
3432 *blk_addr = dn.data_blkaddr;
3433 *node_changed = dn.node_changed;
3434 f2fs_put_dnode(&dn);
3435
3436unlock_out:
3437 f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
3438 return err;
3439}
3440
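/*
 * For atomic writes, updates are staged in the COW inode: reuse a block
 * already reserved there; otherwise remember the original block (if any)
 * and reserve a fresh block in the COW inode for the update.
 */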
3441static int prepare_atomic_write_begin(struct f2fs_sb_info *sbi,
3442 struct page *page, loff_t pos, unsigned int len,
3443 block_t *blk_addr, bool *node_changed)
3444{
3445 struct inode *inode = page->mapping->host;
3446 struct inode *cow_inode = F2FS_I(inode)->cow_inode;
3447 pgoff_t index = page->index;
3448 int err = 0;
Daeho Jeongf8e2f322022-07-18 16:02:48 -07003449 block_t ori_blk_addr = NULL_ADDR;
Daeho Jeong3db1de02022-04-28 11:18:09 -07003450
3451 /* If pos is beyond the end of file, reserve a new block in COW inode */
3452 if ((pos & PAGE_MASK) >= i_size_read(inode))
Daeho Jeongf8e2f322022-07-18 16:02:48 -07003453 goto reserve_block;
Daeho Jeong3db1de02022-04-28 11:18:09 -07003454
3455 /* Look for the block in COW inode first */
3456 err = __find_data_block(cow_inode, index, blk_addr);
3457 if (err)
3458 return err;
3459 else if (*blk_addr != NULL_ADDR)
3460 return 0;
3461
3462 /* Look for the block in the original inode */
3463 err = __find_data_block(inode, index, &ori_blk_addr);
3464 if (err)
3465 return err;
3466
Daeho Jeongf8e2f322022-07-18 16:02:48 -07003467reserve_block:
Daeho Jeong3db1de02022-04-28 11:18:09 -07003468 /* Finally, we should reserve a new block in COW inode for the update */
3469 err = __reserve_data_block(cow_inode, index, blk_addr, node_changed);
3470 if (err)
3471 return err;
Daeho Jeongf8e2f322022-07-18 16:02:48 -07003472 inc_atomic_write_cnt(inode);
Daeho Jeong3db1de02022-04-28 11:18:09 -07003473
3474 if (ori_blk_addr != NULL_ADDR)
3475 *blk_addr = ori_blk_addr;
3476 return 0;
3477}
3478
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09003479static int f2fs_write_begin(struct file *file, struct address_space *mapping,
Matthew Wilcox (Oracle)9d6b0cd2022-02-22 14:31:43 -05003480 loff_t pos, unsigned len, struct page **pagep, void **fsdata)
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09003481{
3482 struct inode *inode = mapping->host;
Jaegeuk Kim40813632014-09-02 15:31:18 -07003483 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
Jaegeuk Kim86531d62015-07-15 13:08:21 -07003484 struct page *page = NULL;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003485 pgoff_t index = ((unsigned long long) pos) >> PAGE_SHIFT;
Daeho Jeong3db1de02022-04-28 11:18:09 -07003486 bool need_balance = false;
Jaegeuk Kim2aadac02015-12-23 11:55:18 -08003487 block_t blkaddr = NULL_ADDR;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09003488 int err = 0;
3489
Matthew Wilcox (Oracle)9d6b0cd2022-02-22 14:31:43 -05003490 trace_f2fs_write_begin(inode, pos, len);
Chao Yu62aed042014-05-06 16:46:04 +08003491
Chao Yu00e09c02019-08-23 17:58:36 +08003492 if (!f2fs_is_checkpoint_ready(sbi)) {
3493 err = -ENOSPC;
Daniel Rosenberg43549942018-08-20 19:21:43 -07003494 goto fail;
Chao Yu00e09c02019-08-23 17:58:36 +08003495 }
Daniel Rosenberg43549942018-08-20 19:21:43 -07003496
Jaegeuk Kim5f727392014-11-25 10:59:45 -08003497 /*
3498 * We should check this at this moment to avoid deadlock on inode page
3499 * and #0 page. The locking rule for inline_data conversion should be:
3500 * lock_page(page #0) -> lock_page(inode_page)
3501 */
3502 if (index != 0) {
3503 err = f2fs_convert_inline_inode(inode);
3504 if (err)
3505 goto fail;
3506 }
Chao Yu4c8ff702019-11-01 18:07:14 +08003507
3508#ifdef CONFIG_F2FS_FS_COMPRESSION
3509 if (f2fs_compressed_file(inode)) {
3510 int ret;
3511
3512 *fsdata = NULL;
3513
Fengnan Chang9b56adc2022-03-18 09:23:04 +08003514 if (len == PAGE_SIZE && !(f2fs_is_atomic_file(inode)))
Fengnan Chang7eab7a62021-06-22 19:50:59 +08003515 goto repeat;
3516
Chao Yu4c8ff702019-11-01 18:07:14 +08003517 ret = f2fs_prepare_compress_overwrite(inode, pagep,
3518 index, fsdata);
3519 if (ret < 0) {
3520 err = ret;
3521 goto fail;
3522 } else if (ret) {
3523 return 0;
3524 }
3525 }
3526#endif
3527
Jaegeuk Kimafcb7ca02013-04-26 11:55:17 +09003528repeat:
Jaegeuk Kim86d54792017-02-17 09:55:55 -08003529 /*
3530 * Do not use grab_cache_page_write_begin() to avoid deadlock due to
3531	 * wait_for_stable_page. We will wait for it below with our own IO control.
3532 */
Chao Yu01eccef2017-10-28 16:52:30 +08003533 page = f2fs_pagecache_get_page(mapping, index,
Jaegeuk Kim86d54792017-02-17 09:55:55 -08003534 FGP_LOCK | FGP_WRITE | FGP_CREAT, GFP_NOFS);
Chao Yu3aab8f82014-07-02 13:25:04 +08003535 if (!page) {
3536 err = -ENOMEM;
3537 goto fail;
3538 }
Jaegeuk Kimd5f66992014-04-30 09:22:45 +09003539
Chao Yu4c8ff702019-11-01 18:07:14 +08003540 /* TODO: cluster can be compressed due to race with .writepage */
3541
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09003542 *pagep = page;
3543
Daeho Jeong3db1de02022-04-28 11:18:09 -07003544 if (f2fs_is_atomic_file(inode))
3545 err = prepare_atomic_write_begin(sbi, page, pos, len,
3546 &blkaddr, &need_balance);
3547 else
3548 err = prepare_write_begin(sbi, page, pos, len,
Jaegeuk Kim2aadac02015-12-23 11:55:18 -08003549 &blkaddr, &need_balance);
Jaegeuk Kim9ba69cf2014-10-17 20:33:55 -07003550 if (err)
Jaegeuk Kim2aadac02015-12-23 11:55:18 -08003551 goto fail;
Jaegeuk Kim9ba69cf2014-10-17 20:33:55 -07003552
Chao Yuaf033b22018-09-20 20:05:00 +08003553 if (need_balance && !IS_NOQUOTA(inode) &&
3554 has_not_enough_free_secs(sbi, 0, 0)) {
Jaegeuk Kim2a340762015-12-22 13:23:35 -08003555 unlock_page(page);
Jaegeuk Kim2c4db1a2016-01-07 14:15:04 -08003556 f2fs_balance_fs(sbi, true);
Jaegeuk Kim2a340762015-12-22 13:23:35 -08003557 lock_page(page);
3558 if (page->mapping != mapping) {
3559 /* The page got truncated from under us */
3560 f2fs_put_page(page, 1);
3561 goto repeat;
3562 }
3563 }
3564
Chao Yubae0ee72018-12-25 17:43:42 +08003565 f2fs_wait_on_page_writeback(page, DATA, false, true);
Jaegeuk Kimb3d208f2014-10-23 19:48:09 -07003566
Jaegeuk Kim649d7df2016-09-06 11:02:03 -07003567 if (len == PAGE_SIZE || PageUptodate(page))
3568 return 0;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09003569
Eric Biggers95ae2512019-07-22 09:26:24 -07003570 if (!(pos & (PAGE_SIZE - 1)) && (pos + len) >= i_size_read(inode) &&
3571 !f2fs_verity_in_progress(inode)) {
Yunlei He746e2402016-12-20 11:11:35 +08003572 zero_user_segment(page, len, PAGE_SIZE);
3573 return 0;
3574 }
3575
Jaegeuk Kim2aadac02015-12-23 11:55:18 -08003576 if (blkaddr == NEW_ADDR) {
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003577 zero_user_segment(page, 0, PAGE_SIZE);
Jaegeuk Kim649d7df2016-09-06 11:02:03 -07003578 SetPageUptodate(page);
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09003579 } else {
Chao Yu93770ab2019-04-15 15:26:32 +08003580 if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
3581 DATA_GENERIC_ENHANCE_READ)) {
Chao Yu10f966b2019-06-20 11:36:14 +08003582 err = -EFSCORRUPTED;
Chao Yu95fa90c2022-09-28 23:38:54 +08003583 f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR);
Chao Yu93770ab2019-04-15 15:26:32 +08003584 goto fail;
3585 }
Jia Yangb7973092020-07-01 10:27:40 +08003586 err = f2fs_submit_page_read(inode, page, blkaddr, 0, true);
Jaegeuk Kim13ba41e2017-09-06 21:04:44 -07003587 if (err)
Chao Yu3aab8f82014-07-02 13:25:04 +08003588 goto fail;
Chao Yud54c7952014-03-29 15:30:40 +08003589
Jaegeuk Kim393ff912013-03-08 21:29:23 +09003590 lock_page(page);
Jaegeuk Kim6bacf522013-12-06 15:00:58 +09003591 if (unlikely(page->mapping != mapping)) {
Jaegeuk Kimafcb7ca02013-04-26 11:55:17 +09003592 f2fs_put_page(page, 1);
3593 goto repeat;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09003594 }
Chao Yu1563ac72016-07-03 22:05:12 +08003595 if (unlikely(!PageUptodate(page))) {
3596 err = -EIO;
3597 goto fail;
Jaegeuk Kim4375a332015-04-23 12:04:33 -07003598 }
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09003599 }
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09003600 return 0;
Jaegeuk Kim9ba69cf2014-10-17 20:33:55 -07003601
Chao Yu3aab8f82014-07-02 13:25:04 +08003602fail:
Jaegeuk Kim86531d62015-07-15 13:08:21 -07003603 f2fs_put_page(page, 1);
Eric Biggers3e679dc2021-07-16 09:39:11 -05003604 f2fs_write_failed(inode, pos + len);
Chao Yu3aab8f82014-07-02 13:25:04 +08003605 return err;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09003606}
3607
Jaegeuk Kima1dd3c12013-06-27 13:04:08 +09003608static int f2fs_write_end(struct file *file,
3609 struct address_space *mapping,
3610 loff_t pos, unsigned len, unsigned copied,
3611 struct page *page, void *fsdata)
3612{
3613 struct inode *inode = page->mapping->host;
3614
Chao Yudfb2bf32014-05-06 16:47:23 +08003615 trace_f2fs_write_end(inode, pos, len, copied);
3616
Jaegeuk Kim649d7df2016-09-06 11:02:03 -07003617 /*
3618	 * This should have come from len == PAGE_SIZE, and we expect copied
3619	 * to be PAGE_SIZE. Otherwise, we treat copied as zero and let
3620	 * generic_perform_write() try to copy the data again with copied=0.
3621 */
3622 if (!PageUptodate(page)) {
Yunlei He746e2402016-12-20 11:11:35 +08003623 if (unlikely(copied != len))
Jaegeuk Kim649d7df2016-09-06 11:02:03 -07003624 copied = 0;
3625 else
3626 SetPageUptodate(page);
3627 }
Chao Yu4c8ff702019-11-01 18:07:14 +08003628
3629#ifdef CONFIG_F2FS_FS_COMPRESSION
3630 /* overwrite compressed file */
3631 if (f2fs_compressed_file(inode) && fsdata) {
3632 f2fs_compress_write_end(inode, fsdata, page->index, copied);
3633 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
Chao Yu944dd22e2020-07-24 18:21:36 +08003634
3635 if (pos + copied > i_size_read(inode) &&
3636 !f2fs_verity_in_progress(inode))
3637 f2fs_i_size_write(inode, pos + copied);
Chao Yu4c8ff702019-11-01 18:07:14 +08003638 return copied;
3639 }
3640#endif
3641
Jaegeuk Kim649d7df2016-09-06 11:02:03 -07003642 if (!copied)
3643 goto unlock_out;
3644
Jaegeuk Kim34ba94b2014-10-09 13:19:53 -07003645 set_page_dirty(page);
Jaegeuk Kima1dd3c12013-06-27 13:04:08 +09003646
Eric Biggers95ae2512019-07-22 09:26:24 -07003647 if (pos + copied > i_size_read(inode) &&
Daeho Jeong3db1de02022-04-28 11:18:09 -07003648 !f2fs_verity_in_progress(inode)) {
Jaegeuk Kimfc9581c2016-05-20 09:22:03 -07003649 f2fs_i_size_write(inode, pos + copied);
Daeho Jeong3db1de02022-04-28 11:18:09 -07003650 if (f2fs_is_atomic_file(inode))
3651 f2fs_i_size_write(F2FS_I(inode)->cow_inode,
3652 pos + copied);
3653 }
Jaegeuk Kim649d7df2016-09-06 11:02:03 -07003654unlock_out:
Chao Yu3024c9a2016-08-06 21:09:41 +08003655 f2fs_put_page(page, 1);
Jaegeuk Kimd0239e12016-01-08 16:57:48 -08003656 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
Jaegeuk Kima1dd3c12013-06-27 13:04:08 +09003657 return copied;
3658}
3659
Matthew Wilcox (Oracle)91503992022-02-09 20:21:44 +00003660void f2fs_invalidate_folio(struct folio *folio, size_t offset, size_t length)
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09003661{
Matthew Wilcox (Oracle)91503992022-02-09 20:21:44 +00003662 struct inode *inode = folio->mapping->host;
Chao Yu487261f2015-02-05 17:44:29 +08003663 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
Jaegeuk Kima7ffdbe2014-09-12 15:53:45 -07003664
Chao Yu487261f2015-02-05 17:44:29 +08003665 if (inode->i_ino >= F2FS_ROOT_INO(sbi) &&
Matthew Wilcox (Oracle)91503992022-02-09 20:21:44 +00003666 (offset || length != folio_size(folio)))
Jaegeuk Kima7ffdbe2014-09-12 15:53:45 -07003667 return;
3668
Matthew Wilcox (Oracle)91503992022-02-09 20:21:44 +00003669 if (folio_test_dirty(folio)) {
Chao Yu933439c2016-10-11 22:57:01 +08003670 if (inode->i_ino == F2FS_META_INO(sbi)) {
Chao Yu487261f2015-02-05 17:44:29 +08003671 dec_page_count(sbi, F2FS_DIRTY_META);
Chao Yu933439c2016-10-11 22:57:01 +08003672 } else if (inode->i_ino == F2FS_NODE_INO(sbi)) {
Chao Yu487261f2015-02-05 17:44:29 +08003673 dec_page_count(sbi, F2FS_DIRTY_NODES);
Chao Yu933439c2016-10-11 22:57:01 +08003674 } else {
Chao Yu487261f2015-02-05 17:44:29 +08003675 inode_dec_dirty_pages(inode);
Chao Yu4d57b862018-05-30 00:20:41 +08003676 f2fs_remove_dirty_inode(inode);
Chao Yu933439c2016-10-11 22:57:01 +08003677 }
Chao Yu487261f2015-02-05 17:44:29 +08003678 }
Chao Yudecd36b2015-08-07 18:42:09 +08003679
Matthew Wilcox (Oracle)91503992022-02-09 20:21:44 +00003680 clear_page_private_gcing(&folio->page);
Chao Yu2baf0782018-07-27 18:15:16 +08003681
Chao Yu2a64e302021-12-16 17:13:56 +08003682 if (test_opt(sbi, COMPRESS_CACHE) &&
3683 inode->i_ino == F2FS_COMPRESS_INO(sbi))
Matthew Wilcox (Oracle)91503992022-02-09 20:21:44 +00003684 clear_page_private_data(&folio->page);
Chao Yu6ce19af2021-05-20 19:51:50 +08003685
Matthew Wilcox (Oracle)91503992022-02-09 20:21:44 +00003686 folio_detach_private(folio);
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09003687}
3688
Matthew Wilcox (Oracle)c26cd042022-04-30 23:41:46 -04003689bool f2fs_release_folio(struct folio *folio, gfp_t wait)
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09003690{
Matthew Wilcox (Oracle)c26cd042022-04-30 23:41:46 -04003691 struct f2fs_sb_info *sbi;
3692
3693	/* If this is a dirty folio, keep its private data */
3694 if (folio_test_dirty(folio))
3695 return false;
Jaegeuk Kimf68daee2015-01-30 11:39:08 -08003696
Matthew Wilcox (Oracle)c26cd042022-04-30 23:41:46 -04003697 sbi = F2FS_M_SB(folio->mapping);
3698 if (test_opt(sbi, COMPRESS_CACHE)) {
3699 struct inode *inode = folio->mapping->host;
Chao Yu6ce19af2021-05-20 19:51:50 +08003700
Matthew Wilcox (Oracle)c26cd042022-04-30 23:41:46 -04003701 if (inode->i_ino == F2FS_COMPRESS_INO(sbi))
3702 clear_page_private_data(&folio->page);
Chao Yu6ce19af2021-05-20 19:51:50 +08003703 }
3704
Matthew Wilcox (Oracle)c26cd042022-04-30 23:41:46 -04003705 clear_page_private_gcing(&folio->page);
Chao Yub763f3b2021-04-28 17:20:31 +08003706
Matthew Wilcox (Oracle)c26cd042022-04-30 23:41:46 -04003707 folio_detach_private(folio);
3708 return true;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09003709}
3710
Matthew Wilcox (Oracle)4f5e34f2022-02-09 20:22:07 +00003711static bool f2fs_dirty_data_folio(struct address_space *mapping,
3712 struct folio *folio)
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09003713{
Matthew Wilcox (Oracle)4f5e34f2022-02-09 20:22:07 +00003714 struct inode *inode = mapping->host;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09003715
Matthew Wilcox (Oracle)4f5e34f2022-02-09 20:22:07 +00003716 trace_f2fs_set_page_dirty(&folio->page, DATA);
Jaegeuk Kim26c6b882013-10-24 17:53:29 +09003717
Matthew Wilcox (Oracle)4f5e34f2022-02-09 20:22:07 +00003718 if (!folio_test_uptodate(folio))
3719 folio_mark_uptodate(folio);
3720 BUG_ON(folio_test_swapcache(folio));
Jaegeuk Kim34ba94b2014-10-09 13:19:53 -07003721
Shuqi Zhang9b7eadd2022-08-31 10:24:40 +08003722 if (filemap_dirty_folio(mapping, folio)) {
Matthew Wilcox (Oracle)4f5e34f2022-02-09 20:22:07 +00003723 f2fs_update_dirty_folio(inode, folio);
3724 return true;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09003725 }
Matthew Wilcox (Oracle)0fb5b2e2022-03-29 16:22:54 -04003726 return false;
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09003727}
3728
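/*
 * Resolve a logical block of a compressed file for bmap(): raw
 * clusters return the on-disk block address, while blocks inside a
 * compressed cluster (or invalid block addresses) report 0.
 */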
3730static sector_t f2fs_bmap_compress(struct inode *inode, sector_t block)
3731{
3732#ifdef CONFIG_F2FS_FS_COMPRESSION
3733 struct dnode_of_data dn;
3734 sector_t start_idx, blknr = 0;
3735 int ret;
3736
3737 start_idx = round_down(block, F2FS_I(inode)->i_cluster_size);
3738
3739 set_new_dnode(&dn, inode, NULL, NULL, 0);
3740 ret = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
3741 if (ret)
3742 return 0;
3743
3744 if (dn.data_blkaddr != COMPRESS_ADDR) {
3745 dn.ofs_in_node += block - start_idx;
3746 blknr = f2fs_data_blkaddr(&dn);
3747 if (!__is_valid_data_blkaddr(blknr))
3748 blknr = 0;
3749 }
3750
3751 f2fs_put_dnode(&dn);
Chao Yuc1c63382020-03-30 17:13:29 +08003752 return blknr;
3753#else
Chao Yu250e84d2020-06-28 20:29:38 +08003754 return 0;
Chao Yuc1c63382020-03-30 17:13:29 +08003755#endif
3756}
3757
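/*
 * ->bmap: translate a logical file block into a physical block number.
 * Dirty pages are flushed first so the result is stable; inline-data
 * files and out-of-range block numbers report 0.
 */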
Jaegeuk Kimc01e54b2013-01-17 20:30:23 +09003759static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
3760{
Chao Yu454ae7e2014-04-22 13:34:01 +08003761 struct inode *inode = mapping->host;
Chao Yub79b0a32020-06-29 20:13:12 +08003762 sector_t blknr = 0;
Chao Yu454ae7e2014-04-22 13:34:01 +08003763
Jaegeuk Kim1d373a02015-10-19 10:29:51 -07003764 if (f2fs_has_inline_data(inode))
Chao Yub79b0a32020-06-29 20:13:12 +08003765 goto out;
Jaegeuk Kim1d373a02015-10-19 10:29:51 -07003766
3767	/* flush dirty pages to make sure all blocks are allocated */
3768 if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
3769 filemap_write_and_wait(mapping);
3770
Daeho Jeong4eda1682020-08-31 09:24:01 +09003771	/* the block number must be less than max_file_blocks() */
Chengguang Xu6d1451b2021-01-13 13:21:54 +08003772 if (unlikely(block >= max_file_blocks(inode)))
Daeho Jeong4eda1682020-08-31 09:24:01 +09003773 goto out;
Chao Yuc1c63382020-03-30 17:13:29 +08003774
Daeho Jeong4eda1682020-08-31 09:24:01 +09003775 if (f2fs_compressed_file(inode)) {
3776 blknr = f2fs_bmap_compress(inode, block);
3777 } else {
Jaegeuk Kimb876f4c92020-11-24 15:19:10 -08003778 struct f2fs_map_blocks map;
3779
3780 memset(&map, 0, sizeof(map));
3781 map.m_lblk = block;
3782 map.m_len = 1;
3783 map.m_next_pgofs = NULL;
3784 map.m_seg_type = NO_CHECK_TYPE;
3785
3786 if (!f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_BMAP))
3787 blknr = map.m_pblk;
Daeho Jeong4eda1682020-08-31 09:24:01 +09003788 }
Chao Yub79b0a32020-06-29 20:13:12 +08003789out:
3790 trace_f2fs_bmap(inode, block, blknr);
3791 return blknr;
Chao Yu429511c2015-02-05 17:54:31 +08003792}
3793
Jaegeuk Kim4969c062019-07-01 19:15:29 -07003794#ifdef CONFIG_SWAP
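/*
 * Rewrite blkcnt blocks starting at start_blk, one section at a time:
 * each pass reserves a pinned cold-data section, redirties the pages
 * of the section, and writes them back out-of-place so the range ends
 * up aligned to section boundaries.
 */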
Chao Yu859fca62021-05-26 14:29:27 +08003795static int f2fs_migrate_blocks(struct inode *inode, block_t start_blk,
3796 unsigned int blkcnt)
3797{
3798 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3799 unsigned int blkofs;
3800 unsigned int blk_per_sec = BLKS_PER_SEC(sbi);
3801 unsigned int secidx = start_blk / blk_per_sec;
3802 unsigned int end_sec = secidx + blkcnt / blk_per_sec;
3803 int ret = 0;
3804
Tim Murraye4544b62022-01-07 12:48:44 -08003805 f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
Jan Karaedc6d012021-04-13 18:10:37 +02003806 filemap_invalidate_lock(inode->i_mapping);
Chao Yu859fca62021-05-26 14:29:27 +08003807
3808 set_inode_flag(inode, FI_ALIGNED_WRITE);
Chao Yu1018a542022-02-04 15:19:46 +08003809 set_inode_flag(inode, FI_OPU_WRITE);
Chao Yu859fca62021-05-26 14:29:27 +08003810
3811 for (; secidx < end_sec; secidx++) {
Tim Murraye4544b62022-01-07 12:48:44 -08003812 f2fs_down_write(&sbi->pin_sem);
Chao Yu859fca62021-05-26 14:29:27 +08003813
3814 f2fs_lock_op(sbi);
3815 f2fs_allocate_new_section(sbi, CURSEG_COLD_DATA_PINNED, false);
3816 f2fs_unlock_op(sbi);
3817
Chao Yu1018a542022-02-04 15:19:46 +08003818 set_inode_flag(inode, FI_SKIP_WRITES);
Chao Yu859fca62021-05-26 14:29:27 +08003819
3820 for (blkofs = 0; blkofs < blk_per_sec; blkofs++) {
3821 struct page *page;
3822 unsigned int blkidx = secidx * blk_per_sec + blkofs;
3823
3824 page = f2fs_get_lock_data_page(inode, blkidx, true);
3825 if (IS_ERR(page)) {
Tim Murraye4544b62022-01-07 12:48:44 -08003826 f2fs_up_write(&sbi->pin_sem);
Chao Yu859fca62021-05-26 14:29:27 +08003827 ret = PTR_ERR(page);
3828 goto done;
3829 }
3830
3831 set_page_dirty(page);
3832 f2fs_put_page(page, 1);
3833 }
3834
Chao Yu1018a542022-02-04 15:19:46 +08003835 clear_inode_flag(inode, FI_SKIP_WRITES);
Chao Yu859fca62021-05-26 14:29:27 +08003836
3837 ret = filemap_fdatawrite(inode->i_mapping);
3838
Tim Murraye4544b62022-01-07 12:48:44 -08003839 f2fs_up_write(&sbi->pin_sem);
Chao Yu859fca62021-05-26 14:29:27 +08003840
3841 if (ret)
3842 break;
3843 }
3844
3845done:
Chao Yu1018a542022-02-04 15:19:46 +08003846 clear_inode_flag(inode, FI_SKIP_WRITES);
3847 clear_inode_flag(inode, FI_OPU_WRITE);
Chao Yu859fca62021-05-26 14:29:27 +08003848 clear_inode_flag(inode, FI_ALIGNED_WRITE);
3849
Jan Karaedc6d012021-04-13 18:10:37 +02003850 filemap_invalidate_unlock(inode->i_mapping);
Tim Murraye4544b62022-01-07 12:48:44 -08003851 f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
Chao Yu859fca62021-05-26 14:29:27 +08003852
3853 return ret;
3854}
3855
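/*
 * Build the swap extent list for a swapfile: map the entire file,
 * reject holes, migrate extents that are not section-aligned, and
 * register each contiguous run with add_swap_extent(). Returns the
 * number of extents on success.
 */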
Chao Yu0b8fc002021-05-26 14:29:26 +08003856static int check_swap_activate(struct swap_info_struct *sis,
Chao Yuaf4b6b82020-10-12 17:06:05 +08003857 struct file *swap_file, sector_t *span)
3858{
3859 struct address_space *mapping = swap_file->f_mapping;
3860 struct inode *inode = mapping->host;
huangjianan@oppo.com36e4d952021-03-01 12:58:44 +08003861 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
Chao Yuaf4b6b82020-10-12 17:06:05 +08003862 sector_t cur_lblock;
3863 sector_t last_lblock;
3864 sector_t pblock;
3865 sector_t lowest_pblock = -1;
3866 sector_t highest_pblock = 0;
3867 int nr_extents = 0;
3868 unsigned long nr_pblocks;
Chao Yu859fca62021-05-26 14:29:27 +08003869 unsigned int blks_per_sec = BLKS_PER_SEC(sbi);
3870 unsigned int sec_blks_mask = BLKS_PER_SEC(sbi) - 1;
Jaegeuk Kimca298242021-05-11 14:38:47 -07003871 unsigned int not_aligned = 0;
huangjianan@oppo.com36e4d952021-03-01 12:58:44 +08003872 int ret = 0;
Chao Yuaf4b6b82020-10-12 17:06:05 +08003873
3874 /*
3875 * Map all the blocks into the extent list. This code doesn't try
3876 * to be very smart.
3877 */
3878 cur_lblock = 0;
Jaegeuk Kim6cbfcab2020-11-24 14:55:47 -08003879 last_lblock = bytes_to_blks(inode, i_size_read(inode));
Chao Yuaf4b6b82020-10-12 17:06:05 +08003880
huangjianan@oppo.com1da66102021-02-27 20:02:30 +08003881 while (cur_lblock < last_lblock && cur_lblock < sis->max) {
Jaegeuk Kimb876f4c92020-11-24 15:19:10 -08003882 struct f2fs_map_blocks map;
Chao Yu859fca62021-05-26 14:29:27 +08003883retry:
Chao Yuaf4b6b82020-10-12 17:06:05 +08003884 cond_resched();
3885
Jaegeuk Kimb876f4c92020-11-24 15:19:10 -08003886 memset(&map, 0, sizeof(map));
3887 map.m_lblk = cur_lblock;
huangjianan@oppo.com36e4d952021-03-01 12:58:44 +08003888 map.m_len = last_lblock - cur_lblock;
3889 map.m_next_pgofs = NULL;
3890 map.m_next_extent = NULL;
Jaegeuk Kimb876f4c92020-11-24 15:19:10 -08003891 map.m_seg_type = NO_CHECK_TYPE;
huangjianan@oppo.com36e4d952021-03-01 12:58:44 +08003892 map.m_may_create = false;
Chao Yuaf4b6b82020-10-12 17:06:05 +08003893
Jaegeuk Kimb876f4c92020-11-24 15:19:10 -08003894 ret = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_FIEMAP);
Chao Yuaf4b6b82020-10-12 17:06:05 +08003895 if (ret)
huangjianan@oppo.com36e4d952021-03-01 12:58:44 +08003896 goto out;
Chao Yuaf4b6b82020-10-12 17:06:05 +08003897
3898 /* hole */
huangjianan@oppo.com36e4d952021-03-01 12:58:44 +08003899 if (!(map.m_flags & F2FS_MAP_FLAGS)) {
Joe Perches833dcd32021-05-26 13:05:36 -07003900 f2fs_err(sbi, "Swapfile has holes");
Jaegeuk Kimf3951832021-05-12 07:38:00 -07003901 ret = -EINVAL;
huangjianan@oppo.com36e4d952021-03-01 12:58:44 +08003902 goto out;
3903 }
Chao Yuaf4b6b82020-10-12 17:06:05 +08003904
Jaegeuk Kimb876f4c92020-11-24 15:19:10 -08003905 pblock = map.m_pblk;
3906 nr_pblocks = map.m_len;
Chao Yuaf4b6b82020-10-12 17:06:05 +08003907
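		/*
		 * Extents must start and end on a section boundary;
		 * otherwise rewrite the blocks in place and retry the
		 * mapping.
		 */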
Chao Yu859fca62021-05-26 14:29:27 +08003908 if ((pblock - SM_I(sbi)->main_blkaddr) & sec_blks_mask ||
3909 nr_pblocks & sec_blks_mask) {
Jaegeuk Kimca298242021-05-11 14:38:47 -07003910 not_aligned++;
huangjianan@oppo.com36e4d952021-03-01 12:58:44 +08003911
Chao Yu859fca62021-05-26 14:29:27 +08003912 nr_pblocks = roundup(nr_pblocks, blks_per_sec);
3913 if (cur_lblock + nr_pblocks > sis->max)
3914 nr_pblocks -= blks_per_sec;
3915
3916 if (!nr_pblocks) {
3917				/* this extent is the last one */
3918 nr_pblocks = map.m_len;
3919 f2fs_warn(sbi, "Swapfile: last extent is not aligned to section");
3920 goto next;
3921 }
3922
3923 ret = f2fs_migrate_blocks(inode, cur_lblock,
3924 nr_pblocks);
3925 if (ret)
3926 goto out;
3927 goto retry;
3928 }
3929next:
Chao Yuaf4b6b82020-10-12 17:06:05 +08003930 if (cur_lblock + nr_pblocks >= sis->max)
3931 nr_pblocks = sis->max - cur_lblock;
3932
3933 if (cur_lblock) { /* exclude the header page */
3934 if (pblock < lowest_pblock)
3935 lowest_pblock = pblock;
3936 if (pblock + nr_pblocks - 1 > highest_pblock)
3937 highest_pblock = pblock + nr_pblocks - 1;
3938 }
3939
3940		/*
3941		 * We found a contiguous run of blocks; add it as a swap extent.
3942		 */
3943 ret = add_swap_extent(sis, cur_lblock, nr_pblocks, pblock);
3944 if (ret < 0)
3945 goto out;
3946 nr_extents += ret;
3947 cur_lblock += nr_pblocks;
3948 }
3949 ret = nr_extents;
3950 *span = 1 + highest_pblock - lowest_pblock;
3951 if (cur_lblock == 0)
3952 cur_lblock = 1; /* force Empty message */
3953 sis->max = cur_lblock;
3954 sis->pages = cur_lblock - 1;
3955 sis->highest_bit = cur_lblock - 1;
3956out:
Chao Yu859fca62021-05-26 14:29:27 +08003957 if (not_aligned)
3958		f2fs_warn(sbi, "Swapfile (%u) is not aligned to section: 1) creat(), 2) ioctl(F2FS_IOC_SET_PIN_FILE), 3) fallocate(%u * N)",
3959 not_aligned, blks_per_sec * F2FS_BLKSIZE);
Chao Yuaf4b6b82020-10-12 17:06:05 +08003960 return ret;
Chao Yuaf4b6b82020-10-12 17:06:05 +08003961}
3962
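/*
 * ->swap_activate: check that the file can back swap (a regular file
 * on a writable, non-LFS filesystem), convert away inline data,
 * disable compression, build the extent list, and pin the file so its
 * blocks cannot move while swapping.
 */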
Jaegeuk Kim4969c062019-07-01 19:15:29 -07003963static int f2fs_swap_activate(struct swap_info_struct *sis, struct file *file,
3964 sector_t *span)
3965{
3966 struct inode *inode = file_inode(file);
3967 int ret;
3968
3969 if (!S_ISREG(inode->i_mode))
3970 return -EINVAL;
3971
3972 if (f2fs_readonly(F2FS_I_SB(inode)->sb))
3973 return -EROFS;
3974
Shin'ichiro Kawasakid927ccf2021-05-10 20:24:44 +09003975 if (f2fs_lfs_mode(F2FS_I_SB(inode))) {
3976 f2fs_err(F2FS_I_SB(inode),
3977 "Swapfile not supported in LFS mode");
3978 return -EINVAL;
3979 }
3980
Jaegeuk Kim4969c062019-07-01 19:15:29 -07003981 ret = f2fs_convert_inline_inode(inode);
3982 if (ret)
3983 return ret;
3984
Daeho Jeong78134d02020-09-08 11:44:11 +09003985 if (!f2fs_disable_compressed_file(inode))
Chao Yu4c8ff702019-11-01 18:07:14 +08003986 return -EINVAL;
3987
Chao Yu0b979f12020-12-26 18:07:41 +08003988 f2fs_precache_extents(inode);
3989
Chao Yu3e5e4792019-12-27 18:44:56 +08003990 ret = check_swap_activate(sis, file, span);
3991 if (ret < 0)
Jaegeuk Kim4969c062019-07-01 19:15:29 -07003992 return ret;
3993
Chao Yu8ec071c2022-10-04 09:11:33 +08003994 stat_inc_swapfile_inode(inode);
Jaegeuk Kim4969c062019-07-01 19:15:29 -07003995 set_inode_flag(inode, FI_PIN_FILE);
Jaegeuk Kim4969c062019-07-01 19:15:29 -07003996 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
Chao Yu3e5e4792019-12-27 18:44:56 +08003997 return ret;
Jaegeuk Kim4969c062019-07-01 19:15:29 -07003998}
3999
4000static void f2fs_swap_deactivate(struct file *file)
4001{
4002 struct inode *inode = file_inode(file);
4003
Chao Yu8ec071c2022-10-04 09:11:33 +08004004 stat_dec_swapfile_inode(inode);
Jaegeuk Kim4969c062019-07-01 19:15:29 -07004005 clear_inode_flag(inode, FI_PIN_FILE);
4006}
4007#else
4008static int f2fs_swap_activate(struct swap_info_struct *sis, struct file *file,
4009 sector_t *span)
4010{
4011 return -EOPNOTSUPP;
4012}
4013
4014static void f2fs_swap_deactivate(struct file *file)
4015{
4016}
4017#endif
4018
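/* address_space operations for f2fs data pages */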
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09004019const struct address_space_operations f2fs_dblock_aops = {
Matthew Wilcox (Oracle)be055842022-04-29 11:12:16 -04004020 .read_folio = f2fs_read_data_folio,
Matthew Wilcox (Oracle)23323192020-06-01 21:47:23 -07004021 .readahead = f2fs_readahead,
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09004022 .writepage = f2fs_write_data_page,
4023 .writepages = f2fs_write_data_pages,
4024 .write_begin = f2fs_write_begin,
Jaegeuk Kima1dd3c12013-06-27 13:04:08 +09004025 .write_end = f2fs_write_end,
Matthew Wilcox (Oracle)4f5e34f2022-02-09 20:22:07 +00004026 .dirty_folio = f2fs_dirty_data_folio,
Matthew Wilcox (Oracle)1d5b9bd2022-06-06 10:47:21 -04004027 .migrate_folio = filemap_migrate_folio,
Matthew Wilcox (Oracle)91503992022-02-09 20:21:44 +00004028 .invalidate_folio = f2fs_invalidate_folio,
Matthew Wilcox (Oracle)c26cd042022-04-30 23:41:46 -04004029 .release_folio = f2fs_release_folio,
Eric Biggersa1e09b02021-07-23 00:59:21 -07004030 .direct_IO = noop_direct_IO,
Jaegeuk Kimc01e54b2013-01-17 20:30:23 +09004031 .bmap = f2fs_bmap,
Jaegeuk Kim4969c062019-07-01 19:15:29 -07004032 .swap_activate = f2fs_swap_activate,
4033 .swap_deactivate = f2fs_swap_deactivate,
Jaegeuk Kimeb47b802012-11-02 17:10:12 +09004034};
Eric Biggers6dbb1792018-04-18 11:09:48 -07004035
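/*
 * Clear the PAGECACHE_TAG_DIRTY tag for a page in the page-cache
 * xarray without touching the page's own dirty bit or the dirty
 * accounting.
 */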
Matthew Wilcox5ec2d992017-12-04 20:25:25 -05004036void f2fs_clear_page_cache_dirty_tag(struct page *page)
Chao Yuaec2f722018-05-26 18:03:35 +08004037{
4038 struct address_space *mapping = page_mapping(page);
4039 unsigned long flags;
4040
4041 xa_lock_irqsave(&mapping->i_pages, flags);
Matthew Wilcox5ec2d992017-12-04 20:25:25 -05004042 __xa_clear_mark(&mapping->i_pages, page_index(page),
Chao Yuaec2f722018-05-26 18:03:35 +08004043 PAGECACHE_TAG_DIRTY);
4044 xa_unlock_irqrestore(&mapping->i_pages, flags);
4045}
4046
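/*
 * Set up the slab cache and mempool backing bio_post_read_ctx objects;
 * the mempool preallocates NUM_PREALLOC_POST_READ_CTXS contexts so
 * post-read handling can make progress under memory pressure.
 */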
Eric Biggers6dbb1792018-04-18 11:09:48 -07004047int __init f2fs_init_post_read_processing(void)
4048{
Eric Biggers95ae2512019-07-22 09:26:24 -07004049 bio_post_read_ctx_cache =
4050 kmem_cache_create("f2fs_bio_post_read_ctx",
4051 sizeof(struct bio_post_read_ctx), 0, 0, NULL);
Eric Biggers6dbb1792018-04-18 11:09:48 -07004052 if (!bio_post_read_ctx_cache)
4053 goto fail;
4054 bio_post_read_ctx_pool =
4055 mempool_create_slab_pool(NUM_PREALLOC_POST_READ_CTXS,
4056 bio_post_read_ctx_cache);
4057 if (!bio_post_read_ctx_pool)
4058 goto fail_free_cache;
4059 return 0;
4060
4061fail_free_cache:
4062 kmem_cache_destroy(bio_post_read_ctx_cache);
4063fail:
4064 return -ENOMEM;
4065}
4066
Chao Yu0b20fce2019-09-30 18:53:25 +08004067void f2fs_destroy_post_read_processing(void)
Eric Biggers6dbb1792018-04-18 11:09:48 -07004068{
4069 mempool_destroy(bio_post_read_ctx_pool);
4070 kmem_cache_destroy(bio_post_read_ctx_cache);
4071}
Chao Yu0b20fce2019-09-30 18:53:25 +08004072
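/*
 * The post-read workqueue is only needed when reads may require
 * post-processing, i.e. when encryption, verity or compression is
 * enabled.
 */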
Chao Yu4c8ff702019-11-01 18:07:14 +08004073int f2fs_init_post_read_wq(struct f2fs_sb_info *sbi)
4074{
4075 if (!f2fs_sb_has_encrypt(sbi) &&
4076 !f2fs_sb_has_verity(sbi) &&
4077 !f2fs_sb_has_compression(sbi))
4078 return 0;
4079
4080 sbi->post_read_wq = alloc_workqueue("f2fs_post_read_wq",
4081 WQ_UNBOUND | WQ_HIGHPRI,
4082 num_online_cpus());
4083 if (!sbi->post_read_wq)
4084 return -ENOMEM;
4085 return 0;
4086}
4087
4088void f2fs_destroy_post_read_wq(struct f2fs_sb_info *sbi)
4089{
4090 if (sbi->post_read_wq)
4091 destroy_workqueue(sbi->post_read_wq);
4092}
4093
Chao Yu0b20fce2019-09-30 18:53:25 +08004094int __init f2fs_init_bio_entry_cache(void)
4095{
Chao Yu98510002020-02-17 17:46:20 +08004096 bio_entry_slab = f2fs_kmem_cache_create("f2fs_bio_entry_slab",
Chao Yu0b20fce2019-09-30 18:53:25 +08004097 sizeof(struct bio_entry));
4098 if (!bio_entry_slab)
4099 return -ENOMEM;
4100 return 0;
4101}
4102
Chao Yuf5438052019-12-04 09:52:58 +08004103void f2fs_destroy_bio_entry_cache(void)
Chao Yu0b20fce2019-09-30 18:53:25 +08004104{
4105 kmem_cache_destroy(bio_entry_slab);
4106}
Eric Biggers1517c1a2021-07-23 00:59:20 -07004107
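/*
 * Translate a byte range of the file into an iomap for the iomap
 * infrastructure: map blocks via f2fs_map_blocks(), report the extent
 * as MAPPED, UNWRITTEN or HOLE, and let fscrypt clamp the length where
 * inline-crypto DUN contiguity would otherwise be broken.
 */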
4108static int f2fs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
4109 unsigned int flags, struct iomap *iomap,
4110 struct iomap *srcmap)
4111{
4112 struct f2fs_map_blocks map = {};
4113 pgoff_t next_pgofs = 0;
4114 int err;
4115
4116 map.m_lblk = bytes_to_blks(inode, offset);
4117 map.m_len = bytes_to_blks(inode, offset + length - 1) - map.m_lblk + 1;
4118 map.m_next_pgofs = &next_pgofs;
4119 map.m_seg_type = f2fs_rw_hint_to_seg_type(inode->i_write_hint);
4120 if (flags & IOMAP_WRITE)
4121 map.m_may_create = true;
4122
4123 err = f2fs_map_blocks(inode, &map, flags & IOMAP_WRITE,
4124 F2FS_GET_BLOCK_DIO);
4125 if (err)
4126 return err;
4127
4128 iomap->offset = blks_to_bytes(inode, map.m_lblk);
4129
Eric Biggers8a2c77b2022-01-28 15:39:39 -08004130 /*
4131 * When inline encryption is enabled, sometimes I/O to an encrypted file
4132 * has to be broken up to guarantee DUN contiguity. Handle this by
4133 * limiting the length of the mapping returned.
4134 */
4135 map.m_len = fscrypt_limit_io_blocks(inode, map.m_lblk, map.m_len);
4136
Eric Biggers1517c1a2021-07-23 00:59:20 -07004137 if (map.m_flags & (F2FS_MAP_MAPPED | F2FS_MAP_UNWRITTEN)) {
4138 iomap->length = blks_to_bytes(inode, map.m_len);
4139 if (map.m_flags & F2FS_MAP_MAPPED) {
4140 iomap->type = IOMAP_MAPPED;
4141 iomap->flags |= IOMAP_F_MERGED;
4142 } else {
4143 iomap->type = IOMAP_UNWRITTEN;
4144 }
4145 if (WARN_ON_ONCE(!__is_valid_data_blkaddr(map.m_pblk)))
4146 return -EINVAL;
4147
4148 iomap->bdev = map.m_bdev;
4149 iomap->addr = blks_to_bytes(inode, map.m_pblk);
4150 } else {
4151 iomap->length = blks_to_bytes(inode, next_pgofs) -
4152 iomap->offset;
4153 iomap->type = IOMAP_HOLE;
4154 iomap->addr = IOMAP_NULL_ADDR;
4155 }
4156
4157 if (map.m_flags & F2FS_MAP_NEW)
4158 iomap->flags |= IOMAP_F_NEW;
4159 if ((inode->i_state & I_DIRTY_DATASYNC) ||
4160 offset + length > i_size_read(inode))
4161 iomap->flags |= IOMAP_F_DIRTY;
4162
4163 return 0;
4164}
4165
4166const struct iomap_ops f2fs_iomap_ops = {
4167 .iomap_begin = f2fs_iomap_begin,
4168};