// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/fs.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/gfs2_ondisk.h>
#include <linux/backing-dev.h>
#include <linux/uio.h>
#include <trace/events/writeback.h>
#include <linux/sched/signal.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "trans.h"
#include "rgrp.h"
#include "super.h"
#include "util.h"
#include "glops.h"
#include "aops.h"

void gfs2_page_add_databufs(struct gfs2_inode *ip, struct page *page,
			    unsigned int from, unsigned int len)
{
	struct buffer_head *head = page_buffers(page);
	unsigned int bsize = head->b_size;
	struct buffer_head *bh;
	unsigned int to = from + len;
	unsigned int start, end;

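	/*
	 * Walk the circular list of buffers attached to the page.  The
	 * loop condition (bh != head || !start) lets the head buffer
	 * through once and stops when we wrap back around to it.  Only
	 * buffers overlapping [from, from + len) join the transaction.
	 */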
	for (bh = head, start = 0; bh != head || !start;
	     bh = bh->b_this_page, start = end) {
		end = start + bsize;
		if (end <= from)
			continue;
		if (start >= to)
			break;
		set_buffer_uptodate(bh);
		gfs2_trans_add_data(ip->i_gl, bh);
	}
}

/**
 * gfs2_get_block_noalloc - Fills in a buffer head with details about a block
 * @inode: The inode
 * @lblock: The block number to look up
 * @bh_result: The buffer head to return the result in
 * @create: Non-zero if we may add a block to the file
 *
 * Returns: errno
 */

static int gfs2_get_block_noalloc(struct inode *inode, sector_t lblock,
				  struct buffer_head *bh_result, int create)
{
	int error;

	error = gfs2_block_map(inode, lblock, bh_result, 0);
	if (error)
		return error;
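	/*
	 * This path never allocates, so an unmapped buffer here means the
	 * write landed on a hole; report -ENODATA rather than an I/O error.
	 */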
	if (!buffer_mapped(bh_result))
		return -ENODATA;
	return 0;
}

/**
 * gfs2_write_jdata_page - gfs2 jdata-specific version of block_write_full_page
 * @page: The page to write
 * @wbc: The writeback control
 *
 * This is the same as calling block_write_full_page, but it also
 * writes pages outside of i_size
 */
static int gfs2_write_jdata_page(struct page *page,
				 struct writeback_control *wbc)
{
	struct inode * const inode = page->mapping->host;
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = i_size >> PAGE_SHIFT;
	unsigned offset;

	/*
	 * The page straddles i_size.  It must be zeroed out on each and every
	 * writepage invocation because it may be mmapped.  "A file is mapped
	 * in multiples of the page size.  For a file that is not a multiple of
	 * the page size, the remaining memory is zeroed when mapped, and
	 * writes to that region are not written out to the file."
	 */
	offset = i_size & (PAGE_SIZE - 1);
	if (page->index == end_index && offset)
		zero_user_segment(page, offset, PAGE_SIZE);

	return __block_write_full_page(inode, page, gfs2_get_block_noalloc, wbc,
				       end_buffer_async_write);
}

/**
 * __gfs2_jdata_writepage - The core of jdata writepage
 * @page: The page to write
 * @wbc: The writeback control
 *
 * This is shared between writepage and writepages and implements the
 * core of the writepage operation. If a transaction is required then
 * PageChecked will have been set and the transaction will have
 * already been started before this is called.
 */

static int __gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);

	if (PageChecked(page)) {
		ClearPageChecked(page);
		if (!page_has_buffers(page)) {
			create_empty_buffers(page, inode->i_sb->s_blocksize,
					     BIT(BH_Dirty)|BIT(BH_Uptodate));
		}
		gfs2_page_add_databufs(ip, page, 0, sdp->sd_vfs->s_blocksize);
	}
	return gfs2_write_jdata_page(page, wbc);
}

/**
 * gfs2_jdata_writepage - Write complete page
 * @page: Page to write
 * @wbc: The writeback control
 *
 * Returns: errno
 */

static int gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);

	if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))
		goto out;
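	/*
	 * If the page still needs a transaction (PageChecked) or we are
	 * already inside one (journal_info), we cannot safely write it
	 * here; redirty the page and let ->writepages() deal with it.
	 */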
	if (PageChecked(page) || current->journal_info)
		goto out_ignore;
	return __gfs2_jdata_writepage(page, wbc);

out_ignore:
	redirty_page_for_writepage(wbc, page);
out:
	unlock_page(page);
	return 0;
}

/**
 * gfs2_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: Write-back control
 *
 * Used for both ordered and writeback modes.
 */
static int gfs2_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
	struct iomap_writepage_ctx wpc = { };
	int ret;

	/*
	 * Even if we didn't write any pages here, we might still be holding
	 * dirty pages in the ail. We forcibly flush the ail because we don't
	 * want balance_dirty_pages() to loop indefinitely trying to write out
	 * pages held in the ail that it can't find.
	 */
	ret = iomap_writepages(mapping, wbc, &wpc, &gfs2_writeback_ops);
	if (ret == 0)
		set_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags);
	return ret;
}

/**
 * gfs2_write_jdata_pagevec - Write back a pagevec's worth of pages
 * @mapping: The mapping
 * @wbc: The writeback control
 * @pvec: The vector of pages
 * @nr_pages: The number of pages to write
 * @done_index: Updated with the index of the last page processed
 *
 * Returns: non-zero if loop should terminate, zero otherwise
 */

static int gfs2_write_jdata_pagevec(struct address_space *mapping,
				    struct writeback_control *wbc,
				    struct pagevec *pvec,
				    int nr_pages,
				    pgoff_t *done_index)
{
	struct inode *inode = mapping->host;
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	unsigned nrblocks = nr_pages * (PAGE_SIZE >> inode->i_blkbits);
	int i;
	int ret;

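	/*
	 * Reserve journal space for every block in the pagevec up front,
	 * so that all of its pages can be written under one transaction.
	 */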
	ret = gfs2_trans_begin(sdp, nrblocks, nrblocks);
	if (ret < 0)
		return ret;

	for (i = 0; i < nr_pages; i++) {
		struct page *page = pvec->pages[i];

		*done_index = page->index;

		lock_page(page);

		if (unlikely(page->mapping != mapping)) {
continue_unlock:
			unlock_page(page);
			continue;
		}

		if (!PageDirty(page)) {
			/* someone wrote it for us */
			goto continue_unlock;
		}

		if (PageWriteback(page)) {
			if (wbc->sync_mode != WB_SYNC_NONE)
				wait_on_page_writeback(page);
			else
				goto continue_unlock;
		}

		BUG_ON(PageWriteback(page));
		if (!clear_page_dirty_for_io(page))
			goto continue_unlock;

		trace_wbc_writepage(wbc, inode_to_bdi(inode));

		ret = __gfs2_jdata_writepage(page, wbc);
		if (unlikely(ret)) {
			if (ret == AOP_WRITEPAGE_ACTIVATE) {
				unlock_page(page);
				ret = 0;
			} else {
				/*
				 * done_index is set past this page,
				 * so media errors will not choke
				 * background writeout for the entire
				 * file. This has consequences for
				 * range_cyclic semantics (ie. it may
				 * not be suitable for data integrity
				 * writeout).
				 */
				*done_index = page->index + 1;
				ret = 1;
				break;
			}
		}

		/*
		 * We stop writing back only if we are not doing
		 * integrity sync. In case of integrity sync we have to
		 * keep going until we have written all the pages
		 * we tagged for writeback prior to entering this loop.
		 */
		if (--wbc->nr_to_write <= 0 && wbc->sync_mode == WB_SYNC_NONE) {
			ret = 1;
			break;
		}
	}
	gfs2_trans_end(sdp);
	return ret;
}

/**
 * gfs2_write_cache_jdata - Like write_cache_pages but different
 * @mapping: The mapping to write
 * @wbc: The writeback control
 *
 * The reason that we use our own function here is that we need to
 * start transactions before we grab page locks. This allows us
 * to get the ordering right.
 */

static int gfs2_write_cache_jdata(struct address_space *mapping,
				  struct writeback_control *wbc)
{
	int ret = 0;
	int done = 0;
	struct pagevec pvec;
	int nr_pages;
	pgoff_t writeback_index;
	pgoff_t index;
	pgoff_t end;
	pgoff_t done_index;
	int cycled;
	int range_whole = 0;
	xa_mark_t tag;

	pagevec_init(&pvec);
	if (wbc->range_cyclic) {
		writeback_index = mapping->writeback_index; /* prev offset */
		index = writeback_index;
		if (index == 0)
			cycled = 1;
		else
			cycled = 0;
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		cycled = 1; /* ignore range_cyclic tests */
	}
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag = PAGECACHE_TAG_TOWRITE;
	else
		tag = PAGECACHE_TAG_DIRTY;

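	/*
	 * For integrity sync, tag the pages that are dirty now before
	 * starting, so that pages dirtied while we work cannot keep the
	 * loop going forever (the usual livelock avoidance, as in
	 * write_cache_pages()).
	 */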
retry:
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag_pages_for_writeback(mapping, index, end);
	done_index = index;
	while (!done && (index <= end)) {
		nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
						    tag);
		if (nr_pages == 0)
			break;

		ret = gfs2_write_jdata_pagevec(mapping, wbc, &pvec, nr_pages, &done_index);
		if (ret)
			done = 1;
		if (ret > 0)
			ret = 0;
		pagevec_release(&pvec);
		cond_resched();
	}

	if (!cycled && !done) {
		/*
		 * range_cyclic:
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		cycled = 1;
		index = 0;
		end = writeback_index - 1;
		goto retry;
	}

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = done_index;

	return ret;
}

/**
 * gfs2_jdata_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: The writeback control
 */

static int gfs2_jdata_writepages(struct address_space *mapping,
				 struct writeback_control *wbc)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
	int ret;

	ret = gfs2_write_cache_jdata(mapping, wbc);
	if (ret == 0 && wbc->sync_mode == WB_SYNC_ALL) {
		gfs2_log_flush(sdp, ip->i_gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
			       GFS2_LFC_JDATA_WPAGES);
		ret = gfs2_write_cache_jdata(mapping, wbc);
	}
	return ret;
}

/**
 * stuffed_readpage - Fill in a Linux page with stuffed file data
 * @ip: the inode
 * @page: the page
 *
 * Returns: errno
 */
static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
{
	struct buffer_head *dibh;
	u64 dsize = i_size_read(&ip->i_inode);
	void *kaddr;
	int error;

	/*
	 * Due to the order of unstuffing files and ->fault(), we can be
	 * asked for a zero page in the case of a stuffed file being extended,
	 * so we need to supply one here. It doesn't happen often.
	 */
	if (unlikely(page->index)) {
		zero_user(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
		return 0;
	}

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

	kaddr = kmap_atomic(page);
	if (dsize > gfs2_max_stuffed_size(ip))
		dsize = gfs2_max_stuffed_size(ip);
	memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
	memset(kaddr + dsize, 0, PAGE_SIZE - dsize);
	kunmap_atomic(kaddr);
	flush_dcache_page(page);
	brelse(dibh);
	SetPageUptodate(page);

	return 0;
}

/**
 * gfs2_read_folio - read a folio from a file
 * @file: The file to read
 * @folio: The folio in the file
 */
static int gfs2_read_folio(struct file *file, struct folio *folio)
{
	struct inode *inode = folio->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	int error;

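	/*
	 * Non-jdata folios (and jdata folios without buffers, when the
	 * block size matches the page size) are read via iomap; stuffed
	 * inodes are copied straight out of the dinode block; everything
	 * else goes through mpage_read_folio().
	 */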
	if (!gfs2_is_jdata(ip) ||
	    (i_blocksize(inode) == PAGE_SIZE && !folio_buffers(folio))) {
		error = iomap_read_folio(folio, &gfs2_iomap_ops);
	} else if (gfs2_is_stuffed(ip)) {
		error = stuffed_readpage(ip, &folio->page);
		folio_unlock(folio);
	} else {
		error = mpage_read_folio(folio, gfs2_block_map);
	}

	if (unlikely(gfs2_withdrawn(sdp)))
		return -EIO;

	return error;
}

/**
 * gfs2_internal_read - read an internal file
 * @ip: The gfs2 inode
 * @buf: The buffer to fill
 * @pos: The file position
 * @size: The amount to read
 *
 * Returns: The number of bytes read, or a negative errno
 */

int gfs2_internal_read(struct gfs2_inode *ip, char *buf, loff_t *pos,
		       unsigned size)
{
	struct address_space *mapping = ip->i_inode.i_mapping;
	unsigned long index = *pos >> PAGE_SHIFT;
	unsigned offset = *pos & (PAGE_SIZE - 1);
	unsigned copied = 0;
	unsigned amt;
	struct page *page;
	void *p;

	do {
		amt = size - copied;
		if (offset + amt > PAGE_SIZE)
			amt = PAGE_SIZE - offset;
		page = read_cache_page(mapping, index, gfs2_read_folio, NULL);
		if (IS_ERR(page))
			return PTR_ERR(page);
		p = kmap_atomic(page);
		memcpy(buf + copied, p + offset, amt);
		kunmap_atomic(p);
		put_page(page);
		copied += amt;
		index++;
		offset = 0;
	} while (copied < size);
	(*pos) += size;
	return size;
}

/**
 * gfs2_readahead - Read a bunch of pages at once
 * @rac: Read-ahead control structure
 *
 * Some notes:
 * 1. This is only for readahead, so we can simply ignore anything
 *    which is slightly inconvenient (such as locking conflicts between
 *    the page lock and the glock) and return having done no I/O. It's
 *    obviously not something we'd want to do on too regular a basis.
 *    Any I/O we ignore at this time will be done via readpage later.
 * 2. We don't handle stuffed files here; we let readpage do the honours.
 * 3. mpage_readahead() does most of the heavy lifting in the common case.
 * 4. gfs2_block_map() is relied upon to set BH_Boundary in the right places.
 */

static void gfs2_readahead(struct readahead_control *rac)
{
	struct inode *inode = rac->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);

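	/*
	 * Stuffed files keep their data in the dinode block, so there is
	 * nothing useful to read ahead; let ->read_folio() handle them.
	 */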
	if (gfs2_is_stuffed(ip))
		;
	else if (gfs2_is_jdata(ip))
		mpage_readahead(rac, gfs2_block_map);
	else
		iomap_readahead(rac, &gfs2_iomap_ops);
}

/**
 * adjust_fs_space - Adjusts the free space available due to gfs2_grow
 * @inode: the rindex inode
 */
void adjust_fs_space(struct inode *inode)
{
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct buffer_head *m_bh;
	u64 fs_total, new_free;

	if (gfs2_trans_begin(sdp, 2 * RES_STATFS, 0) != 0)
		return;

	/* Total up the file system space, according to the latest rindex. */
	fs_total = gfs2_ri_total(sdp);
	if (gfs2_meta_inode_buffer(m_ip, &m_bh) != 0)
		goto out;

	spin_lock(&sdp->sd_statfs_spin);
	gfs2_statfs_change_in(m_sc, m_bh->b_data +
			      sizeof(struct gfs2_dinode));
	if (fs_total > (m_sc->sc_total + l_sc->sc_total))
		new_free = fs_total - (m_sc->sc_total + l_sc->sc_total);
	else
		new_free = 0;
	spin_unlock(&sdp->sd_statfs_spin);
	fs_warn(sdp, "File system extended by %llu blocks.\n",
		(unsigned long long)new_free);
	gfs2_statfs_change(sdp, new_free, new_free, 0);

	update_statfs(sdp, m_bh);
	brelse(m_bh);
out:
	sdp->sd_rindex_uptodate = 0;
	gfs2_trans_end(sdp);
}

static bool jdata_dirty_folio(struct address_space *mapping,
			      struct folio *folio)
{
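	/*
	 * Folios dirtied while inside a transaction are flagged "checked"
	 * so that writeback knows a transaction must be started before
	 * they can be written back (see __gfs2_jdata_writepage()).
	 */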
	if (current->journal_info)
		folio_set_checked(folio);
	return block_dirty_folio(mapping, folio);
}

/**
 * gfs2_bmap - Block map function
 * @mapping: Address space info
 * @lblock: The block to map
 *
 * Returns: The disk address for the block or 0 on hole or error
 */

static sector_t gfs2_bmap(struct address_space *mapping, sector_t lblock)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_holder i_gh;
	sector_t dblock = 0;
	int error;

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
	if (error)
		return 0;

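	/* A stuffed file has no separate data blocks, so leave dblock at 0. */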
	if (!gfs2_is_stuffed(ip))
		dblock = iomap_bmap(mapping, lblock, &gfs2_iomap_ops);

	gfs2_glock_dq_uninit(&i_gh);

	return dblock;
}

static void gfs2_discard(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	struct gfs2_bufdata *bd;

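	/*
	 * Take the buffer out of the journal: unpinned buffers are simply
	 * dropped from their log list; anything else is removed from the
	 * journal (and the ail) under the ail lock.
	 */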
	lock_buffer(bh);
	gfs2_log_lock(sdp);
	clear_buffer_dirty(bh);
	bd = bh->b_private;
	if (bd) {
		if (!list_empty(&bd->bd_list) && !buffer_pinned(bh))
			list_del_init(&bd->bd_list);
		else {
			spin_lock(&sdp->sd_ail_lock);
			gfs2_remove_from_journal(bh, REMOVE_JDATA);
			spin_unlock(&sdp->sd_ail_lock);
		}
	}
	bh->b_bdev = NULL;
	clear_buffer_mapped(bh);
	clear_buffer_req(bh);
	clear_buffer_new(bh);
	gfs2_log_unlock(sdp);
	unlock_buffer(bh);
}

static void gfs2_invalidate_folio(struct folio *folio, size_t offset,
				  size_t length)
{
	struct gfs2_sbd *sdp = GFS2_SB(folio->mapping->host);
	size_t stop = offset + length;
	int partial_page = (offset || length < folio_size(folio));
	struct buffer_head *bh, *head;
	unsigned long pos = 0;

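	/*
	 * Only a full-folio invalidation may clear the "checked" flag and,
	 * at the end, release the folio's buffers; a partial invalidation
	 * must leave both alone for the data that remains.
	 */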
	BUG_ON(!folio_test_locked(folio));
	if (!partial_page)
		folio_clear_checked(folio);
	head = folio_buffers(folio);
	if (!head)
		goto out;

	bh = head;
	do {
		if (pos + bh->b_size > stop)
			return;

		if (offset <= pos)
			gfs2_discard(sdp, bh);
		pos += bh->b_size;
		bh = bh->b_this_page;
	} while (bh != head);
out:
	if (!partial_page)
		filemap_release_folio(folio, 0);
}

/**
 * gfs2_release_folio - free the metadata associated with a folio
 * @folio: the folio that's being released
 * @gfp_mask: passed from Linux VFS, ignored by us
 *
 * Calls try_to_free_buffers() to free the buffers and put the folio if the
 * buffers can be released.
 *
 * Returns: true if the folio was put or else false
 */

bool gfs2_release_folio(struct folio *folio, gfp_t gfp_mask)
{
	struct address_space *mapping = folio->mapping;
	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
	struct buffer_head *bh, *head;
	struct gfs2_bufdata *bd;

	head = folio_buffers(folio);
	if (!head)
		return false;

	/*
	 * mm accommodates an old ext3 case where clean folios might
	 * not have had the dirty bit cleared.  Thus, it can send actual
	 * dirty folios to ->release_folio() via shrink_active_list().
	 *
	 * As a workaround, we skip folios that contain dirty buffers
	 * below.  Once ->release_folio isn't called on dirty folios
	 * anymore, we can warn on dirty buffers like we used to here
	 * again.
	 */

	gfs2_log_lock(sdp);
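	/* First pass: give up if any buffer is still busy, dirty or journaled. */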
	bh = head;
	do {
		if (atomic_read(&bh->b_count))
			goto cannot_release;
		bd = bh->b_private;
		if (bd && bd->bd_tr)
			goto cannot_release;
		if (buffer_dirty(bh) || WARN_ON(buffer_pinned(bh)))
			goto cannot_release;
		bh = bh->b_this_page;
	} while (bh != head);

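	/* Second pass: detach the gfs2_bufdata from each buffer and free it. */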
	bh = head;
	do {
		bd = bh->b_private;
		if (bd) {
			gfs2_assert_warn(sdp, bd->bd_bh == bh);
			bd->bd_bh = NULL;
			bh->b_private = NULL;
			/*
			 * The bd may still be queued as a revoke, in which
			 * case we must not dequeue nor free it.
			 */
			if (!bd->bd_blkno && !list_empty(&bd->bd_list))
				list_del_init(&bd->bd_list);
			if (list_empty(&bd->bd_list))
				kmem_cache_free(gfs2_bufdata_cachep, bd);
		}

		bh = bh->b_this_page;
	} while (bh != head);
	gfs2_log_unlock(sdp);

	return try_to_free_buffers(folio);

cannot_release:
	gfs2_log_unlock(sdp);
	return false;
}

static const struct address_space_operations gfs2_aops = {
	.writepages = gfs2_writepages,
	.read_folio = gfs2_read_folio,
	.readahead = gfs2_readahead,
	.dirty_folio = filemap_dirty_folio,
	.release_folio = iomap_release_folio,
	.invalidate_folio = iomap_invalidate_folio,
	.bmap = gfs2_bmap,
	.direct_IO = noop_direct_IO,
	.migrate_folio = filemap_migrate_folio,
	.is_partially_uptodate = iomap_is_partially_uptodate,
	.error_remove_page = generic_error_remove_page,
};

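/* Address space operations for journaled data (jdata) files. */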
static const struct address_space_operations gfs2_jdata_aops = {
	.writepage = gfs2_jdata_writepage,
	.writepages = gfs2_jdata_writepages,
	.read_folio = gfs2_read_folio,
	.readahead = gfs2_readahead,
	.dirty_folio = jdata_dirty_folio,
	.bmap = gfs2_bmap,
	.invalidate_folio = gfs2_invalidate_folio,
	.release_folio = gfs2_release_folio,
	.is_partially_uptodate = block_is_partially_uptodate,
	.error_remove_page = generic_error_remove_page,
};

void gfs2_set_aops(struct inode *inode)
{
	if (gfs2_is_jdata(GFS2_I(inode)))
		inode->i_mapping->a_ops = &gfs2_jdata_aops;
	else
		inode->i_mapping->a_ops = &gfs2_aops;
}