blob: de1d5f1d9ff8561a5bfc400e4759b70635c1455f [file] [log] [blame]
Thomas Gleixner7336d0e2019-05-31 01:09:56 -07001// SPDX-License-Identifier: GPL-2.0-only
David Teiglandb3b94fa2006-01-16 16:50:04 +00002/*
3 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
Bob Petersoncf45b752008-01-31 10:31:39 -06004 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
David Teiglandb3b94fa2006-01-16 16:50:04 +00005 */
6
David Teiglandb3b94fa2006-01-16 16:50:04 +00007#include <linux/spinlock.h>
8#include <linux/completion.h>
9#include <linux/buffer_head.h>
Steven Whitehouse5c676f62006-02-27 17:23:27 -050010#include <linux/gfs2_ondisk.h>
Steven Whitehouse6802e342008-05-21 17:03:22 +010011#include <linux/bio.h>
Steven Whitehousec65f7fb2009-10-02 11:54:39 +010012#include <linux/posix_acl.h>
Andreas Gruenbacherf39814f62015-12-24 11:09:40 -050013#include <linux/security.h>
David Teiglandb3b94fa2006-01-16 16:50:04 +000014
15#include "gfs2.h"
Steven Whitehouse5c676f62006-02-27 17:23:27 -050016#include "incore.h"
David Teiglandb3b94fa2006-01-16 16:50:04 +000017#include "bmap.h"
18#include "glock.h"
19#include "glops.h"
20#include "inode.h"
21#include "log.h"
22#include "meta_io.h"
David Teiglandb3b94fa2006-01-16 16:50:04 +000023#include "recovery.h"
24#include "rgrp.h"
Steven Whitehouse5c676f62006-02-27 17:23:27 -050025#include "util.h"
Steven Whitehouseddacfaf2006-10-03 11:10:41 -040026#include "trans.h"
Steven Whitehouse17d539f2011-06-15 10:29:37 +010027#include "dir.h"
Abhi Dasf4686c22019-05-02 14:17:40 -050028#include "lops.h"
David Teiglandb3b94fa2006-01-16 16:50:04 +000029
Benjamin Marzinski2e60d762014-11-13 20:42:04 -060030struct workqueue_struct *gfs2_freeze_wq;
31
Bob Peterson601ef0d2020-01-28 20:23:45 +010032extern struct workqueue_struct *gfs2_control_wq;
33
Steven Whitehouse75549182011-08-02 13:09:36 +010034static void gfs2_ail_error(struct gfs2_glock *gl, const struct buffer_head *bh)
35{
Bob Peterson15562c42015-03-16 11:52:05 -050036 fs_err(gl->gl_name.ln_sbd,
37 "AIL buffer %p: blocknr %llu state 0x%08lx mapping %p page "
38 "state 0x%lx\n",
Steven Whitehouse75549182011-08-02 13:09:36 +010039 bh, (unsigned long long)bh->b_blocknr, bh->b_state,
40 bh->b_page->mapping, bh->b_page->flags);
Bob Peterson15562c42015-03-16 11:52:05 -050041 fs_err(gl->gl_name.ln_sbd, "AIL glock %u:%llu mapping %p\n",
Steven Whitehouse75549182011-08-02 13:09:36 +010042 gl->gl_name.ln_type, gl->gl_name.ln_number,
43 gfs2_glock2aspace(gl));
Andreas Gruenbacherbadb55e2020-01-23 18:41:00 +010044 gfs2_lm(gl->gl_name.ln_sbd, "AIL error\n");
45 gfs2_withdraw(gl->gl_name.ln_sbd);
Steven Whitehouse75549182011-08-02 13:09:36 +010046}
47
/**
 * __gfs2_ail_flush - remove all buffers for a given lock from the AIL
 * @gl: the glock
 * @fsync: set when called from fsync (not all buffers will be clean)
 * @nr_revokes: the maximum number of buffers to revoke in this pass
 *
 * None of the buffers should be dirty, locked, or pinned.
 */

static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync,
			     unsigned int nr_revokes)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct list_head *head = &gl->gl_ail_list;
	struct gfs2_bufdata *bd, *tmp;
	struct buffer_head *bh;
	/* Buffer states that should never be seen here outside of fsync */
	const unsigned long b_state = (1UL << BH_Dirty)|(1UL << BH_Pinned)|(1UL << BH_Lock);

	/* Lock order: log lock first, then the AIL spinlock */
	gfs2_log_lock(sdp);
	spin_lock(&sdp->sd_ail_lock);
	list_for_each_entry_safe_reverse(bd, tmp, head, bd_ail_gl_list) {
		if (nr_revokes == 0)
			break;
		bh = bd->bd_bh;
		if (bh->b_state & b_state) {
			/*
			 * From fsync, a dirty/pinned/locked buffer is
			 * expected and simply skipped; otherwise it is
			 * a consistency error and we withdraw.
			 */
			if (fsync)
				continue;
			gfs2_ail_error(gl, bh);
		}
		gfs2_trans_add_revoke(sdp, bd);
		nr_revokes--;
	}
	/* Outside of fsync, the AIL must now be empty for this glock */
	GLOCK_BUG_ON(gl, !fsync && atomic_read(&gl->gl_ail_count));
	spin_unlock(&sdp->sd_ail_lock);
	gfs2_log_unlock(sdp);
}
Steven Whitehouseddacfaf2006-10-03 11:10:41 -040083
Steven Whitehousedba898b2011-04-14 09:54:02 +010084
Bob Peterson1c634f92019-11-13 14:09:28 -060085static int gfs2_ail_empty_gl(struct gfs2_glock *gl)
Steven Whitehousedba898b2011-04-14 09:54:02 +010086{
Bob Peterson15562c42015-03-16 11:52:05 -050087 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
Steven Whitehousedba898b2011-04-14 09:54:02 +010088 struct gfs2_trans tr;
Bob Peterson1c634f92019-11-13 14:09:28 -060089 int ret;
Steven Whitehousedba898b2011-04-14 09:54:02 +010090
91 memset(&tr, 0, sizeof(tr));
Steven Whitehoused69a3c62014-02-21 15:22:35 +000092 INIT_LIST_HEAD(&tr.tr_buf);
93 INIT_LIST_HEAD(&tr.tr_databuf);
Bob Petersoncbcc89b2020-06-05 14:12:34 -050094 INIT_LIST_HEAD(&tr.tr_ail1_list);
95 INIT_LIST_HEAD(&tr.tr_ail2_list);
Steven Whitehousedba898b2011-04-14 09:54:02 +010096 tr.tr_revokes = atomic_read(&gl->gl_ail_count);
97
Bob Peterson9ff78282019-11-13 13:47:02 -060098 if (!tr.tr_revokes) {
99 bool have_revokes;
100 bool log_in_flight;
101
102 /*
103 * We have nothing on the ail, but there could be revokes on
104 * the sdp revoke queue, in which case, we still want to flush
105 * the log and wait for it to finish.
106 *
107 * If the sdp revoke list is empty too, we might still have an
108 * io outstanding for writing revokes, so we should wait for
109 * it before returning.
110 *
111 * If none of these conditions are true, our revokes are all
112 * flushed and we can return.
113 */
114 gfs2_log_lock(sdp);
115 have_revokes = !list_empty(&sdp->sd_log_revokes);
116 log_in_flight = atomic_read(&sdp->sd_log_in_flight);
117 gfs2_log_unlock(sdp);
118 if (have_revokes)
119 goto flush;
120 if (log_in_flight)
121 log_flush_wait(sdp);
Bob Peterson1c634f92019-11-13 14:09:28 -0600122 return 0;
Bob Peterson9ff78282019-11-13 13:47:02 -0600123 }
Steven Whitehousedba898b2011-04-14 09:54:02 +0100124
Benjamin Marzinski24972552014-05-01 22:26:55 -0500125 /* A shortened, inline version of gfs2_trans_begin()
126 * tr->alloced is not set since the transaction structure is
127 * on the stack */
Bob Peterson2e9eeaa2019-12-13 08:10:51 -0600128 tr.tr_reserved = 1 + gfs2_struct2blk(sdp, tr.tr_revokes);
Fabian Frederickd29c0af2014-10-03 20:15:36 +0200129 tr.tr_ip = _RET_IP_;
Bob Peterson1c634f92019-11-13 14:09:28 -0600130 ret = gfs2_log_reserve(sdp, tr.tr_reserved);
131 if (ret < 0)
132 return ret;
Steven Whitehouse8eae1ca02012-10-15 10:57:02 +0100133 WARN_ON_ONCE(current->journal_info);
Steven Whitehousedba898b2011-04-14 09:54:02 +0100134 current->journal_info = &tr;
135
Benjamin Marzinski1bc333f2013-07-26 17:09:33 -0500136 __gfs2_ail_flush(gl, 0, tr.tr_revokes);
Steven Whitehousedba898b2011-04-14 09:54:02 +0100137
138 gfs2_trans_end(sdp);
Bob Peterson9ff78282019-11-13 13:47:02 -0600139flush:
Bob Peterson805c09072018-01-08 10:34:17 -0500140 gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
141 GFS2_LFC_AIL_EMPTY_GL);
Bob Peterson1c634f92019-11-13 14:09:28 -0600142 return 0;
Steven Whitehousedba898b2011-04-14 09:54:02 +0100143}
144
/**
 * gfs2_ail_flush - revoke all AIL buffers for a glock within a transaction
 * @gl: the glock
 * @fsync: set when called from fsync (dirty buffers are tolerated and skipped)
 */

void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	unsigned int revokes = atomic_read(&gl->gl_ail_count);
	/* Revokes that fit in the first (descriptor) log block */
	unsigned int max_revokes = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / sizeof(u64);
	int ret;

	if (!revokes)
		return;

	/* Grow the reservation one continuation (meta header) block at a
	 * time until it covers all outstanding revokes */
	while (revokes > max_revokes)
		max_revokes += (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_meta_header)) / sizeof(u64);

	ret = gfs2_trans_begin(sdp, 0, max_revokes);
	if (ret)
		return;
	__gfs2_ail_flush(gl, fsync, max_revokes);
	gfs2_trans_end(sdp);
	gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_AIL_FLUSH);
}
Steven Whitehouseba7f7292006-07-26 11:27:10 -0400166
167/**
Steven Whitehouse6bac2432009-03-09 09:03:51 +0000168 * rgrp_go_sync - sync out the metadata for this glock
David Teiglandb3b94fa2006-01-16 16:50:04 +0000169 * @gl: the glock
David Teiglandb3b94fa2006-01-16 16:50:04 +0000170 *
171 * Called when demoting or unlocking an EX glock. We must flush
172 * to disk all dirty buffers/pages relating to this glock, and must not
Andreas Gruenbacher6f6597ba2017-06-30 07:55:08 -0500173 * return to caller to demote/unlock the glock until I/O is complete.
David Teiglandb3b94fa2006-01-16 16:50:04 +0000174 */
175
Bob Peterson1c634f92019-11-13 14:09:28 -0600176static int rgrp_go_sync(struct gfs2_glock *gl)
David Teiglandb3b94fa2006-01-16 16:50:04 +0000177{
Bob Peterson15562c42015-03-16 11:52:05 -0500178 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
Steven Whitehouse70d4ee92013-12-06 16:19:54 +0000179 struct address_space *mapping = &sdp->sd_aspace;
Bob Petersonb3422ca2019-11-13 11:50:30 -0600180 struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
Steven Whitehouse6bac2432009-03-09 09:03:51 +0000181 int error;
Steven Whitehouseb5d32be2007-01-22 12:15:34 -0500182
Steven Whitehouse6bac2432009-03-09 09:03:51 +0000183 if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
Bob Peterson1c634f92019-11-13 14:09:28 -0600184 return 0;
Steven Whitehouse8eae1ca02012-10-15 10:57:02 +0100185 GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);
Steven Whitehouse6bac2432009-03-09 09:03:51 +0000186
Bob Peterson805c09072018-01-08 10:34:17 -0500187 gfs2_log_flush(sdp, gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
188 GFS2_LFC_RGRP_GO_SYNC);
Steven Whitehouse70d4ee92013-12-06 16:19:54 +0000189 filemap_fdatawrite_range(mapping, gl->gl_vm.start, gl->gl_vm.end);
190 error = filemap_fdatawait_range(mapping, gl->gl_vm.start, gl->gl_vm.end);
Bob Peterson1c634f92019-11-13 14:09:28 -0600191 WARN_ON_ONCE(error);
Steven Whitehouse70d4ee92013-12-06 16:19:54 +0000192 mapping_set_error(mapping, error);
Bob Peterson1c634f92019-11-13 14:09:28 -0600193 if (!error)
194 error = gfs2_ail_empty_gl(gl);
Bob Peterson7c9ca622011-08-31 09:53:19 +0100195
Andreas Gruenbacherf3dd1642015-10-29 10:58:09 -0500196 spin_lock(&gl->gl_lockref.lock);
Steven Whitehouse8339ee52011-08-31 16:38:29 +0100197 rgd = gl->gl_object;
198 if (rgd)
199 gfs2_free_clones(rgd);
Andreas Gruenbacherf3dd1642015-10-29 10:58:09 -0500200 spin_unlock(&gl->gl_lockref.lock);
Bob Peterson1c634f92019-11-13 14:09:28 -0600201 return error;
David Teiglandb3b94fa2006-01-16 16:50:04 +0000202}
203
204/**
Steven Whitehouse6bac2432009-03-09 09:03:51 +0000205 * rgrp_go_inval - invalidate the metadata for this glock
David Teiglandb3b94fa2006-01-16 16:50:04 +0000206 * @gl: the glock
207 * @flags:
208 *
Steven Whitehouse6bac2432009-03-09 09:03:51 +0000209 * We never used LM_ST_DEFERRED with resource groups, so that we
210 * should always see the metadata flag set here.
211 *
David Teiglandb3b94fa2006-01-16 16:50:04 +0000212 */
213
Steven Whitehouse6bac2432009-03-09 09:03:51 +0000214static void rgrp_go_inval(struct gfs2_glock *gl, int flags)
David Teiglandb3b94fa2006-01-16 16:50:04 +0000215{
Bob Peterson15562c42015-03-16 11:52:05 -0500216 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
Steven Whitehouse70d4ee92013-12-06 16:19:54 +0000217 struct address_space *mapping = &sdp->sd_aspace;
Andreas Gruenbacher6f6597ba2017-06-30 07:55:08 -0500218 struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
Bob Peterson39b0f1e2015-06-05 08:38:57 -0500219
220 if (rgd)
221 gfs2_rgrp_brelse(rgd);
David Teiglandb3b94fa2006-01-16 16:50:04 +0000222
Steven Whitehouse8eae1ca02012-10-15 10:57:02 +0100223 WARN_ON_ONCE(!(flags & DIO_METADATA));
Steven Whitehouse7005c3e2013-12-06 10:16:14 +0000224 truncate_inode_pages_range(mapping, gl->gl_vm.start, gl->gl_vm.end);
Steven Whitehouse6bac2432009-03-09 09:03:51 +0000225
Bob Peterson39b0f1e2015-06-05 08:38:57 -0500226 if (rgd)
Bob Petersoncf45b752008-01-31 10:31:39 -0600227 rgd->rd_flags &= ~GFS2_RDF_UPTODATE;
David Teiglandb3b94fa2006-01-16 16:50:04 +0000228}
229
Andreas Gruenbacher4fd1a572017-06-30 07:47:15 -0500230static struct gfs2_inode *gfs2_glock2inode(struct gfs2_glock *gl)
231{
232 struct gfs2_inode *ip;
233
234 spin_lock(&gl->gl_lockref.lock);
235 ip = gl->gl_object;
236 if (ip)
237 set_bit(GIF_GLOP_PENDING, &ip->i_flags);
238 spin_unlock(&gl->gl_lockref.lock);
239 return ip;
240}
241
Andreas Gruenbacher6f6597ba2017-06-30 07:55:08 -0500242struct gfs2_rgrpd *gfs2_glock2rgrp(struct gfs2_glock *gl)
243{
244 struct gfs2_rgrpd *rgd;
245
246 spin_lock(&gl->gl_lockref.lock);
247 rgd = gl->gl_object;
248 spin_unlock(&gl->gl_lockref.lock);
249
250 return rgd;
251}
252
Andreas Gruenbacher4fd1a572017-06-30 07:47:15 -0500253static void gfs2_clear_glop_pending(struct gfs2_inode *ip)
254{
255 if (!ip)
256 return;
257
258 clear_bit_unlock(GIF_GLOP_PENDING, &ip->i_flags);
259 wake_up_bit(&ip->i_flags, GIF_GLOP_PENDING);
260}
261
/**
 * inode_go_sync - Sync the dirty data and/or metadata for an inode glock
 * @gl: the glock protecting the inode
 *
 * Returns: 0 on success, or the first writeback error encountered
 */

static int inode_go_sync(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip = gfs2_glock2inode(gl);
	int isreg = ip && S_ISREG(ip->i_inode.i_mode);
	struct address_space *metamapping = gfs2_glock2aspace(gl);
	int error = 0, ret;

	if (isreg) {
		/* Drop any writable mmap mappings and wait for direct I/O */
		if (test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags))
			unmap_shared_mapping_range(ip->i_inode.i_mapping, 0, 0);
		inode_dio_wait(&ip->i_inode);
	}
	if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
		goto out;

	GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);

	gfs2_log_flush(gl->gl_name.ln_sbd, gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_INODE_GO_SYNC);
	filemap_fdatawrite(metamapping);
	if (isreg) {
		struct address_space *mapping = ip->i_inode.i_mapping;
		filemap_fdatawrite(mapping);
		error = filemap_fdatawait(mapping);
		mapping_set_error(mapping, error);
	}
	/* Keep the first error seen; the metadata wait result is secondary */
	ret = filemap_fdatawait(metamapping);
	mapping_set_error(metamapping, ret);
	if (!error)
		error = ret;
	gfs2_ail_empty_gl(gl);
	/*
	 * Writeback of the data mapping may cause the dirty flag to be set
	 * so we have to clear it again here.
	 */
	smp_mb__before_atomic();
	clear_bit(GLF_DIRTY, &gl->gl_flags);

out:
	gfs2_clear_glop_pending(ip);
	return error;
}
310
/**
 * inode_go_inval - prepare a inode glock to be released
 * @gl: the glock
 * @flags: DIO_METADATA when the cached metadata must also be invalidated
 *
 * Normally we invalidate everything, but if we are moving into
 * LM_ST_DEFERRED from LM_ST_SHARED or LM_ST_EXCLUSIVE then we
 * can keep hold of the metadata, since it won't have changed.
 *
 */

static void inode_go_inval(struct gfs2_glock *gl, int flags)
{
	struct gfs2_inode *ip = gfs2_glock2inode(gl);

	if (flags & DIO_METADATA) {
		struct address_space *mapping = gfs2_glock2aspace(gl);
		truncate_inode_pages(mapping, 0);
		if (ip) {
			/* Force a re-read of the dinode on next use, and
			 * drop caches derived from it */
			set_bit(GIF_INVALID, &ip->i_flags);
			forget_all_cached_acls(&ip->i_inode);
			security_inode_invalidate_secctx(&ip->i_inode);
			gfs2_dir_hash_inval(ip);
		}
	}

	/* The resource index is special: flush the log and mark the
	 * in-core rindex as stale */
	if (ip == GFS2_I(gl->gl_name.ln_sbd->sd_rindex)) {
		gfs2_log_flush(gl->gl_name.ln_sbd, NULL,
			       GFS2_LOG_HEAD_FLUSH_NORMAL |
			       GFS2_LFC_INODE_GO_INVAL);
		gl->gl_name.ln_sbd->sd_rindex_uptodate = 0;
	}
	if (ip && S_ISREG(ip->i_inode.i_mode))
		truncate_inode_pages(ip->i_inode.i_mapping, 0);

	gfs2_clear_glop_pending(ip);
}
348
349/**
350 * inode_go_demote_ok - Check to see if it's ok to unlock an inode glock
351 * @gl: the glock
352 *
353 * Returns: 1 if it's ok
354 */
355
Steven Whitehouse97cc10252008-11-20 13:39:47 +0000356static int inode_go_demote_ok(const struct gfs2_glock *gl)
David Teiglandb3b94fa2006-01-16 16:50:04 +0000357{
Bob Peterson15562c42015-03-16 11:52:05 -0500358 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
Steven Whitehousebc015cb2011-01-19 09:30:01 +0000359
Steven Whitehouse97cc10252008-11-20 13:39:47 +0000360 if (sdp->sd_jindex == gl->gl_object || sdp->sd_rindex == gl->gl_object)
361 return 0;
Steven Whitehousebc015cb2011-01-19 09:30:01 +0000362
Steven Whitehouse97cc10252008-11-20 13:39:47 +0000363 return 1;
David Teiglandb3b94fa2006-01-16 16:50:04 +0000364}
365
/**
 * gfs2_dinode_in - decode an on-disk dinode into the in-core inode
 * @ip: the in-core GFS2 inode to populate
 * @buf: the raw (big-endian) on-disk dinode data
 *
 * Returns: 0 on success, -EIO if the dinode fails sanity checks
 */

static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
{
	const struct gfs2_dinode *str = buf;
	struct timespec64 atime;
	u16 height, depth;

	/* The block number stored in the dinode must match where we read it */
	if (unlikely(ip->i_no_addr != be64_to_cpu(str->di_num.no_addr)))
		goto corrupt;
	ip->i_no_formal_ino = be64_to_cpu(str->di_num.no_formal_ino);
	ip->i_inode.i_mode = be32_to_cpu(str->di_mode);
	ip->i_inode.i_rdev = 0;
	switch (ip->i_inode.i_mode & S_IFMT) {
	case S_IFBLK:
	case S_IFCHR:
		ip->i_inode.i_rdev = MKDEV(be32_to_cpu(str->di_major),
					   be32_to_cpu(str->di_minor));
		break;
	}

	i_uid_write(&ip->i_inode, be32_to_cpu(str->di_uid));
	i_gid_write(&ip->i_inode, be32_to_cpu(str->di_gid));
	set_nlink(&ip->i_inode, be32_to_cpu(str->di_nlink));
	i_size_write(&ip->i_inode, be64_to_cpu(str->di_size));
	gfs2_set_inode_blocks(&ip->i_inode, be64_to_cpu(str->di_blocks));
	atime.tv_sec = be64_to_cpu(str->di_atime);
	atime.tv_nsec = be32_to_cpu(str->di_atime_nsec);
	/* Keep the in-core atime if it is more recent than the on-disk one */
	if (timespec64_compare(&ip->i_inode.i_atime, &atime) < 0)
		ip->i_inode.i_atime = atime;
	ip->i_inode.i_mtime.tv_sec = be64_to_cpu(str->di_mtime);
	ip->i_inode.i_mtime.tv_nsec = be32_to_cpu(str->di_mtime_nsec);
	ip->i_inode.i_ctime.tv_sec = be64_to_cpu(str->di_ctime);
	ip->i_inode.i_ctime.tv_nsec = be32_to_cpu(str->di_ctime_nsec);

	ip->i_goal = be64_to_cpu(str->di_goal_meta);
	ip->i_generation = be64_to_cpu(str->di_generation);

	ip->i_diskflags = be32_to_cpu(str->di_flags);
	ip->i_eattr = be64_to_cpu(str->di_eattr);
	/* i_diskflags and i_eattr must be set before gfs2_set_inode_flags() */
	gfs2_set_inode_flags(&ip->i_inode);
	height = be16_to_cpu(str->di_height);
	if (unlikely(height > GFS2_MAX_META_HEIGHT))
		goto corrupt;
	ip->i_height = (u8)height;

	depth = be16_to_cpu(str->di_depth);
	if (unlikely(depth > GFS2_DIR_MAX_DEPTH))
		goto corrupt;
	ip->i_depth = (u8)depth;
	ip->i_entries = be32_to_cpu(str->di_entries);

	if (S_ISREG(ip->i_inode.i_mode))
		gfs2_set_aops(&ip->i_inode);

	return 0;
corrupt:
	gfs2_consist_inode(ip);
	return -EIO;
}
425
426/**
427 * gfs2_inode_refresh - Refresh the incore copy of the dinode
428 * @ip: The GFS2 inode
429 *
430 * Returns: errno
431 */
432
433int gfs2_inode_refresh(struct gfs2_inode *ip)
434{
435 struct buffer_head *dibh;
436 int error;
437
438 error = gfs2_meta_inode_buffer(ip, &dibh);
439 if (error)
440 return error;
441
Steven Whitehoused4b2cf12011-05-09 13:49:59 +0100442 error = gfs2_dinode_in(ip, dibh->b_data);
443 brelse(dibh);
444 clear_bit(GIF_INVALID, &ip->i_flags);
445
446 return error;
447}
448
/**
 * inode_go_lock - operation done after an inode lock is locked by a process
 * @gh: the holder being granted
 *
 * Returns: errno, or 1 when a pending truncate was handed off to the
 * quota daemon and the caller should retry
 */

static int inode_go_lock(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_inode *ip = gl->gl_object;
	int error = 0;

	if (!ip || (gh->gh_flags & GL_SKIP))
		return 0;

	/* Re-read the dinode if it was invalidated while unlocked */
	if (test_bit(GIF_INVALID, &ip->i_flags)) {
		error = gfs2_inode_refresh(ip);
		if (error)
			return error;
	}

	if (gh->gh_state != LM_ST_DEFERRED)
		inode_dio_wait(&ip->i_inode);

	/* If a truncate was interrupted, queue the inode for the quota
	 * daemon to finish it, and tell the caller to retry */
	if ((ip->i_diskflags & GFS2_DIF_TRUNC_IN_PROG) &&
	    (gl->gl_state == LM_ST_EXCLUSIVE) &&
	    (gh->gh_state == LM_ST_EXCLUSIVE)) {
		spin_lock(&sdp->sd_trunc_lock);
		if (list_empty(&ip->i_trunc_list))
			list_add(&ip->i_trunc_list, &sdp->sd_trunc_list);
		spin_unlock(&sdp->sd_trunc_lock);
		wake_up(&sdp->sd_quota_wait);
		return 1;
	}

	return error;
}
489
490/**
Steven Whitehouse6802e342008-05-21 17:03:22 +0100491 * inode_go_dump - print information about an inode
492 * @seq: The iterator
493 * @ip: the inode
Bob Peterson3792ce92019-05-09 09:21:48 -0500494 * @fs_id_buf: file system id (may be empty)
Steven Whitehouse6802e342008-05-21 17:03:22 +0100495 *
Steven Whitehouse6802e342008-05-21 17:03:22 +0100496 */
497
Bob Peterson3792ce92019-05-09 09:21:48 -0500498static void inode_go_dump(struct seq_file *seq, struct gfs2_glock *gl,
499 const char *fs_id_buf)
Steven Whitehouse6802e342008-05-21 17:03:22 +0100500{
Bob Peterson27a2660f2018-04-18 12:05:01 -0700501 struct gfs2_inode *ip = gl->gl_object;
502 struct inode *inode = &ip->i_inode;
503 unsigned long nrpages;
504
Steven Whitehouse6802e342008-05-21 17:03:22 +0100505 if (ip == NULL)
Steven Whitehouseac3beb62014-01-16 10:31:13 +0000506 return;
Bob Peterson27a2660f2018-04-18 12:05:01 -0700507
508 xa_lock_irq(&inode->i_data.i_pages);
509 nrpages = inode->i_data.nrpages;
510 xa_unlock_irq(&inode->i_data.i_pages);
511
Bob Peterson3792ce92019-05-09 09:21:48 -0500512 gfs2_print_dbg(seq, "%s I: n:%llu/%llu t:%u f:0x%02lx d:0x%08x s:%llu "
513 "p:%lu\n", fs_id_buf,
Steven Whitehouse6802e342008-05-21 17:03:22 +0100514 (unsigned long long)ip->i_no_formal_ino,
515 (unsigned long long)ip->i_no_addr,
Steven Whitehousefa75ced2008-11-10 10:10:12 +0000516 IF2DT(ip->i_inode.i_mode), ip->i_flags,
517 (unsigned int)ip->i_diskflags,
Bob Peterson27a2660f2018-04-18 12:05:01 -0700518 (unsigned long long)i_size_read(inode), nrpages);
Steven Whitehouse6802e342008-05-21 17:03:22 +0100519}
520
/**
 * freeze_go_sync - promote/demote the freeze glock
 * @gl: the glock
 *
 * Returns: always 0 (errors are handled by withdrawing)
 */

static int freeze_go_sync(struct gfs2_glock *gl)
{
	int error = 0;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	/* An EX request on the freeze glock means freeze the filesystem */
	if (gl->gl_req == LM_ST_EXCLUSIVE && !gfs2_withdrawn(sdp)) {
		atomic_set(&sdp->sd_freeze_state, SFS_STARTING_FREEZE);
		error = freeze_super(sdp->sd_vfs);
		if (error) {
			fs_info(sdp, "GFS2: couldn't freeze filesystem: %d\n",
				error);
			/* A withdraw can race with the freeze; back out */
			if (gfs2_withdrawn(sdp)) {
				atomic_set(&sdp->sd_freeze_state, SFS_UNFROZEN);
				return 0;
			}
			gfs2_assert_withdraw(sdp, 0);
		}
		queue_work(gfs2_freeze_wq, &sdp->sd_freeze_work);
		if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))
			gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_FREEZE |
				       GFS2_LFC_FREEZE_GO_SYNC);
		else /* read-only mounts */
			atomic_set(&sdp->sd_freeze_state, SFS_FROZEN);
	}
	return 0;
}
555
/**
 * freeze_go_xmote_bh - After promoting/demoting the freeze glock
 * @gl: the glock
 * @gh: the holder (unused here)
 *
 * Returns: always 0
 */

static int freeze_go_xmote_bh(struct gfs2_glock *gl, struct gfs2_holder *gh)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
	struct gfs2_glock *j_gl = ip->i_gl;
	struct gfs2_log_header_host head;
	int error;

	if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
		/* Drop cached journal metadata and re-read the log head */
		j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);

		error = gfs2_find_jhead(sdp->sd_jdesc, &head, false);
		if (error)
			gfs2_consist(sdp);
		/* After a freeze the journal must end with an unmount record */
		if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT))
			gfs2_consist(sdp);

		/* Initialize some head of the log stuff */
		if (!gfs2_withdrawn(sdp)) {
			sdp->sd_log_sequence = head.lh_sequence + 1;
			gfs2_log_pointers_init(sdp, head.lh_blkno);
		}
	}
	return 0;
}
587
/**
 * freeze_go_demote_ok - Check if the freeze glock may be demoted
 * @gl: the glock
 *
 * Always returns 0
 */

static int freeze_go_demote_ok(const struct gfs2_glock *gl)
{
	return 0;
}
599
/**
 * iopen_go_callback - schedule the dcache entry for the inode to be deleted
 * @gl: the glock
 * @remote: true if the callback came from another cluster node
 *
 * gl_lockref.lock lock is held while calling this
 */
static void iopen_go_callback(struct gfs2_glock *gl, bool remote)
{
	struct gfs2_inode *ip = gl->gl_object;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	/* Only remote demote requests on writable mounts are of interest */
	if (!remote || sb_rdonly(sdp->sd_vfs))
		return;

	if (gl->gl_demote_state == LM_ST_UNLOCKED &&
	    gl->gl_state == LM_ST_SHARED && ip) {
		/* Hold a reference for the queued delete work; drop it
		 * again if the work was already queued */
		gl->gl_lockref.count++;
		if (!queue_delayed_work(gfs2_delete_workqueue,
					&gl->gl_delete, 0))
			gl->gl_lockref.count--;
	}
}
622
/* Only allow demotion of an iopen glock when no delete work is pending. */
static int iopen_go_demote_ok(const struct gfs2_glock *gl)
{
	if (gfs2_delete_work_queued(gl))
		return 0;
	return 1;
}
627
/**
 * inode_go_free - wake up anyone waiting for dlm's unlock ast to free it
 * @gl: glock being freed
 *
 * For now, this is only used for the journal inode glock. In withdraw
 * situations, we need to wait for the glock to be freed so that we know
 * other nodes may proceed with recovery / journal replay.
 */
static void inode_go_free(struct gfs2_glock *gl)
{
	/* Note that we cannot reference gl_object because it's already set
	 * to NULL by this point in its lifecycle. */
	if (!test_bit(GLF_FREEING, &gl->gl_flags))
		return;
	/* clear_bit_unlock() (release ordering) must precede the wake-up so
	 * that woken waiters observe the flag already cleared. */
	clear_bit_unlock(GLF_FREEING, &gl->gl_flags);
	wake_up_bit(&gl->gl_flags, GLF_FREEING);
}
645
/**
 * nondisk_go_callback - used to signal when a node did a withdraw
 * @gl: the nondisk glock
 * @remote: true if this came from a different cluster node
 *
 */
static void nondisk_go_callback(struct gfs2_glock *gl, bool remote)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	/* Ignore the callback unless it's from another node, and it's the
	 * live lock. */
	if (!remote || gl->gl_name.ln_number != GFS2_LIVE_LOCK)
		return;

	/* First order of business is to cancel the demote request. We don't
	 * really want to demote a nondisk glock. At best it's just to inform
	 * us of another node's withdraw. We'll keep it in SH mode. */
	clear_bit(GLF_DEMOTE, &gl->gl_flags);
	clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags);

	/* Ignore the unlock if we're withdrawn, unmounting, or in recovery. */
	if (test_bit(SDF_NORECOVERY, &sdp->sd_flags) ||
	    test_bit(SDF_WITHDRAWN, &sdp->sd_flags) ||
	    test_bit(SDF_REMOTE_WITHDRAW, &sdp->sd_flags))
		return;

	/* We only care when a node wants us to unlock, because that means
	 * they want a journal recovered. */
	if (gl->gl_demote_state != LM_ST_UNLOCKED)
		return;

	/* Spectator mounts cannot perform journal recovery (see warning
	 * below), so there is nothing more to do here. */
	if (sdp->sd_args.ar_spectator) {
		fs_warn(sdp, "Spectator node cannot recover journals.\n");
		return;
	}

	fs_warn(sdp, "Some node has withdrawn; checking for recovery.\n");
	set_bit(SDF_REMOTE_WITHDRAW, &sdp->sd_flags);
	/*
	 * We can't call remote_withdraw directly here or gfs2_recover_journal
	 * because this is called from the glock unlock function and the
	 * remote_withdraw needs to enqueue and dequeue the same "live" glock
	 * we were called from. So we queue it to the control work queue in
	 * lock_dlm.
	 */
	queue_delayed_work(gfs2_control_wq, &sdp->sd_control_work, 0);
}
694
/* Operations for metadata glocks (LM_TYPE_META). */
const struct gfs2_glock_operations gfs2_meta_glops = {
	.go_type = LM_TYPE_META,
	.go_flags = GLOF_NONDISK,
};
699
/* Operations for inode glocks (LM_TYPE_INODE); inode_go_free wakes waiters
 * blocked on GLF_FREEING when the glock is released. */
const struct gfs2_glock_operations gfs2_inode_glops = {
	.go_sync = inode_go_sync,
	.go_inval = inode_go_inval,
	.go_demote_ok = inode_go_demote_ok,
	.go_lock = inode_go_lock,
	.go_dump = inode_go_dump,
	.go_type = LM_TYPE_INODE,
	.go_flags = GLOF_ASPACE | GLOF_LRU | GLOF_LVB,
	.go_free = inode_go_free,
};
710
/* Operations for resource-group glocks (LM_TYPE_RGRP). */
const struct gfs2_glock_operations gfs2_rgrp_glops = {
	.go_sync = rgrp_go_sync,
	.go_inval = rgrp_go_inval,
	.go_lock = gfs2_rgrp_go_lock,
	.go_dump = gfs2_rgrp_dump,
	.go_type = LM_TYPE_RGRP,
	.go_flags = GLOF_LVB,
};
719
/* Operations for the freeze glock (a nondisk glock); freeze_go_demote_ok
 * always returns 0. */
const struct gfs2_glock_operations gfs2_freeze_glops = {
	.go_sync = freeze_go_sync,
	.go_xmote_bh = freeze_go_xmote_bh,
	.go_demote_ok = freeze_go_demote_ok,
	.go_type = LM_TYPE_NONDISK,
	.go_flags = GLOF_NONDISK,
};
727
/* Operations for iopen glocks (LM_TYPE_IOPEN); remote demote requests may
 * schedule inode deletion via iopen_go_callback. */
const struct gfs2_glock_operations gfs2_iopen_glops = {
	.go_type = LM_TYPE_IOPEN,
	.go_callback = iopen_go_callback,
	.go_demote_ok = iopen_go_demote_ok,
	.go_flags = GLOF_LRU | GLOF_NONDISK,
};
734
/* Operations for flock glocks (LM_TYPE_FLOCK). */
const struct gfs2_glock_operations gfs2_flock_glops = {
	.go_type = LM_TYPE_FLOCK,
	.go_flags = GLOF_LRU | GLOF_NONDISK,
};
739
/* Operations for nondisk glocks (LM_TYPE_NONDISK); the "live" lock's
 * callback triggers remote-withdraw recovery handling. */
const struct gfs2_glock_operations gfs2_nondisk_glops = {
	.go_type = LM_TYPE_NONDISK,
	.go_flags = GLOF_NONDISK,
	.go_callback = nondisk_go_callback,
};
745
/* Operations for quota glocks (LM_TYPE_QUOTA). */
const struct gfs2_glock_operations gfs2_quota_glops = {
	.go_type = LM_TYPE_QUOTA,
	.go_flags = GLOF_LVB | GLOF_LRU | GLOF_NONDISK,
};
750
/* Operations for journal glocks (LM_TYPE_JOURNAL). */
const struct gfs2_glock_operations gfs2_journal_glops = {
	.go_type = LM_TYPE_JOURNAL,
	.go_flags = GLOF_NONDISK,
};
755
/* Lookup table mapping an LM_TYPE_* lock-type number to its glock
 * operations. */
const struct gfs2_glock_operations *gfs2_glops_list[] = {
	[LM_TYPE_META] = &gfs2_meta_glops,
	[LM_TYPE_INODE] = &gfs2_inode_glops,
	[LM_TYPE_RGRP] = &gfs2_rgrp_glops,
	[LM_TYPE_IOPEN] = &gfs2_iopen_glops,
	[LM_TYPE_FLOCK] = &gfs2_flock_glops,
	[LM_TYPE_NONDISK] = &gfs2_nondisk_glops,
	[LM_TYPE_QUOTA] = &gfs2_quota_glops,
	[LM_TYPE_JOURNAL] = &gfs2_journal_glops,
};
766