// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 */

#include "xfs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_bmap.h"
#include "xfs_alloc.h"
#include "xfs_fsops.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"
#include "xfs_dir2.h"
#include "xfs_extfree_item.h"
#include "xfs_mru_cache.h"
#include "xfs_inode_item.h"
#include "xfs_icache.h"
#include "xfs_trace.h"
#include "xfs_icreate_item.h"
#include "xfs_filestream.h"
#include "xfs_quota.h"
#include "xfs_sysfs.h"
#include "xfs_ondisk.h"
#include "xfs_rmap_item.h"
#include "xfs_refcount_item.h"
#include "xfs_bmap_item.h"
#include "xfs_reflink.h"
#include "xfs_pwork.h"
#include "xfs_ag.h"
#include "xfs_defer.h"
#include "xfs_attr_item.h"
#include "xfs_xattr.h"
#include "xfs_iunlink_item.h"
#include "xfs_dahash_test.h"

#include <linux/magic.h>
#include <linux/fs_context.h>
#include <linux/fs_parser.h>

static const struct super_operations xfs_super_operations;

static struct kset *xfs_kset;		/* top-level xfs sysfs dir */
#ifdef DEBUG
static struct xfs_kobj xfs_dbg_kobj;	/* global debug sysfs attrs */
#endif

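/*
 * Track all active mounts so the CPU hotplug notifier can find per-mount,
 * per-CPU state that needs cleaning up when a CPU goes offline. These
 * helpers compile away to no-ops without CONFIG_HOTPLUG_CPU.
 */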
#ifdef CONFIG_HOTPLUG_CPU
static LIST_HEAD(xfs_mount_list);
static DEFINE_SPINLOCK(xfs_mount_list_lock);

static inline void xfs_mount_list_add(struct xfs_mount *mp)
{
	spin_lock(&xfs_mount_list_lock);
	list_add(&mp->m_mount_list, &xfs_mount_list);
	spin_unlock(&xfs_mount_list_lock);
}

static inline void xfs_mount_list_del(struct xfs_mount *mp)
{
	spin_lock(&xfs_mount_list_lock);
	list_del(&mp->m_mount_list);
	spin_unlock(&xfs_mount_list_lock);
}
#else /* !CONFIG_HOTPLUG_CPU */
static inline void xfs_mount_list_add(struct xfs_mount *mp) {}
static inline void xfs_mount_list_del(struct xfs_mount *mp) {}
#endif

enum xfs_dax_mode {
	XFS_DAX_INODE = 0,
	XFS_DAX_ALWAYS = 1,
	XFS_DAX_NEVER = 2,
};

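/*
 * Apply a dax= mount option to the mount feature flags. "inode" clears
 * both override bits so each inode's own flag decides; "always" and
 * "never" force DAX on or off for the whole filesystem.
 */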
static void
xfs_mount_set_dax_mode(
	struct xfs_mount	*mp,
	enum xfs_dax_mode	mode)
{
	switch (mode) {
	case XFS_DAX_INODE:
		mp->m_features &= ~(XFS_FEAT_DAX_ALWAYS | XFS_FEAT_DAX_NEVER);
		break;
	case XFS_DAX_ALWAYS:
		mp->m_features |= XFS_FEAT_DAX_ALWAYS;
		mp->m_features &= ~XFS_FEAT_DAX_NEVER;
		break;
	case XFS_DAX_NEVER:
		mp->m_features |= XFS_FEAT_DAX_NEVER;
		mp->m_features &= ~XFS_FEAT_DAX_ALWAYS;
		break;
	}
}

static const struct constant_table dax_param_enums[] = {
	{"inode",	XFS_DAX_INODE },
	{"always",	XFS_DAX_ALWAYS },
	{"never",	XFS_DAX_NEVER },
	{}
};

/*
 * Table driven mount option parser.
 */
enum {
	Opt_logbufs, Opt_logbsize, Opt_logdev, Opt_rtdev,
	Opt_wsync, Opt_noalign, Opt_swalloc, Opt_sunit, Opt_swidth, Opt_nouuid,
	Opt_grpid, Opt_nogrpid, Opt_bsdgroups, Opt_sysvgroups,
	Opt_allocsize, Opt_norecovery, Opt_inode64, Opt_inode32, Opt_ikeep,
	Opt_noikeep, Opt_largeio, Opt_nolargeio, Opt_attr2, Opt_noattr2,
	Opt_filestreams, Opt_quota, Opt_noquota, Opt_usrquota, Opt_grpquota,
	Opt_prjquota, Opt_uquota, Opt_gquota, Opt_pquota,
	Opt_uqnoenforce, Opt_gqnoenforce, Opt_pqnoenforce, Opt_qnoenforce,
	Opt_discard, Opt_nodiscard, Opt_dax, Opt_dax_enum,
};

static const struct fs_parameter_spec xfs_fs_parameters[] = {
	fsparam_u32("logbufs",		Opt_logbufs),
	fsparam_string("logbsize",	Opt_logbsize),
	fsparam_string("logdev",	Opt_logdev),
	fsparam_string("rtdev",		Opt_rtdev),
	fsparam_flag("wsync",		Opt_wsync),
	fsparam_flag("noalign",		Opt_noalign),
	fsparam_flag("swalloc",		Opt_swalloc),
	fsparam_u32("sunit",		Opt_sunit),
	fsparam_u32("swidth",		Opt_swidth),
	fsparam_flag("nouuid",		Opt_nouuid),
	fsparam_flag("grpid",		Opt_grpid),
	fsparam_flag("nogrpid",		Opt_nogrpid),
	fsparam_flag("bsdgroups",	Opt_bsdgroups),
	fsparam_flag("sysvgroups",	Opt_sysvgroups),
	fsparam_string("allocsize",	Opt_allocsize),
	fsparam_flag("norecovery",	Opt_norecovery),
	fsparam_flag("inode64",		Opt_inode64),
	fsparam_flag("inode32",		Opt_inode32),
	fsparam_flag("ikeep",		Opt_ikeep),
	fsparam_flag("noikeep",		Opt_noikeep),
	fsparam_flag("largeio",		Opt_largeio),
	fsparam_flag("nolargeio",	Opt_nolargeio),
	fsparam_flag("attr2",		Opt_attr2),
	fsparam_flag("noattr2",		Opt_noattr2),
	fsparam_flag("filestreams",	Opt_filestreams),
	fsparam_flag("quota",		Opt_quota),
	fsparam_flag("noquota",		Opt_noquota),
	fsparam_flag("usrquota",	Opt_usrquota),
	fsparam_flag("grpquota",	Opt_grpquota),
	fsparam_flag("prjquota",	Opt_prjquota),
	fsparam_flag("uquota",		Opt_uquota),
	fsparam_flag("gquota",		Opt_gquota),
	fsparam_flag("pquota",		Opt_pquota),
	fsparam_flag("uqnoenforce",	Opt_uqnoenforce),
	fsparam_flag("gqnoenforce",	Opt_gqnoenforce),
	fsparam_flag("pqnoenforce",	Opt_pqnoenforce),
	fsparam_flag("qnoenforce",	Opt_qnoenforce),
	fsparam_flag("discard",		Opt_discard),
	fsparam_flag("nodiscard",	Opt_nodiscard),
	fsparam_flag("dax",		Opt_dax),
	fsparam_enum("dax",		Opt_dax_enum, dax_param_enums),
	{}
};

struct proc_xfs_info {
	uint64_t	flag;
	char		*str;
};

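/*
 * Report the mount options that differ from the defaults in /proc/mounts.
 * Simple boolean features are table driven; the log, allocation, stripe
 * and quota options are derived from the mount structure field by field.
 */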
static int
xfs_fs_show_options(
	struct seq_file		*m,
	struct dentry		*root)
{
	static struct proc_xfs_info xfs_info_set[] = {
		/* the few simple ones we can get from the mount struct */
		{ XFS_FEAT_IKEEP,		",ikeep" },
		{ XFS_FEAT_WSYNC,		",wsync" },
		{ XFS_FEAT_NOALIGN,		",noalign" },
		{ XFS_FEAT_SWALLOC,		",swalloc" },
		{ XFS_FEAT_NOUUID,		",nouuid" },
		{ XFS_FEAT_NORECOVERY,		",norecovery" },
		{ XFS_FEAT_ATTR2,		",attr2" },
		{ XFS_FEAT_FILESTREAMS,		",filestreams" },
		{ XFS_FEAT_GRPID,		",grpid" },
		{ XFS_FEAT_DISCARD,		",discard" },
		{ XFS_FEAT_LARGE_IOSIZE,	",largeio" },
		{ XFS_FEAT_DAX_ALWAYS,		",dax=always" },
		{ XFS_FEAT_DAX_NEVER,		",dax=never" },
		{ 0, NULL }
	};
	struct xfs_mount	*mp = XFS_M(root->d_sb);
	struct proc_xfs_info	*xfs_infop;

	for (xfs_infop = xfs_info_set; xfs_infop->flag; xfs_infop++) {
		if (mp->m_features & xfs_infop->flag)
			seq_puts(m, xfs_infop->str);
	}

	seq_printf(m, ",inode%d", xfs_has_small_inums(mp) ? 32 : 64);

	if (xfs_has_allocsize(mp))
		seq_printf(m, ",allocsize=%dk",
			   (1 << mp->m_allocsize_log) >> 10);

	if (mp->m_logbufs > 0)
		seq_printf(m, ",logbufs=%d", mp->m_logbufs);
	if (mp->m_logbsize > 0)
		seq_printf(m, ",logbsize=%dk", mp->m_logbsize >> 10);

	if (mp->m_logname)
		seq_show_option(m, "logdev", mp->m_logname);
	if (mp->m_rtname)
		seq_show_option(m, "rtdev", mp->m_rtname);

	if (mp->m_dalign > 0)
		seq_printf(m, ",sunit=%d",
			   (int)XFS_FSB_TO_BB(mp, mp->m_dalign));
	if (mp->m_swidth > 0)
		seq_printf(m, ",swidth=%d",
			   (int)XFS_FSB_TO_BB(mp, mp->m_swidth));

	if (mp->m_qflags & XFS_UQUOTA_ENFD)
		seq_puts(m, ",usrquota");
	else if (mp->m_qflags & XFS_UQUOTA_ACCT)
		seq_puts(m, ",uqnoenforce");

	if (mp->m_qflags & XFS_PQUOTA_ENFD)
		seq_puts(m, ",prjquota");
	else if (mp->m_qflags & XFS_PQUOTA_ACCT)
		seq_puts(m, ",pqnoenforce");

	if (mp->m_qflags & XFS_GQUOTA_ENFD)
		seq_puts(m, ",grpquota");
	else if (mp->m_qflags & XFS_GQUOTA_ACCT)
		seq_puts(m, ",gqnoenforce");

	if (!(mp->m_qflags & XFS_ALL_QUOTA_ACCT))
		seq_puts(m, ",noquota");

	return 0;
}

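/*
 * Decide whether this AG may hold new inodes under the current
 * inode32/inode64 policy. Returns true if the AG counts towards maxagi,
 * i.e. it can hold inodes with 32-bit inode numbers under inode32.
 */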
static bool
xfs_set_inode_alloc_perag(
	struct xfs_perag	*pag,
	xfs_ino_t		ino,
	xfs_agnumber_t		max_metadata)
{
	if (!xfs_is_inode32(pag->pag_mount)) {
		set_bit(XFS_AGSTATE_ALLOWS_INODES, &pag->pag_opstate);
		clear_bit(XFS_AGSTATE_PREFERS_METADATA, &pag->pag_opstate);
		return false;
	}

	if (ino > XFS_MAXINUMBER_32) {
		clear_bit(XFS_AGSTATE_ALLOWS_INODES, &pag->pag_opstate);
		clear_bit(XFS_AGSTATE_PREFERS_METADATA, &pag->pag_opstate);
		return false;
	}

	set_bit(XFS_AGSTATE_ALLOWS_INODES, &pag->pag_opstate);
	if (pag->pag_agno < max_metadata)
		set_bit(XFS_AGSTATE_PREFERS_METADATA, &pag->pag_opstate);
	else
		clear_bit(XFS_AGSTATE_PREFERS_METADATA, &pag->pag_opstate);
	return true;
}

/*
 * Set parameters for inode allocation heuristics, taking into account
 * filesystem size and inode32/inode64 mount options; i.e. specifically
 * whether or not XFS_FEAT_SMALL_INUMS is set.
 *
 * Inode allocation patterns are altered only if inode32 is requested
 * (XFS_FEAT_SMALL_INUMS), and the filesystem is sufficiently large.
 * If altered, XFS_OPSTATE_INODE32 is set as well.
 *
 * An agcount independent of that in the mount structure is provided
 * because in the growfs case, mp->m_sb.sb_agcount is not yet updated
 * to the potentially higher ag count.
 *
 * Returns the maximum AG index which may contain inodes.
 */
xfs_agnumber_t
xfs_set_inode_alloc(
	struct xfs_mount *mp,
	xfs_agnumber_t	agcount)
{
	xfs_agnumber_t	index;
	xfs_agnumber_t	maxagi = 0;
	xfs_sb_t	*sbp = &mp->m_sb;
	xfs_agnumber_t	max_metadata;
	xfs_agino_t	agino;
	xfs_ino_t	ino;

	/*
	 * Calculate how much should be reserved for inodes to meet
	 * the max inode percentage.  Used only for inode32.
	 */
	if (M_IGEO(mp)->maxicount) {
		uint64_t	icount;

		icount = sbp->sb_dblocks * sbp->sb_imax_pct;
		do_div(icount, 100);
		icount += sbp->sb_agblocks - 1;
		do_div(icount, sbp->sb_agblocks);
		max_metadata = icount;
	} else {
		max_metadata = agcount;
	}

	/* Get the last possible inode in the filesystem */
	agino = XFS_AGB_TO_AGINO(mp, sbp->sb_agblocks - 1);
	ino = XFS_AGINO_TO_INO(mp, agcount - 1, agino);

	/*
	 * If user asked for no more than 32-bit inodes, and the fs is
	 * sufficiently large, set XFS_OPSTATE_INODE32 if we must alter
	 * the allocator to accommodate the request.
	 */
	if (xfs_has_small_inums(mp) && ino > XFS_MAXINUMBER_32)
		set_bit(XFS_OPSTATE_INODE32, &mp->m_opstate);
	else
		clear_bit(XFS_OPSTATE_INODE32, &mp->m_opstate);

	for (index = 0; index < agcount; index++) {
		struct xfs_perag	*pag;

		ino = XFS_AGINO_TO_INO(mp, index, agino);

		pag = xfs_perag_get(mp, index);
		if (xfs_set_inode_alloc_perag(pag, ino, max_metadata))
			maxagi++;
		xfs_perag_put(pag);
	}

	return xfs_is_inode32(mp) ? maxagi : agcount;
}

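/*
 * Check that dax=always can be honoured: the underlying device(s) must
 * support DAX and the filesystem block size must match the page size.
 * Most failures quietly fall back to dax=never; DAX plus reflink on a
 * partitioned device is rejected outright.
 */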
static int
xfs_setup_dax_always(
	struct xfs_mount	*mp)
{
	if (!mp->m_ddev_targp->bt_daxdev &&
	    (!mp->m_rtdev_targp || !mp->m_rtdev_targp->bt_daxdev)) {
		xfs_alert(mp,
			"DAX unsupported by block device. Turning off DAX.");
		goto disable_dax;
	}

	if (mp->m_super->s_blocksize != PAGE_SIZE) {
		xfs_alert(mp,
			"DAX not supported for blocksize. Turning off DAX.");
		goto disable_dax;
	}

	if (xfs_has_reflink(mp) &&
	    bdev_is_partition(mp->m_ddev_targp->bt_bdev)) {
		xfs_alert(mp,
			"DAX and reflink cannot work with multi-partitions!");
		return -EINVAL;
	}

	xfs_warn(mp, "DAX enabled. Warning: EXPERIMENTAL, use at your own risk");
	return 0;

disable_dax:
	xfs_mount_set_dax_mode(mp, XFS_DAX_NEVER);
	return 0;
}

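/* Open an external (log or realtime) block device by path. */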
STATIC int
xfs_blkdev_get(
	xfs_mount_t		*mp,
	const char		*name,
	struct block_device	**bdevp)
{
	int			error = 0;

	*bdevp = blkdev_get_by_path(name, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
				    mp);
	if (IS_ERR(*bdevp)) {
		error = PTR_ERR(*bdevp);
		xfs_warn(mp, "Invalid device [%s], error=%d", name, error);
	}

	return error;
}

STATIC void
xfs_blkdev_put(
	struct block_device	*bdev)
{
	if (bdev)
		blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
}

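/*
 * Tear down the buffer targets and release any external log and realtime
 * block devices that were opened at mount time.
 */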
STATIC void
xfs_close_devices(
	struct xfs_mount	*mp)
{
	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
		struct block_device *logdev = mp->m_logdev_targp->bt_bdev;

		xfs_free_buftarg(mp->m_logdev_targp);
		xfs_blkdev_put(logdev);
	}
	if (mp->m_rtdev_targp) {
		struct block_device *rtdev = mp->m_rtdev_targp->bt_bdev;

		xfs_free_buftarg(mp->m_rtdev_targp);
		xfs_blkdev_put(rtdev);
	}
	xfs_free_buftarg(mp->m_ddev_targp);
}

/*
 * The file system configurations are:
 *	(1) device (partition) with data and internal log
 *	(2) logical volume with data and log subvolumes.
 *	(3) logical volume with data, log, and realtime subvolumes.
 *
 * We only have to handle opening the log and realtime volumes here if
 * they are present.  The data subvolume has already been opened by
 * get_sb_bdev() and is stored in sb->s_bdev.
 */
STATIC int
xfs_open_devices(
	struct xfs_mount	*mp)
{
	struct block_device	*ddev = mp->m_super->s_bdev;
	struct block_device	*logdev = NULL, *rtdev = NULL;
	int			error;

	/*
	 * Open real time and log devices - order is important.
	 */
	if (mp->m_logname) {
		error = xfs_blkdev_get(mp, mp->m_logname, &logdev);
		if (error)
			return error;
	}

	if (mp->m_rtname) {
		error = xfs_blkdev_get(mp, mp->m_rtname, &rtdev);
		if (error)
			goto out_close_logdev;

		if (rtdev == ddev || rtdev == logdev) {
			xfs_warn(mp,
	"Cannot mount filesystem with identical rtdev and ddev/logdev.");
			error = -EINVAL;
			goto out_close_rtdev;
		}
	}

	/*
	 * Setup xfs_mount buffer target pointers
	 */
	error = -ENOMEM;
	mp->m_ddev_targp = xfs_alloc_buftarg(mp, ddev);
	if (!mp->m_ddev_targp)
		goto out_close_rtdev;

	if (rtdev) {
		mp->m_rtdev_targp = xfs_alloc_buftarg(mp, rtdev);
		if (!mp->m_rtdev_targp)
			goto out_free_ddev_targ;
	}

	if (logdev && logdev != ddev) {
		mp->m_logdev_targp = xfs_alloc_buftarg(mp, logdev);
		if (!mp->m_logdev_targp)
			goto out_free_rtdev_targ;
	} else {
		mp->m_logdev_targp = mp->m_ddev_targp;
	}

	return 0;

out_free_rtdev_targ:
	if (mp->m_rtdev_targp)
		xfs_free_buftarg(mp->m_rtdev_targp);
out_free_ddev_targ:
	xfs_free_buftarg(mp->m_ddev_targp);
out_close_rtdev:
	xfs_blkdev_put(rtdev);
out_close_logdev:
	if (logdev && logdev != ddev)
		xfs_blkdev_put(logdev);
	return error;
}

/*
 * Setup xfs_mount buffer target pointers based on superblock
 */
STATIC int
xfs_setup_devices(
	struct xfs_mount	*mp)
{
	int			error;

	error = xfs_setsize_buftarg(mp->m_ddev_targp, mp->m_sb.sb_sectsize);
	if (error)
		return error;

	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
		unsigned int	log_sector_size = BBSIZE;

		if (xfs_has_sector(mp))
			log_sector_size = mp->m_sb.sb_logsectsize;
		error = xfs_setsize_buftarg(mp->m_logdev_targp,
					    log_sector_size);
		if (error)
			return error;
	}
	if (mp->m_rtdev_targp) {
		error = xfs_setsize_buftarg(mp->m_rtdev_targp,
					    mp->m_sb.sb_sectsize);
		if (error)
			return error;
	}

	return 0;
}

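/*
 * Create the per-mount workqueues for buffer completion, unwritten extent
 * conversion, inode reclaim, background block and inode garbage collection,
 * and sync work. All are created freezable so that background work stops
 * while the filesystem is frozen.
 */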
STATIC int
xfs_init_mount_workqueues(
	struct xfs_mount	*mp)
{
	mp->m_buf_workqueue = alloc_workqueue("xfs-buf/%s",
		XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
		1, mp->m_super->s_id);
	if (!mp->m_buf_workqueue)
		goto out;

	mp->m_unwritten_workqueue = alloc_workqueue("xfs-conv/%s",
		XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
		0, mp->m_super->s_id);
	if (!mp->m_unwritten_workqueue)
		goto out_destroy_buf;

	mp->m_reclaim_workqueue = alloc_workqueue("xfs-reclaim/%s",
		XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
		0, mp->m_super->s_id);
	if (!mp->m_reclaim_workqueue)
		goto out_destroy_unwritten;

	mp->m_blockgc_wq = alloc_workqueue("xfs-blockgc/%s",
		XFS_WQFLAGS(WQ_UNBOUND | WQ_FREEZABLE | WQ_MEM_RECLAIM),
		0, mp->m_super->s_id);
	if (!mp->m_blockgc_wq)
		goto out_destroy_reclaim;

	mp->m_inodegc_wq = alloc_workqueue("xfs-inodegc/%s",
		XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
		1, mp->m_super->s_id);
	if (!mp->m_inodegc_wq)
		goto out_destroy_blockgc;

	mp->m_sync_workqueue = alloc_workqueue("xfs-sync/%s",
		XFS_WQFLAGS(WQ_FREEZABLE), 0, mp->m_super->s_id);
	if (!mp->m_sync_workqueue)
		goto out_destroy_inodegc;

	return 0;

out_destroy_inodegc:
	destroy_workqueue(mp->m_inodegc_wq);
out_destroy_blockgc:
	destroy_workqueue(mp->m_blockgc_wq);
out_destroy_reclaim:
	destroy_workqueue(mp->m_reclaim_workqueue);
out_destroy_unwritten:
	destroy_workqueue(mp->m_unwritten_workqueue);
out_destroy_buf:
	destroy_workqueue(mp->m_buf_workqueue);
out:
	return -ENOMEM;
}

STATIC void
xfs_destroy_mount_workqueues(
	struct xfs_mount	*mp)
{
	destroy_workqueue(mp->m_sync_workqueue);
	destroy_workqueue(mp->m_blockgc_wq);
	destroy_workqueue(mp->m_inodegc_wq);
	destroy_workqueue(mp->m_reclaim_workqueue);
	destroy_workqueue(mp->m_unwritten_workqueue);
	destroy_workqueue(mp->m_buf_workqueue);
}

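/*
 * Background work to write back all dirty inodes on this superblock.
 * Trylock s_umount so we skip the scan rather than block against a
 * concurrent unmount or freeze that holds it.
 */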
static void
xfs_flush_inodes_worker(
	struct work_struct	*work)
{
	struct xfs_mount	*mp = container_of(work, struct xfs_mount,
						   m_flush_inodes_work);
	struct super_block	*sb = mp->m_super;

	if (down_read_trylock(&sb->s_umount)) {
		sync_inodes_sb(sb);
		up_read(&sb->s_umount);
	}
}

/*
 * Flush all dirty data to disk. Must not be called while holding an XFS_ILOCK
 * or a page lock. We use sync_inodes_sb() here to ensure we block while waiting
 * for IO to complete so that we effectively throttle multiple callers to the
 * rate at which IO is completing.
 */
void
xfs_flush_inodes(
	struct xfs_mount	*mp)
{
	/*
	 * If flush_work() returns true then that means we waited for a flush
	 * which was already in progress.  Don't bother running another scan.
	 */
	if (flush_work(&mp->m_flush_inodes_work))
		return;

	queue_work(mp->m_sync_workqueue, &mp->m_flush_inodes_work);
	flush_work(&mp->m_flush_inodes_work);
}

/* Catch misguided souls that try to use this interface on XFS */
STATIC struct inode *
xfs_fs_alloc_inode(
	struct super_block	*sb)
{
	BUG();
	return NULL;
}

/*
 * Now that the generic code is guaranteed not to be accessing
 * the linux inode, we can inactivate and reclaim the inode.
 */
STATIC void
xfs_fs_destroy_inode(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);

	trace_xfs_destroy_inode(ip);

	ASSERT(!rwsem_is_locked(&inode->i_rwsem));
	XFS_STATS_INC(ip->i_mount, vn_rele);
	XFS_STATS_INC(ip->i_mount, vn_remove);
	xfs_inode_mark_reclaimable(ip);
}

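/*
 * Called when the VFS dirties an inode. On lazytime mounts we only need to
 * act when the VFS is expiring dirty timestamps, i.e. when I_DIRTY_SYNC is
 * raised together with I_DIRTY_TIME; log the timestamp update ourselves.
 */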
static void
xfs_fs_dirty_inode(
	struct inode		*inode,
	int			flags)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;

	if (!(inode->i_sb->s_flags & SB_LAZYTIME))
		return;

	/*
	 * Only do the timestamp update if the inode is dirty (I_DIRTY_SYNC)
	 * and has dirty timestamp (I_DIRTY_TIME). I_DIRTY_TIME can be passed
	 * in flags possibly together with I_DIRTY_SYNC.
	 */
	if ((flags & ~I_DIRTY_TIME) != I_DIRTY_SYNC || !(flags & I_DIRTY_TIME))
		return;

	if (xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp))
		return;
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_TIMESTAMP);
	xfs_trans_commit(tp);
}

/*
 * Slab object creation initialisation for the XFS inode.
 * This covers only the idempotent fields in the XFS inode;
 * all other fields need to be initialised on allocation
 * from the slab. This avoids the need to repeatedly initialise
 * fields in the xfs inode that are left in the initialised state
 * when freeing the inode.
 */
STATIC void
xfs_fs_inode_init_once(
	void			*inode)
{
	struct xfs_inode	*ip = inode;

	memset(ip, 0, sizeof(struct xfs_inode));

	/* vfs inode */
	inode_init_once(VFS_I(ip));

	/* xfs inode */
	atomic_set(&ip->i_pincount, 0);
	spin_lock_init(&ip->i_flags_lock);

	mrlock_init(&ip->i_lock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER,
		     "xfsino", ip->i_ino);
}

/*
 * We do an unlocked check for XFS_IDONTCACHE here because we are already
 * serialised against cache hits here via the inode->i_lock and igrab() in
 * xfs_iget_cache_hit(). Hence a lookup that might clear this flag will not be
 * racing with us, and it avoids needing to grab a spinlock here for every inode
 * we drop the final reference on.
 */
STATIC int
xfs_fs_drop_inode(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);

	/*
	 * If this unlinked inode is in the middle of recovery, don't
	 * drop the inode just yet; log recovery will take care of
	 * that.  See the comment for this inode flag.
	 */
	if (ip->i_flags & XFS_IRECOVERY) {
		ASSERT(xlog_recovery_needed(ip->i_mount->m_log));
		return 0;
	}

	return generic_drop_inode(inode);
}

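/* Free the per-mount structure and the device name strings hanging off it. */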
static void
xfs_mount_free(
	struct xfs_mount	*mp)
{
	kfree(mp->m_rtname);
	kfree(mp->m_logname);
	kmem_free(mp);
}

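/*
 * Sync the filesystem: on the waiting pass, force the log to disk. This is
 * also the hook the freeze path uses to quiesce background garbage
 * collection before SB_FREEZE_FS is set (see the comment in the body).
 */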
STATIC int
xfs_fs_sync_fs(
	struct super_block	*sb,
	int			wait)
{
	struct xfs_mount	*mp = XFS_M(sb);
	int			error;

	trace_xfs_fs_sync_fs(mp, __return_address);

	/*
	 * Doing anything during the async pass would be counterproductive.
	 */
	if (!wait)
		return 0;

	error = xfs_log_force(mp, XFS_LOG_SYNC);
	if (error)
		return error;

	if (laptop_mode) {
		/*
		 * The disk must be active because we're syncing.
		 * We schedule log work now (now that the disk is
		 * active) instead of later (when it might not be).
		 */
		flush_delayed_work(&mp->m_log->l_work);
	}

	/*
	 * If we are called with page faults frozen out, it means we are about
	 * to freeze the transaction subsystem. Take the opportunity to shut
	 * down inodegc because once SB_FREEZE_FS is set it's too late to
	 * prevent inactivation races with freeze. The fs doesn't get called
	 * again by the freezing process until after SB_FREEZE_FS has been set,
	 * so it's now or never.  Same logic applies to speculative allocation
	 * garbage collection.
	 *
	 * We don't care if this is a normal syncfs call that does this or
	 * freeze that does this - we can run this multiple times without issue
	 * and we won't race with a restart because a restart can only occur
	 * when the state is either SB_FREEZE_FS or SB_FREEZE_COMPLETE.
	 */
	if (sb->s_writers.frozen == SB_FREEZE_PAGEFAULT) {
		xfs_inodegc_stop(mp);
		xfs_blockgc_stop(mp);
	}

	return 0;
}

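/*
 * Fill in superblock geometry and free space/inode counts for statfs().
 * The free counters are summed from per-CPU counters, so the result is a
 * point-in-time snapshot; project-quota directories and realtime inodes
 * override the global numbers with their own limits and counters.
 */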
| 805 | STATIC int |
Nathan Scott | a50cd26 | 2006-03-14 14:06:18 +1100 | [diff] [blame] | 806 | xfs_fs_statfs( |
David Howells | 726c334 | 2006-06-23 02:02:58 -0700 | [diff] [blame] | 807 | struct dentry *dentry, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 808 | struct kstatfs *statp) |
| 809 | { |
Christoph Hellwig | 4ca488e | 2007-10-11 18:09:40 +1000 | [diff] [blame] | 810 | struct xfs_mount *mp = XFS_M(dentry->d_sb); |
| 811 | xfs_sb_t *sbp = &mp->m_sb; |
David Howells | 2b0143b | 2015-03-17 22:25:59 +0000 | [diff] [blame] | 812 | struct xfs_inode *ip = XFS_I(d_inode(dentry)); |
Darrick J. Wong | c8ce540 | 2017-06-16 11:00:05 -0700 | [diff] [blame] | 813 | uint64_t fakeinos, id; |
| 814 | uint64_t icount; |
| 815 | uint64_t ifree; |
| 816 | uint64_t fdblocks; |
Christoph Hellwig | 4ca488e | 2007-10-11 18:09:40 +1000 | [diff] [blame] | 817 | xfs_extlen_t lsize; |
Darrick J. Wong | c8ce540 | 2017-06-16 11:00:05 -0700 | [diff] [blame] | 818 | int64_t ffree; |
Christoph Hellwig | 4ca488e | 2007-10-11 18:09:40 +1000 | [diff] [blame] | 819 | |
Dave Chinner | 5e672cd | 2022-06-16 07:44:32 -0700 | [diff] [blame] | 820 | /* |
| 821 | * Expedite background inodegc but don't wait. We do not want to block |
| 822 | * here waiting hours for a billion extent file to be truncated. |
| 823 | */ |
| 824 | xfs_inodegc_push(mp); |
Darrick J. Wong | 01e8f37 | 2021-08-06 11:05:42 -0700 | [diff] [blame] | 825 | |
Adam Borowski | dddde68 | 2018-10-18 17:20:19 +1100 | [diff] [blame] | 826 | statp->f_type = XFS_SUPER_MAGIC; |
Christoph Hellwig | 4ca488e | 2007-10-11 18:09:40 +1000 | [diff] [blame] | 827 | statp->f_namelen = MAXNAMELEN - 1; |
| 828 | |
| 829 | id = huge_encode_dev(mp->m_ddev_targp->bt_dev); |
Al Viro | 6d1349c | 2020-09-18 16:45:50 -0400 | [diff] [blame] | 830 | statp->f_fsid = u64_to_fsid(id); |
Christoph Hellwig | 4ca488e | 2007-10-11 18:09:40 +1000 | [diff] [blame] | 831 | |
Dave Chinner | 501ab32 | 2015-02-23 21:19:28 +1100 | [diff] [blame] | 832 | icount = percpu_counter_sum(&mp->m_icount); |
Dave Chinner | e88b64e | 2015-02-23 21:19:53 +1100 | [diff] [blame] | 833 | ifree = percpu_counter_sum(&mp->m_ifree); |
Dave Chinner | 0d485ad | 2015-02-23 21:22:03 +1100 | [diff] [blame] | 834 | fdblocks = percpu_counter_sum(&mp->m_fdblocks); |
Christoph Hellwig | 4ca488e | 2007-10-11 18:09:40 +1000 | [diff] [blame] | 835 | |
| 836 | spin_lock(&mp->m_sb_lock); |
| 837 | statp->f_bsize = sbp->sb_blocksize; |
| 838 | lsize = sbp->sb_logstart ? sbp->sb_logblocks : 0; |
| 839 | statp->f_blocks = sbp->sb_dblocks - lsize; |
Dave Chinner | 0d485ad | 2015-02-23 21:22:03 +1100 | [diff] [blame] | 840 | spin_unlock(&mp->m_sb_lock); |
| 841 | |
Zheng Bin | 237aac4 | 2020-05-12 11:48:35 -0700 | [diff] [blame] | 842 | /* make sure statp->f_bfree does not underflow */ |
Darrick J. Wong | 85bcfa2 | 2022-03-16 13:38:43 -0700 | [diff] [blame] | 843 | statp->f_bfree = max_t(int64_t, 0, |
| 844 | fdblocks - xfs_fdblocks_unavailable(mp)); |
Dave Chinner | 0d485ad | 2015-02-23 21:22:03 +1100 | [diff] [blame] | 845 | statp->f_bavail = statp->f_bfree; |
| 846 | |
Darrick J. Wong | 43004b2 | 2018-12-12 08:46:24 -0800 | [diff] [blame] | 847 | fakeinos = XFS_FSB_TO_INO(mp, statp->f_bfree); |
Dave Chinner | 9bb54cb | 2018-06-07 07:54:02 -0700 | [diff] [blame] | 848 | statp->f_files = min(icount + fakeinos, (uint64_t)XFS_MAXINUMBER); |
Darrick J. Wong | ef32595 | 2019-06-05 11:19:34 -0700 | [diff] [blame] | 849 | if (M_IGEO(mp)->maxicount) |
Christoph Hellwig | a19d9f8 | 2009-03-29 09:51:08 +0200 | [diff] [blame] | 850 | statp->f_files = min_t(typeof(statp->f_files), |
| 851 | statp->f_files, |
Darrick J. Wong | ef32595 | 2019-06-05 11:19:34 -0700 | [diff] [blame] | 852 | M_IGEO(mp)->maxicount); |
Stuart Brodsky | 2fe3366 | 2010-08-24 11:46:05 +1000 | [diff] [blame] | 853 | |
Eric Sandeen | 01f9882 | 2015-02-06 09:53:02 +1100 | [diff] [blame] | 854 | /* If sb_icount overshot maxicount, report actual allocation */ |
| 855 | statp->f_files = max_t(typeof(statp->f_files), |
| 856 | statp->f_files, |
| 857 | sbp->sb_icount); |
| 858 | |
Stuart Brodsky | 2fe3366 | 2010-08-24 11:46:05 +1000 | [diff] [blame] | 859 | /* make sure statp->f_ffree does not underflow */ |
Dave Chinner | e88b64e | 2015-02-23 21:19:53 +1100 | [diff] [blame] | 860 | ffree = statp->f_files - (icount - ifree); |
Darrick J. Wong | c8ce540 | 2017-06-16 11:00:05 -0700 | [diff] [blame] | 861 | statp->f_ffree = max_t(int64_t, ffree, 0); |
Stuart Brodsky | 2fe3366 | 2010-08-24 11:46:05 +1000 | [diff] [blame] | 862 | |
Christoph Hellwig | 4ca488e | 2007-10-11 18:09:40 +1000 | [diff] [blame] | 863 | |
Christoph Hellwig | db07349 | 2021-03-29 11:11:44 -0700 | [diff] [blame] | 864 | if ((ip->i_diflags & XFS_DIFLAG_PROJINHERIT) && |
Chandra Seetharaman | 83e782e | 2013-06-27 17:25:10 -0500 | [diff] [blame] | 865 | ((mp->m_qflags & (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))) == |
| 866 | (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD)) |
Christoph Hellwig | 7d09525 | 2009-06-08 15:33:32 +0200 | [diff] [blame] | 867 | xfs_qm_statvfs(ip, statp); |
Richard Wareing | a015831 | 2018-01-08 10:41:33 -0800 | [diff] [blame] | 868 | |
| 869 | if (XFS_IS_REALTIME_MOUNT(mp) && |
Christoph Hellwig | db07349 | 2021-03-29 11:11:44 -0700 | [diff] [blame] | 870 | (ip->i_diflags & (XFS_DIFLAG_RTINHERIT | XFS_DIFLAG_REALTIME))) { |
Darrick J. Wong | 2229276 | 2022-04-12 06:49:42 +1000 | [diff] [blame] | 871 | s64 freertx; |
| 872 | |
Richard Wareing | a015831 | 2018-01-08 10:41:33 -0800 | [diff] [blame] | 873 | statp->f_blocks = sbp->sb_rblocks; |
Darrick J. Wong | 2229276 | 2022-04-12 06:49:42 +1000 | [diff] [blame] | 874 | freertx = percpu_counter_sum_positive(&mp->m_frextents); |
| 875 | statp->f_bavail = statp->f_bfree = freertx * sbp->sb_rextsize; |
Richard Wareing | a015831 | 2018-01-08 10:41:33 -0800 | [diff] [blame] | 876 | } |
| 877 | |
Christoph Hellwig | 4ca488e | 2007-10-11 18:09:40 +1000 | [diff] [blame] | 878 | return 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 879 | } |
| 880 | |
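/*
 * Worked example for the clamping in xfs_fs_statfs() above, with
 * made-up numbers: if the summed fdblocks counter is 100 blocks but
 * xfs_fdblocks_unavailable() reports 150 reserved blocks, then
 * max_t(int64_t, 0, 100 - 150) clamps f_bfree to 0 rather than letting
 * the subtraction wrap around to a huge unsigned value. f_ffree is
 * clamped the same way.
 */
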
Eric Sandeen | d5db0f9 | 2010-02-05 22:59:53 +0000 | [diff] [blame] | 881 | STATIC void |
| 882 | xfs_save_resvblks(struct xfs_mount *mp) |
| 883 | { |
Darrick J. Wong | c8ce540 | 2017-06-16 11:00:05 -0700 | [diff] [blame] | 884 | uint64_t resblks = 0; |
Eric Sandeen | d5db0f9 | 2010-02-05 22:59:53 +0000 | [diff] [blame] | 885 | |
| 886 | mp->m_resblks_save = mp->m_resblks; |
| 887 | xfs_reserve_blocks(mp, &resblks, NULL); |
| 888 | } |
| 889 | |
| 890 | STATIC void |
| 891 | xfs_restore_resvblks(struct xfs_mount *mp) |
| 892 | { |
Darrick J. Wong | c8ce540 | 2017-06-16 11:00:05 -0700 | [diff] [blame] | 893 | uint64_t resblks; |
Eric Sandeen | d5db0f9 | 2010-02-05 22:59:53 +0000 | [diff] [blame] | 894 | |
| 895 | if (mp->m_resblks_save) { |
| 896 | resblks = mp->m_resblks_save; |
| 897 | mp->m_resblks_save = 0; |
| 898 | } else |
| 899 | resblks = xfs_default_resblks(mp); |
| 900 | |
| 901 | xfs_reserve_blocks(mp, &resblks, NULL); |
| 902 | } |
| 903 | |
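/*
 * A usage note for the pair above: xfs_fs_freeze() and xfs_remount_ro()
 * call xfs_save_resvblks() to stash and empty the reserve block pool
 * (passing resblks = 0 to xfs_reserve_blocks() releases it), while
 * xfs_fs_unfreeze() and xfs_remount_rw() call xfs_restore_resvblks() to
 * refill it from the stashed value, or from xfs_default_resblks() if
 * nothing was stashed.
 */
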
Dave Chinner | c7eea6f | 2012-10-08 21:56:07 +1100 | [diff] [blame] | 904 | /* |
Christoph Hellwig | 9909c4a | 2007-10-11 18:11:14 +1000 | [diff] [blame] | 905 | * Second stage of a freeze. The data is already frozen so we only |
Dave Chinner | 61e63ec | 2015-01-22 09:10:31 +1100 | [diff] [blame] | 906 |  * need to take care of the metadata. Once that's done, sync the superblock
| 907 | * to the log to dirty it in case of a crash while frozen. This ensures that we |
| 908 | * will recover the unlinked inode lists on the next mount. |
Christoph Hellwig | 9909c4a | 2007-10-11 18:11:14 +1000 | [diff] [blame] | 909 | */ |
Takashi Sato | c4be0c1 | 2009-01-09 16:40:58 -0800 | [diff] [blame] | 910 | STATIC int |
| 911 | xfs_fs_freeze( |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 912 | struct super_block *sb) |
| 913 | { |
Christoph Hellwig | 9909c4a | 2007-10-11 18:11:14 +1000 | [diff] [blame] | 914 | struct xfs_mount *mp = XFS_M(sb); |
Waiman Long | c3f2375 | 2020-07-08 10:21:44 -0700 | [diff] [blame] | 915 | unsigned int flags; |
| 916 | int ret; |
Christoph Hellwig | 9909c4a | 2007-10-11 18:11:14 +1000 | [diff] [blame] | 917 | |
Waiman Long | c3f2375 | 2020-07-08 10:21:44 -0700 | [diff] [blame] | 918 | /* |
| 919 | * The filesystem is now frozen far enough that memory reclaim |
| 920 | * cannot safely operate on the filesystem. Hence we need to |
| 921 | * set a GFP_NOFS context here to avoid recursion deadlocks. |
| 922 | */ |
| 923 | flags = memalloc_nofs_save(); |
Eric Sandeen | d5db0f9 | 2010-02-05 22:59:53 +0000 | [diff] [blame] | 924 | xfs_save_resvblks(mp); |
Brian Foster | 5b0ad7c | 2021-01-22 16:48:24 -0800 | [diff] [blame] | 925 | ret = xfs_log_quiesce(mp); |
Waiman Long | c3f2375 | 2020-07-08 10:21:44 -0700 | [diff] [blame] | 926 | memalloc_nofs_restore(flags); |
Dave Chinner | ab23a77 | 2021-08-06 11:05:39 -0700 | [diff] [blame] | 927 | |
| 928 | /* |
| 929 | * For read-write filesystems, we need to restart the inodegc on error |
| 930 | * because we stopped it at SB_FREEZE_PAGEFAULT level and a thaw is not |
| 931 | * going to be run to restart it now. We are at SB_FREEZE_FS level |
| 932 | * here, so we can restart safely without racing with a stop in |
| 933 | * xfs_fs_sync_fs(). |
| 934 | */ |
Dave Chinner | 2e973b2 | 2021-08-18 18:46:52 -0700 | [diff] [blame] | 935 | if (ret && !xfs_is_readonly(mp)) { |
Darrick J. Wong | 6f64909 | 2021-08-06 11:05:42 -0700 | [diff] [blame] | 936 | xfs_blockgc_start(mp); |
Dave Chinner | ab23a77 | 2021-08-06 11:05:39 -0700 | [diff] [blame] | 937 | xfs_inodegc_start(mp); |
Darrick J. Wong | 6f64909 | 2021-08-06 11:05:42 -0700 | [diff] [blame] | 938 | } |
Dave Chinner | ab23a77 | 2021-08-06 11:05:39 -0700 | [diff] [blame] | 939 | |
Waiman Long | c3f2375 | 2020-07-08 10:21:44 -0700 | [diff] [blame] | 940 | return ret; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 941 | } |
| 942 | |
| 943 | STATIC int |
Eric Sandeen | d5db0f9 | 2010-02-05 22:59:53 +0000 | [diff] [blame] | 944 | xfs_fs_unfreeze( |
| 945 | struct super_block *sb) |
| 946 | { |
| 947 | struct xfs_mount *mp = XFS_M(sb); |
| 948 | |
| 949 | xfs_restore_resvblks(mp); |
Dave Chinner | f661f1e | 2012-10-08 21:56:02 +1100 | [diff] [blame] | 950 | xfs_log_work_queue(mp); |
Dave Chinner | ab23a77 | 2021-08-06 11:05:39 -0700 | [diff] [blame] | 951 | |
| 952 | /* |
| 953 | * Don't reactivate the inodegc worker on a readonly filesystem because |
Darrick J. Wong | 6f64909 | 2021-08-06 11:05:42 -0700 | [diff] [blame] | 954 | * inodes are sent directly to reclaim. Don't reactivate the blockgc |
| 955 | * worker because there are no speculative preallocations on a readonly |
| 956 | * filesystem. |
Dave Chinner | ab23a77 | 2021-08-06 11:05:39 -0700 | [diff] [blame] | 957 | */ |
Dave Chinner | 2e973b2 | 2021-08-18 18:46:52 -0700 | [diff] [blame] | 958 | if (!xfs_is_readonly(mp)) { |
Darrick J. Wong | 6f64909 | 2021-08-06 11:05:42 -0700 | [diff] [blame] | 959 | xfs_blockgc_start(mp); |
Dave Chinner | ab23a77 | 2021-08-06 11:05:39 -0700 | [diff] [blame] | 960 | xfs_inodegc_start(mp); |
Darrick J. Wong | 6f64909 | 2021-08-06 11:05:42 -0700 | [diff] [blame] | 961 | } |
Dave Chinner | ab23a77 | 2021-08-06 11:05:39 -0700 | [diff] [blame] | 962 | |
Eric Sandeen | d5db0f9 | 2010-02-05 22:59:53 +0000 | [diff] [blame] | 963 | return 0; |
| 964 | } |
| 965 | |
Christoph Hellwig | f8f15e4 | 2008-05-20 11:30:59 +1000 | [diff] [blame] | 966 | /* |
| 967 | * This function fills in xfs_mount_t fields based on mount args. |
Christoph Hellwig | f8f15e4 | 2008-05-20 11:30:59 +1000 | [diff] [blame] | 968 | * Note: the superblock _has_ now been read in. |
| 969 | */ |
| 970 | STATIC int |
| 971 | xfs_finish_flags( |
Christoph Hellwig | f8f15e4 | 2008-05-20 11:30:59 +1000 | [diff] [blame] | 972 | struct xfs_mount *mp) |
| 973 | { |
Frederik Schwarzer | 025dfda | 2008-10-16 19:02:37 +0200 | [diff] [blame] | 974 | /* Fail a mount where the logbuf is smaller than the log stripe */ |
Dave Chinner | 38c26bf | 2021-08-18 18:46:37 -0700 | [diff] [blame] | 975 | if (xfs_has_logv2(mp)) { |
Christoph Hellwig | 9d565ff | 2008-10-30 17:53:24 +1100 | [diff] [blame] | 976 | if (mp->m_logbsize <= 0 && |
| 977 | mp->m_sb.sb_logsunit > XLOG_BIG_RECORD_BSIZE) { |
Christoph Hellwig | f8f15e4 | 2008-05-20 11:30:59 +1000 | [diff] [blame] | 978 | mp->m_logbsize = mp->m_sb.sb_logsunit; |
Christoph Hellwig | 9d565ff | 2008-10-30 17:53:24 +1100 | [diff] [blame] | 979 | } else if (mp->m_logbsize > 0 && |
| 980 | mp->m_logbsize < mp->m_sb.sb_logsunit) { |
Dave Chinner | 4f10700 | 2011-03-07 10:00:35 +1100 | [diff] [blame] | 981 | xfs_warn(mp, |
| 982 | "logbuf size must be greater than or equal to log stripe size"); |
Dave Chinner | 2451337 | 2014-06-25 14:58:08 +1000 | [diff] [blame] | 983 | return -EINVAL; |
Christoph Hellwig | f8f15e4 | 2008-05-20 11:30:59 +1000 | [diff] [blame] | 984 | } |
| 985 | } else { |
| 986 | /* Fail a mount if the logbuf is larger than 32K */ |
Christoph Hellwig | 9d565ff | 2008-10-30 17:53:24 +1100 | [diff] [blame] | 987 | if (mp->m_logbsize > XLOG_BIG_RECORD_BSIZE) { |
Dave Chinner | 4f10700 | 2011-03-07 10:00:35 +1100 | [diff] [blame] | 988 | xfs_warn(mp, |
| 989 | "logbuf size for version 1 logs must be 16K or 32K"); |
Dave Chinner | 2451337 | 2014-06-25 14:58:08 +1000 | [diff] [blame] | 990 | return -EINVAL; |
Christoph Hellwig | f8f15e4 | 2008-05-20 11:30:59 +1000 | [diff] [blame] | 991 | } |
| 992 | } |
| 993 | |
| 994 | /* |
Dave Chinner | d3eaace | 2013-06-05 12:09:09 +1000 | [diff] [blame] | 995 | * V5 filesystems always use attr2 format for attributes. |
| 996 | */ |
Dave Chinner | 0560f31 | 2021-08-18 18:46:52 -0700 | [diff] [blame] | 997 | if (xfs_has_crc(mp) && xfs_has_noattr2(mp)) { |
Eric Sandeen | 2e74af0 | 2016-03-02 09:55:38 +1100 | [diff] [blame] | 998 | xfs_warn(mp, "Cannot mount a V5 filesystem as noattr2. " |
| 999 | "attr2 is always enabled for V5 filesystems."); |
Dave Chinner | 2451337 | 2014-06-25 14:58:08 +1000 | [diff] [blame] | 1000 | return -EINVAL; |
Dave Chinner | d3eaace | 2013-06-05 12:09:09 +1000 | [diff] [blame] | 1001 | } |
| 1002 | |
| 1003 | /* |
Christoph Hellwig | f8f15e4 | 2008-05-20 11:30:59 +1000 | [diff] [blame] | 1004 | * prohibit r/w mounts of read-only filesystems |
| 1005 | */ |
Dave Chinner | 2e973b2 | 2021-08-18 18:46:52 -0700 | [diff] [blame] | 1006 | if ((mp->m_sb.sb_flags & XFS_SBF_READONLY) && !xfs_is_readonly(mp)) { |
Dave Chinner | 4f10700 | 2011-03-07 10:00:35 +1100 | [diff] [blame] | 1007 | xfs_warn(mp, |
| 1008 | "cannot mount a read-only filesystem as read-write"); |
Dave Chinner | 2451337 | 2014-06-25 14:58:08 +1000 | [diff] [blame] | 1009 | return -EROFS; |
Christoph Hellwig | f8f15e4 | 2008-05-20 11:30:59 +1000 | [diff] [blame] | 1010 | } |
| 1011 | |
Christoph Hellwig | 149e53a | 2021-08-06 11:05:37 -0700 | [diff] [blame] | 1012 | if ((mp->m_qflags & XFS_GQUOTA_ACCT) && |
| 1013 | (mp->m_qflags & XFS_PQUOTA_ACCT) && |
Dave Chinner | 38c26bf | 2021-08-18 18:46:37 -0700 | [diff] [blame] | 1014 | !xfs_has_pquotino(mp)) { |
Chandra Seetharaman | d892d58 | 2013-07-19 17:36:02 -0500 | [diff] [blame] | 1015 | xfs_warn(mp, |
| 1016 | "Super block does not support project and group quota together"); |
Dave Chinner | 2451337 | 2014-06-25 14:58:08 +1000 | [diff] [blame] | 1017 | return -EINVAL; |
Chandra Seetharaman | d892d58 | 2013-07-19 17:36:02 -0500 | [diff] [blame] | 1018 | } |
| 1019 | |
Christoph Hellwig | f8f15e4 | 2008-05-20 11:30:59 +1000 | [diff] [blame] | 1020 | return 0; |
| 1021 | } |
| 1022 | |
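/*
 * Example of the v2 log checks above, with illustrative sizes: given a
 * 64k log stripe unit (sb_logsunit) and no logbsize= option, m_logbsize
 * is bumped to 64k automatically; an explicit logbsize=32k on the same
 * filesystem is rejected with -EINVAL because it is smaller than the
 * stripe unit. XLOG_BIG_RECORD_BSIZE is the 32k limit named in the v1
 * branch.
 */
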
Dave Chinner | 5681ca4 | 2015-02-23 21:22:31 +1100 | [diff] [blame] | 1023 | static int |
| 1024 | xfs_init_percpu_counters( |
| 1025 | struct xfs_mount *mp) |
| 1026 | { |
| 1027 | int error; |
| 1028 | |
| 1029 | error = percpu_counter_init(&mp->m_icount, 0, GFP_KERNEL); |
| 1030 | if (error) |
Joe Perches | 5e9383f | 2015-03-25 15:00:24 +1100 | [diff] [blame] | 1031 | return -ENOMEM; |
Dave Chinner | 5681ca4 | 2015-02-23 21:22:31 +1100 | [diff] [blame] | 1032 | |
| 1033 | error = percpu_counter_init(&mp->m_ifree, 0, GFP_KERNEL); |
| 1034 | if (error) |
| 1035 | goto free_icount; |
| 1036 | |
| 1037 | error = percpu_counter_init(&mp->m_fdblocks, 0, GFP_KERNEL); |
| 1038 | if (error) |
| 1039 | goto free_ifree; |
| 1040 | |
Darrick J. Wong | 9fe82b8 | 2019-04-25 18:26:22 -0700 | [diff] [blame] | 1041 | error = percpu_counter_init(&mp->m_delalloc_blks, 0, GFP_KERNEL); |
| 1042 | if (error) |
| 1043 | goto free_fdblocks; |
| 1044 | |
Darrick J. Wong | 2229276 | 2022-04-12 06:49:42 +1000 | [diff] [blame] | 1045 | error = percpu_counter_init(&mp->m_frextents, 0, GFP_KERNEL); |
| 1046 | if (error) |
| 1047 | goto free_delalloc; |
| 1048 | |
Dave Chinner | 5681ca4 | 2015-02-23 21:22:31 +1100 | [diff] [blame] | 1049 | return 0; |
| 1050 | |
Darrick J. Wong | 2229276 | 2022-04-12 06:49:42 +1000 | [diff] [blame] | 1051 | free_delalloc: |
| 1052 | percpu_counter_destroy(&mp->m_delalloc_blks); |
Darrick J. Wong | 9fe82b8 | 2019-04-25 18:26:22 -0700 | [diff] [blame] | 1053 | free_fdblocks: |
| 1054 | percpu_counter_destroy(&mp->m_fdblocks); |
Dave Chinner | 5681ca4 | 2015-02-23 21:22:31 +1100 | [diff] [blame] | 1055 | free_ifree: |
| 1056 | percpu_counter_destroy(&mp->m_ifree); |
| 1057 | free_icount: |
| 1058 | percpu_counter_destroy(&mp->m_icount); |
| 1059 | return -ENOMEM; |
| 1060 | } |
| 1061 | |
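/*
 * Note that the error path above unwinds in reverse order of
 * initialisation. A counter added to xfs_init_percpu_counters() needs a
 * matching goto label there and a percpu_counter_destroy() call in
 * xfs_destroy_percpu_counters() below.
 */
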
| 1062 | void |
| 1063 | xfs_reinit_percpu_counters( |
| 1064 | struct xfs_mount *mp) |
| 1065 | { |
| 1066 | percpu_counter_set(&mp->m_icount, mp->m_sb.sb_icount); |
| 1067 | percpu_counter_set(&mp->m_ifree, mp->m_sb.sb_ifree); |
| 1068 | percpu_counter_set(&mp->m_fdblocks, mp->m_sb.sb_fdblocks); |
Darrick J. Wong | 2229276 | 2022-04-12 06:49:42 +1000 | [diff] [blame] | 1069 | percpu_counter_set(&mp->m_frextents, mp->m_sb.sb_frextents); |
Dave Chinner | 5681ca4 | 2015-02-23 21:22:31 +1100 | [diff] [blame] | 1070 | } |
| 1071 | |
| 1072 | static void |
| 1073 | xfs_destroy_percpu_counters( |
| 1074 | struct xfs_mount *mp) |
| 1075 | { |
| 1076 | percpu_counter_destroy(&mp->m_icount); |
| 1077 | percpu_counter_destroy(&mp->m_ifree); |
| 1078 | percpu_counter_destroy(&mp->m_fdblocks); |
Dave Chinner | 75c8c50f | 2021-08-18 18:46:53 -0700 | [diff] [blame] | 1079 | ASSERT(xfs_is_shutdown(mp) || |
Darrick J. Wong | 9fe82b8 | 2019-04-25 18:26:22 -0700 | [diff] [blame] | 1080 | percpu_counter_sum(&mp->m_delalloc_blks) == 0); |
| 1081 | percpu_counter_destroy(&mp->m_delalloc_blks); |
Darrick J. Wong | 2229276 | 2022-04-12 06:49:42 +1000 | [diff] [blame] | 1082 | percpu_counter_destroy(&mp->m_frextents); |
Dave Chinner | 5681ca4 | 2015-02-23 21:22:31 +1100 | [diff] [blame] | 1083 | } |
| 1084 | |
Dave Chinner | ab23a77 | 2021-08-06 11:05:39 -0700 | [diff] [blame] | 1085 | static int |
| 1086 | xfs_inodegc_init_percpu( |
| 1087 | struct xfs_mount *mp) |
| 1088 | { |
| 1089 | struct xfs_inodegc *gc; |
| 1090 | int cpu; |
| 1091 | |
| 1092 | mp->m_inodegc = alloc_percpu(struct xfs_inodegc); |
| 1093 | if (!mp->m_inodegc) |
| 1094 | return -ENOMEM; |
| 1095 | |
| 1096 | for_each_possible_cpu(cpu) { |
| 1097 | gc = per_cpu_ptr(mp->m_inodegc, cpu); |
| 1098 | init_llist_head(&gc->list); |
| 1099 | gc->items = 0; |
Dave Chinner | 7cf2b0f | 2022-06-16 07:44:31 -0700 | [diff] [blame] | 1100 | INIT_DELAYED_WORK(&gc->work, xfs_inodegc_worker); |
Dave Chinner | ab23a77 | 2021-08-06 11:05:39 -0700 | [diff] [blame] | 1101 | } |
| 1102 | return 0; |
| 1103 | } |
| 1104 | |
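/*
 * Illustrative sketch (not code from this file) of how a producer might
 * use the per-cpu structures initialised above; mp->m_inodegc_wq and
 * ip->i_gclist are assumed names for the inodegc workqueue and the
 * inode's llist_node:
 *
 *	struct xfs_inodegc *gc = this_cpu_ptr(mp->m_inodegc);
 *
 *	llist_add(&ip->i_gclist, &gc->list);
 *	gc->items++;
 *	queue_delayed_work(mp->m_inodegc_wq, &gc->work, 0);
 *
 * Producers push inodes onto the lock-free gc->list and kick gc->work,
 * which runs xfs_inodegc_worker() as registered by INIT_DELAYED_WORK().
 */
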
| 1105 | static void |
| 1106 | xfs_inodegc_free_percpu( |
| 1107 | struct xfs_mount *mp) |
| 1108 | { |
| 1109 | if (!mp->m_inodegc) |
| 1110 | return; |
| 1111 | free_percpu(mp->m_inodegc); |
| 1112 | } |
| 1113 | |
Ian Kent | 2f8d66b | 2019-11-04 13:58:47 -0800 | [diff] [blame] | 1114 | static void |
| 1115 | xfs_fs_put_super( |
| 1116 | struct super_block *sb) |
| 1117 | { |
| 1118 | struct xfs_mount *mp = XFS_M(sb); |
| 1119 | |
| 1120 | /* if ->fill_super failed, we have no mount to tear down */ |
| 1121 | if (!sb->s_fs_info) |
| 1122 | return; |
| 1123 | |
Lukas Herbolt | 64c80df | 2022-11-16 19:20:21 -0800 | [diff] [blame] | 1124 | xfs_notice(mp, "Unmounting Filesystem %pU", &mp->m_sb.sb_uuid); |
Ian Kent | 2f8d66b | 2019-11-04 13:58:47 -0800 | [diff] [blame] | 1125 | xfs_filestream_unmount(mp); |
| 1126 | xfs_unmountfs(mp); |
| 1127 | |
| 1128 | xfs_freesb(mp); |
| 1129 | free_percpu(mp->m_stats.xs_stats); |
Dave Chinner | 0ed17f0 | 2021-08-06 11:05:38 -0700 | [diff] [blame] | 1130 | xfs_mount_list_del(mp); |
Dave Chinner | ab23a77 | 2021-08-06 11:05:39 -0700 | [diff] [blame] | 1131 | xfs_inodegc_free_percpu(mp); |
Ian Kent | 2f8d66b | 2019-11-04 13:58:47 -0800 | [diff] [blame] | 1132 | xfs_destroy_percpu_counters(mp); |
| 1133 | xfs_destroy_mount_workqueues(mp); |
| 1134 | xfs_close_devices(mp); |
| 1135 | |
| 1136 | sb->s_fs_info = NULL; |
| 1137 | xfs_mount_free(mp); |
| 1138 | } |
| 1139 | |
| 1140 | static long |
| 1141 | xfs_fs_nr_cached_objects( |
| 1142 | struct super_block *sb, |
| 1143 | struct shrink_control *sc) |
| 1144 | { |
| 1145 | /* Paranoia: catch incorrect calls during mount setup or teardown */ |
| 1146 | if (WARN_ON_ONCE(!sb->s_fs_info)) |
| 1147 | return 0; |
| 1148 | return xfs_reclaim_inodes_count(XFS_M(sb)); |
| 1149 | } |
| 1150 | |
| 1151 | static long |
| 1152 | xfs_fs_free_cached_objects( |
| 1153 | struct super_block *sb, |
| 1154 | struct shrink_control *sc) |
| 1155 | { |
| 1156 | return xfs_reclaim_inodes_nr(XFS_M(sb), sc->nr_to_scan); |
| 1157 | } |
| 1158 | |
| 1159 | static const struct super_operations xfs_super_operations = { |
| 1160 | .alloc_inode = xfs_fs_alloc_inode, |
| 1161 | .destroy_inode = xfs_fs_destroy_inode, |
| 1162 | .dirty_inode = xfs_fs_dirty_inode, |
| 1163 | .drop_inode = xfs_fs_drop_inode, |
| 1164 | .put_super = xfs_fs_put_super, |
| 1165 | .sync_fs = xfs_fs_sync_fs, |
| 1166 | .freeze_fs = xfs_fs_freeze, |
| 1167 | .unfreeze_fs = xfs_fs_unfreeze, |
| 1168 | .statfs = xfs_fs_statfs, |
| 1169 | .show_options = xfs_fs_show_options, |
| 1170 | .nr_cached_objects = xfs_fs_nr_cached_objects, |
| 1171 | .free_cached_objects = xfs_fs_free_cached_objects, |
| 1172 | }; |
| 1173 | |
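/*
 * The VFS dispatches through the table above: statfs(2) reaches
 * xfs_fs_statfs() via ->statfs, freeze_super() invokes ->freeze_fs, and
 * the superblock shrinker uses the ->nr_cached_objects and
 * ->free_cached_objects pair to count and reclaim reclaimable XFS
 * inodes under memory pressure.
 */
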
Ian Kent | 73e5fff | 2019-11-04 13:58:46 -0800 | [diff] [blame] | 1174 | static int |
Ian Kent | 8757c38 | 2019-11-04 13:58:48 -0800 | [diff] [blame] | 1175 | suffix_kstrtoint( |
| 1176 | const char *s, |
| 1177 | unsigned int base, |
| 1178 | int *res) |
| 1179 | { |
| 1180 | int last, shift_left_factor = 0, _res; |
| 1181 | char *value; |
| 1182 | int ret = 0; |
| 1183 | |
| 1184 | value = kstrdup(s, GFP_KERNEL); |
| 1185 | if (!value) |
| 1186 | return -ENOMEM; |
| 1187 | |
| 1188 | last = strlen(value) - 1; |
| 1189 | if (value[last] == 'K' || value[last] == 'k') { |
| 1190 | shift_left_factor = 10; |
| 1191 | value[last] = '\0'; |
| 1192 | } |
| 1193 | if (value[last] == 'M' || value[last] == 'm') { |
| 1194 | shift_left_factor = 20; |
| 1195 | value[last] = '\0'; |
| 1196 | } |
| 1197 | if (value[last] == 'G' || value[last] == 'g') { |
| 1198 | shift_left_factor = 30; |
| 1199 | value[last] = '\0'; |
| 1200 | } |
| 1201 | |
| 1202 | if (kstrtoint(value, base, &_res)) |
| 1203 | ret = -EINVAL; |
| 1204 | kfree(value); |
| 1205 | *res = _res << shift_left_factor; |
| 1206 | return ret; |
| 1207 | } |
| 1208 | |
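/*
 * Example: for logbsize=32k, suffix_kstrtoint("32k", 10, &res) strips
 * the 'k', parses "32" in base 10, and stores 32 << 10 = 32768 in *res.
 * Note that *res is written even when kstrtoint() fails (from an
 * uninitialised local), so callers must treat a nonzero return as fatal
 * and ignore the output value.
 */
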
Pavel Reichl | 92cf7d3 | 2021-03-22 09:52:02 -0700 | [diff] [blame] | 1209 | static inline void |
| 1210 | xfs_fs_warn_deprecated( |
| 1211 | struct fs_context *fc, |
| 1212 | struct fs_parameter *param, |
| 1213 | uint64_t flag, |
| 1214 | bool value) |
| 1215 | { |
| 1216 | 	/* Don't print the warning if reconfiguring and the current mount
| 1217 | 	 * point already had the flag set.
| 1218 | 	 */
| 1219 | if ((fc->purpose & FS_CONTEXT_FOR_RECONFIGURE) && |
Dave Chinner | 0560f31 | 2021-08-18 18:46:52 -0700 | [diff] [blame] | 1220 | !!(XFS_M(fc->root->d_sb)->m_features & flag) == value) |
Pavel Reichl | 92cf7d3 | 2021-03-22 09:52:02 -0700 | [diff] [blame] | 1221 | return; |
| 1222 | xfs_warn(fc->s_fs_info, "%s mount option is deprecated.", param->key); |
| 1223 | } |
| 1224 | |
Ian Kent | 8757c38 | 2019-11-04 13:58:48 -0800 | [diff] [blame] | 1225 | /* |
| 1226 | * Set mount state from a mount option. |
| 1227 | * |
| 1228 | * NOTE: mp->m_super is NULL here! |
| 1229 | */ |
| 1230 | static int |
Darrick J. Wong | 1e5c39d | 2020-12-04 15:59:39 -0800 | [diff] [blame] | 1231 | xfs_fs_parse_param( |
Ian Kent | 8757c38 | 2019-11-04 13:58:48 -0800 | [diff] [blame] | 1232 | struct fs_context *fc, |
| 1233 | struct fs_parameter *param) |
| 1234 | { |
Pavel Reichl | 0f98b4e | 2021-03-22 09:52:01 -0700 | [diff] [blame] | 1235 | struct xfs_mount *parsing_mp = fc->s_fs_info; |
Ian Kent | 8757c38 | 2019-11-04 13:58:48 -0800 | [diff] [blame] | 1236 | struct fs_parse_result result; |
| 1237 | int size = 0; |
| 1238 | int opt; |
| 1239 | |
Al Viro | d7167b1 | 2019-09-07 07:23:15 -0400 | [diff] [blame] | 1240 | opt = fs_parse(fc, xfs_fs_parameters, param, &result); |
Ian Kent | 8757c38 | 2019-11-04 13:58:48 -0800 | [diff] [blame] | 1241 | if (opt < 0) |
| 1242 | return opt; |
| 1243 | |
| 1244 | switch (opt) { |
| 1245 | case Opt_logbufs: |
Pavel Reichl | 0f98b4e | 2021-03-22 09:52:01 -0700 | [diff] [blame] | 1246 | parsing_mp->m_logbufs = result.uint_32; |
Ian Kent | 8757c38 | 2019-11-04 13:58:48 -0800 | [diff] [blame] | 1247 | return 0; |
| 1248 | case Opt_logbsize: |
Pavel Reichl | 0f98b4e | 2021-03-22 09:52:01 -0700 | [diff] [blame] | 1249 | if (suffix_kstrtoint(param->string, 10, &parsing_mp->m_logbsize)) |
Ian Kent | 8757c38 | 2019-11-04 13:58:48 -0800 | [diff] [blame] | 1250 | return -EINVAL; |
| 1251 | return 0; |
| 1252 | case Opt_logdev: |
Pavel Reichl | 0f98b4e | 2021-03-22 09:52:01 -0700 | [diff] [blame] | 1253 | kfree(parsing_mp->m_logname); |
| 1254 | parsing_mp->m_logname = kstrdup(param->string, GFP_KERNEL); |
| 1255 | if (!parsing_mp->m_logname) |
Ian Kent | 8757c38 | 2019-11-04 13:58:48 -0800 | [diff] [blame] | 1256 | return -ENOMEM; |
| 1257 | return 0; |
| 1258 | case Opt_rtdev: |
Pavel Reichl | 0f98b4e | 2021-03-22 09:52:01 -0700 | [diff] [blame] | 1259 | kfree(parsing_mp->m_rtname); |
| 1260 | parsing_mp->m_rtname = kstrdup(param->string, GFP_KERNEL); |
| 1261 | if (!parsing_mp->m_rtname) |
Ian Kent | 8757c38 | 2019-11-04 13:58:48 -0800 | [diff] [blame] | 1262 | return -ENOMEM; |
| 1263 | return 0; |
| 1264 | case Opt_allocsize: |
| 1265 | if (suffix_kstrtoint(param->string, 10, &size)) |
| 1266 | return -EINVAL; |
Pavel Reichl | 0f98b4e | 2021-03-22 09:52:01 -0700 | [diff] [blame] | 1267 | parsing_mp->m_allocsize_log = ffs(size) - 1; |
Dave Chinner | 0560f31 | 2021-08-18 18:46:52 -0700 | [diff] [blame] | 1268 | parsing_mp->m_features |= XFS_FEAT_ALLOCSIZE; |
Ian Kent | 8757c38 | 2019-11-04 13:58:48 -0800 | [diff] [blame] | 1269 | return 0; |
| 1270 | case Opt_grpid: |
| 1271 | case Opt_bsdgroups: |
Dave Chinner | 0560f31 | 2021-08-18 18:46:52 -0700 | [diff] [blame] | 1272 | parsing_mp->m_features |= XFS_FEAT_GRPID; |
Ian Kent | 8757c38 | 2019-11-04 13:58:48 -0800 | [diff] [blame] | 1273 | return 0; |
| 1274 | case Opt_nogrpid: |
| 1275 | case Opt_sysvgroups: |
Dave Chinner | 0560f31 | 2021-08-18 18:46:52 -0700 | [diff] [blame] | 1276 | parsing_mp->m_features &= ~XFS_FEAT_GRPID; |
Ian Kent | 8757c38 | 2019-11-04 13:58:48 -0800 | [diff] [blame] | 1277 | return 0; |
| 1278 | case Opt_wsync: |
Dave Chinner | 0560f31 | 2021-08-18 18:46:52 -0700 | [diff] [blame] | 1279 | parsing_mp->m_features |= XFS_FEAT_WSYNC; |
Ian Kent | 8757c38 | 2019-11-04 13:58:48 -0800 | [diff] [blame] | 1280 | return 0; |
| 1281 | case Opt_norecovery: |
Dave Chinner | 0560f31 | 2021-08-18 18:46:52 -0700 | [diff] [blame] | 1282 | parsing_mp->m_features |= XFS_FEAT_NORECOVERY; |
Ian Kent | 8757c38 | 2019-11-04 13:58:48 -0800 | [diff] [blame] | 1283 | return 0; |
| 1284 | case Opt_noalign: |
Dave Chinner | 0560f31 | 2021-08-18 18:46:52 -0700 | [diff] [blame] | 1285 | parsing_mp->m_features |= XFS_FEAT_NOALIGN; |
Ian Kent | 8757c38 | 2019-11-04 13:58:48 -0800 | [diff] [blame] | 1286 | return 0; |
| 1287 | case Opt_swalloc: |
Dave Chinner | 0560f31 | 2021-08-18 18:46:52 -0700 | [diff] [blame] | 1288 | parsing_mp->m_features |= XFS_FEAT_SWALLOC; |
Ian Kent | 8757c38 | 2019-11-04 13:58:48 -0800 | [diff] [blame] | 1289 | return 0; |
| 1290 | case Opt_sunit: |
Pavel Reichl | 0f98b4e | 2021-03-22 09:52:01 -0700 | [diff] [blame] | 1291 | parsing_mp->m_dalign = result.uint_32; |
Ian Kent | 8757c38 | 2019-11-04 13:58:48 -0800 | [diff] [blame] | 1292 | return 0; |
| 1293 | case Opt_swidth: |
Pavel Reichl | 0f98b4e | 2021-03-22 09:52:01 -0700 | [diff] [blame] | 1294 | parsing_mp->m_swidth = result.uint_32; |
Ian Kent | 8757c38 | 2019-11-04 13:58:48 -0800 | [diff] [blame] | 1295 | return 0; |
| 1296 | case Opt_inode32: |
Dave Chinner | 0560f31 | 2021-08-18 18:46:52 -0700 | [diff] [blame] | 1297 | parsing_mp->m_features |= XFS_FEAT_SMALL_INUMS; |
Ian Kent | 8757c38 | 2019-11-04 13:58:48 -0800 | [diff] [blame] | 1298 | return 0; |
| 1299 | case Opt_inode64: |
Dave Chinner | 0560f31 | 2021-08-18 18:46:52 -0700 | [diff] [blame] | 1300 | parsing_mp->m_features &= ~XFS_FEAT_SMALL_INUMS; |
Ian Kent | 8757c38 | 2019-11-04 13:58:48 -0800 | [diff] [blame] | 1301 | return 0; |
| 1302 | case Opt_nouuid: |
Dave Chinner | 0560f31 | 2021-08-18 18:46:52 -0700 | [diff] [blame] | 1303 | parsing_mp->m_features |= XFS_FEAT_NOUUID; |
Ian Kent | 8757c38 | 2019-11-04 13:58:48 -0800 | [diff] [blame] | 1304 | return 0; |
Ian Kent | 8757c38 | 2019-11-04 13:58:48 -0800 | [diff] [blame] | 1305 | case Opt_largeio: |
Dave Chinner | 0560f31 | 2021-08-18 18:46:52 -0700 | [diff] [blame] | 1306 | parsing_mp->m_features |= XFS_FEAT_LARGE_IOSIZE; |
Ian Kent | 8757c38 | 2019-11-04 13:58:48 -0800 | [diff] [blame] | 1307 | return 0; |
| 1308 | case Opt_nolargeio: |
Dave Chinner | 0560f31 | 2021-08-18 18:46:52 -0700 | [diff] [blame] | 1309 | parsing_mp->m_features &= ~XFS_FEAT_LARGE_IOSIZE; |
Ian Kent | 8757c38 | 2019-11-04 13:58:48 -0800 | [diff] [blame] | 1310 | return 0; |
Ian Kent | 8757c38 | 2019-11-04 13:58:48 -0800 | [diff] [blame] | 1311 | case Opt_filestreams: |
Dave Chinner | 0560f31 | 2021-08-18 18:46:52 -0700 | [diff] [blame] | 1312 | parsing_mp->m_features |= XFS_FEAT_FILESTREAMS; |
Ian Kent | 8757c38 | 2019-11-04 13:58:48 -0800 | [diff] [blame] | 1313 | return 0; |
| 1314 | case Opt_noquota: |
Pavel Reichl | 0f98b4e | 2021-03-22 09:52:01 -0700 | [diff] [blame] | 1315 | parsing_mp->m_qflags &= ~XFS_ALL_QUOTA_ACCT; |
| 1316 | parsing_mp->m_qflags &= ~XFS_ALL_QUOTA_ENFD; |
Ian Kent | 8757c38 | 2019-11-04 13:58:48 -0800 | [diff] [blame] | 1317 | return 0; |
| 1318 | case Opt_quota: |
| 1319 | case Opt_uquota: |
| 1320 | case Opt_usrquota: |
Christoph Hellwig | 149e53a | 2021-08-06 11:05:37 -0700 | [diff] [blame] | 1321 | parsing_mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ENFD); |
Ian Kent | 8757c38 | 2019-11-04 13:58:48 -0800 | [diff] [blame] | 1322 | return 0; |
| 1323 | case Opt_qnoenforce: |
| 1324 | case Opt_uqnoenforce: |
Christoph Hellwig | 149e53a | 2021-08-06 11:05:37 -0700 | [diff] [blame] | 1325 | parsing_mp->m_qflags |= XFS_UQUOTA_ACCT; |
Pavel Reichl | 0f98b4e | 2021-03-22 09:52:01 -0700 | [diff] [blame] | 1326 | parsing_mp->m_qflags &= ~XFS_UQUOTA_ENFD; |
Ian Kent | 8757c38 | 2019-11-04 13:58:48 -0800 | [diff] [blame] | 1327 | return 0; |
| 1328 | case Opt_pquota: |
| 1329 | case Opt_prjquota: |
Christoph Hellwig | 149e53a | 2021-08-06 11:05:37 -0700 | [diff] [blame] | 1330 | parsing_mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ENFD); |
Ian Kent | 8757c38 | 2019-11-04 13:58:48 -0800 | [diff] [blame] | 1331 | return 0; |
| 1332 | case Opt_pqnoenforce: |
Christoph Hellwig | 149e53a | 2021-08-06 11:05:37 -0700 | [diff] [blame] | 1333 | parsing_mp->m_qflags |= XFS_PQUOTA_ACCT; |
Pavel Reichl | 0f98b4e | 2021-03-22 09:52:01 -0700 | [diff] [blame] | 1334 | parsing_mp->m_qflags &= ~XFS_PQUOTA_ENFD; |
Ian Kent | 8757c38 | 2019-11-04 13:58:48 -0800 | [diff] [blame] | 1335 | return 0; |
| 1336 | case Opt_gquota: |
| 1337 | case Opt_grpquota: |
Christoph Hellwig | 149e53a | 2021-08-06 11:05:37 -0700 | [diff] [blame] | 1338 | parsing_mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ENFD); |
Ian Kent | 8757c38 | 2019-11-04 13:58:48 -0800 | [diff] [blame] | 1339 | return 0; |
| 1340 | case Opt_gqnoenforce: |
Christoph Hellwig | 149e53a | 2021-08-06 11:05:37 -0700 | [diff] [blame] | 1341 | parsing_mp->m_qflags |= XFS_GQUOTA_ACCT; |
Pavel Reichl | 0f98b4e | 2021-03-22 09:52:01 -0700 | [diff] [blame] | 1342 | parsing_mp->m_qflags &= ~XFS_GQUOTA_ENFD; |
Ian Kent | 8757c38 | 2019-11-04 13:58:48 -0800 | [diff] [blame] | 1343 | return 0; |
| 1344 | case Opt_discard: |
Dave Chinner | 0560f31 | 2021-08-18 18:46:52 -0700 | [diff] [blame] | 1345 | parsing_mp->m_features |= XFS_FEAT_DISCARD; |
Ian Kent | 8757c38 | 2019-11-04 13:58:48 -0800 | [diff] [blame] | 1346 | return 0; |
| 1347 | case Opt_nodiscard: |
Dave Chinner | 0560f31 | 2021-08-18 18:46:52 -0700 | [diff] [blame] | 1348 | parsing_mp->m_features &= ~XFS_FEAT_DISCARD; |
Ian Kent | 8757c38 | 2019-11-04 13:58:48 -0800 | [diff] [blame] | 1349 | return 0; |
| 1350 | #ifdef CONFIG_FS_DAX |
| 1351 | case Opt_dax: |
Pavel Reichl | 0f98b4e | 2021-03-22 09:52:01 -0700 | [diff] [blame] | 1352 | xfs_mount_set_dax_mode(parsing_mp, XFS_DAX_ALWAYS); |
Ira Weiny | 8d6c344 | 2020-05-04 09:02:42 -0700 | [diff] [blame] | 1353 | return 0; |
| 1354 | case Opt_dax_enum: |
Pavel Reichl | 0f98b4e | 2021-03-22 09:52:01 -0700 | [diff] [blame] | 1355 | xfs_mount_set_dax_mode(parsing_mp, result.uint_32); |
Ian Kent | 8757c38 | 2019-11-04 13:58:48 -0800 | [diff] [blame] | 1356 | return 0; |
| 1357 | #endif |
Pavel Reichl | c23c393 | 2020-09-25 11:10:29 -0700 | [diff] [blame] | 1358 | 	/* The following mount options will be removed in September 2025 */
| 1359 | case Opt_ikeep: |
Dave Chinner | 0560f31 | 2021-08-18 18:46:52 -0700 | [diff] [blame] | 1360 | xfs_fs_warn_deprecated(fc, param, XFS_FEAT_IKEEP, true); |
| 1361 | parsing_mp->m_features |= XFS_FEAT_IKEEP; |
Pavel Reichl | c23c393 | 2020-09-25 11:10:29 -0700 | [diff] [blame] | 1362 | return 0; |
| 1363 | case Opt_noikeep: |
Dave Chinner | 0560f31 | 2021-08-18 18:46:52 -0700 | [diff] [blame] | 1364 | xfs_fs_warn_deprecated(fc, param, XFS_FEAT_IKEEP, false); |
| 1365 | parsing_mp->m_features &= ~XFS_FEAT_IKEEP; |
Pavel Reichl | c23c393 | 2020-09-25 11:10:29 -0700 | [diff] [blame] | 1366 | return 0; |
| 1367 | case Opt_attr2: |
Dave Chinner | 0560f31 | 2021-08-18 18:46:52 -0700 | [diff] [blame] | 1368 | xfs_fs_warn_deprecated(fc, param, XFS_FEAT_ATTR2, true); |
| 1369 | parsing_mp->m_features |= XFS_FEAT_ATTR2; |
Pavel Reichl | c23c393 | 2020-09-25 11:10:29 -0700 | [diff] [blame] | 1370 | return 0; |
| 1371 | case Opt_noattr2: |
Dave Chinner | 0560f31 | 2021-08-18 18:46:52 -0700 | [diff] [blame] | 1372 | xfs_fs_warn_deprecated(fc, param, XFS_FEAT_NOATTR2, true); |
| 1373 | parsing_mp->m_features |= XFS_FEAT_NOATTR2; |
Pavel Reichl | c23c393 | 2020-09-25 11:10:29 -0700 | [diff] [blame] | 1374 | return 0; |
Ian Kent | 8757c38 | 2019-11-04 13:58:48 -0800 | [diff] [blame] | 1375 | default: |
Pavel Reichl | 0f98b4e | 2021-03-22 09:52:01 -0700 | [diff] [blame] | 1376 | xfs_warn(parsing_mp, "unknown mount option [%s].", param->key); |
Ian Kent | 8757c38 | 2019-11-04 13:58:48 -0800 | [diff] [blame] | 1377 | return -EINVAL; |
| 1378 | } |
| 1379 | |
| 1380 | return 0; |
| 1381 | } |
| 1382 | |
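/*
 * Flow example for the handler above: mounting with
 * "-o logbufs=8,sunit=128" has the mount API call xfs_fs_parse_param()
 * once per option; fs_parse() matches Opt_logbufs and Opt_sunit against
 * xfs_fs_parameters and returns the parsed numbers in result.uint_32,
 * which land in m_logbufs and m_dalign respectively.
 */
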
| 1383 | static int |
Darrick J. Wong | 1e5c39d | 2020-12-04 15:59:39 -0800 | [diff] [blame] | 1384 | xfs_fs_validate_params( |
Ian Kent | 8757c38 | 2019-11-04 13:58:48 -0800 | [diff] [blame] | 1385 | struct xfs_mount *mp) |
| 1386 | { |
Dave Chinner | 0560f31 | 2021-08-18 18:46:52 -0700 | [diff] [blame] | 1387 | /* No recovery flag requires a read-only mount */ |
Dave Chinner | 2e973b2 | 2021-08-18 18:46:52 -0700 | [diff] [blame] | 1388 | if (xfs_has_norecovery(mp) && !xfs_is_readonly(mp)) { |
Ian Kent | 8757c38 | 2019-11-04 13:58:48 -0800 | [diff] [blame] | 1389 | xfs_warn(mp, "no-recovery mounts must be read-only."); |
| 1390 | return -EINVAL; |
| 1391 | } |
| 1392 | |
Dave Chinner | 0560f31 | 2021-08-18 18:46:52 -0700 | [diff] [blame] | 1393 | /* |
| 1394 | * We have not read the superblock at this point, so only the attr2 |
| 1395 | * mount option can set the attr2 feature by this stage. |
| 1396 | */ |
| 1397 | if (xfs_has_attr2(mp) && xfs_has_noattr2(mp)) { |
Dave Chinner | e23b55d | 2021-08-18 18:46:25 -0700 | [diff] [blame] | 1398 | xfs_warn(mp, "attr2 and noattr2 cannot both be specified."); |
| 1399 | return -EINVAL; |
| 1400 | } |
| 1401 | 
Dave Chinner | 0560f31 | 2021-08-18 18:46:52 -0700 | [diff] [blame] | 1403 | if (xfs_has_noalign(mp) && (mp->m_dalign || mp->m_swidth)) { |
Ian Kent | 8757c38 | 2019-11-04 13:58:48 -0800 | [diff] [blame] | 1404 | xfs_warn(mp, |
| 1405 | "sunit and swidth options incompatible with the noalign option"); |
| 1406 | return -EINVAL; |
| 1407 | } |
| 1408 | |
| 1409 | if (!IS_ENABLED(CONFIG_XFS_QUOTA) && mp->m_qflags != 0) { |
| 1410 | xfs_warn(mp, "quota support not available in this kernel."); |
| 1411 | return -EINVAL; |
| 1412 | } |
| 1413 | |
| 1414 | if ((mp->m_dalign && !mp->m_swidth) || |
| 1415 | (!mp->m_dalign && mp->m_swidth)) { |
| 1416 | xfs_warn(mp, "sunit and swidth must be specified together"); |
| 1417 | return -EINVAL; |
| 1418 | } |
| 1419 | |
| 1420 | if (mp->m_dalign && (mp->m_swidth % mp->m_dalign != 0)) { |
| 1421 | xfs_warn(mp, |
| 1422 | "stripe width (%d) must be a multiple of the stripe unit (%d)", |
| 1423 | mp->m_swidth, mp->m_dalign); |
| 1424 | return -EINVAL; |
| 1425 | } |
| 1426 | |
| 1427 | if (mp->m_logbufs != -1 && |
| 1428 | mp->m_logbufs != 0 && |
| 1429 | (mp->m_logbufs < XLOG_MIN_ICLOGS || |
| 1430 | mp->m_logbufs > XLOG_MAX_ICLOGS)) { |
| 1431 | xfs_warn(mp, "invalid logbufs value: %d [not %d-%d]", |
| 1432 | mp->m_logbufs, XLOG_MIN_ICLOGS, XLOG_MAX_ICLOGS); |
| 1433 | return -EINVAL; |
| 1434 | } |
| 1435 | |
| 1436 | if (mp->m_logbsize != -1 && |
| 1437 | mp->m_logbsize != 0 && |
| 1438 | (mp->m_logbsize < XLOG_MIN_RECORD_BSIZE || |
| 1439 | mp->m_logbsize > XLOG_MAX_RECORD_BSIZE || |
| 1440 | !is_power_of_2(mp->m_logbsize))) { |
| 1441 | xfs_warn(mp, |
| 1442 | "invalid logbufsize: %d [not 16k,32k,64k,128k or 256k]", |
| 1443 | mp->m_logbsize); |
| 1444 | return -EINVAL; |
| 1445 | } |
| 1446 | |
Dave Chinner | 0560f31 | 2021-08-18 18:46:52 -0700 | [diff] [blame] | 1447 | if (xfs_has_allocsize(mp) && |
Ian Kent | 8757c38 | 2019-11-04 13:58:48 -0800 | [diff] [blame] | 1448 | (mp->m_allocsize_log > XFS_MAX_IO_LOG || |
| 1449 | mp->m_allocsize_log < XFS_MIN_IO_LOG)) { |
| 1450 | xfs_warn(mp, "invalid log iosize: %d [not %d-%d]", |
| 1451 | mp->m_allocsize_log, XFS_MIN_IO_LOG, XFS_MAX_IO_LOG); |
| 1452 | return -EINVAL; |
| 1453 | } |
| 1454 | |
| 1455 | return 0; |
| 1456 | } |
| 1457 | |
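/*
 * Example of the stripe geometry checks above: sunit=128,swidth=512
 * passes because 512 is a multiple of 128; sunit=128,swidth=200 fails
 * the multiple-of check; and sunit=128 on its own fails because sunit
 * and swidth must be specified together.
 */
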
| 1458 | static int |
Darrick J. Wong | 1e5c39d | 2020-12-04 15:59:39 -0800 | [diff] [blame] | 1459 | xfs_fs_fill_super( |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1460 | struct super_block *sb, |
Ian Kent | 73e5fff | 2019-11-04 13:58:46 -0800 | [diff] [blame] | 1461 | struct fs_context *fc) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1462 | { |
Ian Kent | 73e5fff | 2019-11-04 13:58:46 -0800 | [diff] [blame] | 1463 | struct xfs_mount *mp = sb->s_fs_info; |
Christoph Hellwig | f3dcc13 | 2008-03-27 18:00:54 +1100 | [diff] [blame] | 1464 | struct inode *root; |
Colin Ian King | 0279c71 | 2019-11-06 08:07:46 -0800 | [diff] [blame] | 1465 | int flags = 0, error; |
Christoph Hellwig | bdd907b | 2008-05-20 15:10:44 +1000 | [diff] [blame] | 1466 | |
Ian Kent | 7c89fcb | 2019-11-04 13:58:46 -0800 | [diff] [blame] | 1467 | mp->m_super = sb; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1468 | |
Darrick J. Wong | 1e5c39d | 2020-12-04 15:59:39 -0800 | [diff] [blame] | 1469 | error = xfs_fs_validate_params(mp); |
Christoph Hellwig | 745f691 | 2007-08-30 17:20:39 +1000 | [diff] [blame] | 1470 | if (error) |
Ian Kent | e1d3d21 | 2019-11-04 13:58:40 -0800 | [diff] [blame] | 1471 | goto out_free_names; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1472 | |
| 1473 | sb_min_blocksize(sb, BBSIZE); |
Lachlan McIlroy | 0ec5851 | 2008-06-23 13:23:01 +1000 | [diff] [blame] | 1474 | sb->s_xattr = xfs_xattr_handlers; |
Nathan Scott | a50cd26 | 2006-03-14 14:06:18 +1100 | [diff] [blame] | 1475 | sb->s_export_op = &xfs_export_operations; |
Christoph Hellwig | fcafb71 | 2009-02-09 08:47:34 +0100 | [diff] [blame] | 1476 | #ifdef CONFIG_XFS_QUOTA |
Nathan Scott | a50cd26 | 2006-03-14 14:06:18 +1100 | [diff] [blame] | 1477 | sb->s_qcop = &xfs_quotactl_operations; |
Jan Kara | 17ef4fd | 2014-09-30 22:35:33 +0200 | [diff] [blame] | 1478 | sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ; |
Christoph Hellwig | fcafb71 | 2009-02-09 08:47:34 +0100 | [diff] [blame] | 1479 | #endif |
Nathan Scott | a50cd26 | 2006-03-14 14:06:18 +1100 | [diff] [blame] | 1480 | sb->s_op = &xfs_super_operations; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1481 | |
Dave Chinner | dae5cd8 | 2018-05-10 21:50:23 -0700 | [diff] [blame] | 1482 | /* |
| 1483 | * Delay mount work if the debug hook is set. This is debug |
| 1484 | 	 * instrumentation to coordinate simulation of xfs mount failures with
| 1485 | 	 * VFS superblock operations.
| 1486 | */ |
| 1487 | if (xfs_globals.mount_delay) { |
| 1488 | xfs_notice(mp, "Delaying mount for %d seconds.", |
| 1489 | xfs_globals.mount_delay); |
| 1490 | msleep(xfs_globals.mount_delay * 1000); |
| 1491 | } |
| 1492 | |
Ian Kent | 73e5fff | 2019-11-04 13:58:46 -0800 | [diff] [blame] | 1493 | if (fc->sb_flags & SB_SILENT) |
Christoph Hellwig | f8f15e4 | 2008-05-20 11:30:59 +1000 | [diff] [blame] | 1494 | flags |= XFS_MFSI_QUIET; |
| 1495 | |
Christoph Hellwig | 9d565ff | 2008-10-30 17:53:24 +1100 | [diff] [blame] | 1496 | error = xfs_open_devices(mp); |
Christoph Hellwig | 19f354d | 2008-05-20 11:31:13 +1000 | [diff] [blame] | 1497 | if (error) |
Ian Kent | e1d3d21 | 2019-11-04 13:58:40 -0800 | [diff] [blame] | 1498 | goto out_free_names; |
Christoph Hellwig | f8f15e4 | 2008-05-20 11:30:59 +1000 | [diff] [blame] | 1499 | |
Dave Chinner | 2451337 | 2014-06-25 14:58:08 +1000 | [diff] [blame] | 1500 | error = xfs_init_mount_workqueues(mp); |
Christoph Hellwig | 61ba35d | 2010-09-30 02:25:54 +0000 | [diff] [blame] | 1501 | if (error) |
| 1502 | goto out_close_devices; |
Christoph Hellwig | c962fb7 | 2008-05-20 15:10:52 +1000 | [diff] [blame] | 1503 | |
Dave Chinner | 5681ca4 | 2015-02-23 21:22:31 +1100 | [diff] [blame] | 1504 | error = xfs_init_percpu_counters(mp); |
Christoph Hellwig | aa6bf01 | 2012-02-29 09:53:48 +0000 | [diff] [blame] | 1505 | if (error) |
| 1506 | goto out_destroy_workqueues; |
| 1507 | |
Dave Chinner | ab23a77 | 2021-08-06 11:05:39 -0700 | [diff] [blame] | 1508 | error = xfs_inodegc_init_percpu(mp); |
| 1509 | if (error) |
| 1510 | goto out_destroy_counters; |
| 1511 | |
Dave Chinner | 0ed17f0 | 2021-08-06 11:05:38 -0700 | [diff] [blame] | 1512 | /* |
| 1513 | * All percpu data structures requiring cleanup when a cpu goes offline |
| 1514 | * must be allocated before adding this @mp to the cpu-dead handler's |
| 1515 | * mount list. |
| 1516 | */ |
| 1517 | xfs_mount_list_add(mp); |
| 1518 | |
Bill O'Donnell | 225e463 | 2015-10-12 18:21:19 +1100 | [diff] [blame] | 1519 | /* Allocate stats memory before we do operations that might use it */ |
| 1520 | mp->m_stats.xs_stats = alloc_percpu(struct xfsstats); |
| 1521 | if (!mp->m_stats.xs_stats) { |
Dan Carpenter | f9d460b | 2015-10-19 08:42:47 +1100 | [diff] [blame] | 1522 | error = -ENOMEM; |
Dave Chinner | ab23a77 | 2021-08-06 11:05:39 -0700 | [diff] [blame] | 1523 | goto out_destroy_inodegc; |
Bill O'Donnell | 225e463 | 2015-10-12 18:21:19 +1100 | [diff] [blame] | 1524 | } |
| 1525 | |
Christoph Hellwig | f8f15e4 | 2008-05-20 11:30:59 +1000 | [diff] [blame] | 1526 | error = xfs_readsb(mp, flags); |
| 1527 | if (error) |
Bill O'Donnell | 225e463 | 2015-10-12 18:21:19 +1100 | [diff] [blame] | 1528 | goto out_free_stats; |
Christoph Hellwig | 9d565ff | 2008-10-30 17:53:24 +1100 | [diff] [blame] | 1529 | |
| 1530 | error = xfs_finish_flags(mp); |
Christoph Hellwig | f8f15e4 | 2008-05-20 11:30:59 +1000 | [diff] [blame] | 1531 | if (error) |
Christoph Hellwig | effa2ed | 2008-05-20 15:11:05 +1000 | [diff] [blame] | 1532 | goto out_free_sb; |
Christoph Hellwig | f8f15e4 | 2008-05-20 11:30:59 +1000 | [diff] [blame] | 1533 | |
Christoph Hellwig | e34b562 | 2008-05-20 15:10:36 +1000 | [diff] [blame] | 1534 | error = xfs_setup_devices(mp); |
Christoph Hellwig | 19f354d | 2008-05-20 11:31:13 +1000 | [diff] [blame] | 1535 | if (error) |
Christoph Hellwig | effa2ed | 2008-05-20 15:11:05 +1000 | [diff] [blame] | 1536 | goto out_free_sb; |
Christoph Hellwig | f8f15e4 | 2008-05-20 11:30:59 +1000 | [diff] [blame] | 1537 | |
Darrick J. Wong | b96cb83 | 2020-09-10 10:57:17 -0700 | [diff] [blame] | 1538 | /* V4 support is undergoing deprecation. */ |
Dave Chinner | 38c26bf | 2021-08-18 18:46:37 -0700 | [diff] [blame] | 1539 | if (!xfs_has_crc(mp)) { |
Darrick J. Wong | b96cb83 | 2020-09-10 10:57:17 -0700 | [diff] [blame] | 1540 | #ifdef CONFIG_XFS_SUPPORT_V4 |
| 1541 | xfs_warn_once(mp, |
| 1542 | "Deprecated V4 format (crc=0) will not be supported after September 2030."); |
| 1543 | #else |
| 1544 | xfs_warn(mp, |
| 1545 | "Deprecated V4 format (crc=0) not supported by kernel."); |
| 1546 | error = -EINVAL; |
| 1547 | goto out_free_sb; |
| 1548 | #endif |
| 1549 | } |
| 1550 | |
Darrick J. Wong | 7ba8385 | 2023-04-11 19:05:19 -0700 | [diff] [blame] | 1551 | /* ASCII case insensitivity is undergoing deprecation. */ |
| 1552 | if (xfs_has_asciici(mp)) { |
| 1553 | #ifdef CONFIG_XFS_SUPPORT_ASCII_CI |
| 1554 | xfs_warn_once(mp, |
| 1555 | "Deprecated ASCII case-insensitivity feature (ascii-ci=1) will not be supported after September 2030."); |
| 1556 | #else |
| 1557 | xfs_warn(mp, |
| 1558 | "Deprecated ASCII case-insensitivity feature (ascii-ci=1) not supported by kernel."); |
| 1559 | error = -EINVAL; |
| 1560 | goto out_free_sb; |
| 1561 | #endif |
| 1562 | } |
| 1563 | |
Darrick J. Wong | 80c720b | 2020-11-24 11:45:55 -0800 | [diff] [blame] | 1564 | /* Filesystem claims it needs repair, so refuse the mount. */ |
Dave Chinner | ebd9027 | 2021-08-18 18:46:55 -0700 | [diff] [blame] | 1565 | if (xfs_has_needsrepair(mp)) { |
Darrick J. Wong | 80c720b | 2020-11-24 11:45:55 -0800 | [diff] [blame] | 1566 | xfs_warn(mp, "Filesystem needs repair. Please run xfs_repair."); |
| 1567 | error = -EFSCORRUPTED; |
| 1568 | goto out_free_sb; |
| 1569 | } |
| 1570 | |
Darrick J. Wong | 932befe | 2020-01-02 13:20:13 -0800 | [diff] [blame] | 1571 | /* |
Darrick J. Wong | 3945ae0 | 2020-11-24 11:45:54 -0800 | [diff] [blame] | 1572 | * Don't touch the filesystem if a user tool thinks it owns the primary |
| 1573 | * superblock. mkfs doesn't clear the flag from secondary supers, so |
| 1574 | * we don't check them at all. |
| 1575 | */ |
| 1576 | if (mp->m_sb.sb_inprogress) { |
| 1577 | xfs_warn(mp, "Offline file system operation in progress!"); |
| 1578 | error = -EFSCORRUPTED; |
| 1579 | goto out_free_sb; |
| 1580 | } |
| 1581 | |
| 1582 | /* |
| 1583 | 	 * Until this is fixed, only page-sized or smaller data blocks work.
| 1584 | */ |
| 1585 | if (mp->m_sb.sb_blocksize > PAGE_SIZE) { |
| 1586 | xfs_warn(mp, |
| 1587 | "File system with blocksize %d bytes. " |
| 1588 | "Only pagesize (%ld) or less will currently work.", |
| 1589 | mp->m_sb.sb_blocksize, PAGE_SIZE); |
| 1590 | error = -ENOSYS; |
| 1591 | goto out_free_sb; |
| 1592 | } |
| 1593 | |
| 1594 | /* Ensure this filesystem fits in the page cache limits */ |
| 1595 | if (xfs_sb_validate_fsb_count(&mp->m_sb, mp->m_sb.sb_dblocks) || |
| 1596 | xfs_sb_validate_fsb_count(&mp->m_sb, mp->m_sb.sb_rblocks)) { |
| 1597 | xfs_warn(mp, |
| 1598 | "file system too large to be mounted on this system."); |
| 1599 | error = -EFBIG; |
| 1600 | goto out_free_sb; |
| 1601 | } |
| 1602 | |
| 1603 | /* |
Darrick J. Wong | 932befe | 2020-01-02 13:20:13 -0800 | [diff] [blame] | 1604 | * XFS block mappings use 54 bits to store the logical block offset. |
| 1605 | * This should suffice to handle the maximum file size that the VFS |
| 1606 | * supports (currently 2^63 bytes on 64-bit and ULONG_MAX << PAGE_SHIFT |
| 1607 | * bytes on 32-bit), but as XFS and VFS have gotten the s_maxbytes |
| 1608 | * calculation wrong on 32-bit kernels in the past, we'll add a WARN_ON |
| 1609 | * to check this assertion. |
| 1610 | * |
| 1611 | * Avoid integer overflow by comparing the maximum bmbt offset to the |
| 1612 | * maximum pagecache offset in units of fs blocks. |
| 1613 | */ |
Darrick J. Wong | 33005fd | 2020-12-04 13:28:35 -0800 | [diff] [blame] | 1614 | if (!xfs_verify_fileoff(mp, XFS_B_TO_FSBT(mp, MAX_LFS_FILESIZE))) { |
Darrick J. Wong | 932befe | 2020-01-02 13:20:13 -0800 | [diff] [blame] | 1615 | xfs_warn(mp, |
| 1616 | "MAX_LFS_FILESIZE block offset (%llu) exceeds extent map maximum (%llu)!", |
| 1617 | XFS_B_TO_FSBT(mp, MAX_LFS_FILESIZE), |
| 1618 | XFS_MAX_FILEOFF); |
| 1619 | error = -EINVAL; |
| 1620 | goto out_free_sb; |
| 1621 | } |
| 1622 | |
Christoph Hellwig | f8f15e4 | 2008-05-20 11:30:59 +1000 | [diff] [blame] | 1623 | error = xfs_filestream_mount(mp); |
| 1624 | if (error) |
Christoph Hellwig | effa2ed | 2008-05-20 15:11:05 +1000 | [diff] [blame] | 1625 | goto out_free_sb; |
Christoph Hellwig | f8f15e4 | 2008-05-20 11:30:59 +1000 | [diff] [blame] | 1626 | |
Dave Chinner | 704b290 | 2011-03-26 09:14:57 +1100 | [diff] [blame] | 1627 | /* |
| 1628 | * we must configure the block size in the superblock before we run the |
| 1629 | 	 * full mount process, as the mount process can look up and cache inodes.
Dave Chinner | 704b290 | 2011-03-26 09:14:57 +1100 | [diff] [blame] | 1630 | */ |
Adam Borowski | dddde68 | 2018-10-18 17:20:19 +1100 | [diff] [blame] | 1631 | sb->s_magic = XFS_SUPER_MAGIC; |
Christoph Hellwig | 4ca488e | 2007-10-11 18:09:40 +1000 | [diff] [blame] | 1632 | sb->s_blocksize = mp->m_sb.sb_blocksize; |
| 1633 | sb->s_blocksize_bits = ffs(sb->s_blocksize) - 1; |
Darrick J. Wong | 932befe | 2020-01-02 13:20:13 -0800 | [diff] [blame] | 1634 | sb->s_maxbytes = MAX_LFS_FILESIZE; |
Al Viro | 8de5277 | 2012-02-06 12:45:27 -0500 | [diff] [blame] | 1635 | sb->s_max_links = XFS_MAXLINK; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1636 | sb->s_time_gran = 1; |
Dave Chinner | 38c26bf | 2021-08-18 18:46:37 -0700 | [diff] [blame] | 1637 | if (xfs_has_bigtime(mp)) { |
Darrick J. Wong | f93e5436 | 2020-08-17 09:59:07 -0700 | [diff] [blame] | 1638 | sb->s_time_min = xfs_bigtime_to_unix(XFS_BIGTIME_TIME_MIN); |
| 1639 | sb->s_time_max = xfs_bigtime_to_unix(XFS_BIGTIME_TIME_MAX); |
| 1640 | } else { |
| 1641 | sb->s_time_min = XFS_LEGACY_TIME_MIN; |
| 1642 | sb->s_time_max = XFS_LEGACY_TIME_MAX; |
| 1643 | } |
Darrick J. Wong | 06dbf82 | 2020-08-24 11:58:01 -0700 | [diff] [blame] | 1644 | trace_xfs_inode_timestamp_range(mp, sb->s_time_min, sb->s_time_max); |
Christoph Hellwig | adfb5fb | 2019-06-28 19:30:22 -0700 | [diff] [blame] | 1645 | sb->s_iflags |= SB_I_CGROUPWB; |
| 1646 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1647 | set_posix_acl_flag(sb); |
| 1648 | |
Dave Chinner | dc037ad | 2013-06-27 16:04:59 +1000 | [diff] [blame] | 1649 | /* version 5 superblocks support inode version counters. */ |
Dave Chinner | d6837c1 | 2021-08-18 18:46:56 -0700 | [diff] [blame] | 1650 | if (xfs_has_crc(mp)) |
Matthew Garrett | 357fdad | 2017-10-18 13:56:26 -0700 | [diff] [blame] | 1651 | sb->s_flags |= SB_I_VERSION; |
Dave Chinner | dc037ad | 2013-06-27 16:04:59 +1000 | [diff] [blame] | 1652 | |
Dave Chinner | 0560f31 | 2021-08-18 18:46:52 -0700 | [diff] [blame] | 1653 | if (xfs_has_dax_always(mp)) { |
Christoph Hellwig | 679a994 | 2021-11-29 11:21:41 +0100 | [diff] [blame] | 1654 | error = xfs_setup_dax_always(mp); |
| 1655 | if (error) |
Darrick J. Wong | b6e03c1 | 2018-01-31 14:21:56 -0800 | [diff] [blame] | 1656 | goto out_filestream_unmount; |
Dave Chinner | cbe4dab | 2015-06-04 09:19:18 +1000 | [diff] [blame] | 1657 | } |
| 1658 | |
Christoph Hellwig | 7020057 | 2022-04-15 06:52:55 +0200 | [diff] [blame] | 1659 | if (xfs_has_discard(mp) && !bdev_max_discard_sectors(sb->s_bdev)) { |
| 1660 | xfs_warn(mp, |
| 1661 | "mounting with \"discard\" option, but the device does not support discard"); |
| 1662 | mp->m_features &= ~XFS_FEAT_DISCARD; |
Kenjiro Nakayama | 1e6fa68 | 2017-09-18 12:03:56 -0700 | [diff] [blame] | 1663 | } |
| 1664 | |
Dave Chinner | 38c26bf | 2021-08-18 18:46:37 -0700 | [diff] [blame] | 1665 | if (xfs_has_reflink(mp)) { |
Christoph Hellwig | 66ae56a | 2019-02-18 09:38:49 -0800 | [diff] [blame] | 1666 | if (mp->m_sb.sb_rblocks) { |
| 1667 | xfs_alert(mp, |
Darrick J. Wong | c14632d | 2018-01-31 16:38:18 -0800 | [diff] [blame] | 1668 | "reflink not compatible with realtime device!"); |
Christoph Hellwig | 66ae56a | 2019-02-18 09:38:49 -0800 | [diff] [blame] | 1669 | error = -EINVAL; |
| 1670 | goto out_filestream_unmount; |
| 1671 | } |
| 1672 | |
| 1673 | if (xfs_globals.always_cow) { |
| 1674 | xfs_info(mp, "using DEBUG-only always_cow mode."); |
| 1675 | mp->m_always_cow = true; |
| 1676 | } |
Darrick J. Wong | c14632d | 2018-01-31 16:38:18 -0800 | [diff] [blame] | 1677 | } |
| 1678 | |
Dave Chinner | 38c26bf | 2021-08-18 18:46:37 -0700 | [diff] [blame] | 1679 | if (xfs_has_rmapbt(mp) && mp->m_sb.sb_rblocks) { |
Darrick J. Wong | 1c0607a | 2016-08-03 12:20:57 +1000 | [diff] [blame] | 1680 | xfs_alert(mp, |
Darrick J. Wong | 76883f7 | 2018-01-31 09:47:25 -0800 | [diff] [blame] | 1681 | "reverse mapping btree not compatible with realtime device!"); |
| 1682 | error = -EINVAL; |
| 1683 | goto out_filestream_unmount; |
Darrick J. Wong | 738f57c | 2016-08-26 15:59:19 +1000 | [diff] [blame] | 1684 | } |
Darrick J. Wong | 1c0607a | 2016-08-03 12:20:57 +1000 | [diff] [blame] | 1685 | |
Chandan Babu R | 973ac0e | 2021-08-11 10:33:20 +0530 | [diff] [blame] | 1686 | if (xfs_has_large_extent_counts(mp)) |
| 1687 | xfs_warn(mp, |
| 1688 | "EXPERIMENTAL Large extent counts feature in use. Use at your own risk!"); |
| 1689 | |
Dave Chinner | 8a00ebe | 2012-04-13 12:10:44 +0000 | [diff] [blame] | 1690 | error = xfs_mountfs(mp); |
Christoph Hellwig | 2bcf6e9 | 2011-07-13 13:43:48 +0200 | [diff] [blame] | 1691 | if (error) |
Dave Chinner | 7e18530 | 2012-10-08 21:56:00 +1100 | [diff] [blame] | 1692 | goto out_filestream_unmount; |
Dave Chinner | 704b290 | 2011-03-26 09:14:57 +1100 | [diff] [blame] | 1693 | |
David Chinner | 0165164 | 2008-08-13 15:45:15 +1000 | [diff] [blame] | 1694 | root = igrab(VFS_I(mp->m_rootip)); |
Christoph Hellwig | f3dcc13 | 2008-03-27 18:00:54 +1100 | [diff] [blame] | 1695 | if (!root) { |
Dave Chinner | 2451337 | 2014-06-25 14:58:08 +1000 | [diff] [blame] | 1696 | error = -ENOENT; |
Dave Chinner | 8a00ebe | 2012-04-13 12:10:44 +0000 | [diff] [blame] | 1697 | goto out_unmount; |
Christoph Hellwig | cbc89dc | 2008-02-05 12:14:01 +1100 | [diff] [blame] | 1698 | } |
Al Viro | 48fde70 | 2012-01-08 22:15:13 -0500 | [diff] [blame] | 1699 | sb->s_root = d_make_root(root); |
Christoph Hellwig | f3dcc13 | 2008-03-27 18:00:54 +1100 | [diff] [blame] | 1700 | if (!sb->s_root) { |
Dave Chinner | 2451337 | 2014-06-25 14:58:08 +1000 | [diff] [blame] | 1701 | error = -ENOMEM; |
Dave Chinner | 8a00ebe | 2012-04-13 12:10:44 +0000 | [diff] [blame] | 1702 | goto out_unmount; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1703 | } |
Christoph Hellwig | 7439449 | 2007-08-30 17:21:22 +1000 | [diff] [blame] | 1704 | |
Dave Chinner | 7e18530 | 2012-10-08 21:56:00 +1100 | [diff] [blame] | 1705 | return 0; |
| 1706 | |
| 1707 | out_filestream_unmount: |
Christoph Hellwig | 120226c | 2008-05-20 15:11:11 +1000 | [diff] [blame] | 1708 | xfs_filestream_unmount(mp); |
Christoph Hellwig | effa2ed | 2008-05-20 15:11:05 +1000 | [diff] [blame] | 1709 | out_free_sb: |
| 1710 | xfs_freesb(mp); |
Bill O'Donnell | 225e463 | 2015-10-12 18:21:19 +1100 | [diff] [blame] | 1711 | out_free_stats: |
| 1712 | free_percpu(mp->m_stats.xs_stats); |
Dave Chinner | ab23a77 | 2021-08-06 11:05:39 -0700 | [diff] [blame] | 1713 | out_destroy_inodegc: |
Dave Chinner | 0ed17f0 | 2021-08-06 11:05:38 -0700 | [diff] [blame] | 1714 | xfs_mount_list_del(mp); |
Dave Chinner | ab23a77 | 2021-08-06 11:05:39 -0700 | [diff] [blame] | 1715 | xfs_inodegc_free_percpu(mp); |
Christoph Hellwig | 9d565ff | 2008-10-30 17:53:24 +1100 | [diff] [blame] | 1716 | out_destroy_counters: |
Dave Chinner | 5681ca4 | 2015-02-23 21:22:31 +1100 | [diff] [blame] | 1717 | xfs_destroy_percpu_counters(mp); |
Bill O'Donnell | 225e463 | 2015-10-12 18:21:19 +1100 | [diff] [blame] | 1718 | out_destroy_workqueues: |
Christoph Hellwig | aa6bf01 | 2012-02-29 09:53:48 +0000 | [diff] [blame] | 1719 | xfs_destroy_mount_workqueues(mp); |
Christoph Hellwig | 61ba35d | 2010-09-30 02:25:54 +0000 | [diff] [blame] | 1720 | out_close_devices: |
Christoph Hellwig | 19f354d | 2008-05-20 11:31:13 +1000 | [diff] [blame] | 1721 | xfs_close_devices(mp); |
Ian Kent | e1d3d21 | 2019-11-04 13:58:40 -0800 | [diff] [blame] | 1722 | out_free_names: |
Dave Chinner | c9fbd7b | 2018-05-10 21:50:23 -0700 | [diff] [blame] | 1723 | sb->s_fs_info = NULL; |
Ian Kent | a943f37 | 2019-11-04 13:58:42 -0800 | [diff] [blame] | 1724 | xfs_mount_free(mp); |
Dave Chinner | 2451337 | 2014-06-25 14:58:08 +1000 | [diff] [blame] | 1725 | return error; |
Christoph Hellwig | f8f15e4 | 2008-05-20 11:30:59 +1000 | [diff] [blame] | 1726 | |
Christoph Hellwig | 2bcf6e9 | 2011-07-13 13:43:48 +0200 | [diff] [blame] | 1727 | out_unmount: |
Christoph Hellwig | e48ad316 | 2008-05-20 11:30:52 +1000 | [diff] [blame] | 1728 | xfs_filestream_unmount(mp); |
Christoph Hellwig | 19f354d | 2008-05-20 11:31:13 +1000 | [diff] [blame] | 1729 | xfs_unmountfs(mp); |
Christoph Hellwig | 6203300 | 2008-08-13 16:50:21 +1000 | [diff] [blame] | 1730 | goto out_free_sb; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1731 | } |
| 1732 | |
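/*
 * The error labels in xfs_fs_fill_super() unwind in reverse order of
 * setup, one label per initialised resource. The out_unmount path taken
 * after xfs_mountfs() has succeeded first backs out the mounted state
 * with xfs_filestream_unmount() and xfs_unmountfs(), then rejoins the
 * normal chain at out_free_sb to release everything set up earlier.
 */
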
Ian Kent | 73e5fff | 2019-11-04 13:58:46 -0800 | [diff] [blame] | 1733 | static int |
Darrick J. Wong | 1e5c39d | 2020-12-04 15:59:39 -0800 | [diff] [blame] | 1734 | xfs_fs_get_tree( |
Ian Kent | 73e5fff | 2019-11-04 13:58:46 -0800 | [diff] [blame] | 1735 | struct fs_context *fc) |
| 1736 | { |
Darrick J. Wong | 1e5c39d | 2020-12-04 15:59:39 -0800 | [diff] [blame] | 1737 | return get_tree_bdev(fc, xfs_fs_fill_super); |
Ian Kent | 73e5fff | 2019-11-04 13:58:46 -0800 | [diff] [blame] | 1738 | } |
| 1739 | |
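/*
 * get_tree_bdev() is the block device helper of the new mount API: it
 * opens the device named in the filesystem context, obtains (or reuses)
 * a superblock for it, and calls xfs_fs_fill_super() to do the
 * filesystem-specific setup. It replaces the old mount_bdev() path.
 */
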
Ian Kent | 63cd1e9 | 2019-11-04 13:58:47 -0800 | [diff] [blame] | 1740 | static int |
| 1741 | xfs_remount_rw( |
| 1742 | struct xfs_mount *mp) |
| 1743 | { |
| 1744 | struct xfs_sb *sbp = &mp->m_sb; |
| 1745 | int error; |
| 1746 | |
Dave Chinner | 0560f31 | 2021-08-18 18:46:52 -0700 | [diff] [blame] | 1747 | if (xfs_has_norecovery(mp)) { |
Ian Kent | 63cd1e9 | 2019-11-04 13:58:47 -0800 | [diff] [blame] | 1748 | xfs_warn(mp, |
| 1749 | "ro->rw transition prohibited on norecovery mount"); |
| 1750 | return -EINVAL; |
| 1751 | } |
| 1752 | |
Dave Chinner | d6837c1 | 2021-08-18 18:46:56 -0700 | [diff] [blame] | 1753 | if (xfs_sb_is_v5(sbp) && |
Ian Kent | 63cd1e9 | 2019-11-04 13:58:47 -0800 | [diff] [blame] | 1754 | xfs_sb_has_ro_compat_feature(sbp, XFS_SB_FEAT_RO_COMPAT_UNKNOWN)) { |
| 1755 | xfs_warn(mp, |
| 1756 | "ro->rw transition prohibited on unknown (0x%x) ro-compat filesystem", |
| 1757 | (sbp->sb_features_ro_compat & |
| 1758 | XFS_SB_FEAT_RO_COMPAT_UNKNOWN)); |
| 1759 | return -EINVAL; |
| 1760 | } |
| 1761 | |
Dave Chinner | 2e973b2 | 2021-08-18 18:46:52 -0700 | [diff] [blame] | 1762 | clear_bit(XFS_OPSTATE_READONLY, &mp->m_opstate); |
Ian Kent | 63cd1e9 | 2019-11-04 13:58:47 -0800 | [diff] [blame] | 1763 | |
| 1764 | /* |
| 1765 | * If this is the first remount to writeable state we might have some |
| 1766 | * superblock changes to update. |
| 1767 | */ |
| 1768 | if (mp->m_update_sb) { |
| 1769 | error = xfs_sync_sb(mp, false); |
| 1770 | if (error) { |
| 1771 | xfs_warn(mp, "failed to write sb changes"); |
| 1772 | return error; |
| 1773 | } |
| 1774 | mp->m_update_sb = false; |
| 1775 | } |
| 1776 | |
| 1777 | /* |
| 1778 | * Fill out the reserve pool if it is empty. Use the stashed value if |
| 1779 | * it is non-zero, otherwise go with the default. |
| 1780 | */ |
| 1781 | xfs_restore_resvblks(mp); |
| 1782 | xfs_log_work_queue(mp); |
Darrick J. Wong | c9a6526 | 2021-01-22 16:48:44 -0800 | [diff] [blame] | 1783 | xfs_blockgc_start(mp); |
Ian Kent | 63cd1e9 | 2019-11-04 13:58:47 -0800 | [diff] [blame] | 1784 | |
| 1785 | 	/* Create the per-AG metadata reservation pool. */
| 1786 | error = xfs_fs_reserve_ag_blocks(mp); |
| 1787 | if (error && error != -ENOSPC) |
| 1788 | return error; |
| 1789 | |
Dave Chinner | ab23a77 | 2021-08-06 11:05:39 -0700 | [diff] [blame] | 1790 | /* Re-enable the background inode inactivation worker. */ |
| 1791 | xfs_inodegc_start(mp); |
| 1792 | |
Ian Kent | 63cd1e9 | 2019-11-04 13:58:47 -0800 | [diff] [blame] | 1793 | return 0; |
| 1794 | } |
| 1795 | |
| 1796 | static int |
| 1797 | xfs_remount_ro( |
| 1798 | struct xfs_mount *mp) |
| 1799 | { |
Darrick J. Wong | 089558bc | 2021-12-06 15:38:20 -0800 | [diff] [blame] | 1800 | struct xfs_icwalk icw = { |
| 1801 | .icw_flags = XFS_ICWALK_FLAG_SYNC, |
| 1802 | }; |
| 1803 | int error; |
Ian Kent | 63cd1e9 | 2019-11-04 13:58:47 -0800 | [diff] [blame] | 1804 | |
Darrick J. Wong | b97cca3 | 2022-02-03 08:29:21 -0800 | [diff] [blame] | 1805 | /* Flush all the dirty data to disk. */ |
| 1806 | error = sync_filesystem(mp->m_super); |
| 1807 | if (error) |
| 1808 | return error; |
| 1809 | |
Ian Kent | 63cd1e9 | 2019-11-04 13:58:47 -0800 | [diff] [blame] | 1810 | /* |
| 1811 | * Cancel background eofb scanning so it cannot race with the final |
| 1812 | * log force+buftarg wait and deadlock the remount. |
| 1813 | */ |
Darrick J. Wong | c9a6526 | 2021-01-22 16:48:44 -0800 | [diff] [blame] | 1814 | xfs_blockgc_stop(mp); |
Ian Kent | 63cd1e9 | 2019-11-04 13:58:47 -0800 | [diff] [blame] | 1815 | |
Darrick J. Wong | 089558bc | 2021-12-06 15:38:20 -0800 | [diff] [blame] | 1816 | /* |
| 1817 | * Clear out all remaining COW staging extents and speculative post-EOF |
| 1818 | * preallocations so that we don't leave inodes requiring inactivation |
| 1819 | * cleanups during reclaim on a read-only mount. We must process every |
| 1820 | * cached inode, so this requires a synchronous cache scan. |
| 1821 | */ |
| 1822 | error = xfs_blockgc_free_space(mp, &icw); |
Ian Kent | 63cd1e9 | 2019-11-04 13:58:47 -0800 | [diff] [blame] | 1823 | if (error) { |
| 1824 | xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE); |
| 1825 | return error; |
| 1826 | } |
| 1827 | |
Dave Chinner | ab23a77 | 2021-08-06 11:05:39 -0700 | [diff] [blame] | 1828 | /* |
 | 1829 |  * Stop the inodegc background worker. The sync_filesystem() call
 | 1830 |  * above already flushed all pending inodegc work.
| 1831 | * The VFS holds s_umount, so we know that inodes cannot enter |
| 1832 | * xfs_fs_destroy_inode during a remount operation. In readonly mode |
| 1833 | * we send inodes straight to reclaim, so no inodes will be queued. |
| 1834 | */ |
| 1835 | xfs_inodegc_stop(mp); |
| 1836 | |
Ian Kent | 63cd1e9 | 2019-11-04 13:58:47 -0800 | [diff] [blame] | 1837 | /* Free the per-AG metadata reservation pool. */ |
| 1838 | error = xfs_fs_unreserve_ag_blocks(mp); |
| 1839 | if (error) { |
| 1840 | xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE); |
| 1841 | return error; |
| 1842 | } |
| 1843 | |
| 1844 | /* |
| 1845 | * Before we sync the metadata, we need to free up the reserve block |
| 1846 | * pool so that the used block count in the superblock on disk is |
 | 1847 |  * correct at the end of the remount. Stash the current reserve pool
| 1848 | * size so that if we get remounted rw, we can return it to the same |
| 1849 | * size. |
| 1850 | */ |
| 1851 | xfs_save_resvblks(mp); |
| 1852 | |
Brian Foster | ea2064d | 2021-01-22 16:48:24 -0800 | [diff] [blame] | 1853 | xfs_log_clean(mp); |
Dave Chinner | 2e973b2 | 2021-08-18 18:46:52 -0700 | [diff] [blame] | 1854 | set_bit(XFS_OPSTATE_READONLY, &mp->m_opstate); |
Ian Kent | 63cd1e9 | 2019-11-04 13:58:47 -0800 | [diff] [blame] | 1855 | |
| 1856 | return 0; |
| 1857 | } |
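/*
 * Note: xfs_log_clean() above quiesces the log and writes an unmount
 * record, so after a rw->ro transition the log is clean and a subsequent
 * mount does not need to run recovery. The reserve pool size stashed by
 * xfs_save_resvblks() is what xfs_restore_resvblks() hands back if the
 * filesystem is later remounted rw.
 */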
| 1858 | |
| 1859 | /* |
 | 1860 |  * Logically we would return an error here to prevent users from believing
 | 1861 |  * they have changed mount options via remount that in fact cannot be changed.
 | 1862 |  *
 | 1863 |  * But unfortunately mount(8) sometimes adds all options from mtab and fstab
 | 1864 |  * to the mount arguments, so we cannot blindly reject options; we would have
 | 1865 |  * to check each specified option against the currently set value and reject
 | 1866 |  * it only if it actually differs.
 | 1867 |  *
 | 1868 |  * Until that is implemented, we return success for every remount request and
 | 1869 |  * silently ignore all options that we can't actually change.
| 1870 | */ |
| 1871 | static int |
Darrick J. Wong | 1e5c39d | 2020-12-04 15:59:39 -0800 | [diff] [blame] | 1872 | xfs_fs_reconfigure( |
Ian Kent | 63cd1e9 | 2019-11-04 13:58:47 -0800 | [diff] [blame] | 1873 | struct fs_context *fc) |
| 1874 | { |
| 1875 | struct xfs_mount *mp = XFS_M(fc->root->d_sb); |
| 1876 | struct xfs_mount *new_mp = fc->s_fs_info; |
Ian Kent | 63cd1e9 | 2019-11-04 13:58:47 -0800 | [diff] [blame] | 1877 | int flags = fc->sb_flags; |
| 1878 | int error; |
| 1879 | |
Eric Sandeen | 4750a17 | 2020-07-15 08:30:37 -0700 | [diff] [blame] | 1880 | /* version 5 superblocks always support version counters. */ |
Dave Chinner | d6837c1 | 2021-08-18 18:46:56 -0700 | [diff] [blame] | 1881 | if (xfs_has_crc(mp)) |
Eric Sandeen | 4750a17 | 2020-07-15 08:30:37 -0700 | [diff] [blame] | 1882 | fc->sb_flags |= SB_I_VERSION; |
| 1883 | |
Darrick J. Wong | 1e5c39d | 2020-12-04 15:59:39 -0800 | [diff] [blame] | 1884 | error = xfs_fs_validate_params(new_mp); |
Ian Kent | 63cd1e9 | 2019-11-04 13:58:47 -0800 | [diff] [blame] | 1885 | if (error) |
| 1886 | return error; |
| 1887 | |
Ian Kent | 63cd1e9 | 2019-11-04 13:58:47 -0800 | [diff] [blame] | 1888 | /* inode32 -> inode64 */ |
Dave Chinner | 0560f31 | 2021-08-18 18:46:52 -0700 | [diff] [blame] | 1889 | if (xfs_has_small_inums(mp) && !xfs_has_small_inums(new_mp)) { |
| 1890 | mp->m_features &= ~XFS_FEAT_SMALL_INUMS; |
Dave Chinner | d6837c1 | 2021-08-18 18:46:56 -0700 | [diff] [blame] | 1891 | mp->m_maxagi = xfs_set_inode_alloc(mp, mp->m_sb.sb_agcount); |
Ian Kent | 63cd1e9 | 2019-11-04 13:58:47 -0800 | [diff] [blame] | 1892 | } |
| 1893 | |
| 1894 | /* inode64 -> inode32 */ |
Dave Chinner | 0560f31 | 2021-08-18 18:46:52 -0700 | [diff] [blame] | 1895 | if (!xfs_has_small_inums(mp) && xfs_has_small_inums(new_mp)) { |
| 1896 | mp->m_features |= XFS_FEAT_SMALL_INUMS; |
Dave Chinner | d6837c1 | 2021-08-18 18:46:56 -0700 | [diff] [blame] | 1897 | mp->m_maxagi = xfs_set_inode_alloc(mp, mp->m_sb.sb_agcount); |
Ian Kent | 63cd1e9 | 2019-11-04 13:58:47 -0800 | [diff] [blame] | 1898 | } |
| 1899 | |
| 1900 | /* ro -> rw */ |
Dave Chinner | 2e973b2 | 2021-08-18 18:46:52 -0700 | [diff] [blame] | 1901 | if (xfs_is_readonly(mp) && !(flags & SB_RDONLY)) { |
Ian Kent | 63cd1e9 | 2019-11-04 13:58:47 -0800 | [diff] [blame] | 1902 | error = xfs_remount_rw(mp); |
| 1903 | if (error) |
| 1904 | return error; |
| 1905 | } |
| 1906 | |
| 1907 | /* rw -> ro */ |
Dave Chinner | 2e973b2 | 2021-08-18 18:46:52 -0700 | [diff] [blame] | 1908 | if (!xfs_is_readonly(mp) && (flags & SB_RDONLY)) { |
Ian Kent | 63cd1e9 | 2019-11-04 13:58:47 -0800 | [diff] [blame] | 1909 | error = xfs_remount_ro(mp); |
| 1910 | if (error) |
| 1911 | return error; |
| 1912 | } |
| 1913 | |
| 1914 | return 0; |
| 1915 | } |
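/*
 * For reference, a remount reaches xfs_fs_reconfigure() either through the
 * classic mount(2) MS_REMOUNT path or through the new mount API via
 * fspick(2) + FSCONFIG_CMD_RECONFIGURE. A minimal userspace sketch of the
 * latter (a hypothetical standalone test program, not part of this file;
 * fsconfig() error checking elided):
 */
#include <fcntl.h>		/* AT_FDCWD */
#include <sys/syscall.h>
#include <unistd.h>

#ifndef FSPICK_CLOEXEC
#define FSPICK_CLOEXEC		0x00000001
#endif
#ifndef FSCONFIG_SET_FLAG
#define FSCONFIG_SET_FLAG	0
#define FSCONFIG_CMD_RECONFIGURE 7
#endif

static int remount_ro_sketch(const char *mntpt)
{
	/* Pick up the fs_context of an already-mounted filesystem. */
	int fd = syscall(SYS_fspick, AT_FDCWD, mntpt, FSPICK_CLOEXEC);

	if (fd < 0)
		return -1;
	/* Queue the "ro" flag, then trigger ->reconfigure(). */
	syscall(SYS_fsconfig, fd, FSCONFIG_SET_FLAG, "ro", NULL, 0);
	syscall(SYS_fsconfig, fd, FSCONFIG_CMD_RECONFIGURE, NULL, NULL, 0);
	close(fd);
	return 0;
}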
| 1916 | |
Darrick J. Wong | 1e5c39d | 2020-12-04 15:59:39 -0800 | [diff] [blame] | 1917 | static void xfs_fs_free( |
Ian Kent | 73e5fff | 2019-11-04 13:58:46 -0800 | [diff] [blame] | 1918 | struct fs_context *fc) |
| 1919 | { |
| 1920 | struct xfs_mount *mp = fc->s_fs_info; |
| 1921 | |
| 1922 | /* |
| 1923 | * mp is stored in the fs_context when it is initialized. |
| 1924 | * mp is transferred to the superblock on a successful mount, |
| 1925 | * but if an error occurs before the transfer we have to free |
| 1926 | * it here. |
| 1927 | */ |
| 1928 | if (mp) |
| 1929 | xfs_mount_free(mp); |
| 1930 | } |
| 1931 | |
| 1932 | static const struct fs_context_operations xfs_context_ops = { |
Darrick J. Wong | 1e5c39d | 2020-12-04 15:59:39 -0800 | [diff] [blame] | 1933 | .parse_param = xfs_fs_parse_param, |
| 1934 | .get_tree = xfs_fs_get_tree, |
| 1935 | .reconfigure = xfs_fs_reconfigure, |
| 1936 | .free = xfs_fs_free, |
Ian Kent | 73e5fff | 2019-11-04 13:58:46 -0800 | [diff] [blame] | 1937 | }; |
| 1938 | |
| 1939 | static int xfs_init_fs_context( |
| 1940 | struct fs_context *fc) |
| 1941 | { |
| 1942 | struct xfs_mount *mp; |
| 1943 | |
Ian Kent | 50f8300 | 2019-11-04 13:58:48 -0800 | [diff] [blame] | 1944 | mp = kmem_alloc(sizeof(struct xfs_mount), KM_ZERO); |
Ian Kent | 73e5fff | 2019-11-04 13:58:46 -0800 | [diff] [blame] | 1945 | if (!mp) |
| 1946 | return -ENOMEM; |
| 1947 | |
Ian Kent | 50f8300 | 2019-11-04 13:58:48 -0800 | [diff] [blame] | 1948 | spin_lock_init(&mp->m_sb_lock); |
Ian Kent | 50f8300 | 2019-11-04 13:58:48 -0800 | [diff] [blame] | 1949 | INIT_RADIX_TREE(&mp->m_perag_tree, GFP_ATOMIC); |
| 1950 | spin_lock_init(&mp->m_perag_lock); |
| 1951 | mutex_init(&mp->m_growlock); |
Darrick J. Wong | f0f7a67 | 2020-04-12 13:11:10 -0700 | [diff] [blame] | 1952 | INIT_WORK(&mp->m_flush_inodes_work, xfs_flush_inodes_worker); |
Ian Kent | 50f8300 | 2019-11-04 13:58:48 -0800 | [diff] [blame] | 1953 | INIT_DELAYED_WORK(&mp->m_reclaim_work, xfs_reclaim_worker); |
Ian Kent | 50f8300 | 2019-11-04 13:58:48 -0800 | [diff] [blame] | 1954 | mp->m_kobj.kobject.kset = xfs_kset; |
| 1955 | /* |
| 1956 | * We don't create the finobt per-ag space reservation until after log |
 | 1957 |  * recovery, so we must set this to true to ensure that an ifree
 | 1958 |  * transaction started during log recovery will not depend on space
 | 1959 |  * reservations for finobt expansion.
| 1960 | */ |
| 1961 | mp->m_finobt_nores = true; |
| 1962 | |
Ian Kent | 73e5fff | 2019-11-04 13:58:46 -0800 | [diff] [blame] | 1963 | /* |
| 1964 | * These can be overridden by the mount option parsing. |
| 1965 | */ |
| 1966 | mp->m_logbufs = -1; |
| 1967 | mp->m_logbsize = -1; |
| 1968 | mp->m_allocsize_log = 16; /* 64k */ |
| 1969 | |
| 1970 | /* |
| 1971 | * Copy binary VFS mount flags we are interested in. |
| 1972 | */ |
| 1973 | if (fc->sb_flags & SB_RDONLY) |
Dave Chinner | 2e973b2 | 2021-08-18 18:46:52 -0700 | [diff] [blame] | 1974 | set_bit(XFS_OPSTATE_READONLY, &mp->m_opstate); |
Ian Kent | 73e5fff | 2019-11-04 13:58:46 -0800 | [diff] [blame] | 1975 | if (fc->sb_flags & SB_DIRSYNC) |
Dave Chinner | 0560f31 | 2021-08-18 18:46:52 -0700 | [diff] [blame] | 1976 | mp->m_features |= XFS_FEAT_DIRSYNC; |
Ian Kent | 73e5fff | 2019-11-04 13:58:46 -0800 | [diff] [blame] | 1977 | if (fc->sb_flags & SB_SYNCHRONOUS) |
Dave Chinner | 0560f31 | 2021-08-18 18:46:52 -0700 | [diff] [blame] | 1978 | mp->m_features |= XFS_FEAT_WSYNC; |
Ian Kent | 73e5fff | 2019-11-04 13:58:46 -0800 | [diff] [blame] | 1979 | |
| 1980 | fc->s_fs_info = mp; |
| 1981 | fc->ops = &xfs_context_ops; |
| 1982 | |
| 1983 | return 0; |
| 1984 | } |
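/*
 * The fs_context initialised above is driven from userspace by the new
 * mount API. A minimal sketch (a hypothetical standalone test program,
 * assuming a kernel with fsopen(2) support; fsconfig() error checking
 * elided) showing which xfs_context_ops hook each step reaches:
 */
#include <fcntl.h>		/* AT_FDCWD */
#include <sys/syscall.h>
#include <unistd.h>

#ifndef FSCONFIG_SET_STRING
#define FSCONFIG_SET_STRING	1
#define FSCONFIG_CMD_CREATE	6
#endif
#ifndef MOVE_MOUNT_F_EMPTY_PATH
#define MOVE_MOUNT_F_EMPTY_PATH	0x00000004
#endif

static int mount_xfs_sketch(const char *dev, const char *mntpt)
{
	/* fsopen("xfs") allocates the context -> xfs_init_fs_context() */
	int fsfd = syscall(SYS_fsopen, "xfs", 0);
	int mntfd;

	if (fsfd < 0)
		return -1;
	/* Each parameter string is parsed by ->parse_param(). */
	syscall(SYS_fsconfig, fsfd, FSCONFIG_SET_STRING, "source", dev, 0);
	/* FSCONFIG_CMD_CREATE invokes ->get_tree() to fill the superblock. */
	syscall(SYS_fsconfig, fsfd, FSCONFIG_CMD_CREATE, NULL, NULL, 0);
	mntfd = syscall(SYS_fsmount, fsfd, 0, 0);
	close(fsfd);
	if (mntfd < 0)
		return -1;
	/* Attach the new mount at the requested location. */
	syscall(SYS_move_mount, mntfd, "", AT_FDCWD, mntpt,
		MOVE_MOUNT_F_EMPTY_PATH);
	close(mntfd);
	return 0;
}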
| 1985 | |
Andrew Morton | 5085b60 | 2007-02-20 13:57:47 -0800 | [diff] [blame] | 1986 | static struct file_system_type xfs_fs_type = { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1987 | .owner = THIS_MODULE, |
| 1988 | .name = "xfs", |
Ian Kent | 73e5fff | 2019-11-04 13:58:46 -0800 | [diff] [blame] | 1989 | .init_fs_context = xfs_init_fs_context, |
Al Viro | d7167b1 | 2019-09-07 07:23:15 -0400 | [diff] [blame] | 1990 | .parameters = xfs_fs_parameters, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1991 | .kill_sb = kill_block_super, |
Christoph Hellwig | f736d93 | 2021-01-21 14:19:58 +0100 | [diff] [blame] | 1992 | .fs_flags = FS_REQUIRES_DEV | FS_ALLOW_IDMAP, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1993 | }; |
Eric W. Biederman | 7f78e03 | 2013-03-02 19:39:14 -0800 | [diff] [blame] | 1994 | MODULE_ALIAS_FS("xfs"); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1995 | |
Christoph Hellwig | 9f8868f | 2008-07-18 17:11:46 +1000 | [diff] [blame] | 1996 | STATIC int __init |
Darrick J. Wong | 182696f | 2021-10-12 11:09:23 -0700 | [diff] [blame] | 1997 | xfs_init_caches(void) |
Christoph Hellwig | 9f8868f | 2008-07-18 17:11:46 +1000 | [diff] [blame] | 1998 | { |
Darrick J. Wong | 9fa47bd | 2021-09-23 12:21:37 -0700 | [diff] [blame] | 1999 | int error; |
| 2000 | |
Dave Chinner | 231f91a | 2022-07-18 18:20:37 -0700 | [diff] [blame] | 2001 | xfs_buf_cache = kmem_cache_create("xfs_buf", sizeof(struct xfs_buf), 0, |
| 2002 | SLAB_HWCACHE_ALIGN | |
| 2003 | SLAB_RECLAIM_ACCOUNT | |
| 2004 | SLAB_MEM_SPREAD, |
| 2005 | NULL); |
| 2006 | if (!xfs_buf_cache) |
| 2007 | goto out; |
| 2008 | |
Darrick J. Wong | 182696f | 2021-10-12 11:09:23 -0700 | [diff] [blame] | 2009 | xfs_log_ticket_cache = kmem_cache_create("xfs_log_ticket", |
Carlos Maiolino | b123176 | 2019-11-14 12:43:03 -0800 | [diff] [blame] | 2010 | sizeof(struct xlog_ticket), |
| 2011 | 0, 0, NULL); |
Darrick J. Wong | 182696f | 2021-10-12 11:09:23 -0700 | [diff] [blame] | 2012 | if (!xfs_log_ticket_cache) |
Dave Chinner | 231f91a | 2022-07-18 18:20:37 -0700 | [diff] [blame] | 2013 | goto out_destroy_buf_cache; |
Christoph Hellwig | 9f8868f | 2008-07-18 17:11:46 +1000 | [diff] [blame] | 2014 | |
Darrick J. Wong | 9fa47bd | 2021-09-23 12:21:37 -0700 | [diff] [blame] | 2015 | error = xfs_btree_init_cur_caches(); |
| 2016 | if (error) |
Darrick J. Wong | c201d9c | 2021-10-12 14:17:01 -0700 | [diff] [blame] | 2017 | goto out_destroy_log_ticket_cache; |
Christoph Hellwig | 9f8868f | 2008-07-18 17:11:46 +1000 | [diff] [blame] | 2018 | |
Darrick J. Wong | f3c799c | 2021-10-12 14:11:01 -0700 | [diff] [blame] | 2019 | error = xfs_defer_init_item_caches(); |
| 2020 | if (error) |
| 2021 | goto out_destroy_btree_cur_cache; |
| 2022 | |
Darrick J. Wong | 182696f | 2021-10-12 11:09:23 -0700 | [diff] [blame] | 2023 | xfs_da_state_cache = kmem_cache_create("xfs_da_state", |
Carlos Maiolino | b123176 | 2019-11-14 12:43:03 -0800 | [diff] [blame] | 2024 | sizeof(struct xfs_da_state), |
| 2025 | 0, 0, NULL); |
Darrick J. Wong | 182696f | 2021-10-12 11:09:23 -0700 | [diff] [blame] | 2026 | if (!xfs_da_state_cache) |
Darrick J. Wong | f3c799c | 2021-10-12 14:11:01 -0700 | [diff] [blame] | 2027 | goto out_destroy_defer_item_cache; |
Christoph Hellwig | 9f8868f | 2008-07-18 17:11:46 +1000 | [diff] [blame] | 2028 | |
Darrick J. Wong | 182696f | 2021-10-12 11:09:23 -0700 | [diff] [blame] | 2029 | xfs_ifork_cache = kmem_cache_create("xfs_ifork", |
Carlos Maiolino | b123176 | 2019-11-14 12:43:03 -0800 | [diff] [blame] | 2030 | sizeof(struct xfs_ifork), |
| 2031 | 0, 0, NULL); |
Darrick J. Wong | 182696f | 2021-10-12 11:09:23 -0700 | [diff] [blame] | 2032 | if (!xfs_ifork_cache) |
| 2033 | goto out_destroy_da_state_cache; |
Christoph Hellwig | 9f8868f | 2008-07-18 17:11:46 +1000 | [diff] [blame] | 2034 | |
Darrick J. Wong | 182696f | 2021-10-12 11:09:23 -0700 | [diff] [blame] | 2035 | xfs_trans_cache = kmem_cache_create("xfs_trans", |
Carlos Maiolino | b123176 | 2019-11-14 12:43:03 -0800 | [diff] [blame] | 2036 | sizeof(struct xfs_trans), |
| 2037 | 0, 0, NULL); |
Darrick J. Wong | 182696f | 2021-10-12 11:09:23 -0700 | [diff] [blame] | 2038 | if (!xfs_trans_cache) |
| 2039 | goto out_destroy_ifork_cache; |
Christoph Hellwig | 9f8868f | 2008-07-18 17:11:46 +1000 | [diff] [blame] | 2040 | |
Christoph Hellwig | 9f8868f | 2008-07-18 17:11:46 +1000 | [diff] [blame] | 2042 | /* |
Darrick J. Wong | 182696f | 2021-10-12 11:09:23 -0700 | [diff] [blame] | 2043 | * The size of the cache-allocated buf log item is the maximum |
Christoph Hellwig | 9f8868f | 2008-07-18 17:11:46 +1000 | [diff] [blame] | 2044 | * size possible under XFS. This wastes a little bit of memory, |
 | 2045 |  * but it is much faster than sizing each allocation dynamically.
| 2046 | */ |
Darrick J. Wong | 182696f | 2021-10-12 11:09:23 -0700 | [diff] [blame] | 2047 | xfs_buf_item_cache = kmem_cache_create("xfs_buf_item", |
Carlos Maiolino | b123176 | 2019-11-14 12:43:03 -0800 | [diff] [blame] | 2048 | sizeof(struct xfs_buf_log_item), |
| 2049 | 0, 0, NULL); |
Darrick J. Wong | 182696f | 2021-10-12 11:09:23 -0700 | [diff] [blame] | 2050 | if (!xfs_buf_item_cache) |
| 2051 | goto out_destroy_trans_cache; |
Christoph Hellwig | 9f8868f | 2008-07-18 17:11:46 +1000 | [diff] [blame] | 2052 | |
Darrick J. Wong | 182696f | 2021-10-12 11:09:23 -0700 | [diff] [blame] | 2053 | xfs_efd_cache = kmem_cache_create("xfs_efd_item", |
Darrick J. Wong | 3c5aaac | 2022-10-21 09:10:05 -0700 | [diff] [blame] | 2054 | xfs_efd_log_item_sizeof(XFS_EFD_MAX_FAST_EXTENTS), |
| 2055 | 0, 0, NULL); |
Darrick J. Wong | 182696f | 2021-10-12 11:09:23 -0700 | [diff] [blame] | 2056 | if (!xfs_efd_cache) |
| 2057 | goto out_destroy_buf_item_cache; |
Christoph Hellwig | 9f8868f | 2008-07-18 17:11:46 +1000 | [diff] [blame] | 2058 | |
Darrick J. Wong | 182696f | 2021-10-12 11:09:23 -0700 | [diff] [blame] | 2059 | xfs_efi_cache = kmem_cache_create("xfs_efi_item", |
Darrick J. Wong | 3c5aaac | 2022-10-21 09:10:05 -0700 | [diff] [blame] | 2060 | xfs_efi_log_item_sizeof(XFS_EFI_MAX_FAST_EXTENTS), |
| 2061 | 0, 0, NULL); |
Darrick J. Wong | 182696f | 2021-10-12 11:09:23 -0700 | [diff] [blame] | 2062 | if (!xfs_efi_cache) |
| 2063 | goto out_destroy_efd_cache; |
Christoph Hellwig | 9f8868f | 2008-07-18 17:11:46 +1000 | [diff] [blame] | 2064 | |
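	/*
	 * Note: xfs_fs_inode_init_once() below is a slab constructor; it
	 * runs only when the cache allocates a fresh page of objects, not
	 * on every allocation, so anything it initialises must be restored
	 * to its constructed state before an inode is freed back here.
	 */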
Darrick J. Wong | 182696f | 2021-10-12 11:09:23 -0700 | [diff] [blame] | 2065 | xfs_inode_cache = kmem_cache_create("xfs_inode", |
Carlos Maiolino | b123176 | 2019-11-14 12:43:03 -0800 | [diff] [blame] | 2066 | sizeof(struct xfs_inode), 0, |
| 2067 | (SLAB_HWCACHE_ALIGN | |
| 2068 | SLAB_RECLAIM_ACCOUNT | |
| 2069 | SLAB_MEM_SPREAD | SLAB_ACCOUNT), |
| 2070 | xfs_fs_inode_init_once); |
Darrick J. Wong | 182696f | 2021-10-12 11:09:23 -0700 | [diff] [blame] | 2071 | if (!xfs_inode_cache) |
| 2072 | goto out_destroy_efi_cache; |
Christoph Hellwig | 9f8868f | 2008-07-18 17:11:46 +1000 | [diff] [blame] | 2073 | |
Darrick J. Wong | 182696f | 2021-10-12 11:09:23 -0700 | [diff] [blame] | 2074 | xfs_ili_cache = kmem_cache_create("xfs_ili", |
Carlos Maiolino | b123176 | 2019-11-14 12:43:03 -0800 | [diff] [blame] | 2075 | sizeof(struct xfs_inode_log_item), 0, |
Dave Chinner | d59eada | 2020-03-24 20:10:28 -0700 | [diff] [blame] | 2076 | SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, |
| 2077 | NULL); |
Darrick J. Wong | 182696f | 2021-10-12 11:09:23 -0700 | [diff] [blame] | 2078 | if (!xfs_ili_cache) |
| 2079 | goto out_destroy_inode_cache; |
Carlos Maiolino | b123176 | 2019-11-14 12:43:03 -0800 | [diff] [blame] | 2080 | |
Darrick J. Wong | 182696f | 2021-10-12 11:09:23 -0700 | [diff] [blame] | 2081 | xfs_icreate_cache = kmem_cache_create("xfs_icr", |
Carlos Maiolino | b123176 | 2019-11-14 12:43:03 -0800 | [diff] [blame] | 2082 | sizeof(struct xfs_icreate_item), |
| 2083 | 0, 0, NULL); |
Darrick J. Wong | 182696f | 2021-10-12 11:09:23 -0700 | [diff] [blame] | 2084 | if (!xfs_icreate_cache) |
| 2085 | goto out_destroy_ili_cache; |
Christoph Hellwig | 9f8868f | 2008-07-18 17:11:46 +1000 | [diff] [blame] | 2086 | |
Darrick J. Wong | 182696f | 2021-10-12 11:09:23 -0700 | [diff] [blame] | 2087 | xfs_rud_cache = kmem_cache_create("xfs_rud_item", |
Carlos Maiolino | b123176 | 2019-11-14 12:43:03 -0800 | [diff] [blame] | 2088 | sizeof(struct xfs_rud_log_item), |
| 2089 | 0, 0, NULL); |
Darrick J. Wong | 182696f | 2021-10-12 11:09:23 -0700 | [diff] [blame] | 2090 | if (!xfs_rud_cache) |
| 2091 | goto out_destroy_icreate_cache; |
Darrick J. Wong | 5880f2d7 | 2016-08-03 12:04:45 +1000 | [diff] [blame] | 2092 | |
Darrick J. Wong | 182696f | 2021-10-12 11:09:23 -0700 | [diff] [blame] | 2093 | xfs_rui_cache = kmem_cache_create("xfs_rui_item", |
Darrick J. Wong | cd00158 | 2016-09-19 10:24:27 +1000 | [diff] [blame] | 2094 | xfs_rui_log_item_sizeof(XFS_RUI_MAX_FAST_EXTENTS), |
Carlos Maiolino | b123176 | 2019-11-14 12:43:03 -0800 | [diff] [blame] | 2095 | 0, 0, NULL); |
Darrick J. Wong | 182696f | 2021-10-12 11:09:23 -0700 | [diff] [blame] | 2096 | if (!xfs_rui_cache) |
| 2097 | goto out_destroy_rud_cache; |
Darrick J. Wong | 5880f2d7 | 2016-08-03 12:04:45 +1000 | [diff] [blame] | 2098 | |
Darrick J. Wong | 182696f | 2021-10-12 11:09:23 -0700 | [diff] [blame] | 2099 | xfs_cud_cache = kmem_cache_create("xfs_cud_item", |
Carlos Maiolino | b123176 | 2019-11-14 12:43:03 -0800 | [diff] [blame] | 2100 | sizeof(struct xfs_cud_log_item), |
| 2101 | 0, 0, NULL); |
Darrick J. Wong | 182696f | 2021-10-12 11:09:23 -0700 | [diff] [blame] | 2102 | if (!xfs_cud_cache) |
| 2103 | goto out_destroy_rui_cache; |
Darrick J. Wong | baf4bcac | 2016-10-03 09:11:20 -0700 | [diff] [blame] | 2104 | |
Darrick J. Wong | 182696f | 2021-10-12 11:09:23 -0700 | [diff] [blame] | 2105 | xfs_cui_cache = kmem_cache_create("xfs_cui_item", |
Darrick J. Wong | baf4bcac | 2016-10-03 09:11:20 -0700 | [diff] [blame] | 2106 | xfs_cui_log_item_sizeof(XFS_CUI_MAX_FAST_EXTENTS), |
Carlos Maiolino | b123176 | 2019-11-14 12:43:03 -0800 | [diff] [blame] | 2107 | 0, 0, NULL); |
Darrick J. Wong | 182696f | 2021-10-12 11:09:23 -0700 | [diff] [blame] | 2108 | if (!xfs_cui_cache) |
| 2109 | goto out_destroy_cud_cache; |
Darrick J. Wong | baf4bcac | 2016-10-03 09:11:20 -0700 | [diff] [blame] | 2110 | |
Darrick J. Wong | 182696f | 2021-10-12 11:09:23 -0700 | [diff] [blame] | 2111 | xfs_bud_cache = kmem_cache_create("xfs_bud_item", |
Carlos Maiolino | b123176 | 2019-11-14 12:43:03 -0800 | [diff] [blame] | 2112 | sizeof(struct xfs_bud_log_item), |
| 2113 | 0, 0, NULL); |
Darrick J. Wong | 182696f | 2021-10-12 11:09:23 -0700 | [diff] [blame] | 2114 | if (!xfs_bud_cache) |
| 2115 | goto out_destroy_cui_cache; |
Darrick J. Wong | 6413a01 | 2016-10-03 09:11:25 -0700 | [diff] [blame] | 2116 | |
Darrick J. Wong | 182696f | 2021-10-12 11:09:23 -0700 | [diff] [blame] | 2117 | xfs_bui_cache = kmem_cache_create("xfs_bui_item", |
Darrick J. Wong | 6413a01 | 2016-10-03 09:11:25 -0700 | [diff] [blame] | 2118 | xfs_bui_log_item_sizeof(XFS_BUI_MAX_FAST_EXTENTS), |
Carlos Maiolino | b123176 | 2019-11-14 12:43:03 -0800 | [diff] [blame] | 2119 | 0, 0, NULL); |
Darrick J. Wong | 182696f | 2021-10-12 11:09:23 -0700 | [diff] [blame] | 2120 | if (!xfs_bui_cache) |
| 2121 | goto out_destroy_bud_cache; |
Darrick J. Wong | 6413a01 | 2016-10-03 09:11:25 -0700 | [diff] [blame] | 2122 | |
Darrick J. Wong | 4136e38 | 2022-05-22 15:59:48 +1000 | [diff] [blame] | 2123 | xfs_attrd_cache = kmem_cache_create("xfs_attrd_item", |
| 2124 | sizeof(struct xfs_attrd_log_item), |
| 2125 | 0, 0, NULL); |
| 2126 | if (!xfs_attrd_cache) |
| 2127 | goto out_destroy_bui_cache; |
| 2128 | |
| 2129 | xfs_attri_cache = kmem_cache_create("xfs_attri_item", |
| 2130 | sizeof(struct xfs_attri_log_item), |
| 2131 | 0, 0, NULL); |
| 2132 | if (!xfs_attri_cache) |
| 2133 | goto out_destroy_attrd_cache; |
| 2134 | |
Dave Chinner | 784eb7d | 2022-07-14 11:47:42 +1000 | [diff] [blame] | 2135 | xfs_iunlink_cache = kmem_cache_create("xfs_iul_item", |
| 2136 | sizeof(struct xfs_iunlink_item), |
| 2137 | 0, 0, NULL); |
| 2138 | if (!xfs_iunlink_cache) |
| 2139 | goto out_destroy_attri_cache; |
| 2140 | |
Christoph Hellwig | 9f8868f | 2008-07-18 17:11:46 +1000 | [diff] [blame] | 2141 | return 0; |
| 2142 | |
Dave Chinner | 784eb7d | 2022-07-14 11:47:42 +1000 | [diff] [blame] | 2143 | out_destroy_attri_cache: |
| 2144 | kmem_cache_destroy(xfs_attri_cache); |
Darrick J. Wong | 4136e38 | 2022-05-22 15:59:48 +1000 | [diff] [blame] | 2145 | out_destroy_attrd_cache: |
| 2146 | kmem_cache_destroy(xfs_attrd_cache); |
| 2147 | out_destroy_bui_cache: |
| 2148 | kmem_cache_destroy(xfs_bui_cache); |
Darrick J. Wong | 182696f | 2021-10-12 11:09:23 -0700 | [diff] [blame] | 2149 | out_destroy_bud_cache: |
| 2150 | kmem_cache_destroy(xfs_bud_cache); |
| 2151 | out_destroy_cui_cache: |
| 2152 | kmem_cache_destroy(xfs_cui_cache); |
| 2153 | out_destroy_cud_cache: |
| 2154 | kmem_cache_destroy(xfs_cud_cache); |
| 2155 | out_destroy_rui_cache: |
| 2156 | kmem_cache_destroy(xfs_rui_cache); |
| 2157 | out_destroy_rud_cache: |
| 2158 | kmem_cache_destroy(xfs_rud_cache); |
| 2159 | out_destroy_icreate_cache: |
| 2160 | kmem_cache_destroy(xfs_icreate_cache); |
| 2161 | out_destroy_ili_cache: |
| 2162 | kmem_cache_destroy(xfs_ili_cache); |
| 2163 | out_destroy_inode_cache: |
| 2164 | kmem_cache_destroy(xfs_inode_cache); |
| 2165 | out_destroy_efi_cache: |
| 2166 | kmem_cache_destroy(xfs_efi_cache); |
| 2167 | out_destroy_efd_cache: |
| 2168 | kmem_cache_destroy(xfs_efd_cache); |
| 2169 | out_destroy_buf_item_cache: |
| 2170 | kmem_cache_destroy(xfs_buf_item_cache); |
| 2171 | out_destroy_trans_cache: |
| 2172 | kmem_cache_destroy(xfs_trans_cache); |
| 2173 | out_destroy_ifork_cache: |
| 2174 | kmem_cache_destroy(xfs_ifork_cache); |
| 2175 | out_destroy_da_state_cache: |
| 2176 | kmem_cache_destroy(xfs_da_state_cache); |
Darrick J. Wong | f3c799c | 2021-10-12 14:11:01 -0700 | [diff] [blame] | 2177 | out_destroy_defer_item_cache: |
| 2178 | xfs_defer_destroy_item_caches(); |
Darrick J. Wong | 182696f | 2021-10-12 11:09:23 -0700 | [diff] [blame] | 2179 | out_destroy_btree_cur_cache: |
Darrick J. Wong | 9fa47bd | 2021-09-23 12:21:37 -0700 | [diff] [blame] | 2180 | xfs_btree_destroy_cur_caches(); |
Darrick J. Wong | 182696f | 2021-10-12 11:09:23 -0700 | [diff] [blame] | 2181 | out_destroy_log_ticket_cache: |
| 2182 | kmem_cache_destroy(xfs_log_ticket_cache); |
Dave Chinner | 231f91a | 2022-07-18 18:20:37 -0700 | [diff] [blame] | 2183 | out_destroy_buf_cache: |
| 2184 | kmem_cache_destroy(xfs_buf_cache); |
Christoph Hellwig | 9f8868f | 2008-07-18 17:11:46 +1000 | [diff] [blame] | 2185 | out: |
| 2186 | return -ENOMEM; |
| 2187 | } |
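/*
 * Note: the unwind labels above tear the caches down in reverse order of
 * creation, and xfs_destroy_caches() below must mirror the same list, so
 * adding a cache means updating all three places.
 */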
| 2188 | |
| 2189 | STATIC void |
Darrick J. Wong | 182696f | 2021-10-12 11:09:23 -0700 | [diff] [blame] | 2190 | xfs_destroy_caches(void) |
Christoph Hellwig | 9f8868f | 2008-07-18 17:11:46 +1000 | [diff] [blame] | 2191 | { |
Kirill A. Shutemov | 8c0a853 | 2012-09-26 11:33:07 +1000 | [diff] [blame] | 2192 | /* |
 | 2193 |  * Make sure all delayed RCU frees are flushed before we
 | 2194 |  * destroy the caches.
| 2195 | */ |
| 2196 | rcu_barrier(); |
Dave Chinner | 784eb7d | 2022-07-14 11:47:42 +1000 | [diff] [blame] | 2197 | kmem_cache_destroy(xfs_iunlink_cache); |
Darrick J. Wong | 4136e38 | 2022-05-22 15:59:48 +1000 | [diff] [blame] | 2198 | kmem_cache_destroy(xfs_attri_cache); |
| 2199 | kmem_cache_destroy(xfs_attrd_cache); |
Darrick J. Wong | 182696f | 2021-10-12 11:09:23 -0700 | [diff] [blame] | 2200 | kmem_cache_destroy(xfs_bui_cache); |
| 2201 | kmem_cache_destroy(xfs_bud_cache); |
| 2202 | kmem_cache_destroy(xfs_cui_cache); |
| 2203 | kmem_cache_destroy(xfs_cud_cache); |
| 2204 | kmem_cache_destroy(xfs_rui_cache); |
| 2205 | kmem_cache_destroy(xfs_rud_cache); |
| 2206 | kmem_cache_destroy(xfs_icreate_cache); |
| 2207 | kmem_cache_destroy(xfs_ili_cache); |
| 2208 | kmem_cache_destroy(xfs_inode_cache); |
| 2209 | kmem_cache_destroy(xfs_efi_cache); |
| 2210 | kmem_cache_destroy(xfs_efd_cache); |
| 2211 | kmem_cache_destroy(xfs_buf_item_cache); |
| 2212 | kmem_cache_destroy(xfs_trans_cache); |
| 2213 | kmem_cache_destroy(xfs_ifork_cache); |
| 2214 | kmem_cache_destroy(xfs_da_state_cache); |
Darrick J. Wong | f3c799c | 2021-10-12 14:11:01 -0700 | [diff] [blame] | 2215 | xfs_defer_destroy_item_caches(); |
Darrick J. Wong | 9fa47bd | 2021-09-23 12:21:37 -0700 | [diff] [blame] | 2216 | xfs_btree_destroy_cur_caches(); |
Darrick J. Wong | 182696f | 2021-10-12 11:09:23 -0700 | [diff] [blame] | 2217 | kmem_cache_destroy(xfs_log_ticket_cache); |
Dave Chinner | 231f91a | 2022-07-18 18:20:37 -0700 | [diff] [blame] | 2218 | kmem_cache_destroy(xfs_buf_cache); |
Christoph Hellwig | 9f8868f | 2008-07-18 17:11:46 +1000 | [diff] [blame] | 2219 | } |
| 2220 | |
| 2221 | STATIC int __init |
Dave Chinner | 0bf6a5b | 2011-04-08 12:45:07 +1000 | [diff] [blame] | 2222 | xfs_init_workqueues(void) |
| 2223 | { |
| 2224 | /* |
Dave Chinner | c999a22 | 2012-03-22 05:15:07 +0000 | [diff] [blame] | 2225 | * The allocation workqueue can be used in memory reclaim situations |
| 2226 | * (writepage path), and parallelism is only limited by the number of |
| 2227 | * AGs in all the filesystems mounted. Hence use the default large |
| 2228 | * max_active value for this workqueue. |
| 2229 | */ |
Brian Foster | 8018ec0 | 2014-09-09 11:44:46 +1000 | [diff] [blame] | 2230 | xfs_alloc_wq = alloc_workqueue("xfsalloc", |
Darrick J. Wong | 05a302a | 2021-01-22 16:48:42 -0800 | [diff] [blame] | 2231 | XFS_WQFLAGS(WQ_MEM_RECLAIM | WQ_FREEZABLE), 0); |
Dave Chinner | c999a22 | 2012-03-22 05:15:07 +0000 | [diff] [blame] | 2232 | if (!xfs_alloc_wq) |
Dave Chinner | 5889608 | 2012-10-08 21:56:05 +1100 | [diff] [blame] | 2233 | return -ENOMEM; |
Dave Chinner | c999a22 | 2012-03-22 05:15:07 +0000 | [diff] [blame] | 2234 | |
Darrick J. Wong | 05a302a | 2021-01-22 16:48:42 -0800 | [diff] [blame] | 2235 | xfs_discard_wq = alloc_workqueue("xfsdiscard", XFS_WQFLAGS(WQ_UNBOUND), |
| 2236 | 0); |
Christoph Hellwig | 4560e78 | 2017-02-07 14:07:58 -0800 | [diff] [blame] | 2237 | if (!xfs_discard_wq) |
| 2238 | goto out_free_alloc_wq; |
| 2239 | |
Dave Chinner | 0bf6a5b | 2011-04-08 12:45:07 +1000 | [diff] [blame] | 2240 | return 0; |
Christoph Hellwig | 4560e78 | 2017-02-07 14:07:58 -0800 | [diff] [blame] | 2241 | out_free_alloc_wq: |
| 2242 | destroy_workqueue(xfs_alloc_wq); |
| 2243 | return -ENOMEM; |
Dave Chinner | 0bf6a5b | 2011-04-08 12:45:07 +1000 | [diff] [blame] | 2244 | } |
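/*
 * Note: WQ_MEM_RECLAIM guarantees the workqueue a rescuer thread so it
 * can make forward progress even when it is servicing memory reclaim
 * itself, and WQ_FREEZABLE lets its work items be frozen across system
 * suspend.
 */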
| 2245 | |
Luck, Tony | 39411f8 | 2011-04-11 12:06:12 -0700 | [diff] [blame] | 2246 | STATIC void |
Dave Chinner | 0bf6a5b | 2011-04-08 12:45:07 +1000 | [diff] [blame] | 2247 | xfs_destroy_workqueues(void) |
| 2248 | { |
Christoph Hellwig | 4560e78 | 2017-02-07 14:07:58 -0800 | [diff] [blame] | 2249 | destroy_workqueue(xfs_discard_wq); |
Dave Chinner | c999a22 | 2012-03-22 05:15:07 +0000 | [diff] [blame] | 2250 | destroy_workqueue(xfs_alloc_wq); |
Dave Chinner | 0bf6a5b | 2011-04-08 12:45:07 +1000 | [diff] [blame] | 2251 | } |
| 2252 | |
Dave Chinner | f1653c2 | 2021-08-06 11:05:37 -0700 | [diff] [blame] | 2253 | #ifdef CONFIG_HOTPLUG_CPU |
| 2254 | static int |
| 2255 | xfs_cpu_dead( |
| 2256 | unsigned int cpu) |
| 2257 | { |
Dave Chinner | 0ed17f0 | 2021-08-06 11:05:38 -0700 | [diff] [blame] | 2258 | struct xfs_mount *mp, *n; |
| 2259 | |
| 2260 | spin_lock(&xfs_mount_list_lock); |
| 2261 | list_for_each_entry_safe(mp, n, &xfs_mount_list, m_mount_list) { |
| 2262 | spin_unlock(&xfs_mount_list_lock); |
Dave Chinner | ab23a77 | 2021-08-06 11:05:39 -0700 | [diff] [blame] | 2263 | xfs_inodegc_cpu_dead(mp, cpu); |
Dave Chinner | af1c214 | 2022-07-02 02:13:52 +1000 | [diff] [blame] | 2264 | xlog_cil_pcp_dead(mp->m_log, cpu); |
Dave Chinner | 0ed17f0 | 2021-08-06 11:05:38 -0700 | [diff] [blame] | 2265 | spin_lock(&xfs_mount_list_lock); |
| 2266 | } |
| 2267 | spin_unlock(&xfs_mount_list_lock); |
Dave Chinner | f1653c2 | 2021-08-06 11:05:37 -0700 | [diff] [blame] | 2268 | return 0; |
| 2269 | } |
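/*
 * Note on locking above: the per-mount dead handlers may need to block,
 * which is not allowed under a spinlock, so xfs_mount_list_lock is
 * dropped around the callbacks and retaken to continue the walk.
 */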
| 2270 | |
| 2271 | static int __init |
| 2272 | xfs_cpu_hotplug_init(void) |
| 2273 | { |
| 2274 | int error; |
| 2275 | |
| 2276 | error = cpuhp_setup_state_nocalls(CPUHP_XFS_DEAD, "xfs:dead", NULL, |
| 2277 | xfs_cpu_dead); |
| 2278 | if (error < 0) |
| 2279 | xfs_alert(NULL, |
| 2280 | "Failed to initialise CPU hotplug, error %d. XFS is non-functional.", |
| 2281 | error); |
| 2282 | return error; |
| 2283 | } |
| 2284 | |
| 2285 | static void |
| 2286 | xfs_cpu_hotplug_destroy(void) |
| 2287 | { |
| 2288 | cpuhp_remove_state_nocalls(CPUHP_XFS_DEAD); |
| 2289 | } |
| 2290 | |
| 2291 | #else /* !CONFIG_HOTPLUG_CPU */ |
| 2292 | static inline int xfs_cpu_hotplug_init(void) { return 0; } |
| 2293 | static inline void xfs_cpu_hotplug_destroy(void) {} |
| 2294 | #endif |
| 2295 | |
Dave Chinner | 0bf6a5b | 2011-04-08 12:45:07 +1000 | [diff] [blame] | 2296 | STATIC int __init |
Christoph Hellwig | 9f8868f | 2008-07-18 17:11:46 +1000 | [diff] [blame] | 2297 | init_xfs_fs(void) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2298 | { |
| 2299 | int error; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2300 | |
Darrick J. Wong | 30cbc59 | 2016-03-09 08:15:14 +1100 | [diff] [blame] | 2301 | xfs_check_ondisk_structs(); |
| 2302 | |
Darrick J. Wong | 3cfb929 | 2023-03-16 09:31:20 -0700 | [diff] [blame] | 2303 | error = xfs_dahash_test(); |
| 2304 | if (error) |
| 2305 | return error; |
| 2306 | |
Christoph Hellwig | 6579591 | 2008-11-28 14:23:33 +1100 | [diff] [blame] | 2307 | printk(KERN_INFO XFS_VERSION_STRING " with " |
| 2308 | XFS_BUILD_OPTIONS " enabled\n"); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2309 | |
Christoph Hellwig | 9f8868f | 2008-07-18 17:11:46 +1000 | [diff] [blame] | 2310 | xfs_dir_startup(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2311 | |
Dave Chinner | f1653c2 | 2021-08-06 11:05:37 -0700 | [diff] [blame] | 2312 | error = xfs_cpu_hotplug_init(); |
Christoph Hellwig | 9f8868f | 2008-07-18 17:11:46 +1000 | [diff] [blame] | 2313 | if (error) |
| 2314 | goto out; |
| 2315 | |
Darrick J. Wong | 182696f | 2021-10-12 11:09:23 -0700 | [diff] [blame] | 2316 | error = xfs_init_caches(); |
Dave Chinner | f1653c2 | 2021-08-06 11:05:37 -0700 | [diff] [blame] | 2317 | if (error) |
| 2318 | goto out_destroy_hp; |
| 2319 | |
Dave Chinner | 0bf6a5b | 2011-04-08 12:45:07 +1000 | [diff] [blame] | 2320 | error = xfs_init_workqueues(); |
Christoph Hellwig | 9f8868f | 2008-07-18 17:11:46 +1000 | [diff] [blame] | 2321 | if (error) |
Darrick J. Wong | 182696f | 2021-10-12 11:09:23 -0700 | [diff] [blame] | 2322 | goto out_destroy_caches; |
Christoph Hellwig | 9f8868f | 2008-07-18 17:11:46 +1000 | [diff] [blame] | 2323 | |
Dave Chinner | 0bf6a5b | 2011-04-08 12:45:07 +1000 | [diff] [blame] | 2324 | error = xfs_mru_cache_init(); |
| 2325 | if (error) |
| 2326 | goto out_destroy_wq; |
| 2327 | |
Christoph Hellwig | 9f8868f | 2008-07-18 17:11:46 +1000 | [diff] [blame] | 2328 | error = xfs_init_procfs(); |
| 2329 | if (error) |
Dave Chinner | 231f91a | 2022-07-18 18:20:37 -0700 | [diff] [blame] | 2330 | goto out_mru_cache_uninit; |
Christoph Hellwig | 9f8868f | 2008-07-18 17:11:46 +1000 | [diff] [blame] | 2331 | |
| 2332 | error = xfs_sysctl_register(); |
| 2333 | if (error) |
| 2334 | goto out_cleanup_procfs; |
| 2335 | |
Brian Foster | 3d87122 | 2014-07-15 07:41:37 +1000 | [diff] [blame] | 2336 | xfs_kset = kset_create_and_add("xfs", NULL, fs_kobj); |
| 2337 | if (!xfs_kset) { |
| 2338 | error = -ENOMEM; |
Bill O'Donnell | bb230c1 | 2015-10-12 05:15:45 +1100 | [diff] [blame] | 2339 | goto out_sysctl_unregister; |
Brian Foster | 3d87122 | 2014-07-15 07:41:37 +1000 | [diff] [blame] | 2340 | } |
| 2341 | |
Bill O'Donnell | 80529c4 | 2015-10-12 05:19:45 +1100 | [diff] [blame] | 2342 | xfsstats.xs_kobj.kobject.kset = xfs_kset; |
| 2343 | |
| 2344 | xfsstats.xs_stats = alloc_percpu(struct xfsstats); |
| 2345 | if (!xfsstats.xs_stats) { |
| 2346 | error = -ENOMEM; |
| 2347 | goto out_kset_unregister; |
| 2348 | } |
| 2349 | |
| 2350 | error = xfs_sysfs_init(&xfsstats.xs_kobj, &xfs_stats_ktype, NULL, |
Bill O'Donnell | bb230c1 | 2015-10-12 05:15:45 +1100 | [diff] [blame] | 2351 | "stats"); |
| 2352 | if (error) |
Bill O'Donnell | 80529c4 | 2015-10-12 05:19:45 +1100 | [diff] [blame] | 2353 | goto out_free_stats; |
Bill O'Donnell | bb230c1 | 2015-10-12 05:15:45 +1100 | [diff] [blame] | 2354 | |
Brian Foster | 65b6573 | 2014-09-09 11:52:42 +1000 | [diff] [blame] | 2355 | #ifdef DEBUG |
| 2356 | xfs_dbg_kobj.kobject.kset = xfs_kset; |
| 2357 | error = xfs_sysfs_init(&xfs_dbg_kobj, &xfs_dbg_ktype, NULL, "debug"); |
Christoph Hellwig | a05931c | 2012-03-13 08:52:37 +0000 | [diff] [blame] | 2358 | if (error) |
Bill O'Donnell | bb230c1 | 2015-10-12 05:15:45 +1100 | [diff] [blame] | 2359 | goto out_remove_stats_kobj; |
Brian Foster | 65b6573 | 2014-09-09 11:52:42 +1000 | [diff] [blame] | 2360 | #endif |
| 2361 | |
| 2362 | error = xfs_qm_init(); |
| 2363 | if (error) |
Bill O'Donnell | bb230c1 | 2015-10-12 05:15:45 +1100 | [diff] [blame] | 2364 | goto out_remove_dbg_kobj; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2365 | |
| 2366 | error = register_filesystem(&xfs_fs_type); |
| 2367 | if (error) |
Christoph Hellwig | a05931c | 2012-03-13 08:52:37 +0000 | [diff] [blame] | 2368 | goto out_qm_exit; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2369 | return 0; |
| 2370 | |
Christoph Hellwig | a05931c | 2012-03-13 08:52:37 +0000 | [diff] [blame] | 2371 | out_qm_exit: |
| 2372 | xfs_qm_exit(); |
Bill O'Donnell | bb230c1 | 2015-10-12 05:15:45 +1100 | [diff] [blame] | 2373 | out_remove_dbg_kobj: |
Brian Foster | 65b6573 | 2014-09-09 11:52:42 +1000 | [diff] [blame] | 2374 | #ifdef DEBUG |
| 2375 | xfs_sysfs_del(&xfs_dbg_kobj); |
Bill O'Donnell | bb230c1 | 2015-10-12 05:15:45 +1100 | [diff] [blame] | 2376 | out_remove_stats_kobj: |
Brian Foster | 65b6573 | 2014-09-09 11:52:42 +1000 | [diff] [blame] | 2377 | #endif |
Bill O'Donnell | 80529c4 | 2015-10-12 05:19:45 +1100 | [diff] [blame] | 2378 | xfs_sysfs_del(&xfsstats.xs_kobj); |
| 2379 | out_free_stats: |
| 2380 | free_percpu(xfsstats.xs_stats); |
Bill O'Donnell | bb230c1 | 2015-10-12 05:15:45 +1100 | [diff] [blame] | 2381 | out_kset_unregister: |
Brian Foster | 3d87122 | 2014-07-15 07:41:37 +1000 | [diff] [blame] | 2382 | kset_unregister(xfs_kset); |
Christoph Hellwig | 9f8868f | 2008-07-18 17:11:46 +1000 | [diff] [blame] | 2383 | out_sysctl_unregister: |
| 2384 | xfs_sysctl_unregister(); |
| 2385 | out_cleanup_procfs: |
| 2386 | xfs_cleanup_procfs(); |
Christoph Hellwig | 9f8868f | 2008-07-18 17:11:46 +1000 | [diff] [blame] | 2387 | out_mru_cache_uninit: |
| 2388 | xfs_mru_cache_uninit(); |
Dave Chinner | 0bf6a5b | 2011-04-08 12:45:07 +1000 | [diff] [blame] | 2389 | out_destroy_wq: |
| 2390 | xfs_destroy_workqueues(); |
Darrick J. Wong | 182696f | 2021-10-12 11:09:23 -0700 | [diff] [blame] | 2391 | out_destroy_caches: |
| 2392 | xfs_destroy_caches(); |
Dave Chinner | f1653c2 | 2021-08-06 11:05:37 -0700 | [diff] [blame] | 2393 | out_destroy_hp: |
| 2394 | xfs_cpu_hotplug_destroy(); |
Christoph Hellwig | 9f8868f | 2008-07-18 17:11:46 +1000 | [diff] [blame] | 2395 | out: |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2396 | return error; |
| 2397 | } |
| 2398 | |
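/*
 * Module teardown: this essentially undoes init_xfs_fs() in reverse;
 * keep the two orderings in sync when adding or removing a subsystem.
 */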
| 2399 | STATIC void __exit |
Christoph Hellwig | 9f8868f | 2008-07-18 17:11:46 +1000 | [diff] [blame] | 2400 | exit_xfs_fs(void) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2401 | { |
Christoph Hellwig | a05931c | 2012-03-13 08:52:37 +0000 | [diff] [blame] | 2402 | xfs_qm_exit(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2403 | unregister_filesystem(&xfs_fs_type); |
Brian Foster | 65b6573 | 2014-09-09 11:52:42 +1000 | [diff] [blame] | 2404 | #ifdef DEBUG |
| 2405 | xfs_sysfs_del(&xfs_dbg_kobj); |
| 2406 | #endif |
Bill O'Donnell | 80529c4 | 2015-10-12 05:19:45 +1100 | [diff] [blame] | 2407 | xfs_sysfs_del(&xfsstats.xs_kobj); |
| 2408 | free_percpu(xfsstats.xs_stats); |
Brian Foster | 3d87122 | 2014-07-15 07:41:37 +1000 | [diff] [blame] | 2409 | kset_unregister(xfs_kset); |
Christoph Hellwig | 9f8868f | 2008-07-18 17:11:46 +1000 | [diff] [blame] | 2410 | xfs_sysctl_unregister(); |
| 2411 | xfs_cleanup_procfs(); |
Christoph Hellwig | 9f8868f | 2008-07-18 17:11:46 +1000 | [diff] [blame] | 2412 | xfs_mru_cache_uninit(); |
Dave Chinner | 0bf6a5b | 2011-04-08 12:45:07 +1000 | [diff] [blame] | 2413 | xfs_destroy_workqueues(); |
Darrick J. Wong | 182696f | 2021-10-12 11:09:23 -0700 | [diff] [blame] | 2414 | xfs_destroy_caches(); |
Darrick J. Wong | af3b638 | 2015-11-03 13:06:34 +1100 | [diff] [blame] | 2415 | xfs_uuid_table_free(); |
Dave Chinner | f1653c2 | 2021-08-06 11:05:37 -0700 | [diff] [blame] | 2416 | xfs_cpu_hotplug_destroy(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2417 | } |
| 2418 | |
| 2419 | module_init(init_xfs_fs); |
| 2420 | module_exit(exit_xfs_fs); |
| 2421 | |
| 2422 | MODULE_AUTHOR("Silicon Graphics, Inc."); |
| 2423 | MODULE_DESCRIPTION(XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled"); |
| 2424 | MODULE_LICENSE("GPL"); |