// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_icache.h"
#include "xfs_trans.h"
#include "xfs_ialloc.h"
#include "xfs_dir2.h"

#include <linux/iversion.h>

/*
 * If we are doing readahead on an inode buffer, we might be in log recovery
 * reading an inode allocation buffer that hasn't yet been replayed, and hence
 * has not had the inode cores stamped into it. Hence for readahead, the buffer
 * may be invalid.
 *
 * If the readahead buffer is invalid, we need to mark it with an error and
 * clear the DONE status of the buffer so that a followup read will re-read it
 * from disk. We don't otherwise report the error, so that we avoid warnings
 * during log recovery and unnecessary panics on debug kernels. We use EIO here
 * because all we want to do is say readahead failed; there is no one to report
 * the error to, so this will distinguish it from a non-readahead verifier
 * failure. Changes to this readahead error behaviour also need to be reflected
 * in xfs_dquot_buf_readahead_verify().
 */
static void
xfs_inode_buf_verify(
	struct xfs_buf	*bp,
	bool		readahead)
{
	struct xfs_mount *mp = bp->b_mount;
	xfs_agnumber_t	agno;
	int		i;
	int		ni;

	/*
	 * Validate the magic number and version of every inode in the buffer
	 */
	agno = xfs_daddr_to_agno(mp, xfs_buf_daddr(bp));
	ni = XFS_BB_TO_FSB(mp, bp->b_length) * mp->m_sb.sb_inopblock;
	for (i = 0; i < ni; i++) {
		struct xfs_dinode	*dip;
		xfs_agino_t		unlinked_ino;
		int			di_ok;

		dip = xfs_buf_offset(bp, (i << mp->m_sb.sb_inodelog));
		unlinked_ino = be32_to_cpu(dip->di_next_unlinked);
		di_ok = xfs_verify_magic16(bp, dip->di_magic) &&
			xfs_dinode_good_version(mp, dip->di_version) &&
			xfs_verify_agino_or_null(mp, agno, unlinked_ino);
		if (unlikely(XFS_TEST_ERROR(!di_ok, mp,
						XFS_ERRTAG_ITOBP_INOTOBP))) {
			if (readahead) {
				bp->b_flags &= ~XBF_DONE;
				xfs_buf_ioerror(bp, -EIO);
				return;
			}

#ifdef DEBUG
			xfs_alert(mp,
				"bad inode magic/vsn daddr %lld #%d (magic=%x)",
				(unsigned long long)xfs_buf_daddr(bp), i,
				be16_to_cpu(dip->di_magic));
#endif
			xfs_buf_verifier_error(bp, -EFSCORRUPTED,
					__func__, dip, sizeof(*dip),
					NULL);
			return;
		}
	}
}

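/*
 * The read, readahead and write verifiers below all share
 * xfs_inode_buf_verify(); only the readahead variant asks it to suppress
 * error reporting and retry via a later read.
 */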
static void
xfs_inode_buf_read_verify(
	struct xfs_buf	*bp)
{
	xfs_inode_buf_verify(bp, false);
}

static void
xfs_inode_buf_readahead_verify(
	struct xfs_buf	*bp)
{
	xfs_inode_buf_verify(bp, true);
}

static void
xfs_inode_buf_write_verify(
	struct xfs_buf	*bp)
{
	xfs_inode_buf_verify(bp, false);
}

const struct xfs_buf_ops xfs_inode_buf_ops = {
	.name = "xfs_inode",
	.magic16 = { cpu_to_be16(XFS_DINODE_MAGIC),
		     cpu_to_be16(XFS_DINODE_MAGIC) },
	.verify_read = xfs_inode_buf_read_verify,
	.verify_write = xfs_inode_buf_write_verify,
};

const struct xfs_buf_ops xfs_inode_buf_ra_ops = {
	.name = "xfs_inode_ra",
	.magic16 = { cpu_to_be16(XFS_DINODE_MAGIC),
		     cpu_to_be16(XFS_DINODE_MAGIC) },
	.verify_read = xfs_inode_buf_readahead_verify,
	.verify_write = xfs_inode_buf_write_verify,
};


/*
 * This routine is called to map an inode to the buffer containing the on-disk
 * version of the inode. It returns a pointer to the buffer containing the
 * on-disk inode in the bpp parameter.
 */
int
xfs_imap_to_bp(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_imap		*imap,
	struct xfs_buf		**bpp)
{
	return xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, imap->im_blkno,
				   imap->im_len, XBF_UNMAPPED, bpp,
				   &xfs_inode_buf_ops);
}

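/*
 * Decode an ondisk bigtime timestamp: a 64-bit nanosecond counter that is
 * split into seconds and nanoseconds, with the seconds translated back to
 * the Unix epoch via xfs_bigtime_to_unix().
 */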
static inline struct timespec64 xfs_inode_decode_bigtime(uint64_t ts)
{
	struct timespec64	tv;
	uint32_t		n;

	tv.tv_sec = xfs_bigtime_to_unix(div_u64_rem(ts, NSEC_PER_SEC, &n));
	tv.tv_nsec = n;

	return tv;
}

/* Convert an ondisk timestamp to an incore timestamp. */
struct timespec64
xfs_inode_from_disk_ts(
	struct xfs_dinode		*dip,
	const xfs_timestamp_t		ts)
{
	struct timespec64		tv;
	struct xfs_legacy_timestamp	*lts;

	if (xfs_dinode_has_bigtime(dip))
		return xfs_inode_decode_bigtime(be64_to_cpu(ts));

	lts = (struct xfs_legacy_timestamp *)&ts;
	tv.tv_sec = (int)be32_to_cpu(lts->t_sec);
	tv.tv_nsec = (int)be32_to_cpu(lts->t_nsec);

	return tv;
}

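/*
 * Initialise the incore inode from the ondisk inode core. The dinode is
 * verified first; then the core fields are copied in and the data and attr
 * forks are set up.
 */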
int
xfs_inode_from_disk(
	struct xfs_inode	*ip,
	struct xfs_dinode	*from)
{
	struct inode		*inode = VFS_I(ip);
	int			error;
	xfs_failaddr_t		fa;

	ASSERT(ip->i_cowfp == NULL);
	ASSERT(ip->i_afp == NULL);

	fa = xfs_dinode_verify(ip->i_mount, ip->i_ino, from);
	if (fa) {
		xfs_inode_verifier_error(ip, -EFSCORRUPTED, "dinode", from,
				sizeof(*from), fa);
		return -EFSCORRUPTED;
	}

	/*
	 * First get the permanent information that is needed to allocate an
	 * inode. If the inode is unused, mode is zero and we shouldn't mess
	 * with the uninitialized part of it.
	 */
	if (!xfs_has_v3inodes(ip->i_mount))
		ip->i_flushiter = be16_to_cpu(from->di_flushiter);
	inode->i_generation = be32_to_cpu(from->di_gen);
	inode->i_mode = be16_to_cpu(from->di_mode);
	if (!inode->i_mode)
		return 0;

	/*
	 * Convert v1 inodes immediately to v2 inode format as this is the
	 * minimum inode version format we support in the rest of the code.
	 * They will also be unconditionally written back to disk as v2 inodes.
	 */
	if (unlikely(from->di_version == 1)) {
		set_nlink(inode, be16_to_cpu(from->di_onlink));
		ip->i_projid = 0;
	} else {
		set_nlink(inode, be32_to_cpu(from->di_nlink));
		ip->i_projid = (prid_t)be16_to_cpu(from->di_projid_hi) << 16 |
					be16_to_cpu(from->di_projid_lo);
	}

	i_uid_write(inode, be32_to_cpu(from->di_uid));
	i_gid_write(inode, be32_to_cpu(from->di_gid));

	/*
	 * Time is signed, so need to convert to signed 32 bit before
	 * storing in inode timestamp which may be 64 bit. Otherwise
	 * a time before epoch is converted to a time long after epoch
	 * on 64 bit systems.
	 */
	inode->i_atime = xfs_inode_from_disk_ts(from, from->di_atime);
	inode->i_mtime = xfs_inode_from_disk_ts(from, from->di_mtime);
	inode->i_ctime = xfs_inode_from_disk_ts(from, from->di_ctime);

	ip->i_disk_size = be64_to_cpu(from->di_size);
	ip->i_nblocks = be64_to_cpu(from->di_nblocks);
	ip->i_extsize = be32_to_cpu(from->di_extsize);
	ip->i_forkoff = from->di_forkoff;
	ip->i_diflags = be16_to_cpu(from->di_flags);

	if (from->di_dmevmask || from->di_dmstate)
		xfs_iflags_set(ip, XFS_IPRESERVE_DM_FIELDS);

	if (xfs_has_v3inodes(ip->i_mount)) {
		inode_set_iversion_queried(inode,
					   be64_to_cpu(from->di_changecount));
		ip->i_crtime = xfs_inode_from_disk_ts(from, from->di_crtime);
		ip->i_diflags2 = be64_to_cpu(from->di_flags2);
		ip->i_cowextsize = be32_to_cpu(from->di_cowextsize);
	}

	error = xfs_iformat_data_fork(ip, from);
	if (error)
		return error;
	if (from->di_forkoff) {
		error = xfs_iformat_attr_fork(ip, from);
		if (error)
			goto out_destroy_data_fork;
	}
	if (xfs_is_reflink_inode(ip))
		xfs_ifork_init_cow(ip);
	return 0;

out_destroy_data_fork:
	xfs_idestroy_fork(&ip->i_df);
	return error;
}

/* Convert an incore timestamp to an ondisk timestamp. */
static inline xfs_timestamp_t
xfs_inode_to_disk_ts(
	struct xfs_inode		*ip,
	const struct timespec64		tv)
{
	struct xfs_legacy_timestamp	*lts;
	xfs_timestamp_t			ts;

	if (xfs_inode_has_bigtime(ip))
		return cpu_to_be64(xfs_inode_encode_bigtime(tv));

	lts = (struct xfs_legacy_timestamp *)&ts;
	lts->t_sec = cpu_to_be32(tv.tv_sec);
	lts->t_nsec = cpu_to_be32(tv.tv_nsec);

	return ts;
}

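/*
 * Copy the incore data and attr fork extent counts to the ondisk inode,
 * using the wider counter fields if the inode has been upgraded to large
 * extent counts.
 */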
static inline void
xfs_inode_to_disk_iext_counters(
	struct xfs_inode	*ip,
	struct xfs_dinode	*to)
{
	if (xfs_inode_has_large_extent_counts(ip)) {
		to->di_big_nextents = cpu_to_be64(xfs_ifork_nextents(&ip->i_df));
		to->di_big_anextents = cpu_to_be32(xfs_ifork_nextents(ip->i_afp));
		/*
		 * We might be upgrading the inode to use larger extent counters
		 * than were previously used. Hence zero the unused field.
		 */
		to->di_nrext64_pad = cpu_to_be16(0);
	} else {
		to->di_nextents = cpu_to_be32(xfs_ifork_nextents(&ip->i_df));
		to->di_anextents = cpu_to_be16(xfs_ifork_nextents(ip->i_afp));
	}
}

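/*
 * Copy the incore inode into the ondisk inode core, formatting it as a v3
 * inode if the filesystem supports v3 inodes and as a v2 inode otherwise.
 */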
void
xfs_inode_to_disk(
	struct xfs_inode	*ip,
	struct xfs_dinode	*to,
	xfs_lsn_t		lsn)
{
	struct inode		*inode = VFS_I(ip);

	to->di_magic = cpu_to_be16(XFS_DINODE_MAGIC);
	to->di_onlink = 0;

	to->di_format = xfs_ifork_format(&ip->i_df);
	to->di_uid = cpu_to_be32(i_uid_read(inode));
	to->di_gid = cpu_to_be32(i_gid_read(inode));
	to->di_projid_lo = cpu_to_be16(ip->i_projid & 0xffff);
	to->di_projid_hi = cpu_to_be16(ip->i_projid >> 16);

	to->di_atime = xfs_inode_to_disk_ts(ip, inode->i_atime);
	to->di_mtime = xfs_inode_to_disk_ts(ip, inode->i_mtime);
	to->di_ctime = xfs_inode_to_disk_ts(ip, inode->i_ctime);
	to->di_nlink = cpu_to_be32(inode->i_nlink);
	to->di_gen = cpu_to_be32(inode->i_generation);
	to->di_mode = cpu_to_be16(inode->i_mode);

	to->di_size = cpu_to_be64(ip->i_disk_size);
	to->di_nblocks = cpu_to_be64(ip->i_nblocks);
	to->di_extsize = cpu_to_be32(ip->i_extsize);
	to->di_forkoff = ip->i_forkoff;
	to->di_aformat = xfs_ifork_format(ip->i_afp);
	to->di_flags = cpu_to_be16(ip->i_diflags);

	if (xfs_has_v3inodes(ip->i_mount)) {
		to->di_version = 3;
		to->di_changecount = cpu_to_be64(inode_peek_iversion(inode));
		to->di_crtime = xfs_inode_to_disk_ts(ip, ip->i_crtime);
		to->di_flags2 = cpu_to_be64(ip->i_diflags2);
		to->di_cowextsize = cpu_to_be32(ip->i_cowextsize);
		to->di_ino = cpu_to_be64(ip->i_ino);
		to->di_lsn = cpu_to_be64(lsn);
		memset(to->di_pad2, 0, sizeof(to->di_pad2));
		uuid_copy(&to->di_uuid, &ip->i_mount->m_sb.sb_meta_uuid);
		to->di_v3_pad = 0;
	} else {
		to->di_version = 2;
		to->di_flushiter = cpu_to_be16(ip->i_flushiter);
		memset(to->di_v2_pad, 0, sizeof(to->di_v2_pad));
	}

	xfs_inode_to_disk_iext_counters(ip, to);
}

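/* Sanity-check the format and extent count of a single ondisk inode fork. */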
static xfs_failaddr_t
xfs_dinode_verify_fork(
	struct xfs_dinode	*dip,
	struct xfs_mount	*mp,
	int			whichfork)
{
	xfs_extnum_t		di_nextents;
	xfs_extnum_t		max_extents;
	mode_t			mode = be16_to_cpu(dip->di_mode);
	uint32_t		fork_size = XFS_DFORK_SIZE(dip, mp, whichfork);
	uint32_t		fork_format = XFS_DFORK_FORMAT(dip, whichfork);

	di_nextents = xfs_dfork_nextents(dip, whichfork);

	/*
	 * For fork types that can contain local data, check that the fork
	 * format matches the size of local data contained within the fork.
	 *
	 * For all types, check that when the size says the fork should be in
	 * extent or btree format, the inode isn't claiming it is in local
	 * format.
	 */
	if (whichfork == XFS_DATA_FORK) {
		if (S_ISDIR(mode) || S_ISLNK(mode)) {
			if (be64_to_cpu(dip->di_size) <= fork_size &&
			    fork_format != XFS_DINODE_FMT_LOCAL)
				return __this_address;
		}

		if (be64_to_cpu(dip->di_size) > fork_size &&
		    fork_format == XFS_DINODE_FMT_LOCAL)
			return __this_address;
	}

	switch (fork_format) {
	case XFS_DINODE_FMT_LOCAL:
		/*
		 * No local regular files yet.
		 */
		if (S_ISREG(mode) && whichfork == XFS_DATA_FORK)
			return __this_address;
		if (di_nextents)
			return __this_address;
		break;
	case XFS_DINODE_FMT_EXTENTS:
		if (di_nextents > XFS_DFORK_MAXEXT(dip, mp, whichfork))
			return __this_address;
		break;
	case XFS_DINODE_FMT_BTREE:
		max_extents = xfs_iext_max_nextents(
					xfs_dinode_has_large_extent_counts(dip),
					whichfork);
		if (di_nextents > max_extents)
			return __this_address;
		break;
	default:
		return __this_address;
	}
	return NULL;
}

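/* Check that di_forkoff is consistent with the data fork format. */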
static xfs_failaddr_t
xfs_dinode_verify_forkoff(
	struct xfs_dinode	*dip,
	struct xfs_mount	*mp)
{
	if (!dip->di_forkoff)
		return NULL;

	switch (dip->di_format) {
	case XFS_DINODE_FMT_DEV:
		if (dip->di_forkoff != (roundup(sizeof(xfs_dev_t), 8) >> 3))
			return __this_address;
		break;
	case XFS_DINODE_FMT_LOCAL:	/* fall through ... */
	case XFS_DINODE_FMT_EXTENTS:	/* fall through ... */
	case XFS_DINODE_FMT_BTREE:
		if (dip->di_forkoff >= (XFS_LITINO(mp) >> 3))
			return __this_address;
		break;
	default:
		return __this_address;
	}
	return NULL;
}

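/*
 * Check the large extent count fields: they may only be used when the
 * feature is enabled on the filesystem, and the associated padding fields
 * must be zero.
 */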
static xfs_failaddr_t
xfs_dinode_verify_nrext64(
	struct xfs_mount	*mp,
	struct xfs_dinode	*dip)
{
	if (xfs_dinode_has_large_extent_counts(dip)) {
		if (!xfs_has_large_extent_counts(mp))
			return __this_address;
		if (dip->di_nrext64_pad != 0)
			return __this_address;
	} else if (dip->di_version >= 3) {
		if (dip->di_v3_pad != 0)
			return __this_address;
	}

	return NULL;
}

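/*
 * Verify an ondisk inode core. Returns NULL if the dinode is consistent, or
 * the address of the failing check for corruption reporting.
 */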
xfs_failaddr_t
xfs_dinode_verify(
	struct xfs_mount	*mp,
	xfs_ino_t		ino,
	struct xfs_dinode	*dip)
{
	xfs_failaddr_t		fa;
	uint16_t		mode;
	uint16_t		flags;
	uint64_t		flags2;
	uint64_t		di_size;
	xfs_extnum_t		nextents;
	xfs_extnum_t		naextents;
	xfs_filblks_t		nblocks;

	if (dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC))
		return __this_address;

	/* Verify v3 integrity information first */
	if (dip->di_version >= 3) {
		if (!xfs_has_v3inodes(mp))
			return __this_address;
		if (!xfs_verify_cksum((char *)dip, mp->m_sb.sb_inodesize,
				      XFS_DINODE_CRC_OFF))
			return __this_address;
		if (be64_to_cpu(dip->di_ino) != ino)
			return __this_address;
		if (!uuid_equal(&dip->di_uuid, &mp->m_sb.sb_meta_uuid))
			return __this_address;
	}

	/* don't allow invalid i_size */
	di_size = be64_to_cpu(dip->di_size);
	if (di_size & (1ULL << 63))
		return __this_address;

	mode = be16_to_cpu(dip->di_mode);
	if (mode && xfs_mode_to_ftype(mode) == XFS_DIR3_FT_UNKNOWN)
		return __this_address;

	/* No zero-length symlinks/dirs. */
	if ((S_ISLNK(mode) || S_ISDIR(mode)) && di_size == 0)
		return __this_address;

	fa = xfs_dinode_verify_nrext64(mp, dip);
	if (fa)
		return fa;

	nextents = xfs_dfork_data_extents(dip);
	naextents = xfs_dfork_attr_extents(dip);
	nblocks = be64_to_cpu(dip->di_nblocks);

	/* Fork checks carried over from xfs_iformat_fork */
	if (mode && nextents + naextents > nblocks)
		return __this_address;

	if (S_ISDIR(mode) && nextents > mp->m_dir_geo->max_extents)
		return __this_address;

	if (mode && XFS_DFORK_BOFF(dip) > mp->m_sb.sb_inodesize)
		return __this_address;

	flags = be16_to_cpu(dip->di_flags);

	if (mode && (flags & XFS_DIFLAG_REALTIME) && !mp->m_rtdev_targp)
		return __this_address;

	/* check for illegal values of forkoff */
	fa = xfs_dinode_verify_forkoff(dip, mp);
	if (fa)
		return fa;

	/* Do we have appropriate data fork formats for the mode? */
	switch (mode & S_IFMT) {
	case S_IFIFO:
	case S_IFCHR:
	case S_IFBLK:
	case S_IFSOCK:
		if (dip->di_format != XFS_DINODE_FMT_DEV)
			return __this_address;
		break;
	case S_IFREG:
	case S_IFLNK:
	case S_IFDIR:
		fa = xfs_dinode_verify_fork(dip, mp, XFS_DATA_FORK);
		if (fa)
			return fa;
		break;
	case 0:
		/* Uninitialized inode ok. */
		break;
	default:
		return __this_address;
	}

	if (dip->di_forkoff) {
		fa = xfs_dinode_verify_fork(dip, mp, XFS_ATTR_FORK);
		if (fa)
			return fa;
	} else {
		/*
		 * If there is no fork offset, this may be a freshly-made inode
		 * in a new disk cluster, in which case di_aformat is zeroed.
		 * Otherwise, such an inode must be in EXTENTS format; this goes
		 * for freed inodes as well.
		 */
		switch (dip->di_aformat) {
		case 0:
		case XFS_DINODE_FMT_EXTENTS:
			break;
		default:
			return __this_address;
		}
		if (naextents)
			return __this_address;
	}

	/* extent size hint validation */
	fa = xfs_inode_validate_extsize(mp, be32_to_cpu(dip->di_extsize),
			mode, flags);
	if (fa)
		return fa;

	/* only version 3 or greater inodes are extensively verified here */
	if (dip->di_version < 3)
		return NULL;

	flags2 = be64_to_cpu(dip->di_flags2);

	/* don't allow reflink/cowextsize if we don't have reflink */
	if ((flags2 & (XFS_DIFLAG2_REFLINK | XFS_DIFLAG2_COWEXTSIZE)) &&
	    !xfs_has_reflink(mp))
		return __this_address;

	/* only regular files get reflink */
	if ((flags2 & XFS_DIFLAG2_REFLINK) && (mode & S_IFMT) != S_IFREG)
		return __this_address;

	/* don't let reflink and realtime mix */
	if ((flags2 & XFS_DIFLAG2_REFLINK) && (flags & XFS_DIFLAG_REALTIME))
		return __this_address;

	/* COW extent size hint validation */
	fa = xfs_inode_validate_cowextsize(mp, be32_to_cpu(dip->di_cowextsize),
			mode, flags, flags2);
	if (fa)
		return fa;

	/* bigtime iflag can only happen on bigtime filesystems */
	if (xfs_dinode_has_bigtime(dip) &&
	    !xfs_has_bigtime(mp))
		return __this_address;

	return NULL;
}

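/* Compute and store the CRC of a v3 ondisk inode. */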
void
xfs_dinode_calc_crc(
	struct xfs_mount	*mp,
	struct xfs_dinode	*dip)
{
	uint32_t		crc;

	if (dip->di_version < 3)
		return;

	ASSERT(xfs_has_crc(mp));
	crc = xfs_start_cksum_update((char *)dip, mp->m_sb.sb_inodesize,
				     XFS_DINODE_CRC_OFF);
	dip->di_crc = xfs_end_cksum(crc);
}

/*
 * Validate di_extsize hint.
 *
 * 1. Extent size hint is only valid for directories and regular files.
 * 2. FS_XFLAG_EXTSIZE is only valid for regular files.
 * 3. FS_XFLAG_EXTSZINHERIT is only valid for directories.
 * 4. Hint cannot be larger than XFS_MAX_BMBT_EXTLEN.
 * 5. Can be changed on directories at any time.
 * 6. Hint value of 0 turns off hints, clears inode flags.
 * 7. Extent size must be a multiple of the appropriate block size.
 *    For realtime files, this is the rt extent size.
 * 8. For non-realtime files, the extent size hint must be limited
 *    to half the AG size to avoid alignment extending the extent beyond the
 *    limits of the AG.
 */
xfs_failaddr_t
xfs_inode_validate_extsize(
	struct xfs_mount		*mp,
	uint32_t			extsize,
	uint16_t			mode,
	uint16_t			flags)
{
	bool				rt_flag;
	bool				hint_flag;
	bool				inherit_flag;
	uint32_t			extsize_bytes;
	uint32_t			blocksize_bytes;

	rt_flag = (flags & XFS_DIFLAG_REALTIME);
	hint_flag = (flags & XFS_DIFLAG_EXTSIZE);
	inherit_flag = (flags & XFS_DIFLAG_EXTSZINHERIT);
	extsize_bytes = XFS_FSB_TO_B(mp, extsize);

	/*
	 * This comment describes a historic gap in this verifier function.
	 *
	 * For a directory with both RTINHERIT and EXTSZINHERIT flags set, this
	 * function has never checked that the extent size hint is an integer
	 * multiple of the realtime extent size. Since we allow users to set
	 * this combination on non-rt filesystems /and/ to change the rt
	 * extent size when adding a rt device to a filesystem, the net effect
	 * is that users can configure a filesystem anticipating one rt
	 * geometry and change their minds later. Directories do not use the
	 * extent size hint, so this is harmless for them.
	 *
	 * If a directory with a misaligned extent size hint is allowed to
	 * propagate that hint into a new regular realtime file, the result
	 * is that the inode cluster buffer verifier will trigger a corruption
	 * shutdown the next time it is run, because the verifier has always
	 * enforced the alignment rule for regular files.
	 *
	 * Because we allow administrators to set a new rt extent size when
	 * adding a rt section, we cannot add a check to this verifier because
	 * that would result in a new source of directory corruption errors
	 * when reading an existing filesystem. Instead, we rely on callers to
	 * decide when alignment checks are appropriate, and fix things up as
	 * needed.
	 */

	if (rt_flag)
		blocksize_bytes = XFS_FSB_TO_B(mp, mp->m_sb.sb_rextsize);
	else
		blocksize_bytes = mp->m_sb.sb_blocksize;

	if ((hint_flag || inherit_flag) && !(S_ISDIR(mode) || S_ISREG(mode)))
		return __this_address;

	if (hint_flag && !S_ISREG(mode))
		return __this_address;

	if (inherit_flag && !S_ISDIR(mode))
		return __this_address;

	if ((hint_flag || inherit_flag) && extsize == 0)
		return __this_address;

	/* free inodes get flags set to zero but extsize remains */
	if (mode && !(hint_flag || inherit_flag) && extsize != 0)
		return __this_address;

	if (extsize_bytes % blocksize_bytes)
		return __this_address;

	if (extsize > XFS_MAX_BMBT_EXTLEN)
		return __this_address;

	if (!rt_flag && extsize > mp->m_sb.sb_agblocks / 2)
		return __this_address;

	return NULL;
}

/*
 * Validate di_cowextsize hint.
 *
 * 1. CoW extent size hint can only be set if reflink is enabled on the fs.
 *    The inode does not have to have any shared blocks, but it must be a v3
 *    inode.
 * 2. FS_XFLAG_COWEXTSIZE is only valid for directories and regular files;
 *    for a directory, the hint is propagated to new files.
 * 3. Can be changed on files & directories at any time.
 * 4. Hint value of 0 turns off hints, clears inode flags.
 * 5. Extent size must be a multiple of the appropriate block size.
 * 6. The extent size hint must be limited to half the AG size to avoid
 *    alignment extending the extent beyond the limits of the AG.
 */
xfs_failaddr_t
xfs_inode_validate_cowextsize(
	struct xfs_mount	*mp,
	uint32_t		cowextsize,
	uint16_t		mode,
	uint16_t		flags,
	uint64_t		flags2)
{
	bool			rt_flag;
	bool			hint_flag;
	uint32_t		cowextsize_bytes;

	rt_flag = (flags & XFS_DIFLAG_REALTIME);
	hint_flag = (flags2 & XFS_DIFLAG2_COWEXTSIZE);
	cowextsize_bytes = XFS_FSB_TO_B(mp, cowextsize);

	if (hint_flag && !xfs_has_reflink(mp))
		return __this_address;

	if (hint_flag && !(S_ISDIR(mode) || S_ISREG(mode)))
		return __this_address;

	if (hint_flag && cowextsize == 0)
		return __this_address;

	/* free inodes get flags set to zero but cowextsize remains */
	if (mode && !hint_flag && cowextsize != 0)
		return __this_address;

	if (hint_flag && rt_flag)
		return __this_address;

	if (cowextsize_bytes % mp->m_sb.sb_blocksize)
		return __this_address;

	if (cowextsize > XFS_MAX_BMBT_EXTLEN)
		return __this_address;

	if (cowextsize > mp->m_sb.sb_agblocks / 2)
		return __this_address;

	return NULL;
}