blob: 3b1b63f9d886ec764cbd348d3707c109d9d34ab9 [file] [log] [blame]
Dave Chinner0b61f8a2018-06-05 19:42:14 -07001// SPDX-License-Identifier: GPL-2.0
Dave Chinner1fd71152013-08-12 20:49:35 +10002/*
3 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
4 * All Rights Reserved.
Dave Chinner1fd71152013-08-12 20:49:35 +10005 */
6#include "xfs.h"
7#include "xfs_fs.h"
Dave Chinner70a98832013-10-23 10:36:05 +11008#include "xfs_shared.h"
Dave Chinner239880e2013-10-23 10:50:10 +11009#include "xfs_format.h"
10#include "xfs_log_format.h"
11#include "xfs_trans_resv.h"
Dave Chinner1fd71152013-08-12 20:49:35 +100012#include "xfs_mount.h"
Dave Chinner1fd71152013-08-12 20:49:35 +100013#include "xfs_inode.h"
Darrick J. Wonge9e899a2017-10-31 12:04:49 -070014#include "xfs_errortag.h"
Dave Chinner1fd71152013-08-12 20:49:35 +100015#include "xfs_error.h"
Dave Chinner1fd71152013-08-12 20:49:35 +100016#include "xfs_icache.h"
Dave Chinner239880e2013-10-23 10:50:10 +110017#include "xfs_trans.h"
Dave Chinnera4fbe6a2013-10-23 10:51:50 +110018#include "xfs_ialloc.h"
Amir Goldsteina324cbf2017-01-17 11:41:44 -080019#include "xfs_dir2.h"
Dave Chinner1fd71152013-08-12 20:49:35 +100020
Jeff Laytonf0e28282017-12-11 06:35:19 -050021#include <linux/iversion.h>
22
/*
 * If we are doing readahead on an inode buffer, we might be in log recovery
 * reading an inode allocation buffer that hasn't yet been replayed, and hence
 * has not had the inode cores stamped into it. Hence for readahead, the buffer
 * may be potentially invalid.
 *
 * If the readahead buffer is invalid, we need to mark it with an error and
 * clear the DONE status of the buffer so that a followup read will re-read it
 * from disk. We don't report the error otherwise to avoid warnings during log
 * recovery and we don't get unnecessary panics on debug kernels. We use EIO here
 * because all we want to do is say readahead failed; there is no-one to report
 * the error to, so this will distinguish it from a non-ra verifier failure.
 * Changes to this readahead error behaviour also need to be reflected in
 * xfs_dquot_buf_readahead_verify().
 */
static void
xfs_inode_buf_verify(
	struct xfs_buf	*bp,
	bool		readahead)
{
	struct xfs_mount *mp = bp->b_mount;
	xfs_agnumber_t	agno;
	int		i;
	int		ni;	/* number of inodes in this buffer */

	/*
	 * Validate the magic number and version of every inode in the buffer
	 */
	agno = xfs_daddr_to_agno(mp, xfs_buf_daddr(bp));
	ni = XFS_BB_TO_FSB(mp, bp->b_length) * mp->m_sb.sb_inopblock;
	for (i = 0; i < ni; i++) {
		struct xfs_dinode	*dip;
		xfs_agino_t		unlinked_ino;
		int			di_ok;

		dip = xfs_buf_offset(bp, (i << mp->m_sb.sb_inodelog));
		unlinked_ino = be32_to_cpu(dip->di_next_unlinked);
		/*
		 * An inode core is acceptable if the magic matches, the
		 * version is supported, and the unlinked pointer is either
		 * NULLAGINO or a valid agino for this AG.
		 */
		di_ok = xfs_verify_magic16(bp, dip->di_magic) &&
			xfs_dinode_good_version(mp, dip->di_version) &&
			xfs_verify_agino_or_null(mp, agno, unlinked_ino);
		if (unlikely(XFS_TEST_ERROR(!di_ok, mp,
						XFS_ERRTAG_ITOBP_INOTOBP))) {
			if (readahead) {
				/*
				 * Silent failure for readahead: clear DONE so
				 * a real read retries from disk (see comment
				 * above).
				 */
				bp->b_flags &= ~XBF_DONE;
				xfs_buf_ioerror(bp, -EIO);
				return;
			}

#ifdef DEBUG
			xfs_alert(mp,
	"bad inode magic/vsn daddr %lld #%d (magic=%x)",
				(unsigned long long)xfs_buf_daddr(bp), i,
				be16_to_cpu(dip->di_magic));
#endif
			/* Real read: report corruption on the first bad inode. */
			xfs_buf_verifier_error(bp, -EFSCORRUPTED,
					__func__, dip, sizeof(*dip),
					NULL);
			return;
		}
	}
}
84
85
86static void
87xfs_inode_buf_read_verify(
88 struct xfs_buf *bp)
89{
Dave Chinnerd8914002013-08-27 11:39:37 +100090 xfs_inode_buf_verify(bp, false);
91}
92
93static void
94xfs_inode_buf_readahead_verify(
95 struct xfs_buf *bp)
96{
97 xfs_inode_buf_verify(bp, true);
Dave Chinner1fd71152013-08-12 20:49:35 +100098}
99
100static void
101xfs_inode_buf_write_verify(
102 struct xfs_buf *bp)
103{
Dave Chinnerd8914002013-08-27 11:39:37 +1000104 xfs_inode_buf_verify(bp, false);
Dave Chinner1fd71152013-08-12 20:49:35 +1000105}
106
/*
 * Buffer ops for inode cluster buffers read for normal use; verifier
 * failures are reported as corruption.
 */
const struct xfs_buf_ops xfs_inode_buf_ops = {
	.name = "xfs_inode",
	.magic16 = { cpu_to_be16(XFS_DINODE_MAGIC),
		     cpu_to_be16(XFS_DINODE_MAGIC) },
	.verify_read = xfs_inode_buf_read_verify,
	.verify_write = xfs_inode_buf_write_verify,
};
114
/*
 * Buffer ops for inode cluster buffers read via readahead; verifier
 * failures are silent and the buffer is re-read on first real use.
 */
const struct xfs_buf_ops xfs_inode_buf_ra_ops = {
	.name = "xfs_inode_ra",
	.magic16 = { cpu_to_be16(XFS_DINODE_MAGIC),
		     cpu_to_be16(XFS_DINODE_MAGIC) },
	.verify_read = xfs_inode_buf_readahead_verify,
	.verify_write = xfs_inode_buf_write_verify,
};
122
Dave Chinner1fd71152013-08-12 20:49:35 +1000123
124/*
125 * This routine is called to map an inode to the buffer containing the on-disk
126 * version of the inode. It returns a pointer to the buffer containing the
Christoph Hellwigaf9dcdd2021-03-29 11:11:37 -0700127 * on-disk inode in the bpp parameter.
Dave Chinner1fd71152013-08-12 20:49:35 +1000128 */
129int
130xfs_imap_to_bp(
131 struct xfs_mount *mp,
132 struct xfs_trans *tp,
133 struct xfs_imap *imap,
Christoph Hellwigaf9dcdd2021-03-29 11:11:37 -0700134 struct xfs_buf **bpp)
Dave Chinner1fd71152013-08-12 20:49:35 +1000135{
Christoph Hellwigaf9dcdd2021-03-29 11:11:37 -0700136 return xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, imap->im_blkno,
137 imap->im_len, XBF_UNMAPPED, bpp,
Dave Chinner1fd71152013-08-12 20:49:35 +1000138 &xfs_inode_buf_ops);
Dave Chinner1fd71152013-08-12 20:49:35 +1000139}
140
Darrick J. Wongf93e54362020-08-17 09:59:07 -0700141static inline struct timespec64 xfs_inode_decode_bigtime(uint64_t ts)
142{
143 struct timespec64 tv;
144 uint32_t n;
145
146 tv.tv_sec = xfs_bigtime_to_unix(div_u64_rem(ts, NSEC_PER_SEC, &n));
147 tv.tv_nsec = n;
148
149 return tv;
150}
151
Darrick J. Wong5a0bb062020-08-24 15:15:46 -0700152/* Convert an ondisk timestamp to an incore timestamp. */
153struct timespec64
154xfs_inode_from_disk_ts(
Darrick J. Wongf93e54362020-08-17 09:59:07 -0700155 struct xfs_dinode *dip,
Darrick J. Wong5a0bb062020-08-24 15:15:46 -0700156 const xfs_timestamp_t ts)
157{
158 struct timespec64 tv;
159 struct xfs_legacy_timestamp *lts;
160
Darrick J. Wongf93e54362020-08-17 09:59:07 -0700161 if (xfs_dinode_has_bigtime(dip))
162 return xfs_inode_decode_bigtime(be64_to_cpu(ts));
163
Darrick J. Wong5a0bb062020-08-24 15:15:46 -0700164 lts = (struct xfs_legacy_timestamp *)&ts;
165 tv.tv_sec = (int)be32_to_cpu(lts->t_sec);
166 tv.tv_nsec = (int)be32_to_cpu(lts->t_nsec);
167
168 return tv;
169}
170
/*
 * Populate the incore inode @ip from the on-disk inode core @from.
 *
 * The dinode is verified first; corruption is reported and -EFSCORRUPTED
 * returned. A free inode (mode == 0) gets only its permanent fields copied.
 * On success the data fork (and attr fork, if present) are initialized;
 * returns 0 or a negative errno.
 */
int
xfs_inode_from_disk(
	struct xfs_inode	*ip,
	struct xfs_dinode	*from)
{
	struct inode		*inode = VFS_I(ip);
	int			error;
	xfs_failaddr_t		fa;

	/* Caller must hand us an inode with no attr/COW forks set up yet. */
	ASSERT(ip->i_cowfp == NULL);
	ASSERT(ip->i_afp == NULL);

	fa = xfs_dinode_verify(ip->i_mount, ip->i_ino, from);
	if (fa) {
		xfs_inode_verifier_error(ip, -EFSCORRUPTED, "dinode", from,
				sizeof(*from), fa);
		return -EFSCORRUPTED;
	}

	/*
	 * First get the permanent information that is needed to allocate an
	 * inode. If the inode is unused, mode is zero and we shouldn't mess
	 * with the uninitialized part of it.
	 */
	if (!xfs_has_v3inodes(ip->i_mount))
		ip->i_flushiter = be16_to_cpu(from->di_flushiter);
	inode->i_generation = be32_to_cpu(from->di_gen);
	inode->i_mode = be16_to_cpu(from->di_mode);
	if (!inode->i_mode)
		return 0;	/* free inode: nothing more to copy */

	/*
	 * Convert v1 inodes immediately to v2 inode format as this is the
	 * minimum inode version format we support in the rest of the code.
	 * They will also be unconditionally written back to disk as v2 inodes.
	 */
	if (unlikely(from->di_version == 1)) {
		/* v1 has a 16-bit link count and no project id */
		set_nlink(inode, be16_to_cpu(from->di_onlink));
		ip->i_projid = 0;
	} else {
		set_nlink(inode, be32_to_cpu(from->di_nlink));
		/* project id is split across two 16-bit ondisk fields */
		ip->i_projid = (prid_t)be16_to_cpu(from->di_projid_hi) << 16 |
					be16_to_cpu(from->di_projid_lo);
	}

	i_uid_write(inode, be32_to_cpu(from->di_uid));
	i_gid_write(inode, be32_to_cpu(from->di_gid));

	/*
	 * Time is signed, so need to convert to signed 32 bit before
	 * storing in inode timestamp which may be 64 bit. Otherwise
	 * a time before epoch is converted to a time long after epoch
	 * on 64 bit systems.
	 */
	inode->i_atime = xfs_inode_from_disk_ts(from, from->di_atime);
	inode->i_mtime = xfs_inode_from_disk_ts(from, from->di_mtime);
	inode->i_ctime = xfs_inode_from_disk_ts(from, from->di_ctime);

	ip->i_disk_size = be64_to_cpu(from->di_size);
	ip->i_nblocks = be64_to_cpu(from->di_nblocks);
	ip->i_extsize = be32_to_cpu(from->di_extsize);
	ip->i_forkoff = from->di_forkoff;
	ip->i_diflags = be16_to_cpu(from->di_flags);

	/* Preserve nonzero legacy DMAPI fields across writeback. */
	if (from->di_dmevmask || from->di_dmstate)
		xfs_iflags_set(ip, XFS_IPRESERVE_DM_FIELDS);

	/* v3-only fields: change counter, create time, extra flags. */
	if (xfs_has_v3inodes(ip->i_mount)) {
		inode_set_iversion_queried(inode,
					be64_to_cpu(from->di_changecount));
		ip->i_crtime = xfs_inode_from_disk_ts(from, from->di_crtime);
		ip->i_diflags2 = be64_to_cpu(from->di_flags2);
		ip->i_cowextsize = be32_to_cpu(from->di_cowextsize);
	}

	error = xfs_iformat_data_fork(ip, from);
	if (error)
		return error;
	if (from->di_forkoff) {
		error = xfs_iformat_attr_fork(ip, from);
		if (error)
			goto out_destroy_data_fork;
	}
	if (xfs_is_reflink_inode(ip))
		xfs_ifork_init_cow(ip);
	return 0;

out_destroy_data_fork:
	/* Unwind the data fork set up above so the inode stays consistent. */
	xfs_idestroy_fork(&ip->i_df);
	return error;
}
262
Darrick J. Wong5a0bb062020-08-24 15:15:46 -0700263/* Convert an incore timestamp to an ondisk timestamp. */
264static inline xfs_timestamp_t
265xfs_inode_to_disk_ts(
Darrick J. Wongf93e54362020-08-17 09:59:07 -0700266 struct xfs_inode *ip,
Darrick J. Wong5a0bb062020-08-24 15:15:46 -0700267 const struct timespec64 tv)
268{
269 struct xfs_legacy_timestamp *lts;
270 xfs_timestamp_t ts;
271
Darrick J. Wongf93e54362020-08-17 09:59:07 -0700272 if (xfs_inode_has_bigtime(ip))
273 return cpu_to_be64(xfs_inode_encode_bigtime(tv));
274
Darrick J. Wong5a0bb062020-08-24 15:15:46 -0700275 lts = (struct xfs_legacy_timestamp *)&ts;
276 lts->t_sec = cpu_to_be32(tv.tv_sec);
277 lts->t_nsec = cpu_to_be32(tv.tv_nsec);
278
279 return ts;
280}
281
Chandan Babu R52a4a142022-03-08 09:34:28 +0000282static inline void
283xfs_inode_to_disk_iext_counters(
284 struct xfs_inode *ip,
285 struct xfs_dinode *to)
286{
287 if (xfs_inode_has_large_extent_counts(ip)) {
288 to->di_big_nextents = cpu_to_be64(xfs_ifork_nextents(&ip->i_df));
289 to->di_big_anextents = cpu_to_be32(xfs_ifork_nextents(ip->i_afp));
290 /*
291 * We might be upgrading the inode to use larger extent counters
292 * than was previously used. Hence zero the unused field.
293 */
294 to->di_nrext64_pad = cpu_to_be16(0);
295 } else {
296 to->di_nextents = cpu_to_be32(xfs_ifork_nextents(&ip->i_df));
297 to->di_anextents = cpu_to_be16(xfs_ifork_nextents(ip->i_afp));
298 }
299}
300
/*
 * Serialize the incore inode @ip into the ondisk inode core @to.
 *
 * Inodes are always written out as v2 or v3; @lsn is recorded in the
 * dinode only for v3 inodes.
 */
void
xfs_inode_to_disk(
	struct xfs_inode	*ip,
	struct xfs_dinode	*to,
	xfs_lsn_t		lsn)
{
	struct inode		*inode = VFS_I(ip);

	to->di_magic = cpu_to_be16(XFS_DINODE_MAGIC);
	/* v1-only field; always zero since we write v2+ inodes */
	to->di_onlink = 0;

	to->di_format = xfs_ifork_format(&ip->i_df);
	to->di_uid = cpu_to_be32(i_uid_read(inode));
	to->di_gid = cpu_to_be32(i_gid_read(inode));
	/* project id is split across two 16-bit ondisk fields */
	to->di_projid_lo = cpu_to_be16(ip->i_projid & 0xffff);
	to->di_projid_hi = cpu_to_be16(ip->i_projid >> 16);

	to->di_atime = xfs_inode_to_disk_ts(ip, inode->i_atime);
	to->di_mtime = xfs_inode_to_disk_ts(ip, inode->i_mtime);
	to->di_ctime = xfs_inode_to_disk_ts(ip, inode->i_ctime);
	to->di_nlink = cpu_to_be32(inode->i_nlink);
	to->di_gen = cpu_to_be32(inode->i_generation);
	to->di_mode = cpu_to_be16(inode->i_mode);

	to->di_size = cpu_to_be64(ip->i_disk_size);
	to->di_nblocks = cpu_to_be64(ip->i_nblocks);
	to->di_extsize = cpu_to_be32(ip->i_extsize);
	to->di_forkoff = ip->i_forkoff;
	to->di_aformat = xfs_ifork_format(ip->i_afp);
	to->di_flags = cpu_to_be16(ip->i_diflags);

	if (xfs_has_v3inodes(ip->i_mount)) {
		/* v3: self-describing inode with ino, uuid, lsn and crtime */
		to->di_version = 3;
		to->di_changecount = cpu_to_be64(inode_peek_iversion(inode));
		to->di_crtime = xfs_inode_to_disk_ts(ip, ip->i_crtime);
		to->di_flags2 = cpu_to_be64(ip->i_diflags2);
		to->di_cowextsize = cpu_to_be32(ip->i_cowextsize);
		to->di_ino = cpu_to_be64(ip->i_ino);
		to->di_lsn = cpu_to_be64(lsn);
		memset(to->di_pad2, 0, sizeof(to->di_pad2));
		uuid_copy(&to->di_uuid, &ip->i_mount->m_sb.sb_meta_uuid);
		to->di_v3_pad = 0;
	} else {
		to->di_version = 2;
		to->di_flushiter = cpu_to_be16(ip->i_flushiter);
		memset(to->di_v2_pad, 0, sizeof(to->di_v2_pad));
	}

	xfs_inode_to_disk_iext_counters(ip, to);
}
351
/*
 * Verify one fork (@whichfork) of the ondisk inode @dip: the fork format
 * must be consistent with the inode mode/size and the extent count must be
 * within bounds for that format. Returns NULL if ok, or the failure address.
 */
static xfs_failaddr_t
xfs_dinode_verify_fork(
	struct xfs_dinode	*dip,
	struct xfs_mount	*mp,
	int			whichfork)
{
	xfs_extnum_t		di_nextents;
	xfs_extnum_t		max_extents;
	mode_t			mode = be16_to_cpu(dip->di_mode);
	uint32_t		fork_size = XFS_DFORK_SIZE(dip, mp, whichfork);
	uint32_t		fork_format = XFS_DFORK_FORMAT(dip, whichfork);

	di_nextents = xfs_dfork_nextents(dip, whichfork);

	/*
	 * For fork types that can contain local data, check that the fork
	 * format matches the size of local data contained within the fork.
	 *
	 * For all types, check that when the size says the should be in extent
	 * or btree format, the inode isn't claiming it is in local format.
	 */
	if (whichfork == XFS_DATA_FORK) {
		if (S_ISDIR(mode) || S_ISLNK(mode)) {
			/* data small enough to be local must be local */
			if (be64_to_cpu(dip->di_size) <= fork_size &&
			    fork_format != XFS_DINODE_FMT_LOCAL)
				return __this_address;
		}

		/* data too big for the fork cannot claim local format */
		if (be64_to_cpu(dip->di_size) > fork_size &&
		    fork_format == XFS_DINODE_FMT_LOCAL)
			return __this_address;
	}

	switch (fork_format) {
	case XFS_DINODE_FMT_LOCAL:
		/*
		 * No local regular files yet.
		 */
		if (S_ISREG(mode) && whichfork == XFS_DATA_FORK)
			return __this_address;
		/* local-format forks hold inline data, not extents */
		if (di_nextents)
			return __this_address;
		break;
	case XFS_DINODE_FMT_EXTENTS:
		if (di_nextents > XFS_DFORK_MAXEXT(dip, mp, whichfork))
			return __this_address;
		break;
	case XFS_DINODE_FMT_BTREE:
		/* bound depends on whether large extent counters are in use */
		max_extents = xfs_iext_max_nextents(
					xfs_dinode_has_large_extent_counts(dip),
					whichfork);
		if (di_nextents > max_extents)
			return __this_address;
		break;
	default:
		return __this_address;
	}
	return NULL;
}
411
Eric Sandeen339e1a32018-09-29 13:50:13 +1000412static xfs_failaddr_t
413xfs_dinode_verify_forkoff(
414 struct xfs_dinode *dip,
415 struct xfs_mount *mp)
416{
Christoph Hellwig09c38ed2020-05-18 10:27:21 -0700417 if (!dip->di_forkoff)
Eric Sandeen339e1a32018-09-29 13:50:13 +1000418 return NULL;
419
420 switch (dip->di_format) {
421 case XFS_DINODE_FMT_DEV:
422 if (dip->di_forkoff != (roundup(sizeof(xfs_dev_t), 8) >> 3))
423 return __this_address;
424 break;
425 case XFS_DINODE_FMT_LOCAL: /* fall through ... */
426 case XFS_DINODE_FMT_EXTENTS: /* fall through ... */
427 case XFS_DINODE_FMT_BTREE:
Christoph Hellwige9e2eae2020-03-18 08:15:10 -0700428 if (dip->di_forkoff >= (XFS_LITINO(mp) >> 3))
Eric Sandeen339e1a32018-09-29 13:50:13 +1000429 return __this_address;
430 break;
431 default:
432 return __this_address;
433 }
434 return NULL;
435}
436
Chandan Babu R52a4a142022-03-08 09:34:28 +0000437static xfs_failaddr_t
438xfs_dinode_verify_nrext64(
439 struct xfs_mount *mp,
440 struct xfs_dinode *dip)
441{
442 if (xfs_dinode_has_large_extent_counts(dip)) {
443 if (!xfs_has_large_extent_counts(mp))
444 return __this_address;
445 if (dip->di_nrext64_pad != 0)
446 return __this_address;
447 } else if (dip->di_version >= 3) {
448 if (dip->di_v3_pad != 0)
449 return __this_address;
450 }
451
452 return NULL;
453}
454
/*
 * Verify the ondisk inode core @dip for inode number @ino.
 *
 * Checks magic, v3 CRC/ino/uuid integrity, size, mode/ftype, extent
 * counters, fork formats and flag consistency. Returns NULL if the dinode
 * is acceptable, or the address of the failing check for error reporting.
 */
xfs_failaddr_t
xfs_dinode_verify(
	struct xfs_mount	*mp,
	xfs_ino_t		ino,
	struct xfs_dinode	*dip)
{
	xfs_failaddr_t		fa;
	uint16_t		mode;
	uint16_t		flags;
	uint64_t		flags2;
	uint64_t		di_size;
	xfs_extnum_t		nextents;
	xfs_extnum_t		naextents;
	xfs_filblks_t		nblocks;

	if (dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC))
		return __this_address;

	/* Verify v3 integrity information first */
	if (dip->di_version >= 3) {
		if (!xfs_has_v3inodes(mp))
			return __this_address;
		if (!xfs_verify_cksum((char *)dip, mp->m_sb.sb_inodesize,
				      XFS_DINODE_CRC_OFF))
			return __this_address;
		/* v3 dinodes are self-describing: ino and uuid must match */
		if (be64_to_cpu(dip->di_ino) != ino)
			return __this_address;
		if (!uuid_equal(&dip->di_uuid, &mp->m_sb.sb_meta_uuid))
			return __this_address;
	}

	/* don't allow invalid i_size (sign bit set) */
	di_size = be64_to_cpu(dip->di_size);
	if (di_size & (1ULL << 63))
		return __this_address;

	/* mode 0 means a free inode; otherwise the file type must be known */
	mode = be16_to_cpu(dip->di_mode);
	if (mode && xfs_mode_to_ftype(mode) == XFS_DIR3_FT_UNKNOWN)
		return __this_address;

	/* No zero-length symlinks/dirs. */
	if ((S_ISLNK(mode) || S_ISDIR(mode)) && di_size == 0)
		return __this_address;

	fa = xfs_dinode_verify_nrext64(mp, dip);
	if (fa)
		return fa;

	nextents = xfs_dfork_data_extents(dip);
	naextents = xfs_dfork_attr_extents(dip);
	nblocks = be64_to_cpu(dip->di_nblocks);

	/* Fork checks carried over from xfs_iformat_fork */
	if (mode && nextents + naextents > nblocks)
		return __this_address;

	if (S_ISDIR(mode) && nextents > mp->m_dir_geo->max_extents)
		return __this_address;

	/* data fork must not extend beyond the inode itself */
	if (mode && XFS_DFORK_BOFF(dip) > mp->m_sb.sb_inodesize)
		return __this_address;

	flags = be16_to_cpu(dip->di_flags);

	/* realtime flag requires a realtime device to be present */
	if (mode && (flags & XFS_DIFLAG_REALTIME) && !mp->m_rtdev_targp)
		return __this_address;

	/* check for illegal values of forkoff */
	fa = xfs_dinode_verify_forkoff(dip, mp);
	if (fa)
		return fa;

	/* Do we have appropriate data fork formats for the mode? */
	switch (mode & S_IFMT) {
	case S_IFIFO:
	case S_IFCHR:
	case S_IFBLK:
	case S_IFSOCK:
		if (dip->di_format != XFS_DINODE_FMT_DEV)
			return __this_address;
		break;
	case S_IFREG:
	case S_IFLNK:
	case S_IFDIR:
		fa = xfs_dinode_verify_fork(dip, mp, XFS_DATA_FORK);
		if (fa)
			return fa;
		break;
	case 0:
		/* Uninitialized inode ok. */
		break;
	default:
		return __this_address;
	}

	if (dip->di_forkoff) {
		fa = xfs_dinode_verify_fork(dip, mp, XFS_ATTR_FORK);
		if (fa)
			return fa;
	} else {
		/*
		 * If there is no fork offset, this may be a freshly-made inode
		 * in a new disk cluster, in which case di_aformat is zeroed.
		 * Otherwise, such an inode must be in EXTENTS format; this goes
		 * for freed inodes as well.
		 */
		switch (dip->di_aformat) {
		case 0:
		case XFS_DINODE_FMT_EXTENTS:
			break;
		default:
			return __this_address;
		}
		/* no attr fork means no attr extents either */
		if (naextents)
			return __this_address;
	}

	/* extent size hint validation */
	fa = xfs_inode_validate_extsize(mp, be32_to_cpu(dip->di_extsize),
			mode, flags);
	if (fa)
		return fa;

	/* only version 3 or greater inodes are extensively verified here */
	if (dip->di_version < 3)
		return NULL;

	flags2 = be64_to_cpu(dip->di_flags2);

	/* don't allow reflink/cowextsize if we don't have reflink */
	if ((flags2 & (XFS_DIFLAG2_REFLINK | XFS_DIFLAG2_COWEXTSIZE)) &&
	    !xfs_has_reflink(mp))
		return __this_address;

	/* only regular files get reflink */
	if ((flags2 & XFS_DIFLAG2_REFLINK) && (mode & S_IFMT) != S_IFREG)
		return __this_address;

	/* don't let reflink and realtime mix */
	if ((flags2 & XFS_DIFLAG2_REFLINK) && (flags & XFS_DIFLAG_REALTIME))
		return __this_address;

	/* COW extent size hint validation */
	fa = xfs_inode_validate_cowextsize(mp, be32_to_cpu(dip->di_cowextsize),
			mode, flags, flags2);
	if (fa)
		return fa;

	/* bigtime iflag can only happen on bigtime filesystems */
	if (xfs_dinode_has_bigtime(dip) &&
	    !xfs_has_bigtime(mp))
		return __this_address;

	return NULL;
}
610
/*
 * Compute and store the CRC of the ondisk inode core @dip. Only v3 inodes
 * carry a CRC; this is a no-op for older versions.
 */
void
xfs_dinode_calc_crc(
	struct xfs_mount	*mp,
	struct xfs_dinode	*dip)
{
	uint32_t		crc;

	if (dip->di_version < 3)
		return;

	/* v3 inodes only exist on CRC-enabled filesystems */
	ASSERT(xfs_has_crc(mp));
	/* checksum covers the whole inode, skipping the CRC field itself */
	crc = xfs_start_cksum_update((char *)dip, mp->m_sb.sb_inodesize,
			      XFS_DINODE_CRC_OFF);
	dip->di_crc = xfs_end_cksum(crc);
}
626
627/*
Darrick J. Wong8bb82bc2018-03-23 10:06:55 -0700628 * Validate di_extsize hint.
629 *
Darrick J. Wong6b69e482021-05-12 12:49:19 -0700630 * 1. Extent size hint is only valid for directories and regular files.
631 * 2. FS_XFLAG_EXTSIZE is only valid for regular files.
632 * 3. FS_XFLAG_EXTSZINHERIT is only valid for directories.
633 * 4. Hint cannot be larger than MAXTEXTLEN.
634 * 5. Can be changed on directories at any time.
635 * 6. Hint value of 0 turns off hints, clears inode flags.
636 * 7. Extent size must be a multiple of the appropriate block size.
637 * For realtime files, this is the rt extent size.
638 * 8. For non-realtime files, the extent size hint must be limited
639 * to half the AG size to avoid alignment extending the extent beyond the
640 * limits of the AG.
Darrick J. Wong8bb82bc2018-03-23 10:06:55 -0700641 */
642xfs_failaddr_t
643xfs_inode_validate_extsize(
644 struct xfs_mount *mp,
645 uint32_t extsize,
646 uint16_t mode,
647 uint16_t flags)
648{
649 bool rt_flag;
650 bool hint_flag;
651 bool inherit_flag;
652 uint32_t extsize_bytes;
653 uint32_t blocksize_bytes;
654
655 rt_flag = (flags & XFS_DIFLAG_REALTIME);
656 hint_flag = (flags & XFS_DIFLAG_EXTSIZE);
657 inherit_flag = (flags & XFS_DIFLAG_EXTSZINHERIT);
658 extsize_bytes = XFS_FSB_TO_B(mp, extsize);
659
Darrick J. Wong603f0002021-05-12 12:51:26 -0700660 /*
661 * This comment describes a historic gap in this verifier function.
662 *
Darrick J. Wong83193e52021-07-12 12:58:50 -0700663 * For a directory with both RTINHERIT and EXTSZINHERIT flags set, this
664 * function has never checked that the extent size hint is an integer
665 * multiple of the realtime extent size. Since we allow users to set
666 * this combination on non-rt filesystems /and/ to change the rt
667 * extent size when adding a rt device to a filesystem, the net effect
668 * is that users can configure a filesystem anticipating one rt
669 * geometry and change their minds later. Directories do not use the
670 * extent size hint, so this is harmless for them.
Darrick J. Wong603f0002021-05-12 12:51:26 -0700671 *
672 * If a directory with a misaligned extent size hint is allowed to
673 * propagate that hint into a new regular realtime file, the result
674 * is that the inode cluster buffer verifier will trigger a corruption
Darrick J. Wong83193e52021-07-12 12:58:50 -0700675 * shutdown the next time it is run, because the verifier has always
676 * enforced the alignment rule for regular files.
Darrick J. Wong603f0002021-05-12 12:51:26 -0700677 *
Darrick J. Wong83193e52021-07-12 12:58:50 -0700678 * Because we allow administrators to set a new rt extent size when
679 * adding a rt section, we cannot add a check to this verifier because
680 * that will result a new source of directory corruption errors when
681 * reading an existing filesystem. Instead, we rely on callers to
682 * decide when alignment checks are appropriate, and fix things up as
683 * needed.
Darrick J. Wong603f0002021-05-12 12:51:26 -0700684 */
685
Darrick J. Wong8bb82bc2018-03-23 10:06:55 -0700686 if (rt_flag)
Darrick J. Wonga7bcb142021-05-31 11:31:56 -0700687 blocksize_bytes = XFS_FSB_TO_B(mp, mp->m_sb.sb_rextsize);
Darrick J. Wong8bb82bc2018-03-23 10:06:55 -0700688 else
689 blocksize_bytes = mp->m_sb.sb_blocksize;
690
691 if ((hint_flag || inherit_flag) && !(S_ISDIR(mode) || S_ISREG(mode)))
692 return __this_address;
693
694 if (hint_flag && !S_ISREG(mode))
695 return __this_address;
696
697 if (inherit_flag && !S_ISDIR(mode))
698 return __this_address;
699
700 if ((hint_flag || inherit_flag) && extsize == 0)
701 return __this_address;
702
Eric Sandeend4a34e12018-07-24 11:34:52 -0700703 /* free inodes get flags set to zero but extsize remains */
704 if (mode && !(hint_flag || inherit_flag) && extsize != 0)
Darrick J. Wong8bb82bc2018-03-23 10:06:55 -0700705 return __this_address;
706
707 if (extsize_bytes % blocksize_bytes)
708 return __this_address;
709
Chandan Babu R95f0b952021-08-09 12:05:22 +0530710 if (extsize > XFS_MAX_BMBT_EXTLEN)
Darrick J. Wong8bb82bc2018-03-23 10:06:55 -0700711 return __this_address;
712
713 if (!rt_flag && extsize > mp->m_sb.sb_agblocks / 2)
714 return __this_address;
715
716 return NULL;
717}
718
719/*
720 * Validate di_cowextsize hint.
721 *
Darrick J. Wong6b69e482021-05-12 12:49:19 -0700722 * 1. CoW extent size hint can only be set if reflink is enabled on the fs.
723 * The inode does not have to have any shared blocks, but it must be a v3.
724 * 2. FS_XFLAG_COWEXTSIZE is only valid for directories and regular files;
725 * for a directory, the hint is propagated to new files.
726 * 3. Can be changed on files & directories at any time.
727 * 4. Hint value of 0 turns off hints, clears inode flags.
728 * 5. Extent size must be a multiple of the appropriate block size.
729 * 6. The extent size hint must be limited to half the AG size to avoid
730 * alignment extending the extent beyond the limits of the AG.
Darrick J. Wong8bb82bc2018-03-23 10:06:55 -0700731 */
732xfs_failaddr_t
733xfs_inode_validate_cowextsize(
734 struct xfs_mount *mp,
735 uint32_t cowextsize,
736 uint16_t mode,
737 uint16_t flags,
738 uint64_t flags2)
739{
740 bool rt_flag;
741 bool hint_flag;
742 uint32_t cowextsize_bytes;
743
744 rt_flag = (flags & XFS_DIFLAG_REALTIME);
745 hint_flag = (flags2 & XFS_DIFLAG2_COWEXTSIZE);
746 cowextsize_bytes = XFS_FSB_TO_B(mp, cowextsize);
747
Dave Chinner38c26bf2021-08-18 18:46:37 -0700748 if (hint_flag && !xfs_has_reflink(mp))
Darrick J. Wong8bb82bc2018-03-23 10:06:55 -0700749 return __this_address;
750
751 if (hint_flag && !(S_ISDIR(mode) || S_ISREG(mode)))
752 return __this_address;
753
754 if (hint_flag && cowextsize == 0)
755 return __this_address;
756
Eric Sandeend4a34e12018-07-24 11:34:52 -0700757 /* free inodes get flags set to zero but cowextsize remains */
758 if (mode && !hint_flag && cowextsize != 0)
Darrick J. Wong8bb82bc2018-03-23 10:06:55 -0700759 return __this_address;
760
761 if (hint_flag && rt_flag)
762 return __this_address;
763
764 if (cowextsize_bytes % mp->m_sb.sb_blocksize)
765 return __this_address;
766
Chandan Babu R95f0b952021-08-09 12:05:22 +0530767 if (cowextsize > XFS_MAX_BMBT_EXTLEN)
Darrick J. Wong8bb82bc2018-03-23 10:06:55 -0700768 return __this_address;
769
770 if (cowextsize > mp->m_sb.sb_agblocks / 2)
771 return __this_address;
772
773 return NULL;
774}