// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 */

#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_btree.h"
#include "xfs_bmap_btree.h"
#include "xfs_bmap.h"
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_dir2_priv.h"
#include "xfs_attr_leaf.h"
#include "xfs_types.h"
#include "xfs_errortag.h"
#include "xfs_health.h"
#include "xfs_symlink_remote.h"

struct kmem_cache	*xfs_ifork_cache;

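/*
 * Initialize a local format (inline) fork with a copy of the supplied data.
 */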
void
xfs_init_local_fork(
	struct xfs_inode	*ip,
	int			whichfork,
	const void		*data,
	int64_t			size)
{
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	int			mem_size = size;
	bool			zero_terminate;

	/*
	 * If we are using the local fork to store a symlink body we need to
	 * zero-terminate it so that we can pass it back to the VFS directly.
	 * Overallocate the in-memory fork by one for that and add a zero
	 * to terminate it below.
	 */
	zero_terminate = S_ISLNK(VFS_I(ip)->i_mode);
	if (zero_terminate)
		mem_size++;

	if (size) {
		char *new_data = kmalloc(mem_size,
				GFP_KERNEL | __GFP_NOLOCKDEP | __GFP_NOFAIL);

		memcpy(new_data, data, size);
		if (zero_terminate)
			new_data[size] = '\0';

		ifp->if_data = new_data;
	} else {
		ifp->if_data = NULL;
	}

	ifp->if_bytes = size;
}

/*
 * The file is in-lined in the on-disk inode.
 */
STATIC int
xfs_iformat_local(
	struct xfs_inode	*ip,
	struct xfs_dinode	*dip,
	int			whichfork,
	int			size)
{
	/*
	 * If the size is unreasonable, then something is wrong and we just
	 * bail out rather than crash in kmalloc() or memcpy() below.
	 */
	if (unlikely(size > XFS_DFORK_SIZE(dip, ip->i_mount, whichfork))) {
		xfs_warn(ip->i_mount,
			"corrupt inode %llu (bad size %d for local fork, size = %zd).",
			(unsigned long long) ip->i_ino, size,
			XFS_DFORK_SIZE(dip, ip->i_mount, whichfork));
		xfs_inode_verifier_error(ip, -EFSCORRUPTED,
				"xfs_iformat_local", dip, sizeof(*dip),
				__this_address);
		xfs_inode_mark_sick(ip, XFS_SICK_INO_CORE);
		return -EFSCORRUPTED;
	}

	xfs_init_local_fork(ip, whichfork, XFS_DFORK_PTR(dip, whichfork), size);
	return 0;
}

/*
 * The file consists of a set of extents all of which fit into the on-disk
 * inode.
 */
STATIC int
xfs_iformat_extents(
	struct xfs_inode	*ip,
	struct xfs_dinode	*dip,
	int			whichfork)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	int			state = xfs_bmap_fork_to_state(whichfork);
	xfs_extnum_t		nex = xfs_dfork_nextents(dip, whichfork);
	int			size = nex * sizeof(xfs_bmbt_rec_t);
	struct xfs_iext_cursor	icur;
	struct xfs_bmbt_rec	*dp;
	struct xfs_bmbt_irec	new;
	int			i;

	/*
	 * If the number of extents is unreasonable, then something is wrong and
	 * we just bail out rather than crash in kmalloc() or memcpy() below.
	 */
	if (unlikely(size < 0 || size > XFS_DFORK_SIZE(dip, mp, whichfork))) {
		xfs_warn(ip->i_mount, "corrupt inode %llu ((a)extents = %llu).",
			ip->i_ino, nex);
		xfs_inode_verifier_error(ip, -EFSCORRUPTED,
				"xfs_iformat_extents(1)", dip, sizeof(*dip),
				__this_address);
		xfs_inode_mark_sick(ip, XFS_SICK_INO_CORE);
		return -EFSCORRUPTED;
	}

	ifp->if_bytes = 0;
	ifp->if_data = NULL;
	ifp->if_height = 0;
	if (size) {
		dp = (xfs_bmbt_rec_t *) XFS_DFORK_PTR(dip, whichfork);

		xfs_iext_first(ifp, &icur);
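		/*
		 * Decode each on-disk extent record, validate it, and insert
		 * it into the in-core extent tree.
		 */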
		for (i = 0; i < nex; i++, dp++) {
			xfs_failaddr_t	fa;

			xfs_bmbt_disk_get_all(dp, &new);
			fa = xfs_bmap_validate_extent(ip, whichfork, &new);
			if (fa) {
				xfs_inode_verifier_error(ip, -EFSCORRUPTED,
						"xfs_iformat_extents(2)",
						dp, sizeof(*dp), fa);
				xfs_inode_mark_sick(ip, XFS_SICK_INO_CORE);
				return xfs_bmap_complain_bad_rec(ip, whichfork,
						fa, &new);
			}

			xfs_iext_insert(ip, &icur, &new, state);
			trace_xfs_read_extent(ip, &icur, state, _THIS_IP_);
			xfs_iext_next(ifp, &icur);
		}
	}
	return 0;
}

/*
 * The file has too many extents to fit into the inode, so they are in B-tree
 * format.  Allocate a buffer for the root of the B-tree and copy the root
 * into it.  The in-core extent list stays empty until the extents are read
 * in on demand.
 */
STATIC int
xfs_iformat_btree(
	struct xfs_inode	*ip,
	struct xfs_dinode	*dip,
	int			whichfork)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_bmdr_block_t	*dfp;
	struct xfs_ifork	*ifp;
	/* REFERENCED */
	int			nrecs;
	int			size;
	int			level;

	ifp = xfs_ifork_ptr(ip, whichfork);
	dfp = (xfs_bmdr_block_t *)XFS_DFORK_PTR(dip, whichfork);
	size = XFS_BMAP_BROOT_SPACE(mp, dfp);
	nrecs = be16_to_cpu(dfp->bb_numrecs);
	level = be16_to_cpu(dfp->bb_level);

	/*
	 * Bail out if the fork has fewer extents than would fit inline (it
	 * should not be in btree format), the root btree block has more
	 * records than can fit into the fork, the number of extents is
	 * greater than the number of blocks, or the btree level is out of
	 * bounds.
	 */
	if (unlikely(ifp->if_nextents <= XFS_IFORK_MAXEXT(ip, whichfork) ||
		     nrecs == 0 ||
		     XFS_BMDR_SPACE_CALC(nrecs) >
					XFS_DFORK_SIZE(dip, mp, whichfork) ||
		     ifp->if_nextents > ip->i_nblocks) ||
		     level == 0 || level > XFS_BM_MAXLEVELS(mp, whichfork)) {
		xfs_warn(mp, "corrupt inode %llu (btree).",
					(unsigned long long) ip->i_ino);
		xfs_inode_verifier_error(ip, -EFSCORRUPTED,
				"xfs_iformat_btree", dfp, size,
				__this_address);
		xfs_inode_mark_sick(ip, XFS_SICK_INO_CORE);
		return -EFSCORRUPTED;
	}

	ifp->if_broot_bytes = size;
	ifp->if_broot = kmalloc(size,
				GFP_KERNEL | __GFP_NOLOCKDEP | __GFP_NOFAIL);
	ASSERT(ifp->if_broot != NULL);
	/*
	 * Copy and convert from the on-disk structure
	 * to the in-memory structure.
	 */
	xfs_bmdr_to_bmbt(ip, dfp, XFS_DFORK_SIZE(dip, ip->i_mount, whichfork),
			 ifp->if_broot, size);

	ifp->if_bytes = 0;
	ifp->if_data = NULL;
	ifp->if_height = 0;
	return 0;
}

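/*
 * Set up the in-core data fork from the on-disk inode, based on the file
 * type and the on-disk fork format.
 */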
int
xfs_iformat_data_fork(
	struct xfs_inode	*ip,
	struct xfs_dinode	*dip)
{
	struct inode		*inode = VFS_I(ip);
	int			error;

	/*
	 * Initialize the extent count early, as the per-format routines may
	 * depend on it.  Use release semantics to set needextents /after/ we
	 * set the format.  This ensures that we can use acquire semantics on
	 * needextents in xfs_need_iread_extents() and be guaranteed to see a
	 * valid format value after that load.
	 */
	ip->i_df.if_format = dip->di_format;
	ip->i_df.if_nextents = xfs_dfork_data_extents(dip);
	smp_store_release(&ip->i_df.if_needextents,
			   ip->i_df.if_format == XFS_DINODE_FMT_BTREE ? 1 : 0);

	switch (inode->i_mode & S_IFMT) {
	case S_IFIFO:
	case S_IFCHR:
	case S_IFBLK:
	case S_IFSOCK:
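		/*
		 * Special files have no data fork; just capture the device
		 * number (if any) from the on-disk inode.
		 */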
		ip->i_disk_size = 0;
		inode->i_rdev = xfs_to_linux_dev_t(xfs_dinode_get_rdev(dip));
		return 0;
	case S_IFREG:
	case S_IFLNK:
	case S_IFDIR:
		switch (ip->i_df.if_format) {
		case XFS_DINODE_FMT_LOCAL:
			error = xfs_iformat_local(ip, dip, XFS_DATA_FORK,
					be64_to_cpu(dip->di_size));
			if (!error)
				error = xfs_ifork_verify_local_data(ip);
			return error;
		case XFS_DINODE_FMT_EXTENTS:
			return xfs_iformat_extents(ip, dip, XFS_DATA_FORK);
		case XFS_DINODE_FMT_BTREE:
			return xfs_iformat_btree(ip, dip, XFS_DATA_FORK);
		default:
			xfs_inode_verifier_error(ip, -EFSCORRUPTED, __func__,
					dip, sizeof(*dip), __this_address);
			xfs_inode_mark_sick(ip, XFS_SICK_INO_CORE);
			return -EFSCORRUPTED;
		}
		break;
	default:
		xfs_inode_verifier_error(ip, -EFSCORRUPTED, __func__, dip,
				sizeof(*dip), __this_address);
		xfs_inode_mark_sick(ip, XFS_SICK_INO_CORE);
		return -EFSCORRUPTED;
	}
}

static uint16_t
xfs_dfork_attr_shortform_size(
	struct xfs_dinode	*dip)
{
	struct xfs_attr_sf_hdr	*sf = XFS_DFORK_APTR(dip);

	return be16_to_cpu(sf->totsize);
}

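/* Initialize the in-core attribute fork with a format and extent count. */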
void
xfs_ifork_init_attr(
	struct xfs_inode	*ip,
	enum xfs_dinode_fmt	format,
	xfs_extnum_t		nextents)
{
	/*
	 * Initialize the extent count early, as the per-format routines may
	 * depend on it.  Use release semantics to set needextents /after/ we
	 * set the format.  This ensures that we can use acquire semantics on
	 * needextents in xfs_need_iread_extents() and be guaranteed to see a
	 * valid format value after that load.
	 */
	ip->i_af.if_format = format;
	ip->i_af.if_nextents = nextents;
	smp_store_release(&ip->i_af.if_needextents,
			   ip->i_af.if_format == XFS_DINODE_FMT_BTREE ? 1 : 0);
}

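/* Tear down the attr fork and reset it to an empty, extents format fork. */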
void
xfs_ifork_zap_attr(
	struct xfs_inode	*ip)
{
	xfs_idestroy_fork(&ip->i_af);
	memset(&ip->i_af, 0, sizeof(struct xfs_ifork));
	ip->i_af.if_format = XFS_DINODE_FMT_EXTENTS;
}

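/* Set up the in-core attribute fork from the on-disk inode. */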
int
xfs_iformat_attr_fork(
	struct xfs_inode	*ip,
	struct xfs_dinode	*dip)
{
	xfs_extnum_t		naextents = xfs_dfork_attr_extents(dip);
	int			error = 0;

	/*
	 * Initialize the extent count early, as the per-format routines may
	 * depend on it.
	 */
	xfs_ifork_init_attr(ip, dip->di_aformat, naextents);

	switch (ip->i_af.if_format) {
	case XFS_DINODE_FMT_LOCAL:
		error = xfs_iformat_local(ip, dip, XFS_ATTR_FORK,
				xfs_dfork_attr_shortform_size(dip));
		if (!error)
			error = xfs_ifork_verify_local_attr(ip);
		break;
	case XFS_DINODE_FMT_EXTENTS:
		error = xfs_iformat_extents(ip, dip, XFS_ATTR_FORK);
		break;
	case XFS_DINODE_FMT_BTREE:
		error = xfs_iformat_btree(ip, dip, XFS_ATTR_FORK);
		break;
	default:
		xfs_inode_verifier_error(ip, error, __func__, dip,
				sizeof(*dip), __this_address);
		xfs_inode_mark_sick(ip, XFS_SICK_INO_CORE);
		error = -EFSCORRUPTED;
		break;
	}

	if (error)
		xfs_ifork_zap_attr(ip);
	return error;
}

/*
 * Reallocate the space for if_broot based on the number of records being
 * added or deleted as indicated in rec_diff.  Move the records and pointers
 * in if_broot to fit the new size.  When shrinking this will eliminate holes
 * between the records and pointers created by the caller.  When growing this
 * will create holes to be filled in by the caller.
 *
 * The caller must not request to add more records than would fit in the
 * on-disk inode root.  If if_broot is currently NULL and records are being
 * added, a new root will be allocated.  The caller must also not request
 * that the number of records go below zero, although it can go to zero.
 *
 * ip -- the inode whose if_broot area is changing
 * rec_diff -- the change in the number of records, positive or negative,
 *	requested for the if_broot array.
 */
void
xfs_iroot_realloc(
	xfs_inode_t		*ip,
	int			rec_diff,
	int			whichfork)
{
	struct xfs_mount	*mp = ip->i_mount;
	int			cur_max;
	struct xfs_ifork	*ifp;
	struct xfs_btree_block	*new_broot;
	int			new_max;
	size_t			new_size;
	char			*np;
	char			*op;

	/*
	 * Handle the degenerate case quietly.
	 */
	if (rec_diff == 0) {
		return;
	}

	ifp = xfs_ifork_ptr(ip, whichfork);
	if (rec_diff > 0) {
		/*
		 * If there wasn't any memory allocated before, just
		 * allocate it now and get out.
		 */
		if (ifp->if_broot_bytes == 0) {
			new_size = XFS_BMAP_BROOT_SPACE_CALC(mp, rec_diff);
			ifp->if_broot = kmalloc(new_size,
						GFP_KERNEL | __GFP_NOFAIL);
			ifp->if_broot_bytes = (int)new_size;
			return;
		}

		/*
		 * If there is already an existing if_broot, then we need
		 * to realloc() it and shift the pointers to their new
		 * location. The records don't change location because
		 * they are kept butted up against the btree block header.
		 */
		cur_max = xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, 0);
		new_max = cur_max + rec_diff;
		new_size = XFS_BMAP_BROOT_SPACE_CALC(mp, new_max);
		ifp->if_broot = krealloc(ifp->if_broot, new_size,
					 GFP_KERNEL | __GFP_NOFAIL);
		op = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, ifp->if_broot, 1,
						     ifp->if_broot_bytes);
		np = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, ifp->if_broot, 1,
						     (int)new_size);
		ifp->if_broot_bytes = (int)new_size;
		ASSERT(XFS_BMAP_BMDR_SPACE(ifp->if_broot) <=
			xfs_inode_fork_size(ip, whichfork));
		memmove(np, op, cur_max * (uint)sizeof(xfs_fsblock_t));
		return;
	}

	/*
	 * rec_diff is less than 0.  In this case, we are shrinking the
	 * if_broot buffer.  It must already exist.  If we go to zero
	 * records, just get rid of the root.
	 */
	ASSERT((ifp->if_broot != NULL) && (ifp->if_broot_bytes > 0));
	cur_max = xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, 0);
	new_max = cur_max + rec_diff;
	ASSERT(new_max >= 0);
	if (new_max > 0)
		new_size = XFS_BMAP_BROOT_SPACE_CALC(mp, new_max);
	else
		new_size = 0;
	if (new_size > 0) {
		new_broot = kmalloc(new_size, GFP_KERNEL | __GFP_NOFAIL);
		/*
		 * First copy over the btree block header.
		 */
		memcpy(new_broot, ifp->if_broot,
			XFS_BMBT_BLOCK_LEN(ip->i_mount));
	} else {
		new_broot = NULL;
	}

	/*
	 * Only copy the records and pointers if there are any.
	 */
	if (new_max > 0) {
		/*
		 * First copy the records.
		 */
		op = (char *)XFS_BMBT_REC_ADDR(mp, ifp->if_broot, 1);
		np = (char *)XFS_BMBT_REC_ADDR(mp, new_broot, 1);
		memcpy(np, op, new_max * (uint)sizeof(xfs_bmbt_rec_t));

		/*
		 * Then copy the pointers.
		 */
		op = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, ifp->if_broot, 1,
						     ifp->if_broot_bytes);
		np = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, new_broot, 1,
						     (int)new_size);
		memcpy(np, op, new_max * (uint)sizeof(xfs_fsblock_t));
	}
	kfree(ifp->if_broot);
	ifp->if_broot = new_broot;
	ifp->if_broot_bytes = (int)new_size;
	if (ifp->if_broot)
		ASSERT(XFS_BMAP_BMDR_SPACE(ifp->if_broot) <=
			xfs_inode_fork_size(ip, whichfork));
	return;
}


/*
 * This is called when the amount of space needed for if_data is increased or
 * decreased.  The change in size is indicated by the number of bytes that
 * need to be added or deleted in the byte_diff parameter.  The buffer is
 * simply resized with krealloc(); shrinking it to zero frees it entirely.
 *
 * ip -- the inode whose if_data area is changing
 * byte_diff -- the change in the number of bytes, positive or negative,
 *	requested for the if_data array.
 */
void *
xfs_idata_realloc(
	struct xfs_inode	*ip,
	int64_t			byte_diff,
	int			whichfork)
{
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	int64_t			new_size = ifp->if_bytes + byte_diff;

	ASSERT(new_size >= 0);
	ASSERT(new_size <= xfs_inode_fork_size(ip, whichfork));

	if (byte_diff) {
		ifp->if_data = krealloc(ifp->if_data, new_size,
					GFP_KERNEL | __GFP_NOFAIL);
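		/*
		 * An empty fork holds no data, so make that explicit instead
		 * of keeping whatever a zero-size krealloc() returned.
		 */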
		if (new_size == 0)
			ifp->if_data = NULL;
		ifp->if_bytes = new_size;
	}

	return ifp->if_data;
}

/* Free all memory and reset a fork back to its initial state. */
void
xfs_idestroy_fork(
	struct xfs_ifork	*ifp)
{
	if (ifp->if_broot != NULL) {
		kfree(ifp->if_broot);
		ifp->if_broot = NULL;
	}

	switch (ifp->if_format) {
	case XFS_DINODE_FMT_LOCAL:
		kfree(ifp->if_data);
		ifp->if_data = NULL;
		break;
	case XFS_DINODE_FMT_EXTENTS:
	case XFS_DINODE_FMT_BTREE:
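		/*
		 * Only tear down the in-core extent tree if one was ever
		 * created.
		 */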
		if (ifp->if_height)
			xfs_iext_destroy(ifp);
		break;
	}
}

/*
 * Convert in-core extents to on-disk form.
 *
 * In the case of the data fork, the in-core and on-disk fork sizes can be
 * different due to delayed allocation extents. We only copy on-disk extents
 * here, so callers must always use the physical fork size to determine the
 * size of the buffer passed to this routine.  We will return the size actually
 * used.
 */
int
xfs_iextents_copy(
	struct xfs_inode	*ip,
	struct xfs_bmbt_rec	*dp,
	int			whichfork)
{
	int			state = xfs_bmap_fork_to_state(whichfork);
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_iext_cursor	icur;
	struct xfs_bmbt_irec	rec;
	int64_t			copied = 0;

	xfs_assert_ilocked(ip, XFS_ILOCK_EXCL | XFS_ILOCK_SHARED);
	ASSERT(ifp->if_bytes > 0);

	for_each_xfs_iext(ifp, &icur, &rec) {
		if (isnullstartblock(rec.br_startblock))
			continue;
		ASSERT(xfs_bmap_validate_extent(ip, whichfork, &rec) == NULL);
		xfs_bmbt_disk_set_all(dp, &rec);
		trace_xfs_write_extent(ip, &icur, state, _RET_IP_);
		copied += sizeof(struct xfs_bmbt_rec);
		dp++;
	}

	ASSERT(copied > 0);
	ASSERT(copied <= ifp->if_bytes);
	return copied;
}

/*
 * Each of the following cases stores data into the same region
 * of the on-disk inode, so only one of them can be valid at
 * any given time. While it is possible to have conflicting formats
 * and log flags, e.g. having XFS_ILOG_?DATA set when the fork is
 * in EXTENTS format, this can only happen when the fork has
 * changed formats after being modified but before being flushed.
 * In these cases, the format always takes precedence, because the
 * format indicates the current state of the fork.
 */
void
xfs_iflush_fork(
	struct xfs_inode	*ip,
	struct xfs_dinode	*dip,
	struct xfs_inode_log_item *iip,
	int			whichfork)
{
	char			*cp;
	struct xfs_ifork	*ifp;
	xfs_mount_t		*mp;
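	/* Logging flags per fork, indexed by XFS_DATA_FORK/XFS_ATTR_FORK. */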
	static const short	brootflag[2] =
		{ XFS_ILOG_DBROOT, XFS_ILOG_ABROOT };
	static const short	dataflag[2] =
		{ XFS_ILOG_DDATA, XFS_ILOG_ADATA };
	static const short	extflag[2] =
		{ XFS_ILOG_DEXT, XFS_ILOG_AEXT };

	if (!iip)
		return;
	ifp = xfs_ifork_ptr(ip, whichfork);
	/*
	 * This can happen if we gave up in iformat in an error path,
	 * for the attribute fork.
	 */
	if (!ifp) {
		ASSERT(whichfork == XFS_ATTR_FORK);
		return;
	}
	cp = XFS_DFORK_PTR(dip, whichfork);
	mp = ip->i_mount;
	switch (ifp->if_format) {
	case XFS_DINODE_FMT_LOCAL:
		if ((iip->ili_fields & dataflag[whichfork]) &&
		    (ifp->if_bytes > 0)) {
			ASSERT(ifp->if_data != NULL);
			ASSERT(ifp->if_bytes <= xfs_inode_fork_size(ip, whichfork));
			memcpy(cp, ifp->if_data, ifp->if_bytes);
		}
		break;

	case XFS_DINODE_FMT_EXTENTS:
		if ((iip->ili_fields & extflag[whichfork]) &&
		    (ifp->if_bytes > 0)) {
			ASSERT(ifp->if_nextents > 0);
			(void)xfs_iextents_copy(ip, (xfs_bmbt_rec_t *)cp,
				whichfork);
		}
		break;

	case XFS_DINODE_FMT_BTREE:
		if ((iip->ili_fields & brootflag[whichfork]) &&
		    (ifp->if_broot_bytes > 0)) {
			ASSERT(ifp->if_broot != NULL);
			ASSERT(XFS_BMAP_BMDR_SPACE(ifp->if_broot) <=
				xfs_inode_fork_size(ip, whichfork));
			xfs_bmbt_to_bmdr(mp, ifp->if_broot, ifp->if_broot_bytes,
				(xfs_bmdr_block_t *)cp,
				XFS_DFORK_SIZE(dip, mp, whichfork));
		}
		break;

	case XFS_DINODE_FMT_DEV:
		if (iip->ili_fields & XFS_ILOG_DEV) {
			ASSERT(whichfork == XFS_DATA_FORK);
			xfs_dinode_put_rdev(dip,
					linux_to_xfs_dev_t(VFS_I(ip)->i_rdev));
		}
		break;

	default:
		ASSERT(0);
		break;
	}
}

/* Convert bmap state flags to an inode fork. */
struct xfs_ifork *
xfs_iext_state_to_fork(
	struct xfs_inode	*ip,
	int			state)
{
	if (state & BMAP_COWFORK)
		return ip->i_cowfp;
	else if (state & BMAP_ATTRFORK)
		return &ip->i_af;
	return &ip->i_df;
}

/*
 * Initialize an inode's copy-on-write fork.
 */
void
xfs_ifork_init_cow(
	struct xfs_inode	*ip)
{
	if (ip->i_cowfp)
		return;

	ip->i_cowfp = kmem_cache_zalloc(xfs_ifork_cache,
			GFP_KERNEL | __GFP_NOLOCKDEP | __GFP_NOFAIL);
	ip->i_cowfp->if_format = XFS_DINODE_FMT_EXTENTS;
}

/* Verify the inline contents of the data fork of an inode. */
int
xfs_ifork_verify_local_data(
	struct xfs_inode	*ip)
{
	xfs_failaddr_t		fa = NULL;

	switch (VFS_I(ip)->i_mode & S_IFMT) {
	case S_IFDIR: {
		struct xfs_mount	*mp = ip->i_mount;
		struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, XFS_DATA_FORK);
		struct xfs_dir2_sf_hdr	*sfp = ifp->if_data;

		fa = xfs_dir2_sf_verify(mp, sfp, ifp->if_bytes);
		break;
	}
	case S_IFLNK: {
		struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, XFS_DATA_FORK);

		fa = xfs_symlink_shortform_verify(ifp->if_data, ifp->if_bytes);
		break;
	}
	default:
		break;
	}

	if (fa) {
		xfs_inode_verifier_error(ip, -EFSCORRUPTED, "data fork",
				ip->i_df.if_data, ip->i_df.if_bytes, fa);
		return -EFSCORRUPTED;
	}

	return 0;
}

/* Verify the inline contents of the attr fork of an inode. */
int
xfs_ifork_verify_local_attr(
	struct xfs_inode	*ip)
{
	struct xfs_ifork	*ifp = &ip->i_af;
	xfs_failaddr_t		fa;

	if (!xfs_inode_has_attr_fork(ip)) {
		fa = __this_address;
	} else {
		struct xfs_ifork	*ifp = &ip->i_af;

		ASSERT(ifp->if_format == XFS_DINODE_FMT_LOCAL);
		fa = xfs_attr_shortform_verify(ifp->if_data, ifp->if_bytes);
	}
	if (fa) {
		xfs_inode_verifier_error(ip, -EFSCORRUPTED, "attr fork",
				ifp->if_data, ifp->if_bytes, fa);
		return -EFSCORRUPTED;
	}

	return 0;
}

/*
 * Check if the inode fork supports adding nr_to_add more extents.
 *
 * If it doesn't but we can upgrade it to large extent counters, do the upgrade.
 * If we can't upgrade or are already using big counters but still can't fit the
 * additional extents, return -EFBIG.
 */
int
xfs_iext_count_extend(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork,
	uint			nr_to_add)
{
	struct xfs_mount	*mp = ip->i_mount;
	bool			has_large =
		xfs_inode_has_large_extent_counts(ip);
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	uint64_t		nr_exts;

	ASSERT(nr_to_add <= XFS_MAX_EXTCNT_UPGRADE_NR);

	if (whichfork == XFS_COW_FORK)
		return 0;

	/* no point in upgrading if if_nextents overflows */
	nr_exts = ifp->if_nextents + nr_to_add;
	if (nr_exts < ifp->if_nextents)
		return -EFBIG;

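	/* Error injection: pretend we overflow once there are more than 10 extents. */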
	if (XFS_TEST_ERROR(false, mp, XFS_ERRTAG_REDUCE_MAX_IEXTENTS) &&
	    nr_exts > 10)
		return -EFBIG;

	if (nr_exts > xfs_iext_max_nextents(has_large, whichfork)) {
		if (has_large || !xfs_has_large_extent_counts(mp))
			return -EFBIG;
		ip->i_diflags2 |= XFS_DIFLAG2_NREXT64;
		xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	}
	return 0;
}

/* Decide if a file mapping is on the realtime device or not. */
bool
xfs_ifork_is_realtime(
	struct xfs_inode	*ip,
	int			whichfork)
{
	return XFS_IS_REALTIME_INODE(ip) && whichfork != XFS_ATTR_FORK;
}