// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 */

#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_btree.h"
#include "xfs_bmap_btree.h"
#include "xfs_bmap.h"
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_dir2_priv.h"
#include "xfs_attr_leaf.h"
#include "xfs_types.h"
#include "xfs_errortag.h"
#include "xfs_health.h"
#include "xfs_symlink_remote.h"

struct kmem_cache *xfs_ifork_cache;

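/*
 * Set up an incore local format fork from a copy of the inline data.
 * Symlink bodies get an extra NUL appended so the buffer can be handed
 * straight to the VFS.
 */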
void
xfs_init_local_fork(
        struct xfs_inode *ip,
        int whichfork,
        const void *data,
        int64_t size)
{
        struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);
        int mem_size = size;
        bool zero_terminate;

        /*
         * If we are using the local fork to store a symlink body we need to
         * zero-terminate it so that we can pass it back to the VFS directly.
         * Overallocate the in-memory fork by one for that and add a zero
         * to terminate it below.
         */
        zero_terminate = S_ISLNK(VFS_I(ip)->i_mode);
        if (zero_terminate)
                mem_size++;

        if (size) {
                char *new_data = kmalloc(mem_size,
                                GFP_KERNEL | __GFP_NOLOCKDEP | __GFP_NOFAIL);

                memcpy(new_data, data, size);
                if (zero_terminate)
                        new_data[size] = '\0';

                ifp->if_data = new_data;
        } else {
                ifp->if_data = NULL;
        }

        ifp->if_bytes = size;
}

/*
 * The file is in-lined in the on-disk inode.
 */
STATIC int
xfs_iformat_local(
        struct xfs_inode *ip,
        struct xfs_dinode *dip,
        int whichfork,
        int size)
{
        /*
         * If the size is unreasonable, then something
         * is wrong and we just bail out rather than crash in
         * kmalloc() or memcpy() below.
         */
        if (unlikely(size > XFS_DFORK_SIZE(dip, ip->i_mount, whichfork))) {
                xfs_warn(ip->i_mount,
        "corrupt inode %llu (bad size %d for local fork, size = %zd).",
                        (unsigned long long) ip->i_ino, size,
                        XFS_DFORK_SIZE(dip, ip->i_mount, whichfork));
                xfs_inode_verifier_error(ip, -EFSCORRUPTED,
                                "xfs_iformat_local", dip, sizeof(*dip),
                                __this_address);
                xfs_inode_mark_sick(ip, XFS_SICK_INO_CORE);
                return -EFSCORRUPTED;
        }

        xfs_init_local_fork(ip, whichfork, XFS_DFORK_PTR(dip, whichfork), size);
        return 0;
}

/*
 * The file consists of a set of extents all of which fit into the on-disk
 * inode.
 */
STATIC int
xfs_iformat_extents(
        struct xfs_inode *ip,
        struct xfs_dinode *dip,
        int whichfork)
{
        struct xfs_mount *mp = ip->i_mount;
        struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);
        int state = xfs_bmap_fork_to_state(whichfork);
        xfs_extnum_t nex = xfs_dfork_nextents(dip, whichfork);
        int size = nex * sizeof(xfs_bmbt_rec_t);
        struct xfs_iext_cursor icur;
        struct xfs_bmbt_rec *dp;
        struct xfs_bmbt_irec new;
        int i;

        /*
         * If the number of extents is unreasonable, then something is wrong and
         * we just bail out rather than crash in kmalloc() or memcpy() below.
         */
        if (unlikely(size < 0 || size > XFS_DFORK_SIZE(dip, mp, whichfork))) {
                xfs_warn(ip->i_mount, "corrupt inode %llu ((a)extents = %llu).",
                        ip->i_ino, nex);
                xfs_inode_verifier_error(ip, -EFSCORRUPTED,
                                "xfs_iformat_extents(1)", dip, sizeof(*dip),
                                __this_address);
                xfs_inode_mark_sick(ip, XFS_SICK_INO_CORE);
                return -EFSCORRUPTED;
        }

        ifp->if_bytes = 0;
        ifp->if_data = NULL;
        ifp->if_height = 0;
        if (size) {
                dp = (xfs_bmbt_rec_t *) XFS_DFORK_PTR(dip, whichfork);

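                /*
                 * Walk the on-disk extent records, validate each one, and
                 * insert it into the incore extent tree.
                 */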
                xfs_iext_first(ifp, &icur);
                for (i = 0; i < nex; i++, dp++) {
                        xfs_failaddr_t fa;

                        xfs_bmbt_disk_get_all(dp, &new);
                        fa = xfs_bmap_validate_extent(ip, whichfork, &new);
                        if (fa) {
                                xfs_inode_verifier_error(ip, -EFSCORRUPTED,
                                                "xfs_iformat_extents(2)",
                                                dp, sizeof(*dp), fa);
                                xfs_inode_mark_sick(ip, XFS_SICK_INO_CORE);
                                return xfs_bmap_complain_bad_rec(ip, whichfork,
                                                fa, &new);
                        }

                        xfs_iext_insert(ip, &icur, &new, state);
                        trace_xfs_read_extent(ip, &icur, state, _THIS_IP_);
                        xfs_iext_next(ifp, &icur);
                }
        }
        return 0;
}

/*
 * The file has too many extents to fit into
 * the inode, so they are in B-tree format.
 * Allocate a buffer for the root of the B-tree
 * and copy the root into it.  The i_extents
 * field will remain NULL until all of the
 * extents are read in (when they are needed).
 */
STATIC int
xfs_iformat_btree(
        struct xfs_inode *ip,
        struct xfs_dinode *dip,
        int whichfork)
{
        struct xfs_mount *mp = ip->i_mount;
        xfs_bmdr_block_t *dfp;
        struct xfs_ifork *ifp;
        /* REFERENCED */
        int nrecs;
        int size;
        int level;

        ifp = xfs_ifork_ptr(ip, whichfork);
        dfp = (xfs_bmdr_block_t *)XFS_DFORK_PTR(dip, whichfork);
        size = XFS_BMAP_BROOT_SPACE(mp, dfp);
        nrecs = be16_to_cpu(dfp->bb_numrecs);
        level = be16_to_cpu(dfp->bb_level);

        /*
         * Blow out if the fork has fewer extents than can fit in the fork
         * area (it shouldn't be in btree format), the root btree block has
         * more records than can fit into the fork, the number of extents is
         * greater than the number of blocks, or the btree level is invalid.
         */
        if (unlikely(ifp->if_nextents <= XFS_IFORK_MAXEXT(ip, whichfork) ||
                     nrecs == 0 ||
                     XFS_BMDR_SPACE_CALC(nrecs) >
                                        XFS_DFORK_SIZE(dip, mp, whichfork) ||
                     ifp->if_nextents > ip->i_nblocks) ||
                     level == 0 || level > XFS_BM_MAXLEVELS(mp, whichfork)) {
                xfs_warn(mp, "corrupt inode %llu (btree).",
                        (unsigned long long) ip->i_ino);
                xfs_inode_verifier_error(ip, -EFSCORRUPTED,
                                "xfs_iformat_btree", dfp, size,
                                __this_address);
                xfs_inode_mark_sick(ip, XFS_SICK_INO_CORE);
                return -EFSCORRUPTED;
        }

        ifp->if_broot_bytes = size;
        ifp->if_broot = kmalloc(size,
                        GFP_KERNEL | __GFP_NOLOCKDEP | __GFP_NOFAIL);
        ASSERT(ifp->if_broot != NULL);
        /*
         * Copy and convert from the on-disk structure
         * to the in-memory structure.
         */
        xfs_bmdr_to_bmbt(ip, dfp, XFS_DFORK_SIZE(dip, ip->i_mount, whichfork),
                        ifp->if_broot, size);

        ifp->if_bytes = 0;
        ifp->if_data = NULL;
        ifp->if_height = 0;
        return 0;
}

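/*
 * Read the on-disk data fork into the incore data fork, based on the fork
 * format recorded in the dinode.
 */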
int
xfs_iformat_data_fork(
        struct xfs_inode *ip,
        struct xfs_dinode *dip)
{
        struct inode *inode = VFS_I(ip);
        int error;

        /*
         * Initialize the extent count early, as the per-format routines may
         * depend on it.  Use release semantics to set needextents /after/ we
         * set the format. This ensures that we can use acquire semantics on
         * needextents in xfs_need_iread_extents() and be guaranteed to see a
         * valid format value after that load.
         */
        ip->i_df.if_format = dip->di_format;
        ip->i_df.if_nextents = xfs_dfork_data_extents(dip);
        smp_store_release(&ip->i_df.if_needextents,
                        ip->i_df.if_format == XFS_DINODE_FMT_BTREE ? 1 : 0);

        switch (inode->i_mode & S_IFMT) {
        case S_IFIFO:
        case S_IFCHR:
        case S_IFBLK:
        case S_IFSOCK:
                ip->i_disk_size = 0;
                inode->i_rdev = xfs_to_linux_dev_t(xfs_dinode_get_rdev(dip));
                return 0;
        case S_IFREG:
        case S_IFLNK:
        case S_IFDIR:
                switch (ip->i_df.if_format) {
                case XFS_DINODE_FMT_LOCAL:
                        error = xfs_iformat_local(ip, dip, XFS_DATA_FORK,
                                        be64_to_cpu(dip->di_size));
                        if (!error)
                                error = xfs_ifork_verify_local_data(ip);
                        return error;
                case XFS_DINODE_FMT_EXTENTS:
                        return xfs_iformat_extents(ip, dip, XFS_DATA_FORK);
                case XFS_DINODE_FMT_BTREE:
                        return xfs_iformat_btree(ip, dip, XFS_DATA_FORK);
                default:
                        xfs_inode_verifier_error(ip, -EFSCORRUPTED, __func__,
                                        dip, sizeof(*dip), __this_address);
                        xfs_inode_mark_sick(ip, XFS_SICK_INO_CORE);
                        return -EFSCORRUPTED;
                }
                break;
        default:
                xfs_inode_verifier_error(ip, -EFSCORRUPTED, __func__, dip,
                                sizeof(*dip), __this_address);
                xfs_inode_mark_sick(ip, XFS_SICK_INO_CORE);
                return -EFSCORRUPTED;
        }
}

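/* Return the size in bytes of the shortform attr fork from its on-disk header. */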
static uint16_t
xfs_dfork_attr_shortform_size(
        struct xfs_dinode *dip)
{
        struct xfs_attr_sf_hdr *sf = XFS_DFORK_APTR(dip);

        return be16_to_cpu(sf->totsize);
}

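/* Set up the incore attr fork with the given format and extent count. */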
void
xfs_ifork_init_attr(
        struct xfs_inode *ip,
        enum xfs_dinode_fmt format,
        xfs_extnum_t nextents)
{
        /*
         * Initialize the extent count early, as the per-format routines may
         * depend on it.  Use release semantics to set needextents /after/ we
         * set the format. This ensures that we can use acquire semantics on
         * needextents in xfs_need_iread_extents() and be guaranteed to see a
         * valid format value after that load.
         */
        ip->i_af.if_format = format;
        ip->i_af.if_nextents = nextents;
        smp_store_release(&ip->i_af.if_needextents,
                        ip->i_af.if_format == XFS_DINODE_FMT_BTREE ? 1 : 0);
}

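/* Free the incore attr fork and reset it to an empty, extents-format fork. */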
void
xfs_ifork_zap_attr(
        struct xfs_inode *ip)
{
        xfs_idestroy_fork(&ip->i_af);
        memset(&ip->i_af, 0, sizeof(struct xfs_ifork));
        ip->i_af.if_format = XFS_DINODE_FMT_EXTENTS;
}

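/*
 * Read the on-disk attr fork into the incore attr fork, based on the fork
 * format recorded in the dinode.  The attr fork is torn down again if this
 * fails.
 */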
int
xfs_iformat_attr_fork(
        struct xfs_inode *ip,
        struct xfs_dinode *dip)
{
        xfs_extnum_t naextents = xfs_dfork_attr_extents(dip);
        int error = 0;

        /*
         * Initialize the extent count early, as the per-format routines may
         * depend on it.
         */
        xfs_ifork_init_attr(ip, dip->di_aformat, naextents);

        switch (ip->i_af.if_format) {
        case XFS_DINODE_FMT_LOCAL:
                error = xfs_iformat_local(ip, dip, XFS_ATTR_FORK,
                                xfs_dfork_attr_shortform_size(dip));
                if (!error)
                        error = xfs_ifork_verify_local_attr(ip);
                break;
        case XFS_DINODE_FMT_EXTENTS:
                error = xfs_iformat_extents(ip, dip, XFS_ATTR_FORK);
                break;
        case XFS_DINODE_FMT_BTREE:
                error = xfs_iformat_btree(ip, dip, XFS_ATTR_FORK);
                break;
        default:
                xfs_inode_verifier_error(ip, error, __func__, dip,
                                sizeof(*dip), __this_address);
                xfs_inode_mark_sick(ip, XFS_SICK_INO_CORE);
                error = -EFSCORRUPTED;
                break;
        }

        if (error)
                xfs_ifork_zap_attr(ip);
        return error;
}

/*
 * Reallocate the space for if_broot based on the number of records
 * being added or deleted as indicated in rec_diff.  Move the records
 * and pointers in if_broot to fit the new size.  When shrinking this
 * will eliminate holes between the records and pointers created by
 * the caller.  When growing this will create holes to be filled in
 * by the caller.
 *
 * The caller must not request to add more records than would fit in
 * the on-disk inode root.  If if_broot is currently NULL and records
 * are being added, one will be allocated.  The caller must also not
 * request that the number of records go below zero, although it can
 * go to zero.
 *
 * ip -- the inode whose if_broot area is changing
 * rec_diff -- the change in the number of records, positive or negative,
 *      requested for the if_broot array.
 */
void
xfs_iroot_realloc(
        xfs_inode_t *ip,
        int rec_diff,
        int whichfork)
{
        struct xfs_mount *mp = ip->i_mount;
        int cur_max;
        struct xfs_ifork *ifp;
        struct xfs_btree_block *new_broot;
        int new_max;
        size_t new_size;
        char *np;
        char *op;

        /*
         * Handle the degenerate case quietly.
         */
        if (rec_diff == 0) {
                return;
        }

        ifp = xfs_ifork_ptr(ip, whichfork);
        if (rec_diff > 0) {
                /*
                 * If there wasn't any memory allocated before, just
                 * allocate it now and get out.
                 */
                if (ifp->if_broot_bytes == 0) {
                        new_size = XFS_BMAP_BROOT_SPACE_CALC(mp, rec_diff);
                        ifp->if_broot = kmalloc(new_size,
                                        GFP_KERNEL | __GFP_NOFAIL);
                        ifp->if_broot_bytes = (int)new_size;
                        return;
                }

                /*
                 * If there is already an existing if_broot, then we need
                 * to realloc() it and shift the pointers to their new
                 * location.  The records don't change location because
                 * they are kept butted up against the btree block header.
                 */
                cur_max = xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, 0);
                new_max = cur_max + rec_diff;
                new_size = XFS_BMAP_BROOT_SPACE_CALC(mp, new_max);
                ifp->if_broot = krealloc(ifp->if_broot, new_size,
                                GFP_KERNEL | __GFP_NOFAIL);
                op = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, ifp->if_broot, 1,
                                ifp->if_broot_bytes);
                np = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, ifp->if_broot, 1,
                                (int)new_size);
                ifp->if_broot_bytes = (int)new_size;
                ASSERT(XFS_BMAP_BMDR_SPACE(ifp->if_broot) <=
                        xfs_inode_fork_size(ip, whichfork));
                memmove(np, op, cur_max * (uint)sizeof(xfs_fsblock_t));
                return;
        }

        /*
         * rec_diff is less than 0.  In this case, we are shrinking the
         * if_broot buffer.  It must already exist.  If we go to zero
         * records, just get rid of the root and clear the status bit.
         */
        ASSERT((ifp->if_broot != NULL) && (ifp->if_broot_bytes > 0));
        cur_max = xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, 0);
        new_max = cur_max + rec_diff;
        ASSERT(new_max >= 0);
        if (new_max > 0)
                new_size = XFS_BMAP_BROOT_SPACE_CALC(mp, new_max);
        else
                new_size = 0;
        if (new_size > 0) {
                new_broot = kmalloc(new_size, GFP_KERNEL | __GFP_NOFAIL);
                /*
                 * First copy over the btree block header.
                 */
                memcpy(new_broot, ifp->if_broot,
                        XFS_BMBT_BLOCK_LEN(ip->i_mount));
        } else {
                new_broot = NULL;
        }

        /*
         * Only copy the records and pointers if there are any.
         */
        if (new_max > 0) {
                /*
                 * First copy the records.
                 */
                op = (char *)XFS_BMBT_REC_ADDR(mp, ifp->if_broot, 1);
                np = (char *)XFS_BMBT_REC_ADDR(mp, new_broot, 1);
                memcpy(np, op, new_max * (uint)sizeof(xfs_bmbt_rec_t));

                /*
                 * Then copy the pointers.
                 */
                op = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, ifp->if_broot, 1,
                                ifp->if_broot_bytes);
                np = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, new_broot, 1,
                                (int)new_size);
                memcpy(np, op, new_max * (uint)sizeof(xfs_fsblock_t));
        }
        kfree(ifp->if_broot);
        ifp->if_broot = new_broot;
        ifp->if_broot_bytes = (int)new_size;
        if (ifp->if_broot)
                ASSERT(XFS_BMAP_BMDR_SPACE(ifp->if_broot) <=
                        xfs_inode_fork_size(ip, whichfork));
        return;
}

/*
 * This is called when the amount of space needed for if_data
 * is increased or decreased.  The change in size is indicated by
 * the number of bytes that need to be added or deleted in the
 * byte_diff parameter.
 *
 * The buffer is resized with krealloc() as needed: a new buffer is
 * allocated when there was none before, and the buffer is freed again
 * when the size drops to zero.
 *
 * ip -- the inode whose if_data area is changing
 * byte_diff -- the change in the number of bytes, positive or negative,
 *      requested for the if_data array.
 */
void *
xfs_idata_realloc(
        struct xfs_inode *ip,
        int64_t byte_diff,
        int whichfork)
{
        struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);
        int64_t new_size = ifp->if_bytes + byte_diff;

        ASSERT(new_size >= 0);
        ASSERT(new_size <= xfs_inode_fork_size(ip, whichfork));

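        /*
         * krealloc() handles both growing and shrinking the buffer; when the
         * fork shrinks to zero bytes, drop if_data back to NULL so callers
         * see an empty fork.
         */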
        if (byte_diff) {
                ifp->if_data = krealloc(ifp->if_data, new_size,
                                GFP_KERNEL | __GFP_NOFAIL);
                if (new_size == 0)
                        ifp->if_data = NULL;
                ifp->if_bytes = new_size;
        }

        return ifp->if_data;
}

/* Free all memory and reset a fork back to its initial state. */
void
xfs_idestroy_fork(
        struct xfs_ifork *ifp)
{
        if (ifp->if_broot != NULL) {
                kfree(ifp->if_broot);
                ifp->if_broot = NULL;
        }

        switch (ifp->if_format) {
        case XFS_DINODE_FMT_LOCAL:
                kfree(ifp->if_data);
                ifp->if_data = NULL;
                break;
        case XFS_DINODE_FMT_EXTENTS:
        case XFS_DINODE_FMT_BTREE:
                if (ifp->if_height)
                        xfs_iext_destroy(ifp);
                break;
        }
}

/*
 * Convert in-core extents to on-disk form
 *
 * In the case of the data fork, the in-core and on-disk fork sizes can be
 * different due to delayed allocation extents. We only copy on-disk extents
 * here, so callers must always use the physical fork size to determine the
 * size of the buffer passed to this routine.  We will return the size actually
 * used.
 */
int
xfs_iextents_copy(
        struct xfs_inode *ip,
        struct xfs_bmbt_rec *dp,
        int whichfork)
{
        int state = xfs_bmap_fork_to_state(whichfork);
        struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);
        struct xfs_iext_cursor icur;
        struct xfs_bmbt_irec rec;
        int64_t copied = 0;

        xfs_assert_ilocked(ip, XFS_ILOCK_EXCL | XFS_ILOCK_SHARED);
        ASSERT(ifp->if_bytes > 0);

        for_each_xfs_iext(ifp, &icur, &rec) {
                if (isnullstartblock(rec.br_startblock))
                        continue;
                ASSERT(xfs_bmap_validate_extent(ip, whichfork, &rec) == NULL);
                xfs_bmbt_disk_set_all(dp, &rec);
                trace_xfs_write_extent(ip, &icur, state, _RET_IP_);
                copied += sizeof(struct xfs_bmbt_rec);
                dp++;
        }

        ASSERT(copied > 0);
        ASSERT(copied <= ifp->if_bytes);
        return copied;
}

/*
 * Each of the following cases stores data into the same region
 * of the on-disk inode, so only one of them can be valid at
 * any given time.  While it is possible to have conflicting formats
 * and log flags, e.g. having XFS_ILOG_?DATA set when the fork is
 * in EXTENTS format, this can only happen when the fork has
 * changed formats after being modified but before being flushed.
 * In these cases, the format always takes precedence, because the
 * format indicates the current state of the fork.
 */
void
xfs_iflush_fork(
        struct xfs_inode *ip,
        struct xfs_dinode *dip,
        struct xfs_inode_log_item *iip,
        int whichfork)
{
        char *cp;
        struct xfs_ifork *ifp;
        xfs_mount_t *mp;
        static const short brootflag[2] =
                { XFS_ILOG_DBROOT, XFS_ILOG_ABROOT };
        static const short dataflag[2] =
                { XFS_ILOG_DDATA, XFS_ILOG_ADATA };
        static const short extflag[2] =
                { XFS_ILOG_DEXT, XFS_ILOG_AEXT };

        if (!iip)
                return;
        ifp = xfs_ifork_ptr(ip, whichfork);
        /*
         * This can happen if we gave up in iformat in an error path,
         * for the attribute fork.
         */
        if (!ifp) {
                ASSERT(whichfork == XFS_ATTR_FORK);
                return;
        }
        cp = XFS_DFORK_PTR(dip, whichfork);
        mp = ip->i_mount;
        switch (ifp->if_format) {
        case XFS_DINODE_FMT_LOCAL:
                if ((iip->ili_fields & dataflag[whichfork]) &&
                    (ifp->if_bytes > 0)) {
                        ASSERT(ifp->if_data != NULL);
                        ASSERT(ifp->if_bytes <= xfs_inode_fork_size(ip, whichfork));
                        memcpy(cp, ifp->if_data, ifp->if_bytes);
                }
                break;

        case XFS_DINODE_FMT_EXTENTS:
                if ((iip->ili_fields & extflag[whichfork]) &&
                    (ifp->if_bytes > 0)) {
                        ASSERT(ifp->if_nextents > 0);
                        (void)xfs_iextents_copy(ip, (xfs_bmbt_rec_t *)cp,
                                whichfork);
                }
                break;

        case XFS_DINODE_FMT_BTREE:
                if ((iip->ili_fields & brootflag[whichfork]) &&
                    (ifp->if_broot_bytes > 0)) {
                        ASSERT(ifp->if_broot != NULL);
                        ASSERT(XFS_BMAP_BMDR_SPACE(ifp->if_broot) <=
                                xfs_inode_fork_size(ip, whichfork));
                        xfs_bmbt_to_bmdr(mp, ifp->if_broot, ifp->if_broot_bytes,
                                (xfs_bmdr_block_t *)cp,
                                XFS_DFORK_SIZE(dip, mp, whichfork));
                }
                break;

        case XFS_DINODE_FMT_DEV:
                if (iip->ili_fields & XFS_ILOG_DEV) {
                        ASSERT(whichfork == XFS_DATA_FORK);
                        xfs_dinode_put_rdev(dip,
                                        linux_to_xfs_dev_t(VFS_I(ip)->i_rdev));
                }
                break;

        default:
                ASSERT(0);
                break;
        }
}

/* Convert bmap state flags to an inode fork. */
struct xfs_ifork *
xfs_iext_state_to_fork(
        struct xfs_inode *ip,
        int state)
{
        if (state & BMAP_COWFORK)
                return ip->i_cowfp;
        else if (state & BMAP_ATTRFORK)
                return &ip->i_af;
        return &ip->i_df;
}

/*
 * Initialize an inode's copy-on-write fork.
 */
void
xfs_ifork_init_cow(
        struct xfs_inode *ip)
{
        if (ip->i_cowfp)
                return;

        ip->i_cowfp = kmem_cache_zalloc(xfs_ifork_cache,
                        GFP_KERNEL | __GFP_NOLOCKDEP | __GFP_NOFAIL);
        ip->i_cowfp->if_format = XFS_DINODE_FMT_EXTENTS;
}

/* Verify the inline contents of the data fork of an inode. */
int
xfs_ifork_verify_local_data(
        struct xfs_inode *ip)
{
        xfs_failaddr_t fa = NULL;

        switch (VFS_I(ip)->i_mode & S_IFMT) {
        case S_IFDIR: {
                struct xfs_mount *mp = ip->i_mount;
                struct xfs_ifork *ifp = xfs_ifork_ptr(ip, XFS_DATA_FORK);
                struct xfs_dir2_sf_hdr *sfp = ifp->if_data;

                fa = xfs_dir2_sf_verify(mp, sfp, ifp->if_bytes);
                break;
        }
        case S_IFLNK: {
                struct xfs_ifork *ifp = xfs_ifork_ptr(ip, XFS_DATA_FORK);

                fa = xfs_symlink_shortform_verify(ifp->if_data, ifp->if_bytes);
                break;
        }
        default:
                break;
        }

        if (fa) {
                xfs_inode_verifier_error(ip, -EFSCORRUPTED, "data fork",
                                ip->i_df.if_data, ip->i_df.if_bytes, fa);
                return -EFSCORRUPTED;
        }

        return 0;
}

/* Verify the inline contents of the attr fork of an inode. */
int
xfs_ifork_verify_local_attr(
        struct xfs_inode *ip)
{
        struct xfs_ifork *ifp = &ip->i_af;
        xfs_failaddr_t fa;

        if (!xfs_inode_has_attr_fork(ip)) {
                fa = __this_address;
        } else {
                struct xfs_ifork *ifp = &ip->i_af;

                ASSERT(ifp->if_format == XFS_DINODE_FMT_LOCAL);
                fa = xfs_attr_shortform_verify(ifp->if_data, ifp->if_bytes);
        }
        if (fa) {
                xfs_inode_verifier_error(ip, -EFSCORRUPTED, "attr fork",
                                ifp->if_data, ifp->if_bytes, fa);
                return -EFSCORRUPTED;
        }

        return 0;
}

/*
 * Check if the inode fork supports adding nr_to_add more extents.
 *
 * If it doesn't but we can upgrade it to large extent counters, do the upgrade.
 * If we can't upgrade or are already using big counters but still can't fit the
 * additional extents, return -EFBIG.
 */
int
xfs_iext_count_extend(
        struct xfs_trans *tp,
        struct xfs_inode *ip,
        int whichfork,
        uint nr_to_add)
{
        struct xfs_mount *mp = ip->i_mount;
        bool has_large =
                xfs_inode_has_large_extent_counts(ip);
        struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);
        uint64_t nr_exts;

        ASSERT(nr_to_add <= XFS_MAX_EXTCNT_UPGRADE_NR);

        if (whichfork == XFS_COW_FORK)
                return 0;

        /* no point in upgrading if if_nextents overflows */
        nr_exts = ifp->if_nextents + nr_to_add;
        if (nr_exts < ifp->if_nextents)
                return -EFBIG;

        if (XFS_TEST_ERROR(false, mp, XFS_ERRTAG_REDUCE_MAX_IEXTENTS) &&
            nr_exts > 10)
                return -EFBIG;

        if (nr_exts > xfs_iext_max_nextents(has_large, whichfork)) {
                if (has_large || !xfs_has_large_extent_counts(mp))
                        return -EFBIG;
                ip->i_diflags2 |= XFS_DIFLAG2_NREXT64;
                xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
        }
        return 0;
}

/* Decide if a file mapping is on the realtime device or not. */
bool
xfs_ifork_is_realtime(
        struct xfs_inode *ip,
        int whichfork)
{
        return XFS_IS_REALTIME_INODE(ip) && whichfork != XFS_ATTR_FORK;
}