// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_alloc.h"
#include "xfs_btree.h"
#include "xfs_bmap_btree.h"
#include "xfs_bmap.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_trace.h"
#include "xfs_cksum.h"
#include "xfs_rmap.h"

/*
 * Convert on-disk form of btree root to in-memory form.
 */
void
xfs_bmdr_to_bmbt(
	struct xfs_inode	*ip,
	xfs_bmdr_block_t	*dblock,
	int			dblocklen,
	struct xfs_btree_block	*rblock,
	int			rblocklen)
{
	struct xfs_mount	*mp = ip->i_mount;
	int			dmxr;
	xfs_bmbt_key_t		*fkp;
	__be64			*fpp;
	xfs_bmbt_key_t		*tkp;
	__be64			*tpp;

	xfs_btree_init_block_int(mp, rblock, XFS_BUF_DADDR_NULL,
				 XFS_BTNUM_BMAP, 0, 0, ip->i_ino,
				 XFS_BTREE_LONG_PTRS);
	rblock->bb_level = dblock->bb_level;
	ASSERT(be16_to_cpu(rblock->bb_level) > 0);
	rblock->bb_numrecs = dblock->bb_numrecs;
	dmxr = xfs_bmdr_maxrecs(dblocklen, 0);
	fkp = XFS_BMDR_KEY_ADDR(dblock, 1);
	tkp = XFS_BMBT_KEY_ADDR(mp, rblock, 1);
	fpp = XFS_BMDR_PTR_ADDR(dblock, 1, dmxr);
	tpp = XFS_BMAP_BROOT_PTR_ADDR(mp, rblock, 1, rblocklen);
	dmxr = be16_to_cpu(dblock->bb_numrecs);
	memcpy(tkp, fkp, sizeof(*fkp) * dmxr);
	memcpy(tpp, fpp, sizeof(*fpp) * dmxr);
}
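
/*
 * Usage sketch (illustrative only, modelled on the inode read path; the
 * surrounding helper names are assumptions, not taken from this file):
 *
 *	dfp = (xfs_bmdr_block_t *)XFS_DFORK_PTR(dip, whichfork);
 *	size = XFS_BMAP_BROOT_SPACE(mp, dfp);
 *	ifp->if_broot = kmem_alloc(size, KM_NOFS);
 *	ifp->if_broot_bytes = size;
 *	xfs_bmdr_to_bmbt(ip, dfp, XFS_DFORK_SIZE(dip, mp, whichfork),
 *			 ifp->if_broot, size);
 */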

void
xfs_bmbt_disk_get_all(
	struct xfs_bmbt_rec	*rec,
	struct xfs_bmbt_irec	*irec)
{
	uint64_t		l0 = get_unaligned_be64(&rec->l0);
	uint64_t		l1 = get_unaligned_be64(&rec->l1);

	irec->br_startoff = (l0 & xfs_mask64lo(64 - BMBT_EXNTFLAG_BITLEN)) >> 9;
	irec->br_startblock = ((l0 & xfs_mask64lo(9)) << 43) | (l1 >> 21);
	irec->br_blockcount = l1 & xfs_mask64lo(21);
	if (l0 >> (64 - BMBT_EXNTFLAG_BITLEN))
		irec->br_state = XFS_EXT_UNWRITTEN;
	else
		irec->br_state = XFS_EXT_NORM;
}
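
/*
 * For reference, the packed 128-bit extent record decoded above has this
 * layout (bit positions within each big-endian 64-bit word):
 *
 *	l0: |63|62 ................. 9|8 ......... 0|
 *	     ^    54-bit startoff      startblock[51:43]
 *	     unwritten flag
 *	l1: |63 ................. 21|20 .......... 0|
 *	      startblock[42:0]        21-bit blockcount
 *
 * Worked example: l0 = (100ULL << 9) and l1 = (2048ULL << 21) | 16 decodes
 * to startoff 100, startblock 2048, blockcount 16 and XFS_EXT_NORM, since
 * bit 63 of l0 is clear.
 */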
78
Linus Torvalds1da177e2005-04-16 15:20:36 -070079/*
80 * Extract the blockcount field from an on disk bmap extent record.
81 */
82xfs_filblks_t
83xfs_bmbt_disk_get_blockcount(
84 xfs_bmbt_rec_t *r)
85{
Eric Sandeenfb825572009-01-09 15:53:54 +110086 return (xfs_filblks_t)(be64_to_cpu(r->l1) & xfs_mask64lo(21));
Linus Torvalds1da177e2005-04-16 15:20:36 -070087}
88
89/*
Linus Torvalds1da177e2005-04-16 15:20:36 -070090 * Extract the startoff field from a disk format bmap extent record.
91 */
92xfs_fileoff_t
93xfs_bmbt_disk_get_startoff(
94 xfs_bmbt_rec_t *r)
95{
Christoph Hellwigcd8b0a92007-08-16 16:24:15 +100096 return ((xfs_fileoff_t)be64_to_cpu(r->l0) &
Eric Sandeenfb825572009-01-09 15:53:54 +110097 xfs_mask64lo(64 - BMBT_EXNTFLAG_BITLEN)) >> 9;
Linus Torvalds1da177e2005-04-16 15:20:36 -070098}
Linus Torvalds1da177e2005-04-16 15:20:36 -070099
Christoph Hellwig8cba4342007-08-16 16:23:53 +1000100/*
Linus Torvalds1da177e2005-04-16 15:20:36 -0700101 * Set all the fields in a bmap extent record from the uncompressed form.
102 */
103void
Linus Torvalds1da177e2005-04-16 15:20:36 -0700104xfs_bmbt_disk_set_all(
Christoph Hellwiga67d00a2017-10-17 14:16:26 -0700105 struct xfs_bmbt_rec *r,
106 struct xfs_bmbt_irec *s)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700107{
Christoph Hellwiga67d00a2017-10-17 14:16:26 -0700108 int extent_flag = (s->br_state != XFS_EXT_NORM);
109
110 ASSERT(s->br_state == XFS_EXT_NORM || s->br_state == XFS_EXT_UNWRITTEN);
111 ASSERT(!(s->br_startoff & xfs_mask64hi(64-BMBT_STARTOFF_BITLEN)));
112 ASSERT(!(s->br_blockcount & xfs_mask64hi(64-BMBT_BLOCKCOUNT_BITLEN)));
113 ASSERT(!(s->br_startblock & xfs_mask64hi(64-BMBT_STARTBLOCK_BITLEN)));
114
Christoph Hellwig135dcc12017-11-03 10:34:45 -0700115 put_unaligned_be64(
Christoph Hellwiga67d00a2017-10-17 14:16:26 -0700116 ((xfs_bmbt_rec_base_t)extent_flag << 63) |
117 ((xfs_bmbt_rec_base_t)s->br_startoff << 9) |
Christoph Hellwig135dcc12017-11-03 10:34:45 -0700118 ((xfs_bmbt_rec_base_t)s->br_startblock >> 43), &r->l0);
119 put_unaligned_be64(
Christoph Hellwiga67d00a2017-10-17 14:16:26 -0700120 ((xfs_bmbt_rec_base_t)s->br_startblock << 21) |
121 ((xfs_bmbt_rec_base_t)s->br_blockcount &
Christoph Hellwig135dcc12017-11-03 10:34:45 -0700122 (xfs_bmbt_rec_base_t)xfs_mask64lo(21)), &r->l1);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700123}
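
/*
 * Round-trip sketch for the two helpers above: packing an incore extent
 * and decoding it again is the identity on all four fields.  Values here
 * are for illustration only:
 *
 *	struct xfs_bmbt_irec	in = {
 *		.br_startoff	= 100,
 *		.br_startblock	= 2048,
 *		.br_blockcount	= 16,
 *		.br_state	= XFS_EXT_NORM,
 *	};
 *	struct xfs_bmbt_rec	disk;
 *	struct xfs_bmbt_irec	out;
 *
 *	xfs_bmbt_disk_set_all(&disk, &in);
 *	xfs_bmbt_disk_get_all(&disk, &out);
 *	ASSERT(out.br_startblock == in.br_startblock);
 */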

/*
 * Convert in-memory form of btree root to on-disk form.
 */
void
xfs_bmbt_to_bmdr(
	struct xfs_mount	*mp,
	struct xfs_btree_block	*rblock,
	int			rblocklen,
	xfs_bmdr_block_t	*dblock,
	int			dblocklen)
{
	int			dmxr;
	xfs_bmbt_key_t		*fkp;
	__be64			*fpp;
	xfs_bmbt_key_t		*tkp;
	__be64			*tpp;

	if (xfs_sb_version_hascrc(&mp->m_sb)) {
		ASSERT(rblock->bb_magic == cpu_to_be32(XFS_BMAP_CRC_MAGIC));
		ASSERT(uuid_equal(&rblock->bb_u.l.bb_uuid,
		       &mp->m_sb.sb_meta_uuid));
		ASSERT(rblock->bb_u.l.bb_blkno ==
		       cpu_to_be64(XFS_BUF_DADDR_NULL));
	} else
		ASSERT(rblock->bb_magic == cpu_to_be32(XFS_BMAP_MAGIC));
	ASSERT(rblock->bb_u.l.bb_leftsib == cpu_to_be64(NULLFSBLOCK));
	ASSERT(rblock->bb_u.l.bb_rightsib == cpu_to_be64(NULLFSBLOCK));
	ASSERT(rblock->bb_level != 0);
	dblock->bb_level = rblock->bb_level;
	dblock->bb_numrecs = rblock->bb_numrecs;
	dmxr = xfs_bmdr_maxrecs(dblocklen, 0);
	fkp = XFS_BMBT_KEY_ADDR(mp, rblock, 1);
	tkp = XFS_BMDR_KEY_ADDR(dblock, 1);
	fpp = XFS_BMAP_BROOT_PTR_ADDR(mp, rblock, 1, rblocklen);
	tpp = XFS_BMDR_PTR_ADDR(dblock, 1, dmxr);
	dmxr = be16_to_cpu(dblock->bb_numrecs);
	memcpy(tkp, fkp, sizeof(*fkp) * dmxr);
	memcpy(tpp, fpp, sizeof(*fpp) * dmxr);
}

STATIC struct xfs_btree_cur *
xfs_bmbt_dup_cursor(
	struct xfs_btree_cur	*cur)
{
	struct xfs_btree_cur	*new;

	new = xfs_bmbt_init_cursor(cur->bc_mp, cur->bc_tp,
			cur->bc_private.b.ip, cur->bc_private.b.whichfork);

	/*
	 * Copy the flags value, since init cursor doesn't get it.  The
	 * firstblock and deferred ops state now live in the transaction,
	 * which both cursors already share.
	 */
	new->bc_private.b.flags = cur->bc_private.b.flags;

	return new;
}

STATIC void
xfs_bmbt_update_cursor(
	struct xfs_btree_cur	*src,
	struct xfs_btree_cur	*dst)
{
	ASSERT((dst->bc_tp->t_firstblock != NULLFSBLOCK) ||
	       (dst->bc_private.b.ip->i_d.di_flags & XFS_DIFLAG_REALTIME));

	dst->bc_private.b.allocated += src->bc_private.b.allocated;
	dst->bc_tp->t_firstblock = src->bc_tp->t_firstblock;

	src->bc_private.b.allocated = 0;
}

STATIC int
xfs_bmbt_alloc_block(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*start,
	union xfs_btree_ptr	*new,
	int			*stat)
{
	xfs_alloc_arg_t		args;	/* block allocation args */
	int			error;	/* error return value */

	memset(&args, 0, sizeof(args));
	args.tp = cur->bc_tp;
	args.mp = cur->bc_mp;
	args.fsbno = cur->bc_tp->t_firstblock;
	xfs_rmap_ino_bmbt_owner(&args.oinfo, cur->bc_private.b.ip->i_ino,
			cur->bc_private.b.whichfork);

	if (args.fsbno == NULLFSBLOCK) {
		args.fsbno = be64_to_cpu(start->l);
		args.type = XFS_ALLOCTYPE_START_BNO;
		/*
		 * Make sure there is sufficient room left in the AG to
		 * complete a full tree split for an extent insert.  If
		 * we are converting the middle part of an extent then
		 * we may need space for two tree splits.
		 *
		 * We are relying on the caller to make the correct block
		 * reservation for this operation to succeed.  If the
		 * reservation amount is insufficient then we may fail a
		 * block allocation here and corrupt the filesystem.
		 */
		args.minleft = args.tp->t_blk_res;
	} else if (cur->bc_tp->t_flags & XFS_TRANS_LOWMODE) {
		args.type = XFS_ALLOCTYPE_START_BNO;
	} else {
		args.type = XFS_ALLOCTYPE_NEAR_BNO;
	}

	args.minlen = args.maxlen = args.prod = 1;
	args.wasdel = cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL;
	if (!args.wasdel && args.tp->t_blk_res == 0) {
		error = -ENOSPC;
		goto error0;
	}
	error = xfs_alloc_vextent(&args);
	if (error)
		goto error0;

	if (args.fsbno == NULLFSBLOCK && args.minleft) {
		/*
		 * Could not find an AG with enough free space to satisfy
		 * a full btree split.  Try again and if successful
		 * activate the lowspace algorithm.
		 */
		args.fsbno = 0;
		args.type = XFS_ALLOCTYPE_FIRST_AG;
		error = xfs_alloc_vextent(&args);
		if (error)
			goto error0;
		cur->bc_tp->t_flags |= XFS_TRANS_LOWMODE;
	}
	if (WARN_ON_ONCE(args.fsbno == NULLFSBLOCK)) {
		*stat = 0;
		return 0;
	}

	ASSERT(args.len == 1);
	cur->bc_tp->t_firstblock = args.fsbno;
	cur->bc_private.b.allocated++;
	cur->bc_private.b.ip->i_d.di_nblocks++;
	xfs_trans_log_inode(args.tp, cur->bc_private.b.ip, XFS_ILOG_CORE);
	xfs_trans_mod_dquot_byino(args.tp, cur->bc_private.b.ip,
			XFS_TRANS_DQ_BCOUNT, 1L);

	new->l = cpu_to_be64(args.fsbno);

	*stat = 1;
	return 0;

error0:
	return error;
}
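
/*
 * A sketch of the caller-side reservation that the comment in
 * xfs_bmbt_alloc_block() relies on (helper names from xfs_trans_space.h;
 * treat the exact choice of reservation as an assumption for illustration):
 *
 *	resblks = XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK);
 *	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0, &tp);
 *
 * t_blk_res then feeds args.minleft above, so the AG chosen for the first
 * block still has room for the rest of a worst-case tree split.
 */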

STATIC int
xfs_bmbt_free_block(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = cur->bc_mp;
	struct xfs_inode	*ip = cur->bc_private.b.ip;
	struct xfs_trans	*tp = cur->bc_tp;
	xfs_fsblock_t		fsbno = XFS_DADDR_TO_FSB(mp, XFS_BUF_ADDR(bp));
	struct xfs_owner_info	oinfo;

	xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, cur->bc_private.b.whichfork);
	xfs_bmap_add_free(cur->bc_tp, fsbno, 1, &oinfo);
	ip->i_d.di_nblocks--;

	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
	return 0;
}
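
/*
 * Note that the block freed above is not returned to the free space btrees
 * immediately: xfs_bmap_add_free() only queues the extent on the
 * transaction's deferred-ops list, and the actual free (plus the matching
 * rmap update) is performed when the deferred work items are finished as
 * the transaction chain commits.
 */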

STATIC int
xfs_bmbt_get_minrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	if (level == cur->bc_nlevels - 1) {
		struct xfs_ifork	*ifp;

		ifp = XFS_IFORK_PTR(cur->bc_private.b.ip,
				    cur->bc_private.b.whichfork);

		return xfs_bmbt_maxrecs(cur->bc_mp,
					ifp->if_broot_bytes, level == 0) / 2;
	}

	return cur->bc_mp->m_bmap_dmnr[level != 0];
}
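
/*
 * The division by two above is the usual b+tree fill invariant: a block
 * must stay at least half full, so minrecs is maxrecs / 2.  For the bmbt
 * the leaf and node figures happen to match, because a 16-byte record
 * occupies the same space as an 8-byte key plus an 8-byte pointer.
 */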

int
xfs_bmbt_get_maxrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	if (level == cur->bc_nlevels - 1) {
		struct xfs_ifork	*ifp;

		ifp = XFS_IFORK_PTR(cur->bc_private.b.ip,
				    cur->bc_private.b.whichfork);

		return xfs_bmbt_maxrecs(cur->bc_mp,
					ifp->if_broot_bytes, level == 0);
	}

	return cur->bc_mp->m_bmap_dmxr[level != 0];
}

/*
 * Get the maximum records we could store in the on-disk format.
 *
 * For non-root nodes this is equivalent to xfs_bmbt_get_maxrecs, but
 * for the root node this checks the available space in the dinode fork
 * so that we can resize the in-memory buffer to match it. After a
 * resize to the maximum size this function returns the same value
 * as xfs_bmbt_get_maxrecs for the root node, too.
 */
STATIC int
xfs_bmbt_get_dmaxrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	if (level != cur->bc_nlevels - 1)
		return cur->bc_mp->m_bmap_dmxr[level != 0];
	return xfs_bmdr_maxrecs(cur->bc_private.b.forksize, level == 0);
}

STATIC void
xfs_bmbt_init_key_from_rec(
	union xfs_btree_key	*key,
	union xfs_btree_rec	*rec)
{
	key->bmbt.br_startoff =
		cpu_to_be64(xfs_bmbt_disk_get_startoff(&rec->bmbt));
}

STATIC void
xfs_bmbt_init_high_key_from_rec(
	union xfs_btree_key	*key,
	union xfs_btree_rec	*rec)
{
	key->bmbt.br_startoff = cpu_to_be64(
		xfs_bmbt_disk_get_startoff(&rec->bmbt) +
		xfs_bmbt_disk_get_blockcount(&rec->bmbt) - 1);
}

STATIC void
xfs_bmbt_init_rec_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*rec)
{
	xfs_bmbt_disk_set_all(&rec->bmbt, &cur->bc_rec.b);
}

STATIC void
xfs_bmbt_init_ptr_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr)
{
	ptr->l = 0;
}

STATIC int64_t
xfs_bmbt_key_diff(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*key)
{
	return (int64_t)be64_to_cpu(key->bmbt.br_startoff) -
		cur->bc_rec.b.br_startoff;
}

STATIC int64_t
xfs_bmbt_diff_two_keys(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*k1,
	union xfs_btree_key	*k2)
{
	return (int64_t)be64_to_cpu(k1->bmbt.br_startoff) -
		be64_to_cpu(k2->bmbt.br_startoff);
}

static xfs_failaddr_t
xfs_bmbt_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_target->bt_mount;
	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
	xfs_failaddr_t		fa;
	unsigned int		level;

	if (!xfs_verify_magic(bp, block->bb_magic))
		return __this_address;

	if (xfs_sb_version_hascrc(&mp->m_sb)) {
		/*
		 * XXX: need a better way of verifying the owner here. Right
		 * now just make sure there has been one set.
		 */
		fa = xfs_btree_lblock_v5hdr_verify(bp, XFS_RMAP_OWN_UNKNOWN);
		if (fa)
			return fa;
	}

	/*
	 * numrecs and level verification.
	 *
	 * We don't know what fork we belong to, so just verify that the level
	 * is less than the maximum of the two. Later checks will be more
	 * precise.
	 */
	level = be16_to_cpu(block->bb_level);
	if (level > max(mp->m_bm_maxlevels[0], mp->m_bm_maxlevels[1]))
		return __this_address;

	return xfs_btree_lblock_verify(bp, mp->m_bmap_dmxr[level != 0]);
}

static void
xfs_bmbt_read_verify(
	struct xfs_buf	*bp)
{
	xfs_failaddr_t	fa;

	if (!xfs_btree_lblock_verify_crc(bp))
		xfs_verifier_error(bp, -EFSBADCRC, __this_address);
	else {
		fa = xfs_bmbt_verify(bp);
		if (fa)
			xfs_verifier_error(bp, -EFSCORRUPTED, fa);
	}

	if (bp->b_error)
		trace_xfs_btree_corrupt(bp, _RET_IP_);
}

static void
xfs_bmbt_write_verify(
	struct xfs_buf	*bp)
{
	xfs_failaddr_t	fa;

	fa = xfs_bmbt_verify(bp);
	if (fa) {
		trace_xfs_btree_corrupt(bp, _RET_IP_);
		xfs_verifier_error(bp, -EFSCORRUPTED, fa);
		return;
	}
	xfs_btree_lblock_calc_crc(bp);
}

const struct xfs_buf_ops xfs_bmbt_buf_ops = {
	.name = "xfs_bmbt",
	.magic = { cpu_to_be32(XFS_BMAP_MAGIC),
		   cpu_to_be32(XFS_BMAP_CRC_MAGIC) },
	.verify_read = xfs_bmbt_read_verify,
	.verify_write = xfs_bmbt_write_verify,
	.verify_struct = xfs_bmbt_verify,
};

STATIC int
xfs_bmbt_keys_inorder(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*k1,
	union xfs_btree_key	*k2)
{
	return be64_to_cpu(k1->bmbt.br_startoff) <
		be64_to_cpu(k2->bmbt.br_startoff);
}

STATIC int
xfs_bmbt_recs_inorder(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*r1,
	union xfs_btree_rec	*r2)
{
	return xfs_bmbt_disk_get_startoff(&r1->bmbt) +
		xfs_bmbt_disk_get_blockcount(&r1->bmbt) <=
		xfs_bmbt_disk_get_startoff(&r2->bmbt);
}

static const struct xfs_btree_ops xfs_bmbt_ops = {
	.rec_len		= sizeof(xfs_bmbt_rec_t),
	.key_len		= sizeof(xfs_bmbt_key_t),

	.dup_cursor		= xfs_bmbt_dup_cursor,
	.update_cursor		= xfs_bmbt_update_cursor,
	.alloc_block		= xfs_bmbt_alloc_block,
	.free_block		= xfs_bmbt_free_block,
	.get_maxrecs		= xfs_bmbt_get_maxrecs,
	.get_minrecs		= xfs_bmbt_get_minrecs,
	.get_dmaxrecs		= xfs_bmbt_get_dmaxrecs,
	.init_key_from_rec	= xfs_bmbt_init_key_from_rec,
	.init_high_key_from_rec	= xfs_bmbt_init_high_key_from_rec,
	.init_rec_from_cur	= xfs_bmbt_init_rec_from_cur,
	.init_ptr_from_cur	= xfs_bmbt_init_ptr_from_cur,
	.key_diff		= xfs_bmbt_key_diff,
	.diff_two_keys		= xfs_bmbt_diff_two_keys,
	.buf_ops		= &xfs_bmbt_buf_ops,
	.keys_inorder		= xfs_bmbt_keys_inorder,
	.recs_inorder		= xfs_bmbt_recs_inorder,
};

/*
 * Allocate a new bmap btree cursor.
 */
struct xfs_btree_cur *				/* new bmap btree cursor */
xfs_bmbt_init_cursor(
	struct xfs_mount	*mp,		/* file system mount point */
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* inode owning the btree */
	int			whichfork)	/* data or attr fork */
{
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	struct xfs_btree_cur	*cur;

	ASSERT(whichfork != XFS_COW_FORK);

	cur = kmem_zone_zalloc(xfs_btree_cur_zone, KM_NOFS);

	cur->bc_tp = tp;
	cur->bc_mp = mp;
	cur->bc_nlevels = be16_to_cpu(ifp->if_broot->bb_level) + 1;
	cur->bc_btnum = XFS_BTNUM_BMAP;
	cur->bc_blocklog = mp->m_sb.sb_blocklog;
	cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_bmbt_2);

	cur->bc_ops = &xfs_bmbt_ops;
	cur->bc_flags = XFS_BTREE_LONG_PTRS | XFS_BTREE_ROOT_IN_INODE;
	if (xfs_sb_version_hascrc(&mp->m_sb))
		cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;

	cur->bc_private.b.forksize = XFS_IFORK_SIZE(ip, whichfork);
	cur->bc_private.b.ip = ip;
	cur->bc_private.b.allocated = 0;
	cur->bc_private.b.flags = 0;
	cur->bc_private.b.whichfork = whichfork;

	return cur;
}
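
/*
 * Typical cursor lifecycle, sketched after the pattern used by the bmap
 * code (xfs_bmbt_lookup_eq() and xfs_bmbt_update() are helpers local to
 * xfs_bmap.c; shown here for illustration only):
 *
 *	cur = xfs_bmbt_init_cursor(mp, tp, ip, XFS_DATA_FORK);
 *	error = xfs_bmbt_lookup_eq(cur, &irec, &i);	// position the cursor
 *	if (!error && i == 1)
 *		error = xfs_bmbt_update(cur, &new_irec);	// rewrite record
 *	xfs_btree_del_cursor(cur, error);
 */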

/*
 * Calculate number of records in a bmap btree block.
 */
int
xfs_bmbt_maxrecs(
	struct xfs_mount	*mp,
	int			blocklen,
	int			leaf)
{
	blocklen -= XFS_BMBT_BLOCK_LEN(mp);

	if (leaf)
		return blocklen / sizeof(xfs_bmbt_rec_t);
	return blocklen / (sizeof(xfs_bmbt_key_t) + sizeof(xfs_bmbt_ptr_t));
}
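
/*
 * Worked example (illustrative, assuming 4096-byte blocks): a non-CRC
 * long-format btree block header is 24 bytes and a bmbt record is two
 * __be64 words, so a leaf holds (4096 - 24) / 16 = 254 records, and a
 * node holds (4096 - 24) / (8 + 8) = 254 key/pointer pairs.  With CRCs
 * enabled the 72-byte header drops both figures to 251.
 */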

/*
 * Calculate number of records in a bmap btree inode root.
 */
int
xfs_bmdr_maxrecs(
	int			blocklen,
	int			leaf)
{
	blocklen -= sizeof(xfs_bmdr_block_t);

	if (leaf)
		return blocklen / sizeof(xfs_bmdr_rec_t);
	return blocklen / (sizeof(xfs_bmdr_key_t) + sizeof(xfs_bmdr_ptr_t));
}

/*
 * Change the owner of a btree format fork of the inode passed in.  Change it
 * to the owner that is passed in so that we can change owners before or after
 * we switch forks between inodes.  The operation that the caller is doing
 * will determine whether it needs to change owner before or after the switch.
 *
 * For demand paged transactional modification, the fork switch should be done
 * after reading in all the blocks, modifying them and pinning them in the
 * transaction. For modification when the buffers are already pinned in memory,
 * the fork switch can be done before changing the owner as we won't need to
 * validate the owner until the btree buffers are unpinned and writes can occur
 * again.
 *
 * For recovery based ownership change, there is no transactional context and
 * so a buffer list must be supplied so that we can record the buffers that we
 * modified for the caller to issue IO on.
 */
int
xfs_bmbt_change_owner(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork,
	xfs_ino_t		new_owner,
	struct list_head	*buffer_list)
{
	struct xfs_btree_cur	*cur;
	int			error;

	ASSERT(tp || buffer_list);
	ASSERT(!(tp && buffer_list));
	if (whichfork == XFS_DATA_FORK)
		ASSERT(ip->i_d.di_format == XFS_DINODE_FMT_BTREE);
	else
		ASSERT(ip->i_d.di_aformat == XFS_DINODE_FMT_BTREE);

	cur = xfs_bmbt_init_cursor(ip->i_mount, tp, ip, whichfork);
	if (!cur)
		return -ENOMEM;
	cur->bc_private.b.flags |= XFS_BTCUR_BPRV_INVALID_OWNER;

	error = xfs_btree_change_owner(cur, new_owner, buffer_list);
	xfs_btree_del_cursor(cur, error);
	return error;
}
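
/*
 * The two calling conventions described above, sketched with illustrative
 * arguments:
 *
 *	// transactional modification: the owner change is logged via tp
 *	error = xfs_bmbt_change_owner(tp, ip, XFS_DATA_FORK, new_ino, NULL);
 *
 *	// log recovery: no transaction, the caller writes the buffers
 *	LIST_HEAD(buffer_list);
 *	error = xfs_bmbt_change_owner(NULL, ip, XFS_DATA_FORK, new_ino,
 *				      &buffer_list);
 *	if (!error)
 *		error = xfs_buf_delwri_submit(&buffer_list);
 */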

/* Calculate the bmap btree size for some records. */
unsigned long long
xfs_bmbt_calc_size(
	struct xfs_mount	*mp,
	unsigned long long	len)
{
	return xfs_btree_calc_size(mp->m_bmap_dmnr, len);
}
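
/*
 * Because this sizes the tree using the minimum records per block
 * (m_bmap_dmnr), the result is a worst-case estimate, which is what
 * reservation code wants.  An illustrative use, bounding the bmbt blocks
 * needed to map nextents extents:
 *
 *	unsigned long long	worst = xfs_bmbt_calc_size(mp, nextents);
 */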