/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 */

#ifndef BTRFS_VOLUMES_H
#define BTRFS_VOLUMES_H

#include <linux/bio.h>
#include <linux/sort.h>
#include <linux/btrfs.h>
#include "async-thread.h"

#define BTRFS_MAX_DATA_CHUNK_SIZE	(10ULL * SZ_1G)

extern struct mutex uuid_mutex;

#define BTRFS_STRIPE_LEN	SZ_64K

/* Used by sanity check for btrfs_raid_types. */
#define const_ffs(n) (__builtin_ctzll(n) + 1)

/*
 * The conversion from BTRFS_BLOCK_GROUP_* bits to btrfs_raid_type requires
 * RAID0 to always be the lowest profile bit.
 * Although it's part of the on-disk format and should never change, do extra
 * compile-time sanity checks.
 */
static_assert(const_ffs(BTRFS_BLOCK_GROUP_RAID0) <
	      const_ffs(BTRFS_BLOCK_GROUP_PROFILE_MASK & ~BTRFS_BLOCK_GROUP_RAID0));
static_assert(const_ilog2(BTRFS_BLOCK_GROUP_RAID0) >
	      ilog2(BTRFS_BLOCK_GROUP_TYPE_MASK));

/* ilog2() can handle both constants and variables */
#define BTRFS_BG_FLAG_TO_INDEX(profile) \
	ilog2((profile) >> (ilog2(BTRFS_BLOCK_GROUP_RAID0) - 1))

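/*
 * A worked example, assuming BTRFS_BLOCK_GROUP_RAID0 is the 1ULL << 3 bit
 * (as in the uapi definitions): BTRFS_BG_FLAG_TO_INDEX(BTRFS_BLOCK_GROUP_RAID0)
 * expands to ilog2((1ULL << 3) >> 2) == 1, i.e. RAID0 takes the index right
 * after BTRFS_RAID_SINGLE, and each higher profile bit maps to the next index.
 */
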
enum btrfs_raid_types {
	/* SINGLE is special as it doesn't have an on-disk bit. */
	BTRFS_RAID_SINGLE = 0,

	BTRFS_RAID_RAID0 = BTRFS_BG_FLAG_TO_INDEX(BTRFS_BLOCK_GROUP_RAID0),
	BTRFS_RAID_RAID1 = BTRFS_BG_FLAG_TO_INDEX(BTRFS_BLOCK_GROUP_RAID1),
	BTRFS_RAID_DUP = BTRFS_BG_FLAG_TO_INDEX(BTRFS_BLOCK_GROUP_DUP),
	BTRFS_RAID_RAID10 = BTRFS_BG_FLAG_TO_INDEX(BTRFS_BLOCK_GROUP_RAID10),
	BTRFS_RAID_RAID5 = BTRFS_BG_FLAG_TO_INDEX(BTRFS_BLOCK_GROUP_RAID5),
	BTRFS_RAID_RAID6 = BTRFS_BG_FLAG_TO_INDEX(BTRFS_BLOCK_GROUP_RAID6),
	BTRFS_RAID_RAID1C3 = BTRFS_BG_FLAG_TO_INDEX(BTRFS_BLOCK_GROUP_RAID1C3),
	BTRFS_RAID_RAID1C4 = BTRFS_BG_FLAG_TO_INDEX(BTRFS_BLOCK_GROUP_RAID1C4),

	BTRFS_NR_RAID_TYPES
};

struct btrfs_io_geometry {
	/* remaining bytes before crossing a stripe */
	u64 len;
	/* offset of the logical address in the chunk */
	u64 offset;
	/* length of a single IO stripe */
	u32 stripe_len;
	/* offset of the address in its stripe */
	u32 stripe_offset;
	/* number of the stripe where the address falls */
	u64 stripe_nr;
	/* offset of the raid56 stripe into the chunk */
	u64 raid56_stripe_offset;
};
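
/*
 * A rough sketch of how these fields relate (an approximation of what
 * btrfs_get_io_geometry(), declared below, computes): for a logical address
 * inside a chunk that starts at chunk_start,
 *
 *	offset = logical - chunk_start;
 *	stripe_nr = offset / stripe_len;
 *	stripe_offset = offset - stripe_nr * stripe_len;
 *	len = min(requested_len, stripe_len - stripe_offset);
 *
 * i.e. len is clamped so a single IO never crosses a stripe boundary
 * (RAID56 adds raid56_stripe_offset on top of this).
 */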

/*
 * Use sequence counter to get consistent device stat data on
 * 32-bit processors.
 */
#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
#include <linux/seqlock.h>
#define __BTRFS_NEED_DEVICE_DATA_ORDERED
#define btrfs_device_data_ordered_init(device) \
	seqcount_init(&device->data_seqcount)
#else
#define btrfs_device_data_ordered_init(device) do { } while (0)
#endif

#define BTRFS_DEV_STATE_WRITEABLE	(0)
#define BTRFS_DEV_STATE_IN_FS_METADATA	(1)
#define BTRFS_DEV_STATE_MISSING		(2)
#define BTRFS_DEV_STATE_REPLACE_TGT	(3)
#define BTRFS_DEV_STATE_FLUSH_SENT	(4)
#define BTRFS_DEV_STATE_NO_READA	(5)

struct btrfs_zoned_device_info;

struct btrfs_device {
	struct list_head dev_list; /* device_list_mutex */
	struct list_head dev_alloc_list; /* chunk mutex */
	struct list_head post_commit_list; /* chunk mutex */
	struct btrfs_fs_devices *fs_devices;
	struct btrfs_fs_info *fs_info;

	struct rcu_string __rcu *name;

	u64 generation;

	struct block_device *bdev;

	struct btrfs_zoned_device_info *zone_info;

	/* the mode sent to blkdev_get */
	fmode_t mode;

	/*
	 * Device's major-minor number. Must be set even if the device is not
	 * opened (bdev == NULL), unless the device is missing.
	 */
	dev_t devt;
	unsigned long dev_state;
	blk_status_t last_flush_error;

#ifdef __BTRFS_NEED_DEVICE_DATA_ORDERED
	seqcount_t data_seqcount;
#endif

	/* the internal btrfs device id */
	u64 devid;

	/* size of the device in memory */
	u64 total_bytes;

	/* size of the device on disk */
	u64 disk_total_bytes;

	/* bytes used */
	u64 bytes_used;

	/* optimal io alignment for this device */
	u32 io_align;

	/* optimal io width for this device */
	u32 io_width;
	/* type and info about this device */
	u64 type;

	/* minimal io size for this device */
	u32 sector_size;

	/* physical drive uuid (or lvm uuid) */
	u8 uuid[BTRFS_UUID_SIZE];

	/*
	 * Size of the device on the current transaction.
	 *
	 * This variant is updated when committing the transaction,
	 * and protected by the chunk mutex.
	 */
	u64 commit_total_bytes;

	/* bytes used on the current transaction */
	u64 commit_bytes_used;

	/* Bio used for flushing device barriers */
	struct bio flush_bio;
	struct completion flush_wait;

	/* per-device scrub information */
	struct scrub_ctx *scrub_ctx;

	/*
	 * Disk I/O failure stats. For a detailed description refer to
	 * enum btrfs_dev_stat_values in ioctl.h.
	 */
	int dev_stats_valid;

	/* Counter to record the change of device stats */
	atomic_t dev_stats_ccnt;
	atomic_t dev_stat_values[BTRFS_DEV_STAT_VALUES_MAX];

	struct extent_io_tree alloc_state;

	struct completion kobj_unregister;
	/* For sysfs/FSID/devinfo/devid/ */
	struct kobject devid_kobj;

	/* Bandwidth limit for scrub, in bytes */
	u64 scrub_speed_max;
};

/*
 * If we read those variants under their own lock, we needn't use the
 * following helpers; reading them directly is safe.
 */
#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
#define BTRFS_DEVICE_GETSET_FUNCS(name) \
static inline u64 \
btrfs_device_get_##name(const struct btrfs_device *dev) \
{ \
	u64 size; \
	unsigned int seq; \
\
	do { \
		seq = read_seqcount_begin(&dev->data_seqcount); \
		size = dev->name; \
	} while (read_seqcount_retry(&dev->data_seqcount, seq)); \
	return size; \
} \
\
static inline void \
btrfs_device_set_##name(struct btrfs_device *dev, u64 size) \
{ \
	preempt_disable(); \
	write_seqcount_begin(&dev->data_seqcount); \
	dev->name = size; \
	write_seqcount_end(&dev->data_seqcount); \
	preempt_enable(); \
}
#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPTION)
#define BTRFS_DEVICE_GETSET_FUNCS(name) \
static inline u64 \
btrfs_device_get_##name(const struct btrfs_device *dev) \
{ \
	u64 size; \
\
	preempt_disable(); \
	size = dev->name; \
	preempt_enable(); \
	return size; \
} \
\
static inline void \
btrfs_device_set_##name(struct btrfs_device *dev, u64 size) \
{ \
	preempt_disable(); \
	dev->name = size; \
	preempt_enable(); \
}
#else
#define BTRFS_DEVICE_GETSET_FUNCS(name) \
static inline u64 \
btrfs_device_get_##name(const struct btrfs_device *dev) \
{ \
	return dev->name; \
} \
\
static inline void \
btrfs_device_set_##name(struct btrfs_device *dev, u64 size) \
{ \
	dev->name = size; \
}
#endif

BTRFS_DEVICE_GETSET_FUNCS(total_bytes);
BTRFS_DEVICE_GETSET_FUNCS(disk_total_bytes);
BTRFS_DEVICE_GETSET_FUNCS(bytes_used);
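
/*
 * Usage sketch (hypothetical caller): on 32-bit SMP kernels the u64 sizes
 * cannot be read atomically, so both sides go through the generated helpers:
 *
 *	u64 size = btrfs_device_get_total_bytes(dev);
 *	...
 *	btrfs_device_set_total_bytes(dev, new_size);
 *
 * On 64-bit kernels these compile down to plain loads and stores.
 */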

enum btrfs_chunk_allocation_policy {
	BTRFS_CHUNK_ALLOC_REGULAR,
	BTRFS_CHUNK_ALLOC_ZONED,
};

/*
 * Read policies for mirrored block group profiles. A read picks the stripe
 * based on these policies.
 */
enum btrfs_read_policy {
	/* Use process PID to choose the stripe */
	BTRFS_READ_POLICY_PID,
	BTRFS_NR_READ_POLICY,
};

struct btrfs_fs_devices {
	u8 fsid[BTRFS_FSID_SIZE]; /* FS specific uuid */
	u8 metadata_uuid[BTRFS_FSID_SIZE];
	bool fsid_change;
	struct list_head fs_list;

	/*
	 * Number of devices under this fsid, including missing and
	 * replace-target devices and excluding seed devices.
	 */
	u64 num_devices;

	/*
	 * The number of devices that were successfully opened, including
	 * the replace-target and excluding seed devices.
	 */
	u64 open_devices;

	/* The number of devices that are under the chunk allocation list. */
	u64 rw_devices;

	/* Count of missing devices under this fsid, excluding seed devices. */
	u64 missing_devices;
	u64 total_rw_bytes;

	/*
	 * Count of devices from btrfs_super_block::num_devices for this fsid,
	 * which includes the seed device and excludes the transient
	 * replace-target device.
	 */
	u64 total_devices;

	/* Highest generation number of seen devices */
	u64 latest_generation;

	/*
	 * The mount device, or the device with the highest generation after
	 * removal or replace.
	 */
	struct btrfs_device *latest_dev;

	/*
	 * All of the devices in the FS, protected by a mutex so we can
	 * safely walk the list to write out the supers without worrying
	 * about adds/removes by the multi-device code. Scrubbing the super
	 * block can kick off super writes while holding this mutex lock.
	 */
	struct mutex device_list_mutex;

	/* List of all devices, protected by device_list_mutex */
	struct list_head devices;

	/*
	 * Devices which can satisfy space allocation. Protected by
	 * chunk_mutex.
	 */
	struct list_head alloc_list;

	struct list_head seed_list;
	bool seeding;

	int opened;

	/*
	 * Set when we find or add a device that doesn't have the
	 * nonrot flag set.
	 */
	bool rotating;

	struct btrfs_fs_info *fs_info;
	/* sysfs kobjects */
	struct kobject fsid_kobj;
	struct kobject *devices_kobj;
	struct kobject *devinfo_kobj;
	struct completion kobj_unregister;

	enum btrfs_chunk_allocation_policy chunk_alloc_policy;

	/* Policy used to read the mirrored stripes */
	enum btrfs_read_policy read_policy;
};

#define BTRFS_BIO_INLINE_CSUM_SIZE	64

#define BTRFS_MAX_DEVS(info) ((BTRFS_MAX_ITEM_SIZE(info) \
			- sizeof(struct btrfs_chunk)) \
			/ sizeof(struct btrfs_stripe) + 1)

#define BTRFS_MAX_DEVS_SYS_CHUNK ((BTRFS_SYSTEM_CHUNK_ARRAY_SIZE \
			- 2 * sizeof(struct btrfs_disk_key) \
			- 2 * sizeof(struct btrfs_chunk)) \
			/ sizeof(struct btrfs_stripe) + 1)

/*
 * Maximum number of sectors for a single bio, to limit the size of the
 * checksum array. This matches the number of bio_vecs per bio and thus the
 * I/O size for buffered I/O.
 */
#define BTRFS_MAX_BIO_SECTORS	(256)

/*
 * Additional info to pass along with the bio.
 *
 * Mostly for btrfs specific features like csum and mirror_num.
 */
struct btrfs_bio {
	unsigned int mirror_num;

	/* for direct I/O */
	u64 file_offset;

	/* @device is for stripe IO submission. */
	struct btrfs_device *device;
	u8 *csum;
	u8 csum_inline[BTRFS_BIO_INLINE_CSUM_SIZE];
	struct bvec_iter iter;

	/* For read end I/O handling */
	struct work_struct end_io_work;

	/*
	 * This member must come last, bio_alloc_bioset will allocate enough
	 * bytes for the entire btrfs_bio but relies on bio being last.
	 */
	struct bio bio;
};

static inline struct btrfs_bio *btrfs_bio(struct bio *bio)
{
	return container_of(bio, struct btrfs_bio, bio);
}
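
/*
 * Usage sketch (my_end_io is hypothetical): since struct bio is the last
 * member of struct btrfs_bio, an end_io handler can recover the containing
 * btrfs_bio from the bio it is handed:
 *
 *	static void my_end_io(struct bio *bio)
 *	{
 *		struct btrfs_bio *bbio = btrfs_bio(bio);
 *
 *		// use bbio->mirror_num, bbio->csum, ...
 *	}
 */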

static inline void btrfs_bio_free_csum(struct btrfs_bio *bbio)
{
	if (bbio->csum != bbio->csum_inline) {
		kfree(bbio->csum);
		bbio->csum = NULL;
	}
}

/*
 * Iterate through a btrfs_bio (@bbio) on a per-sector basis.
 *
 * bvl        - struct bio_vec
 * bbio       - struct btrfs_bio
 * iter       - struct bvec_iter
 * bio_offset - unsigned int
 */
#define btrfs_bio_for_each_sector(fs_info, bvl, bbio, iter, bio_offset) \
	for ((iter) = (bbio)->iter, (bio_offset) = 0; \
	     (iter).bi_size && \
	     (((bvl) = bio_iter_iovec((&(bbio)->bio), (iter))), 1); \
	     (bio_offset) += fs_info->sectorsize, \
	     bio_advance_iter_single(&(bbio)->bio, &(iter), \
				     (fs_info)->sectorsize))

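/*
 * Usage sketch (hypothetical caller), e.g. verifying one sector at a time:
 *
 *	struct bio_vec bvl;
 *	struct bvec_iter iter;
 *	u32 bio_offset;
 *
 *	btrfs_bio_for_each_sector(fs_info, bvl, bbio, iter, bio_offset) {
 *		// bvl covers exactly one sector; bio_offset is its byte
 *		// offset from the start of the bio
 *	}
 */
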
struct btrfs_io_stripe {
	struct btrfs_device *dev;
	union {
		/* Block mapping */
		u64 physical;
		/* For the endio handler */
		struct btrfs_io_context *bioc;
	};
};

struct btrfs_discard_stripe {
	struct btrfs_device *dev;
	u64 physical;
	u64 length;
};

/*
 * Context for IO submission for a device stripe.
 *
 * - Track the unfinished mirrors for mirror based profiles
 *   Mirror based profiles are SINGLE/DUP/RAID1/RAID10.
 *
 * - Contain the logical -> physical mapping info
 *   Used by submit_stripe_bio() for mapping the logical bio
 *   into a physical device address.
 *
 * - Contain device replace info
 *   Used by handle_ops_on_dev_replace() to copy logical bios
 *   into the new device.
 *
 * - Contain RAID56 full stripe logical bytenrs
 */
struct btrfs_io_context {
	refcount_t refs;
	atomic_t stripes_pending;
	struct btrfs_fs_info *fs_info;
	u64 map_type; /* get from map_lookup->type */
	bio_end_io_t *end_io;
	struct bio *orig_bio;
	void *private;
	atomic_t error;
	int max_errors;
	int num_stripes;
	int mirror_num;
	int num_tgtdevs;
	int *tgtdev_map;
	/*
	 * Logical block numbers for the start of each stripe.
	 * The last one or two are p/q. These are sorted,
	 * so raid_map[0] is the start of our full stripe.
	 */
	u64 *raid_map;
	struct btrfs_io_stripe stripes[];
};

struct btrfs_device_info {
	struct btrfs_device *dev;
	u64 dev_offset;
	u64 max_avail;
	u64 total_avail;
};

struct btrfs_raid_attr {
	u8 sub_stripes;		/* sub_stripes info for map */
	u8 dev_stripes;		/* stripes per dev */
	u8 devs_max;		/* max devs to use */
	u8 devs_min;		/* min devs needed */
	u8 tolerated_failures;	/* max tolerated fail devs */
	u8 devs_increment;	/* ndevs has to be a multiple of this */
	u8 ncopies;		/* how many copies of the data there are */
	u8 nparity;		/* number of stripes worth of bytes to store
				 * parity information */
	u8 mindev_error;	/* error code if the min devs requirement is unmet */
	const char raid_name[8]; /* name of the raid */
	u64 bg_flag;		/* block group flag of the raid */
};

extern const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES];

struct map_lookup {
	u64 type;
	int io_align;
	int io_width;
	u32 stripe_len;
	int num_stripes;
	int sub_stripes;
	int verified_stripes; /* For mount time dev extent verification */
	struct btrfs_io_stripe stripes[];
};

#define map_lookup_size(n) (sizeof(struct map_lookup) + \
			    (sizeof(struct btrfs_io_stripe) * (n)))

struct btrfs_balance_args;
struct btrfs_balance_progress;
struct btrfs_balance_control {
	struct btrfs_balance_args data;
	struct btrfs_balance_args meta;
	struct btrfs_balance_args sys;

	u64 flags;

	struct btrfs_balance_progress stat;
};

/*
 * Search for a given device by the set parameters
 */
struct btrfs_dev_lookup_args {
	u64 devid;
	u8 *uuid;
	u8 *fsid;
	bool missing;
};

/* We have to initialize to -1 because BTRFS_DEV_REPLACE_DEVID is 0 */
#define BTRFS_DEV_LOOKUP_ARGS_INIT { .devid = (u64)-1 }

#define BTRFS_DEV_LOOKUP_ARGS(name) \
	struct btrfs_dev_lookup_args name = BTRFS_DEV_LOOKUP_ARGS_INIT

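/*
 * Usage sketch (hypothetical caller): to look up a device by devid alone,
 * leave uuid/fsid NULL so they are ignored:
 *
 *	BTRFS_DEV_LOOKUP_ARGS(args);
 *
 *	args.devid = devid;
 *	device = btrfs_find_device(fs_info->fs_devices, &args);
 *
 * (btrfs_find_device() is declared below.)
 */
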
enum btrfs_map_op {
	BTRFS_MAP_READ,
	BTRFS_MAP_WRITE,
	BTRFS_MAP_DISCARD,
	BTRFS_MAP_GET_READ_MIRRORS,
};

static inline enum btrfs_map_op btrfs_op(struct bio *bio)
{
	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
		return BTRFS_MAP_DISCARD;
	case REQ_OP_WRITE:
	case REQ_OP_ZONE_APPEND:
		return BTRFS_MAP_WRITE;
	default:
		WARN_ON_ONCE(1);
		fallthrough;
	case REQ_OP_READ:
		return BTRFS_MAP_READ;
	}
}

void btrfs_get_bioc(struct btrfs_io_context *bioc);
void btrfs_put_bioc(struct btrfs_io_context *bioc);
int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
		    u64 logical, u64 *length,
		    struct btrfs_io_context **bioc_ret, int mirror_num);
int btrfs_map_sblock(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
		     u64 logical, u64 *length,
		     struct btrfs_io_context **bioc_ret);
struct btrfs_discard_stripe *btrfs_map_discard(struct btrfs_fs_info *fs_info,
					       u64 logical, u64 *length_ret,
					       u32 *num_stripes);
int btrfs_get_io_geometry(struct btrfs_fs_info *fs_info, struct extent_map *map,
			  enum btrfs_map_op op, u64 logical,
			  struct btrfs_io_geometry *io_geom);
int btrfs_read_sys_array(struct btrfs_fs_info *fs_info);
int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info);
struct btrfs_block_group *btrfs_create_chunk(struct btrfs_trans_handle *trans,
					     u64 type);
void btrfs_mapping_tree_free(struct extent_map_tree *tree);
void btrfs_submit_bio(struct btrfs_fs_info *fs_info, struct bio *bio, int mirror_num);
int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
		       fmode_t flags, void *holder);
struct btrfs_device *btrfs_scan_one_device(const char *path,
					   fmode_t flags, void *holder);
int btrfs_forget_devices(dev_t devt);
void btrfs_close_devices(struct btrfs_fs_devices *fs_devices);
void btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices);
void btrfs_assign_next_active_device(struct btrfs_device *device,
				     struct btrfs_device *this_dev);
struct btrfs_device *btrfs_find_device_by_devspec(struct btrfs_fs_info *fs_info,
						  u64 devid,
						  const char *devpath);
int btrfs_get_dev_args_from_path(struct btrfs_fs_info *fs_info,
				 struct btrfs_dev_lookup_args *args,
				 const char *path);
struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
					const u64 *devid,
					const u8 *uuid);
void btrfs_put_dev_args_from_path(struct btrfs_dev_lookup_args *args);
void btrfs_free_device(struct btrfs_device *device);
int btrfs_rm_device(struct btrfs_fs_info *fs_info,
		    struct btrfs_dev_lookup_args *args,
		    struct block_device **bdev, fmode_t *mode);
void __exit btrfs_cleanup_fs_uuids(void);
int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len);
int btrfs_grow_device(struct btrfs_trans_handle *trans,
		      struct btrfs_device *device, u64 new_size);
struct btrfs_device *btrfs_find_device(const struct btrfs_fs_devices *fs_devices,
				       const struct btrfs_dev_lookup_args *args);
int btrfs_shrink_device(struct btrfs_device *device, u64 new_size);
int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *path);
int btrfs_balance(struct btrfs_fs_info *fs_info,
		  struct btrfs_balance_control *bctl,
		  struct btrfs_ioctl_balance_args *bargs);
void btrfs_describe_block_groups(u64 flags, char *buf, u32 size_buf);
int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info);
int btrfs_recover_balance(struct btrfs_fs_info *fs_info);
int btrfs_pause_balance(struct btrfs_fs_info *fs_info);
int btrfs_relocate_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset);
int btrfs_cancel_balance(struct btrfs_fs_info *fs_info);
int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info);
int btrfs_uuid_scan_kthread(void *data);
bool btrfs_chunk_writeable(struct btrfs_fs_info *fs_info, u64 chunk_offset);
int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
			 u64 *start, u64 *max_avail);
void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index);
int btrfs_get_dev_stats(struct btrfs_fs_info *fs_info,
			struct btrfs_ioctl_get_dev_stats *stats);
void btrfs_init_devices_late(struct btrfs_fs_info *fs_info);
int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info);
int btrfs_run_dev_stats(struct btrfs_trans_handle *trans);
void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_device *srcdev);
void btrfs_rm_dev_replace_free_srcdev(struct btrfs_device *srcdev);
void btrfs_destroy_dev_replace_tgtdev(struct btrfs_device *tgtdev);
int btrfs_is_parity_mirror(struct btrfs_fs_info *fs_info,
			   u64 logical, u64 len);
unsigned long btrfs_full_stripe_len(struct btrfs_fs_info *fs_info,
				    u64 logical);
u64 btrfs_calc_stripe_length(const struct extent_map *em);
int btrfs_nr_parity_stripes(u64 type);
int btrfs_chunk_alloc_add_chunk_item(struct btrfs_trans_handle *trans,
				     struct btrfs_block_group *bg);
int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset);
struct extent_map *btrfs_get_chunk_map(struct btrfs_fs_info *fs_info,
				       u64 logical, u64 length);
void btrfs_release_disk_super(struct btrfs_super_block *super);

static inline void btrfs_dev_stat_inc(struct btrfs_device *dev,
				      int index)
{
	atomic_inc(dev->dev_stat_values + index);
	/*
	 * This memory barrier orders stores updating statistics before stores
	 * updating dev_stats_ccnt.
	 *
	 * It pairs with smp_rmb() in btrfs_run_dev_stats().
	 */
	smp_mb__before_atomic();
	atomic_inc(&dev->dev_stats_ccnt);
}

static inline int btrfs_dev_stat_read(struct btrfs_device *dev,
				      int index)
{
	return atomic_read(dev->dev_stat_values + index);
}

static inline int btrfs_dev_stat_read_and_reset(struct btrfs_device *dev,
						int index)
{
	int ret;

	ret = atomic_xchg(dev->dev_stat_values + index, 0);
	/*
	 * atomic_xchg implies a full memory barrier as per atomic_t.txt:
	 * - RMW operations that have a return value are fully ordered;
	 *
	 * This implicit memory barrier is paired with the smp_rmb() in
	 * btrfs_run_dev_stats().
	 */
	atomic_inc(&dev->dev_stats_ccnt);
	return ret;
}

static inline void btrfs_dev_stat_set(struct btrfs_device *dev,
				      int index, unsigned long val)
{
	atomic_set(dev->dev_stat_values + index, val);
	/*
	 * This memory barrier orders stores updating statistics before stores
	 * updating dev_stats_ccnt.
	 *
	 * It pairs with smp_rmb() in btrfs_run_dev_stats().
	 */
	smp_mb__before_atomic();
	atomic_inc(&dev->dev_stats_ccnt);
}
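
/*
 * Reader-side sketch (the retry loop is an assumption, mirroring the
 * smp_rmb() pairing described above): a consistent snapshot of the stats
 * can be taken by sampling dev_stats_ccnt around the reads:
 *
 *	do {
 *		ccnt = atomic_read(&dev->dev_stats_ccnt);
 *		smp_rmb();
 *		// read the values via btrfs_dev_stat_read()
 *	} while (ccnt != atomic_read(&dev->dev_stats_ccnt));
 */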

void btrfs_commit_device_sizes(struct btrfs_transaction *trans);

struct list_head * __attribute_const__ btrfs_get_fs_uuids(void);
bool btrfs_check_rw_degradable(struct btrfs_fs_info *fs_info,
			       struct btrfs_device *failing_dev);
void btrfs_scratch_superblocks(struct btrfs_fs_info *fs_info,
			       struct block_device *bdev,
			       const char *device_path);

enum btrfs_raid_types __attribute_const__ btrfs_bg_flags_to_raid_index(u64 flags);
int btrfs_bg_type_to_factor(u64 flags);
const char *btrfs_bg_type_to_raid_name(u64 flags);
int btrfs_verify_dev_extents(struct btrfs_fs_info *fs_info);
bool btrfs_repair_one_zone(struct btrfs_fs_info *fs_info, u64 logical);

#endif