/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 */

#ifndef BTRFS_VOLUMES_H
#define BTRFS_VOLUMES_H

#include <linux/bio.h>
#include <linux/sort.h>
#include <linux/btrfs.h>
#include "async-thread.h"

#define BTRFS_MAX_DATA_CHUNK_SIZE	(10ULL * SZ_1G)

extern struct mutex uuid_mutex;

#define BTRFS_STRIPE_LEN	SZ_64K

struct buffer_head;
struct btrfs_pending_bios {
	struct bio *head;
	struct bio *tail;
};

/*
 * Use sequence counter to get consistent device stat data on
 * 32-bit processors.
 */
#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
#include <linux/seqlock.h>
#define __BTRFS_NEED_DEVICE_DATA_ORDERED
#define btrfs_device_data_ordered_init(device)	\
	seqcount_init(&device->data_seqcount)
#else
#define btrfs_device_data_ordered_init(device) do { } while (0)
#endif

#define BTRFS_DEV_STATE_WRITEABLE	(0)
#define BTRFS_DEV_STATE_IN_FS_METADATA	(1)
#define BTRFS_DEV_STATE_MISSING		(2)
#define BTRFS_DEV_STATE_REPLACE_TGT	(3)
#define BTRFS_DEV_STATE_FLUSH_SENT	(4)

struct btrfs_device {
	struct list_head dev_list;
	struct list_head dev_alloc_list;
	struct btrfs_fs_devices *fs_devices;
	struct btrfs_fs_info *fs_info;

	struct rcu_string *name;

	u64 generation;

	spinlock_t io_lock ____cacheline_aligned;
	int running_pending;
	/* regular prio bios */
	struct btrfs_pending_bios pending_bios;
	/* sync bios */
	struct btrfs_pending_bios pending_sync_bios;

	struct block_device *bdev;

	/* the mode sent to blkdev_get */
	fmode_t mode;

	unsigned long dev_state;
	blk_status_t last_flush_error;
	int flush_bio_sent;

#ifdef __BTRFS_NEED_DEVICE_DATA_ORDERED
	seqcount_t data_seqcount;
#endif

	/* the internal btrfs device id */
	u64 devid;

	/* size of the device in memory */
	u64 total_bytes;

	/* size of the device on disk */
	u64 disk_total_bytes;

	/* bytes used */
	u64 bytes_used;

	/* optimal io alignment for this device */
	u32 io_align;

	/* optimal io width for this device */
	u32 io_width;
	/* type and info about this device */
	u64 type;

	/* minimal io size for this device */
	u32 sector_size;

	/* physical drive uuid (or lvm uuid) */
	u8 uuid[BTRFS_UUID_SIZE];

	/*
	 * size of the device on the current transaction
	 *
	 * This value is updated when committing the transaction,
	 * and protected by device_list_mutex
	 */
	u64 commit_total_bytes;

	/* bytes used on the current transaction */
	u64 commit_bytes_used;
	/*
	 * used to manage the device which is resized
	 *
	 * It is protected by chunk_lock.
	 */
	struct list_head resized_list;

	/* for sending down flush barriers */
	struct bio *flush_bio;
	struct completion flush_wait;

	/* per-device scrub information */
	struct scrub_ctx *scrub_ctx;

	struct btrfs_work work;
	struct rcu_head rcu;

	/* readahead state */
	atomic_t reada_in_flight;
	u64 reada_next;
	struct reada_zone *reada_curr_zone;
	struct radix_tree_root reada_zones;
	struct radix_tree_root reada_extents;

	/* disk I/O failure stats. For detailed description refer to
	 * enum btrfs_dev_stat_values in ioctl.h */
	int dev_stats_valid;

	/* Counter to record the change of device stats */
	atomic_t dev_stats_ccnt;
	atomic_t dev_stat_values[BTRFS_DEV_STAT_VALUES_MAX];
};

/*
 * If we read these values under the lock that protects them, we don't need
 * the following helpers; reading them directly is safe.
 */
#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
#define BTRFS_DEVICE_GETSET_FUNCS(name)					\
static inline u64							\
btrfs_device_get_##name(const struct btrfs_device *dev)		\
{									\
	u64 size;							\
	unsigned int seq;						\
									\
	do {								\
		seq = read_seqcount_begin(&dev->data_seqcount);		\
		size = dev->name;					\
	} while (read_seqcount_retry(&dev->data_seqcount, seq));	\
	return size;							\
}									\
									\
static inline void							\
btrfs_device_set_##name(struct btrfs_device *dev, u64 size)		\
{									\
	preempt_disable();						\
	write_seqcount_begin(&dev->data_seqcount);			\
	dev->name = size;						\
	write_seqcount_end(&dev->data_seqcount);			\
	preempt_enable();						\
}
#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPT)
#define BTRFS_DEVICE_GETSET_FUNCS(name)					\
static inline u64							\
btrfs_device_get_##name(const struct btrfs_device *dev)		\
{									\
	u64 size;							\
									\
	preempt_disable();						\
	size = dev->name;						\
	preempt_enable();						\
	return size;							\
}									\
									\
static inline void							\
btrfs_device_set_##name(struct btrfs_device *dev, u64 size)		\
{									\
	preempt_disable();						\
	dev->name = size;						\
	preempt_enable();						\
}
#else
#define BTRFS_DEVICE_GETSET_FUNCS(name)					\
static inline u64							\
btrfs_device_get_##name(const struct btrfs_device *dev)		\
{									\
	return dev->name;						\
}									\
									\
static inline void							\
btrfs_device_set_##name(struct btrfs_device *dev, u64 size)		\
{									\
	dev->name = size;						\
}
#endif

BTRFS_DEVICE_GETSET_FUNCS(total_bytes);
BTRFS_DEVICE_GETSET_FUNCS(disk_total_bytes);
BTRFS_DEVICE_GETSET_FUNCS(bytes_used);
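/*
 * Illustrative sketch only: the macro above expands to accessors such as
 * btrfs_device_get_total_bytes() and btrfs_device_set_total_bytes().  A
 * hypothetical caller that wants a snapshot safe from torn 64-bit reads on
 * 32-bit SMP kernels would simply do:
 *
 *	u64 size = btrfs_device_get_total_bytes(dev);
 *
 * Callers that already hold the lock protecting the field may read it
 * directly, as noted above.
 */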

struct btrfs_fs_devices {
	u8 fsid[BTRFS_FSID_SIZE]; /* FS specific uuid */
	u8 metadata_uuid[BTRFS_FSID_SIZE];
	bool fsid_change;
	struct list_head fs_list;

	u64 num_devices;
	u64 open_devices;
	u64 rw_devices;
	u64 missing_devices;
	u64 total_rw_bytes;
	u64 total_devices;

	/* Highest generation number of seen devices */
	u64 latest_generation;

	struct block_device *latest_bdev;

	/* all of the devices in the FS, protected by a mutex
	 * so we can safely walk it to write out the supers without
	 * worrying about adds/removes by the multi-device code.
	 * Scrubbing the super can kick off super writes while
	 * holding this mutex lock.
	 */
	struct mutex device_list_mutex;
	struct list_head devices;

	struct list_head resized_devices;
	/* devices not currently being allocated */
	struct list_head alloc_list;

	struct btrfs_fs_devices *seed;
	int seeding;

	int opened;

	/* set when we find or add a device that doesn't have the
	 * nonrot flag set
	 */
	int rotating;

	struct btrfs_fs_info *fs_info;
	/* sysfs kobjects */
	struct kobject fsid_kobj;
	struct kobject *device_dir_kobj;
	struct completion kobj_unregister;
};

#define BTRFS_BIO_INLINE_CSUM_SIZE	64

/*
 * we need the mirror number and stripe index to be passed around
 * the call chain while we are processing end_io (especially errors).
 * Really, what we need is a btrfs_bio structure that has this info
 * and is properly sized with its stripe array, but we're not there
 * quite yet.  We have our own btrfs bioset, and all of the bios
 * we allocate are actually btrfs_io_bios.  We'll cram as much of
 * struct btrfs_bio as we can into this over time.
 */
struct btrfs_io_bio {
	unsigned int mirror_num;
	unsigned int stripe_index;
	u64 logical;
	u8 *csum;
	u8 csum_inline[BTRFS_BIO_INLINE_CSUM_SIZE];
	struct bvec_iter iter;
	/*
	 * This member must come last, bio_alloc_bioset will allocate enough
	 * bytes for entire btrfs_io_bio but relies on bio being last.
	 */
	struct bio bio;
};

static inline struct btrfs_io_bio *btrfs_io_bio(struct bio *bio)
{
	return container_of(bio, struct btrfs_io_bio, bio);
}

static inline void btrfs_io_bio_free_csum(struct btrfs_io_bio *io_bio)
{
	if (io_bio->csum != io_bio->csum_inline) {
		kfree(io_bio->csum);
		io_bio->csum = NULL;
	}
}
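/*
 * Illustrative sketch only: btrfs_io_bio_free_csum() pairs with an allocation
 * pattern along these lines (simplified; "csum_bytes" is a hypothetical name
 * for the space needed by the checksums of this bio):
 *
 *	if (csum_bytes <= BTRFS_BIO_INLINE_CSUM_SIZE)
 *		io_bio->csum = io_bio->csum_inline;
 *	else
 *		io_bio->csum = kmalloc(csum_bytes, GFP_NOFS);
 *
 * Freeing through the helper is then safe in both cases, because the inline
 * buffer is never handed to kfree().
 */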

struct btrfs_bio_stripe {
	struct btrfs_device *dev;
	u64 physical;
	u64 length; /* only used for discard mappings */
};

struct btrfs_bio {
	refcount_t refs;
	atomic_t stripes_pending;
	struct btrfs_fs_info *fs_info;
	u64 map_type; /* get from map_lookup->type */
	bio_end_io_t *end_io;
	struct bio *orig_bio;
	unsigned long flags;
	void *private;
	atomic_t error;
	int max_errors;
	int num_stripes;
	int mirror_num;
	int num_tgtdevs;
	int *tgtdev_map;
	/*
	 * logical block numbers for the start of each stripe
	 * The last one or two are p/q.  These are sorted,
	 * so raid_map[0] is the start of our full stripe
	 */
	u64 *raid_map;
	struct btrfs_bio_stripe stripes[];
};

struct btrfs_device_info {
	struct btrfs_device *dev;
	u64 dev_offset;
	u64 max_avail;
	u64 total_avail;
};

struct btrfs_raid_attr {
	int sub_stripes;	/* sub_stripes info for map */
	int dev_stripes;	/* stripes per dev */
	int devs_max;		/* max devs to use */
	int devs_min;		/* min devs needed */
	int tolerated_failures; /* max tolerated fail devs */
	int devs_increment;	/* ndevs has to be a multiple of this */
	int ncopies;		/* how many copies the data has */
	int nparity;		/* number of stripes worth of bytes to store
				 * parity information */
	int mindev_error;	/* error code if min devs requisite is unmet */
	const char raid_name[8]; /* name of the raid */
	u64 bg_flag;		/* block group flag of the raid */
};

extern const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES];

struct map_lookup {
	u64 type;
	int io_align;
	int io_width;
	u64 stripe_len;
	int num_stripes;
	int sub_stripes;
	int verified_stripes; /* For mount time dev extent verification */
	struct btrfs_bio_stripe stripes[];
};

#define map_lookup_size(n) (sizeof(struct map_lookup) + \
			    (sizeof(struct btrfs_bio_stripe) * (n)))
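/*
 * Illustrative sketch only: map_lookup ends in a flexible array of stripes,
 * so a mapping for "num_stripes" stripes is typically allocated with the
 * macro above, e.g.:
 *
 *	struct map_lookup *map;
 *
 *	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
 *	if (!map)
 *		return -ENOMEM;
 *	map->num_stripes = num_stripes;
 */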

struct btrfs_balance_args;
struct btrfs_balance_progress;
struct btrfs_balance_control {
	struct btrfs_balance_args data;
	struct btrfs_balance_args meta;
	struct btrfs_balance_args sys;

	u64 flags;

	struct btrfs_balance_progress stat;
};

enum btrfs_map_op {
	BTRFS_MAP_READ,
	BTRFS_MAP_WRITE,
	BTRFS_MAP_DISCARD,
	BTRFS_MAP_GET_READ_MIRRORS,
};

static inline enum btrfs_map_op btrfs_op(struct bio *bio)
{
	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
		return BTRFS_MAP_DISCARD;
	case REQ_OP_WRITE:
		return BTRFS_MAP_WRITE;
	default:
		WARN_ON_ONCE(1);
		/* fall through */
	case REQ_OP_READ:
		return BTRFS_MAP_READ;
	}
}
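/*
 * Illustrative sketch only: btrfs_op() translates the bio's REQ_OP_* into the
 * btrfs_map_op expected by the chunk mapping code.  A hypothetical caller
 * mapping a bio to its stripes could look like this (error handling omitted):
 *
 *	struct btrfs_bio *bbio = NULL;
 *	u64 length = bio->bi_iter.bi_size;
 *
 *	ret = btrfs_map_block(fs_info, btrfs_op(bio), logical, &length,
 *			      &bbio, mirror_num);
 *	if (ret == 0)
 *		btrfs_put_bbio(bbio);
 */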

void btrfs_get_bbio(struct btrfs_bio *bbio);
void btrfs_put_bbio(struct btrfs_bio *bbio);
int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
		    u64 logical, u64 *length,
		    struct btrfs_bio **bbio_ret, int mirror_num);
int btrfs_map_sblock(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
		     u64 logical, u64 *length,
		     struct btrfs_bio **bbio_ret);
int btrfs_rmap_block(struct btrfs_fs_info *fs_info, u64 chunk_start,
		     u64 physical, u64 **logical, int *naddrs, int *stripe_len);
int btrfs_read_sys_array(struct btrfs_fs_info *fs_info);
int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info);
int btrfs_alloc_chunk(struct btrfs_trans_handle *trans, u64 type);
void btrfs_mapping_init(struct btrfs_mapping_tree *tree);
void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree);
blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
			   int mirror_num, int async_submit);
int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
		       fmode_t flags, void *holder);
struct btrfs_device *btrfs_scan_one_device(const char *path,
					   fmode_t flags, void *holder);
int btrfs_forget_devices(const char *path);
int btrfs_close_devices(struct btrfs_fs_devices *fs_devices);
void btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices, int step);
void btrfs_assign_next_active_device(struct btrfs_device *device,
				     struct btrfs_device *this_dev);
struct btrfs_device *btrfs_find_device_by_devspec(struct btrfs_fs_info *fs_info,
						  u64 devid,
						  const char *devpath);
struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
					const u64 *devid,
					const u8 *uuid);
void btrfs_free_device(struct btrfs_device *device);
int btrfs_rm_device(struct btrfs_fs_info *fs_info,
		    const char *device_path, u64 devid);
void __exit btrfs_cleanup_fs_uuids(void);
int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len);
int btrfs_grow_device(struct btrfs_trans_handle *trans,
		      struct btrfs_device *device, u64 new_size);
struct btrfs_device *btrfs_find_device(struct btrfs_fs_devices *fs_devices,
				       u64 devid, u8 *uuid, u8 *fsid, bool seed);
int btrfs_shrink_device(struct btrfs_device *device, u64 new_size);
int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *path);
int btrfs_balance(struct btrfs_fs_info *fs_info,
		  struct btrfs_balance_control *bctl,
		  struct btrfs_ioctl_balance_args *bargs);
void btrfs_describe_block_groups(u64 flags, char *buf, u32 size_buf);
int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info);
int btrfs_recover_balance(struct btrfs_fs_info *fs_info);
int btrfs_pause_balance(struct btrfs_fs_info *fs_info);
int btrfs_cancel_balance(struct btrfs_fs_info *fs_info);
int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info);
int btrfs_check_uuid_tree(struct btrfs_fs_info *fs_info);
int btrfs_chunk_readonly(struct btrfs_fs_info *fs_info, u64 chunk_offset);
int find_free_dev_extent_start(struct btrfs_transaction *transaction,
			       struct btrfs_device *device, u64 num_bytes,
			       u64 search_start, u64 *start, u64 *max_avail);
int find_free_dev_extent(struct btrfs_trans_handle *trans,
			 struct btrfs_device *device, u64 num_bytes,
			 u64 *start, u64 *max_avail);
void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index);
int btrfs_get_dev_stats(struct btrfs_fs_info *fs_info,
			struct btrfs_ioctl_get_dev_stats *stats);
void btrfs_init_devices_late(struct btrfs_fs_info *fs_info);
int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info);
int btrfs_run_dev_stats(struct btrfs_trans_handle *trans,
			struct btrfs_fs_info *fs_info);
void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_device *srcdev);
void btrfs_rm_dev_replace_free_srcdev(struct btrfs_fs_info *fs_info,
				      struct btrfs_device *srcdev);
void btrfs_destroy_dev_replace_tgtdev(struct btrfs_device *tgtdev);
void btrfs_scratch_superblocks(struct block_device *bdev, const char *device_path);
int btrfs_is_parity_mirror(struct btrfs_fs_info *fs_info,
			   u64 logical, u64 len);
unsigned long btrfs_full_stripe_len(struct btrfs_fs_info *fs_info,
				    u64 logical);
int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans,
			     u64 chunk_offset, u64 chunk_size);
int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset);
struct extent_map *btrfs_get_chunk_map(struct btrfs_fs_info *fs_info,
				       u64 logical, u64 length);

static inline void btrfs_dev_stat_inc(struct btrfs_device *dev,
				      int index)
{
	atomic_inc(dev->dev_stat_values + index);
	/*
	 * This memory barrier orders stores updating statistics before stores
	 * updating dev_stats_ccnt.
	 *
	 * It pairs with smp_rmb() in btrfs_run_dev_stats().
	 */
	smp_mb__before_atomic();
	atomic_inc(&dev->dev_stats_ccnt);
}

static inline int btrfs_dev_stat_read(struct btrfs_device *dev,
				      int index)
{
	return atomic_read(dev->dev_stat_values + index);
}

static inline int btrfs_dev_stat_read_and_reset(struct btrfs_device *dev,
						int index)
{
	int ret;

	ret = atomic_xchg(dev->dev_stat_values + index, 0);
	/*
	 * atomic_xchg implies a full memory barrier as per atomic_t.txt:
	 * - RMW operations that have a return value are fully ordered;
	 *
	 * This implicit memory barrier is paired with the smp_rmb in
	 * btrfs_run_dev_stats()
	 */
	atomic_inc(&dev->dev_stats_ccnt);
	return ret;
}

static inline void btrfs_dev_stat_set(struct btrfs_device *dev,
				      int index, unsigned long val)
{
	atomic_set(dev->dev_stat_values + index, val);
	/*
	 * This memory barrier orders stores updating statistics before stores
	 * updating dev_stats_ccnt.
	 *
	 * It pairs with smp_rmb() in btrfs_run_dev_stats().
	 */
	smp_mb__before_atomic();
	atomic_inc(&dev->dev_stats_ccnt);
}

static inline void btrfs_dev_stat_reset(struct btrfs_device *dev,
					int index)
{
	btrfs_dev_stat_set(dev, index, 0);
}
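/*
 * Illustrative sketch only: dev_stats_ccnt lets the stats writer detect
 * whether anything changed since the last commit.  A reader that wants a
 * consistent view pairs with the barriers above roughly like this (a
 * simplified version of the pattern referenced by the comments as living in
 * btrfs_run_dev_stats(); names here are hypothetical):
 *
 *	int ccnt = atomic_read(&dev->dev_stats_ccnt);
 *
 *	smp_rmb();	* order the ccnt read before reading the values *
 *	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
 *		vals[i] = btrfs_dev_stat_read(dev, i);
 *	* persist vals[] only if ccnt indicated a change *
 */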

/*
 * Convert block group flags (BTRFS_BLOCK_GROUP_*) to btrfs_raid_types, which
 * can be used as index to access btrfs_raid_array[].
 */
static inline enum btrfs_raid_types btrfs_bg_flags_to_raid_index(u64 flags)
{
	if (flags & BTRFS_BLOCK_GROUP_RAID10)
		return BTRFS_RAID_RAID10;
	else if (flags & BTRFS_BLOCK_GROUP_RAID1)
		return BTRFS_RAID_RAID1;
	else if (flags & BTRFS_BLOCK_GROUP_DUP)
		return BTRFS_RAID_DUP;
	else if (flags & BTRFS_BLOCK_GROUP_RAID0)
		return BTRFS_RAID_RAID0;
	else if (flags & BTRFS_BLOCK_GROUP_RAID5)
		return BTRFS_RAID_RAID5;
	else if (flags & BTRFS_BLOCK_GROUP_RAID6)
		return BTRFS_RAID_RAID6;

	return BTRFS_RAID_SINGLE; /* BTRFS_BLOCK_GROUP_SINGLE */
}
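/*
 * Illustrative sketch only: the returned index is meant to be used directly
 * against btrfs_raid_array[], e.g. to look up how many copies a block group
 * profile keeps:
 *
 *	int index = btrfs_bg_flags_to_raid_index(bg_flags);
 *	int ncopies = btrfs_raid_array[index].ncopies;
 */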

const char *get_raid_name(enum btrfs_raid_types type);

void btrfs_update_commit_device_size(struct btrfs_fs_info *fs_info);
void btrfs_update_commit_device_bytes_used(struct btrfs_transaction *trans);

struct list_head *btrfs_get_fs_uuids(void);
void btrfs_set_fs_info_ptr(struct btrfs_fs_info *fs_info);
void btrfs_reset_fs_info_ptr(struct btrfs_fs_info *fs_info);
bool btrfs_check_rw_degradable(struct btrfs_fs_info *fs_info,
			       struct btrfs_device *failing_dev);

int btrfs_bg_type_to_factor(u64 flags);
int btrfs_verify_dev_extents(struct btrfs_fs_info *fs_info);

#endif