Btrfs: Wait for IO on the block device inodes of newly added devices
btrfs-vol -a /dev/xxx will zero the first and last two MB of the device.
The kernel code needs to wait for this IO to finish before it adds
the device.
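Roughly, the user-space side looks like the sketch below (an illustration of
the behaviour described above, not btrfs-vol's actual source; zero_dev_ends is
a made-up name). The important point is that these are buffered writes, so
they only dirty pages in the block device inode's page cache rather than
hitting the disk directly.

/* Illustrative sketch only -- buffered writes like these dirty pages in
 * bdev->bd_inode->i_mapping; error handling is mostly omitted. */
#include <fcntl.h>
#include <stdlib.h>
#include <unistd.h>

#define ZERO_LEN (2 * 1024 * 1024)

static int zero_dev_ends(const char *path)
{
	off_t size;
	char *buf;
	int fd = open(path, O_RDWR);

	if (fd < 0)
		return -1;

	size = lseek(fd, 0, SEEK_END);
	buf = calloc(1, ZERO_LEN);
	if (size < ZERO_LEN || !buf) {
		free(buf);
		close(fd);
		return -1;
	}

	/* zero the first and last 2MB of the device */
	pwrite(fd, buf, ZERO_LEN, 0);
	pwrite(fd, buf, ZERO_LEN, size - ZERO_LEN);

	free(buf);
	close(fd);
	return 0;
}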
btrfs metadata IO does not happen through the block device inode. A
separate address space is used, which allows the zero-filled buffer heads in
the block device inode to be written to disk after FS metadata has already
started going down to the disk via the btrfs metadata inode.
The end result is zero-filled metadata blocks after new devices are added
to the filesystem.
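To make the two page caches involved concrete, here is a minimal sketch
(show_the_two_caches is a made-up helper for illustration, not part of this
patch): the zeroing from user space lands in bdev->bd_inode->i_mapping, while
btrfs metadata lives in fs_info->btree_inode->i_mapping, and writing back one
mapping says nothing about the other.

#include <linux/fs.h>	/* filemap_write_and_wait() */
#include "ctree.h"	/* struct btrfs_fs_info */

/* Hypothetical helper, only to show the two separate address spaces. */
static void show_the_two_caches(struct btrfs_fs_info *fs_info,
				struct block_device *bdev)
{
	/* pages dirtied by btrfs-vol's zeroing live here */
	struct address_space *raw_mapping = bdev->bd_inode->i_mapping;

	/* btrfs metadata (the btree extent buffers) lives here instead */
	struct address_space *btree_mapping = fs_info->btree_inode->i_mapping;

	/* flushing one mapping does nothing for pages in the other */
	filemap_write_and_wait(raw_mapping);
	filemap_write_and_wait(btree_mapping);
}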
The fix is a simple filemap_write_and_wait on the block device inode before
the device is actually inserted into the pool of available devices.
The patch also switches the delalloc flushing loop from
filemap_write_and_wait to filemap_flush and adds an async_submit_draining
counter, so the flusher can wait for the queued async bios to be submitted
(and their ordered extents created) before returning, while
btrfs_wq_submit_bio holds off new submissions until the drain finishes.
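The drain acts as a simple barrier between the flusher and the async
submission path; condensed from the hunks below, the two sides look like
this:

/* flush side (delalloc flushing loop): mark the drain, wait for pending */
atomic_inc(&fs_info->async_submit_draining);
while (atomic_read(&fs_info->nr_async_submits)) {
	wait_event(fs_info->async_submit_wait,
		   atomic_read(&fs_info->nr_async_submits) == 0);
}
atomic_dec(&fs_info->async_submit_draining);

/* submit side (btrfs_wq_submit_bio): hold off new work while draining */
while (atomic_read(&fs_info->async_submit_draining) &&
       atomic_read(&fs_info->nr_async_submits)) {
	wait_event(fs_info->async_submit_wait,
		   atomic_read(&fs_info->nr_async_submits) == 0);
}
atomic_inc(&fs_info->nr_async_submits);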
Signed-off-by: Chris Mason <chris.mason@oracle.com>
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 2775e27..0079b60 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -610,6 +610,7 @@
struct list_head dead_roots;
atomic_t nr_async_submits;
+ atomic_t async_submit_draining;
atomic_t nr_async_bios;
atomic_t tree_log_writers;
atomic_t tree_log_commit;
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 45bc3132..45b4f728 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -460,6 +460,13 @@
async->submit_bio_hook = submit_bio_hook;
async->work.func = run_one_async_submit;
async->work.flags = 0;
+
+ while(atomic_read(&fs_info->async_submit_draining) &&
+ atomic_read(&fs_info->nr_async_submits)) {
+ wait_event(fs_info->async_submit_wait,
+ (atomic_read(&fs_info->nr_async_submits) == 0));
+ }
+
atomic_inc(&fs_info->nr_async_submits);
btrfs_queue_worker(&fs_info->workers, &async->work);
@@ -495,11 +502,8 @@
int mirror_num)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
- u64 offset;
int ret;
- offset = bio->bi_sector << 9;
-
/*
* when we're called for a write, we're already in the async
* submission context. Just jump into btrfs_map_bio
@@ -1360,6 +1364,7 @@
INIT_LIST_HEAD(&fs_info->space_info);
btrfs_mapping_init(&fs_info->mapping_tree);
atomic_set(&fs_info->nr_async_submits, 0);
+ atomic_set(&fs_info->async_submit_draining, 0);
atomic_set(&fs_info->nr_async_bios, 0);
atomic_set(&fs_info->throttles, 0);
atomic_set(&fs_info->throttle_gen, 0);
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 4516fbf..404704d 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -3440,13 +3440,24 @@
list_del_init(&binode->delalloc_inodes);
spin_unlock_irqrestore(&root->fs_info->delalloc_lock, flags);
if (inode) {
- filemap_write_and_wait(inode->i_mapping);
+ filemap_flush(inode->i_mapping);
iput(inode);
}
cond_resched();
spin_lock_irqsave(&root->fs_info->delalloc_lock, flags);
}
spin_unlock_irqrestore(&root->fs_info->delalloc_lock, flags);
+
+ /* the filemap_flush will queue IO into the worker threads, but
+ * we have to make sure the IO is actually started and that
+ * ordered extents get created before we return
+ */
+ atomic_inc(&root->fs_info->async_submit_draining);
+ while(atomic_read(&root->fs_info->nr_async_submits)) {
+ wait_event(root->fs_info->async_submit_wait,
+ (atomic_read(&root->fs_info->nr_async_submits) == 0));
+ }
+ atomic_dec(&root->fs_info->async_submit_draining);
return 0;
}
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 51f1131..f63cf76 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -1038,6 +1038,7 @@
return -EIO;
}
+ filemap_write_and_wait(bdev->bd_inode->i_mapping);
mutex_lock(&root->fs_info->volume_mutex);
trans = btrfs_start_transaction(root, 1);