| // SPDX-License-Identifier: GPL-2.0 |
| |
| #include <linux/bitops.h> |
| #include <linux/slab.h> |
| #include <linux/bio.h> |
| #include <linux/mm.h> |
| #include <linux/pagemap.h> |
| #include <linux/page-flags.h> |
| #include <linux/spinlock.h> |
| #include <linux/blkdev.h> |
| #include <linux/swap.h> |
| #include <linux/writeback.h> |
| #include <linux/pagevec.h> |
| #include <linux/prefetch.h> |
| #include <linux/cleancache.h> |
| #include <linux/fsverity.h> |
| #include "misc.h" |
| #include "extent_io.h" |
| #include "extent-io-tree.h" |
| #include "extent_map.h" |
| #include "ctree.h" |
| #include "btrfs_inode.h" |
| #include "volumes.h" |
| #include "check-integrity.h" |
| #include "locking.h" |
| #include "rcu-string.h" |
| #include "backref.h" |
| #include "disk-io.h" |
| #include "subpage.h" |
| #include "zoned.h" |
| #include "block-group.h" |
| |
| static struct kmem_cache *extent_state_cache; |
| static struct kmem_cache *extent_buffer_cache; |
| static struct bio_set btrfs_bioset; |
| |
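| /* Return true if the extent state is currently linked into an io tree. */ |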
| static inline bool extent_state_in_tree(const struct extent_state *state) |
| { |
| return !RB_EMPTY_NODE(&state->rb_node); |
| } |
| |
| #ifdef CONFIG_BTRFS_DEBUG |
| static LIST_HEAD(states); |
| static DEFINE_SPINLOCK(leak_lock); |
| |
| static inline void btrfs_leak_debug_add(spinlock_t *lock, |
| struct list_head *new, |
| struct list_head *head) |
| { |
| unsigned long flags; |
| |
| spin_lock_irqsave(lock, flags); |
| list_add(new, head); |
| spin_unlock_irqrestore(lock, flags); |
| } |
| |
| static inline void btrfs_leak_debug_del(spinlock_t *lock, |
| struct list_head *entry) |
| { |
| unsigned long flags; |
| |
| spin_lock_irqsave(lock, flags); |
| list_del(entry); |
| spin_unlock_irqrestore(lock, flags); |
| } |
| |
| void btrfs_extent_buffer_leak_debug_check(struct btrfs_fs_info *fs_info) |
| { |
| struct extent_buffer *eb; |
| unsigned long flags; |
| |
| /* |
| * If we didn't get into open_ctree our allocated_ebs will not be |
| * initialized, so just skip this. |
| */ |
| if (!fs_info->allocated_ebs.next) |
| return; |
| |
| spin_lock_irqsave(&fs_info->eb_leak_lock, flags); |
| while (!list_empty(&fs_info->allocated_ebs)) { |
| eb = list_first_entry(&fs_info->allocated_ebs, |
| struct extent_buffer, leak_list); |
| pr_err( |
| "BTRFS: buffer leak start %llu len %lu refs %d bflags %lu owner %llu\n", |
| eb->start, eb->len, atomic_read(&eb->refs), eb->bflags, |
| btrfs_header_owner(eb)); |
| list_del(&eb->leak_list); |
| kmem_cache_free(extent_buffer_cache, eb); |
| } |
| spin_unlock_irqrestore(&fs_info->eb_leak_lock, flags); |
| } |
| |
| static inline void btrfs_extent_state_leak_debug_check(void) |
| { |
| struct extent_state *state; |
| |
| while (!list_empty(&states)) { |
| state = list_entry(states.next, struct extent_state, leak_list); |
| pr_err("BTRFS: state leak: start %llu end %llu state %u in tree %d refs %d\n", |
| state->start, state->end, state->state, |
| extent_state_in_tree(state), |
| refcount_read(&state->refs)); |
| list_del(&state->leak_list); |
| kmem_cache_free(extent_state_cache, state); |
| } |
| } |
| |
| #define btrfs_debug_check_extent_io_range(tree, start, end) \ |
| __btrfs_debug_check_extent_io_range(__func__, (tree), (start), (end)) |
| static inline void __btrfs_debug_check_extent_io_range(const char *caller, |
| struct extent_io_tree *tree, u64 start, u64 end) |
| { |
| struct inode *inode = tree->private_data; |
| u64 isize; |
| |
| if (!inode || !is_data_inode(inode)) |
| return; |
| |
| isize = i_size_read(inode); |
| if (end >= PAGE_SIZE && (end % 2) == 0 && end != isize - 1) { |
| btrfs_debug_rl(BTRFS_I(inode)->root->fs_info, |
| "%s: ino %llu isize %llu odd range [%llu,%llu]", |
| caller, btrfs_ino(BTRFS_I(inode)), isize, start, end); |
| } |
| } |
| #else |
| #define btrfs_leak_debug_add(lock, new, head) do {} while (0) |
| #define btrfs_leak_debug_del(lock, entry) do {} while (0) |
| #define btrfs_extent_state_leak_debug_check() do {} while (0) |
| #define btrfs_debug_check_extent_io_range(c, s, e) do {} while (0) |
| #endif |
| |
| struct tree_entry { |
| u64 start; |
| u64 end; |
| struct rb_node rb_node; |
| }; |
| |
| struct extent_page_data { |
| struct btrfs_bio_ctrl bio_ctrl; |
| /* |
| * Tells writepage not to lock the state bits for this range; |
| * it still does the unlocking. |
| */ |
| unsigned int extent_locked:1; |
| |
| /* tells the submit_bio code to use REQ_SYNC */ |
| unsigned int sync_io:1; |
| }; |
| |
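| /* |
| * Record the range affected by a bit change in @changeset. Nothing is |
| * recorded when there is no changeset, when setting bits that are already |
| * set, or when clearing bits that are already clear. |
| */ |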
| static int add_extent_changeset(struct extent_state *state, u32 bits, |
| struct extent_changeset *changeset, |
| int set) |
| { |
| int ret; |
| |
| if (!changeset) |
| return 0; |
| if (set && (state->state & bits) == bits) |
| return 0; |
| if (!set && (state->state & bits) == 0) |
| return 0; |
| changeset->bytes_changed += state->end - state->start + 1; |
| ret = ulist_add(&changeset->range_changed, state->start, state->end, |
| GFP_ATOMIC); |
| return ret; |
| } |
| |
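| /* |
| * Submit a built bio to the data or metadata submission hook, depending on |
| * the type of inode behind the io tree stored in bi_private. |
| * |
| * Returns 0 on success or a negative errno on failure. |
| */ |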
| int __must_check submit_one_bio(struct bio *bio, int mirror_num, |
| unsigned long bio_flags) |
| { |
| blk_status_t ret = 0; |
| struct extent_io_tree *tree = bio->bi_private; |
| |
| bio->bi_private = NULL; |
| |
| /* Caller should ensure the bio has at least some range added */ |
| ASSERT(bio->bi_iter.bi_size); |
| if (is_data_inode(tree->private_data)) |
| ret = btrfs_submit_data_bio(tree->private_data, bio, mirror_num, |
| bio_flags); |
| else |
| ret = btrfs_submit_metadata_bio(tree->private_data, bio, |
| mirror_num, bio_flags); |
| |
| return blk_status_to_errno(ret); |
| } |
| |
| /* Cleanup unsubmitted bios */ |
| static void end_write_bio(struct extent_page_data *epd, int ret) |
| { |
| struct bio *bio = epd->bio_ctrl.bio; |
| |
| if (bio) { |
| bio->bi_status = errno_to_blk_status(ret); |
| bio_endio(bio); |
| epd->bio_ctrl.bio = NULL; |
| } |
| } |
| |
| /* |
| * Submit bio from extent page data via submit_one_bio |
| * |
| * Return 0 if everything is OK. |
| * Return <0 for error. |
| */ |
| static int __must_check flush_write_bio(struct extent_page_data *epd) |
| { |
| int ret = 0; |
| struct bio *bio = epd->bio_ctrl.bio; |
| |
| if (bio) { |
| ret = submit_one_bio(bio, 0, 0); |
| /* |
| * Clean up of epd->bio is handled by its endio function. |
| * And endio is either triggered by successful bio execution |
| * or the error handler of submit bio hook. |
| * So at this point, no matter what happened, we don't need |
| * to clean up epd->bio. |
| */ |
| epd->bio_ctrl.bio = NULL; |
| } |
| return ret; |
| } |
| |
| int __init extent_state_cache_init(void) |
| { |
| extent_state_cache = kmem_cache_create("btrfs_extent_state", |
| sizeof(struct extent_state), 0, |
| SLAB_MEM_SPREAD, NULL); |
| if (!extent_state_cache) |
| return -ENOMEM; |
| return 0; |
| } |
| |
| int __init extent_io_init(void) |
| { |
| extent_buffer_cache = kmem_cache_create("btrfs_extent_buffer", |
| sizeof(struct extent_buffer), 0, |
| SLAB_MEM_SPREAD, NULL); |
| if (!extent_buffer_cache) |
| return -ENOMEM; |
| |
| if (bioset_init(&btrfs_bioset, BIO_POOL_SIZE, |
| offsetof(struct btrfs_bio, bio), |
| BIOSET_NEED_BVECS)) |
| goto free_buffer_cache; |
| |
| if (bioset_integrity_create(&btrfs_bioset, BIO_POOL_SIZE)) |
| goto free_bioset; |
| |
| return 0; |
| |
| free_bioset: |
| bioset_exit(&btrfs_bioset); |
| |
| free_buffer_cache: |
| kmem_cache_destroy(extent_buffer_cache); |
| extent_buffer_cache = NULL; |
| return -ENOMEM; |
| } |
| |
| void __cold extent_state_cache_exit(void) |
| { |
| btrfs_extent_state_leak_debug_check(); |
| kmem_cache_destroy(extent_state_cache); |
| } |
| |
| void __cold extent_io_exit(void) |
| { |
| /* |
| * Make sure all delayed rcu free are flushed before we |
| * destroy caches. |
| */ |
| rcu_barrier(); |
| kmem_cache_destroy(extent_buffer_cache); |
| bioset_exit(&btrfs_bioset); |
| } |
| |
| /* |
| * For the file_extent_tree, we want to hold the inode lock when we lookup and |
| * update the disk_i_size, but lockdep will complain because for the io_tree we |
| * hold the tree lock while taking the inode lock when setting delalloc. These |
| * two things are unrelated, so make a class for the file_extent_tree so we |
| * don't get the two locking patterns mixed up. |
| */ |
| static struct lock_class_key file_extent_tree_class; |
| |
| void extent_io_tree_init(struct btrfs_fs_info *fs_info, |
| struct extent_io_tree *tree, unsigned int owner, |
| void *private_data) |
| { |
| tree->fs_info = fs_info; |
| tree->state = RB_ROOT; |
| tree->dirty_bytes = 0; |
| spin_lock_init(&tree->lock); |
| tree->private_data = private_data; |
| tree->owner = owner; |
| if (owner == IO_TREE_INODE_FILE_EXTENT) |
| lockdep_set_class(&tree->lock, &file_extent_tree_class); |
| } |
| |
| void extent_io_tree_release(struct extent_io_tree *tree) |
| { |
| spin_lock(&tree->lock); |
| /* |
| * Do a single barrier for the waitqueue_active check here; the state |
| * of the waitqueue should not change once extent_io_tree_release is |
| * called. |
| */ |
| smp_mb(); |
| while (!RB_EMPTY_ROOT(&tree->state)) { |
| struct rb_node *node; |
| struct extent_state *state; |
| |
| node = rb_first(&tree->state); |
| state = rb_entry(node, struct extent_state, rb_node); |
| rb_erase(&state->rb_node, &tree->state); |
| RB_CLEAR_NODE(&state->rb_node); |
| /* |
| * btree io trees aren't supposed to have tasks waiting for |
| * changes in the flags of extent states ever. |
| */ |
| ASSERT(!waitqueue_active(&state->wq)); |
| free_extent_state(state); |
| |
| cond_resched_lock(&tree->lock); |
| } |
| spin_unlock(&tree->lock); |
| } |
| |
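| /* Allocate a new extent state from the slab cache, holding one reference. */ |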
| static struct extent_state *alloc_extent_state(gfp_t mask) |
| { |
| struct extent_state *state; |
| |
| /* |
| * The given mask might not be appropriate for the slab allocator, so |
| * drop the unsupported bits. |
| */ |
| mask &= ~(__GFP_DMA32|__GFP_HIGHMEM); |
| state = kmem_cache_alloc(extent_state_cache, mask); |
| if (!state) |
| return state; |
| state->state = 0; |
| state->failrec = NULL; |
| RB_CLEAR_NODE(&state->rb_node); |
| btrfs_leak_debug_add(&leak_lock, &state->leak_list, &states); |
| refcount_set(&state->refs, 1); |
| init_waitqueue_head(&state->wq); |
| trace_alloc_extent_state(state, mask, _RET_IP_); |
| return state; |
| } |
| |
| void free_extent_state(struct extent_state *state) |
| { |
| if (!state) |
| return; |
| if (refcount_dec_and_test(&state->refs)) { |
| WARN_ON(extent_state_in_tree(state)); |
| btrfs_leak_debug_del(&leak_lock, &state->leak_list); |
| trace_free_extent_state(state, _RET_IP_); |
| kmem_cache_free(extent_state_cache, state); |
| } |
| } |
| |
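| /* |
| * Insert @node into @root keyed by @offset (the end of its range). If a |
| * cached insertion point is passed in via @p_in/@parent_in it is used |
| * directly, otherwise the tree is walked from @search_start (or the root). |
| * Returns NULL on success, or the existing node whose range already |
| * contains @offset. |
| */ |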
| static struct rb_node *tree_insert(struct rb_root *root, |
| struct rb_node *search_start, |
| u64 offset, |
| struct rb_node *node, |
| struct rb_node ***p_in, |
| struct rb_node **parent_in) |
| { |
| struct rb_node **p; |
| struct rb_node *parent = NULL; |
| struct tree_entry *entry; |
| |
| if (p_in && parent_in) { |
| p = *p_in; |
| parent = *parent_in; |
| goto do_insert; |
| } |
| |
| p = search_start ? &search_start : &root->rb_node; |
| while (*p) { |
| parent = *p; |
| entry = rb_entry(parent, struct tree_entry, rb_node); |
| |
| if (offset < entry->start) |
| p = &(*p)->rb_left; |
| else if (offset > entry->end) |
| p = &(*p)->rb_right; |
| else |
| return parent; |
| } |
| |
| do_insert: |
| rb_link_node(node, parent, p); |
| rb_insert_color(node, root); |
| return NULL; |
| } |
| |
| /** |
| * Search @tree for an entry that contains @offset. Such an entry would have |
| * entry->start <= offset && entry->end >= offset. |
| * |
| * @tree: the tree to search |
| * @offset: offset that should fall within an entry in @tree |
| * @next_ret: pointer to the first entry whose range ends after @offset |
| * @prev_ret: pointer to the first entry whose range begins before @offset |
| * @p_ret: pointer where new node should be anchored (used when inserting an |
| * entry in the tree) |
| * @parent_ret: points to entry which would have been the parent of the entry, |
| * containing @offset |
| * |
| * This function returns a pointer to the entry that contains @offset byte |
| * address. If no such entry exists, then NULL is returned and the other |
| * pointer arguments to the function are filled, otherwise the found entry is |
| * returned and other pointers are left untouched. |
| */ |
| static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset, |
| struct rb_node **next_ret, |
| struct rb_node **prev_ret, |
| struct rb_node ***p_ret, |
| struct rb_node **parent_ret) |
| { |
| struct rb_root *root = &tree->state; |
| struct rb_node **n = &root->rb_node; |
| struct rb_node *prev = NULL; |
| struct rb_node *orig_prev = NULL; |
| struct tree_entry *entry; |
| struct tree_entry *prev_entry = NULL; |
| |
| while (*n) { |
| prev = *n; |
| entry = rb_entry(prev, struct tree_entry, rb_node); |
| prev_entry = entry; |
| |
| if (offset < entry->start) |
| n = &(*n)->rb_left; |
| else if (offset > entry->end) |
| n = &(*n)->rb_right; |
| else |
| return *n; |
| } |
| |
| if (p_ret) |
| *p_ret = n; |
| if (parent_ret) |
| *parent_ret = prev; |
| |
| if (next_ret) { |
| orig_prev = prev; |
| while (prev && offset > prev_entry->end) { |
| prev = rb_next(prev); |
| prev_entry = rb_entry(prev, struct tree_entry, rb_node); |
| } |
| *next_ret = prev; |
| prev = orig_prev; |
| } |
| |
| if (prev_ret) { |
| prev_entry = rb_entry(prev, struct tree_entry, rb_node); |
| while (prev && offset < prev_entry->start) { |
| prev = rb_prev(prev); |
| prev_entry = rb_entry(prev, struct tree_entry, rb_node); |
| } |
| *prev_ret = prev; |
| } |
| return NULL; |
| } |
| |
| static inline struct rb_node * |
| tree_search_for_insert(struct extent_io_tree *tree, |
| u64 offset, |
| struct rb_node ***p_ret, |
| struct rb_node **parent_ret) |
| { |
| struct rb_node *next = NULL; |
| struct rb_node *ret; |
| |
| ret = __etree_search(tree, offset, &next, NULL, p_ret, parent_ret); |
| if (!ret) |
| return next; |
| return ret; |
| } |
| |
| static inline struct rb_node *tree_search(struct extent_io_tree *tree, |
| u64 offset) |
| { |
| return tree_search_for_insert(tree, offset, NULL, NULL); |
| } |
| |
| /* |
| * utility function to look for merge candidates inside a given range. |
| * Any extents with matching state are merged together into a single |
| * extent in the tree. Extents with EXTENT_LOCKED or EXTENT_BOUNDARY in their |
| * state field are not merged because the end_io handlers need to be able to do |
| * operations on them without sleeping (or doing allocations/splits). |
| * |
| * This should be called with the tree lock held. |
| */ |
| static void merge_state(struct extent_io_tree *tree, |
| struct extent_state *state) |
| { |
| struct extent_state *other; |
| struct rb_node *other_node; |
| |
| if (state->state & (EXTENT_LOCKED | EXTENT_BOUNDARY)) |
| return; |
| |
| other_node = rb_prev(&state->rb_node); |
| if (other_node) { |
| other = rb_entry(other_node, struct extent_state, rb_node); |
| if (other->end == state->start - 1 && |
| other->state == state->state) { |
| if (tree->private_data && |
| is_data_inode(tree->private_data)) |
| btrfs_merge_delalloc_extent(tree->private_data, |
| state, other); |
| state->start = other->start; |
| rb_erase(&other->rb_node, &tree->state); |
| RB_CLEAR_NODE(&other->rb_node); |
| free_extent_state(other); |
| } |
| } |
| other_node = rb_next(&state->rb_node); |
| if (other_node) { |
| other = rb_entry(other_node, struct extent_state, rb_node); |
| if (other->start == state->end + 1 && |
| other->state == state->state) { |
| if (tree->private_data && |
| is_data_inode(tree->private_data)) |
| btrfs_merge_delalloc_extent(tree->private_data, |
| state, other); |
| state->end = other->end; |
| rb_erase(&other->rb_node, &tree->state); |
| RB_CLEAR_NODE(&other->rb_node); |
| free_extent_state(other); |
| } |
| } |
| } |
| |
| static void set_state_bits(struct extent_io_tree *tree, |
| struct extent_state *state, u32 *bits, |
| struct extent_changeset *changeset); |
| |
| /* |
| * insert an extent_state struct into the tree. 'bits' are set on the |
| * struct before it is inserted. |
| * |
| * This may return -EEXIST if the extent is already there, in which case the |
| * state struct is freed. |
| * |
| * The tree lock is not taken internally. This is a utility function and |
| * probably isn't what you want to call (see set/clear_extent_bit). |
| */ |
| static int insert_state(struct extent_io_tree *tree, |
| struct extent_state *state, u64 start, u64 end, |
| struct rb_node ***p, |
| struct rb_node **parent, |
| u32 *bits, struct extent_changeset *changeset) |
| { |
| struct rb_node *node; |
| |
| if (end < start) { |
| btrfs_err(tree->fs_info, |
| "insert state: end < start %llu %llu", end, start); |
| WARN_ON(1); |
| } |
| state->start = start; |
| state->end = end; |
| |
| set_state_bits(tree, state, bits, changeset); |
| |
| node = tree_insert(&tree->state, NULL, end, &state->rb_node, p, parent); |
| if (node) { |
| struct extent_state *found; |
| found = rb_entry(node, struct extent_state, rb_node); |
| btrfs_err(tree->fs_info, |
| "found node %llu %llu on insert of %llu %llu", |
| found->start, found->end, start, end); |
| return -EEXIST; |
| } |
| merge_state(tree, state); |
| return 0; |
| } |
| |
| /* |
| * split a given extent state struct in two, inserting the preallocated |
| * struct 'prealloc' as the newly created second half. 'split' indicates an |
| * offset inside 'orig' where it should be split. |
| * |
| * Before calling, |
| * the tree has 'orig' at [orig->start, orig->end]. After calling, there |
| * are two extent state structs in the tree: |
| * prealloc: [orig->start, split - 1] |
| * orig: [ split, orig->end ] |
| * |
| * The tree locks are not taken by this function. They need to be held |
| * by the caller. |
| */ |
| static int split_state(struct extent_io_tree *tree, struct extent_state *orig, |
| struct extent_state *prealloc, u64 split) |
| { |
| struct rb_node *node; |
| |
| if (tree->private_data && is_data_inode(tree->private_data)) |
| btrfs_split_delalloc_extent(tree->private_data, orig, split); |
| |
| prealloc->start = orig->start; |
| prealloc->end = split - 1; |
| prealloc->state = orig->state; |
| orig->start = split; |
| |
| node = tree_insert(&tree->state, &orig->rb_node, prealloc->end, |
| &prealloc->rb_node, NULL, NULL); |
| if (node) { |
| free_extent_state(prealloc); |
| return -EEXIST; |
| } |
| return 0; |
| } |
| |
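| /* Return the in-tree successor of @state, or NULL if it is the last entry. */ |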
| static struct extent_state *next_state(struct extent_state *state) |
| { |
| struct rb_node *next = rb_next(&state->rb_node); |
| if (next) |
| return rb_entry(next, struct extent_state, rb_node); |
| else |
| return NULL; |
| } |
| |
| /* |
| * utility function to clear some bits in an extent state struct. |
| * it will optionally wake up anyone waiting on this state (wake == 1). |
| * |
| * If no bits are set on the state struct after clearing things, the |
| * struct is freed and removed from the tree |
| */ |
| static struct extent_state *clear_state_bit(struct extent_io_tree *tree, |
| struct extent_state *state, |
| u32 *bits, int wake, |
| struct extent_changeset *changeset) |
| { |
| struct extent_state *next; |
| u32 bits_to_clear = *bits & ~EXTENT_CTLBITS; |
| int ret; |
| |
| if ((bits_to_clear & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) { |
| u64 range = state->end - state->start + 1; |
| WARN_ON(range > tree->dirty_bytes); |
| tree->dirty_bytes -= range; |
| } |
| |
| if (tree->private_data && is_data_inode(tree->private_data)) |
| btrfs_clear_delalloc_extent(tree->private_data, state, bits); |
| |
| ret = add_extent_changeset(state, bits_to_clear, changeset, 0); |
| BUG_ON(ret < 0); |
| state->state &= ~bits_to_clear; |
| if (wake) |
| wake_up(&state->wq); |
| if (state->state == 0) { |
| next = next_state(state); |
| if (extent_state_in_tree(state)) { |
| rb_erase(&state->rb_node, &tree->state); |
| RB_CLEAR_NODE(&state->rb_node); |
| free_extent_state(state); |
| } else { |
| WARN_ON(1); |
| } |
| } else { |
| merge_state(tree, state); |
| next = next_state(state); |
| } |
| return next; |
| } |
| |
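| /* |
| * Make sure a preallocated extent state is available, allocating one with |
| * GFP_ATOMIC if the caller's preallocation was already consumed. |
| */ |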
| static struct extent_state * |
| alloc_extent_state_atomic(struct extent_state *prealloc) |
| { |
| if (!prealloc) |
| prealloc = alloc_extent_state(GFP_ATOMIC); |
| |
| return prealloc; |
| } |
| |
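| /* |
| * A concurrent modification of a locked tree is a logic error, report it |
| * via btrfs_panic(). |
| */ |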
| static void extent_io_tree_panic(struct extent_io_tree *tree, int err) |
| { |
| btrfs_panic(tree->fs_info, err, |
| "locking error: extent tree was modified by another thread while locked"); |
| } |
| |
| /* |
| * clear some bits on a range in the tree. This may require splitting |
| * or inserting elements in the tree, so the gfp mask is used to |
| * indicate which allocations or sleeping are allowed. |
| * |
| * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove |
| * the given range from the tree regardless of state (ie for truncate). |
| * |
| * the range [start, end] is inclusive. |
| * |
| * This takes the tree lock, and returns 0 on success and < 0 on error. |
| */ |
| int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, |
| u32 bits, int wake, int delete, |
| struct extent_state **cached_state, |
| gfp_t mask, struct extent_changeset *changeset) |
| { |
| struct extent_state *state; |
| struct extent_state *cached; |
| struct extent_state *prealloc = NULL; |
| struct rb_node *node; |
| u64 last_end; |
| int err; |
| int clear = 0; |
| |
| btrfs_debug_check_extent_io_range(tree, start, end); |
| trace_btrfs_clear_extent_bit(tree, start, end - start + 1, bits); |
| |
| if (bits & EXTENT_DELALLOC) |
| bits |= EXTENT_NORESERVE; |
| |
| if (delete) |
| bits |= ~EXTENT_CTLBITS; |
| |
| if (bits & (EXTENT_LOCKED | EXTENT_BOUNDARY)) |
| clear = 1; |
| again: |
| if (!prealloc && gfpflags_allow_blocking(mask)) { |
| /* |
| * Don't care for allocation failure here because we might end |
| * up not needing the pre-allocated extent state at all, which |
| * is the case if we only have in the tree extent states that |
| * cover our input range and don't cover any other range. |
| * If we end up needing a new extent state we allocate it later. |
| */ |
| prealloc = alloc_extent_state(mask); |
| } |
| |
| spin_lock(&tree->lock); |
| if (cached_state) { |
| cached = *cached_state; |
| |
| if (clear) { |
| *cached_state = NULL; |
| cached_state = NULL; |
| } |
| |
| if (cached && extent_state_in_tree(cached) && |
| cached->start <= start && cached->end > start) { |
| if (clear) |
| refcount_dec(&cached->refs); |
| state = cached; |
| goto hit_next; |
| } |
| if (clear) |
| free_extent_state(cached); |
| } |
| /* |
| * this search will find the extents that end after |
| * our range starts |
| */ |
| node = tree_search(tree, start); |
| if (!node) |
| goto out; |
| state = rb_entry(node, struct extent_state, rb_node); |
| hit_next: |
| if (state->start > end) |
| goto out; |
| WARN_ON(state->end < start); |
| last_end = state->end; |
| |
| /* the state doesn't have the wanted bits, go ahead */ |
| if (!(state->state & bits)) { |
| state = next_state(state); |
| goto next; |
| } |
| |
| /* |
| * | ---- desired range ---- | |
| * | state | or |
| * | ------------- state -------------- | |
| * |
| * We need to split the extent we found, and may flip |
| * bits on second half. |
| * |
| * If the extent we found extends past our range, we |
| * just split and search again. It'll get split again |
| * the next time though. |
| * |
| * If the extent we found is inside our range, we clear |
| * the desired bit on it. |
| */ |
| |
| if (state->start < start) { |
| prealloc = alloc_extent_state_atomic(prealloc); |
| BUG_ON(!prealloc); |
| err = split_state(tree, state, prealloc, start); |
| if (err) |
| extent_io_tree_panic(tree, err); |
| |
| prealloc = NULL; |
| if (err) |
| goto out; |
| if (state->end <= end) { |
| state = clear_state_bit(tree, state, &bits, wake, |
| changeset); |
| goto next; |
| } |
| goto search_again; |
| } |
| /* |
| * | ---- desired range ---- | |
| * | state | |
| * We need to split the extent, and clear the bit |
| * on the first half |
| */ |
| if (state->start <= end && state->end > end) { |
| prealloc = alloc_extent_state_atomic(prealloc); |
| BUG_ON(!prealloc); |
| err = split_state(tree, state, prealloc, end + 1); |
| if (err) |
| extent_io_tree_panic(tree, err); |
| |
| if (wake) |
| wake_up(&state->wq); |
| |
| clear_state_bit(tree, prealloc, &bits, wake, changeset); |
| |
| prealloc = NULL; |
| goto out; |
| } |
| |
| state = clear_state_bit(tree, state, &bits, wake, changeset); |
| next: |
| if (last_end == (u64)-1) |
| goto out; |
| start = last_end + 1; |
| if (start <= end && state && !need_resched()) |
| goto hit_next; |
| |
| search_again: |
| if (start > end) |
| goto out; |
| spin_unlock(&tree->lock); |
| if (gfpflags_allow_blocking(mask)) |
| cond_resched(); |
| goto again; |
| |
| out: |
| spin_unlock(&tree->lock); |
| if (prealloc) |
| free_extent_state(prealloc); |
| |
| return 0; |
| |
| } |
| |
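| /* |
| * Sleep until @state is woken up, dropping the tree lock while waiting and |
| * re-taking it before returning. |
| */ |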
| static void wait_on_state(struct extent_io_tree *tree, |
| struct extent_state *state) |
| __releases(tree->lock) |
| __acquires(tree->lock) |
| { |
| DEFINE_WAIT(wait); |
| prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE); |
| spin_unlock(&tree->lock); |
| schedule(); |
| spin_lock(&tree->lock); |
| finish_wait(&state->wq, &wait); |
| } |
| |
| /* |
| * waits for one or more bits to clear on a range in the state tree. |
| * The range [start, end] is inclusive. |
| * The tree lock is taken by this function. |
| */ |
| static void wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, |
| u32 bits) |
| { |
| struct extent_state *state; |
| struct rb_node *node; |
| |
| btrfs_debug_check_extent_io_range(tree, start, end); |
| |
| spin_lock(&tree->lock); |
| again: |
| while (1) { |
| /* |
| * this search will find all the extents that end after |
| * our range starts |
| */ |
| node = tree_search(tree, start); |
| process_node: |
| if (!node) |
| break; |
| |
| state = rb_entry(node, struct extent_state, rb_node); |
| |
| if (state->start > end) |
| goto out; |
| |
| if (state->state & bits) { |
| start = state->start; |
| refcount_inc(&state->refs); |
| wait_on_state(tree, state); |
| free_extent_state(state); |
| goto again; |
| } |
| start = state->end + 1; |
| |
| if (start > end) |
| break; |
| |
| if (!cond_resched_lock(&tree->lock)) { |
| node = rb_next(node); |
| goto process_node; |
| } |
| } |
| out: |
| spin_unlock(&tree->lock); |
| } |
| |
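| /* |
| * Set the given bits on an extent state, updating the dirty byte accounting, |
| * the delalloc inode hooks and the optional changeset. |
| */ |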
| static void set_state_bits(struct extent_io_tree *tree, |
| struct extent_state *state, |
| u32 *bits, struct extent_changeset *changeset) |
| { |
| u32 bits_to_set = *bits & ~EXTENT_CTLBITS; |
| int ret; |
| |
| if (tree->private_data && is_data_inode(tree->private_data)) |
| btrfs_set_delalloc_extent(tree->private_data, state, bits); |
| |
| if ((bits_to_set & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) { |
| u64 range = state->end - state->start + 1; |
| tree->dirty_bytes += range; |
| } |
| ret = add_extent_changeset(state, bits_to_set, changeset, 1); |
| BUG_ON(ret < 0); |
| state->state |= bits_to_set; |
| } |
| |
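| /* |
| * Stash @state in @cached_ptr (taking a reference) if nothing is cached yet |
| * and the state has any of the requested @flags set (or @flags is 0). |
| */ |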
| static void cache_state_if_flags(struct extent_state *state, |
| struct extent_state **cached_ptr, |
| unsigned flags) |
| { |
| if (cached_ptr && !(*cached_ptr)) { |
| if (!flags || (state->state & flags)) { |
| *cached_ptr = state; |
| refcount_inc(&state->refs); |
| } |
| } |
| } |
| |
| static void cache_state(struct extent_state *state, |
| struct extent_state **cached_ptr) |
| { |
| return cache_state_if_flags(state, cached_ptr, |
| EXTENT_LOCKED | EXTENT_BOUNDARY); |
| } |
| |
| /* |
| * set some bits on a range in the tree. This may require allocations or |
| * sleeping, so the gfp mask is used to indicate what is allowed. |
| * |
| * If any of the exclusive bits are set, this will fail with -EEXIST if some |
| * part of the range already has the desired bits set. The start of the |
| * existing range is returned in failed_start in this case. |
| * |
| * [start, end] is inclusive. This takes the tree lock. |
| */ |
| int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, u32 bits, |
| u32 exclusive_bits, u64 *failed_start, |
| struct extent_state **cached_state, gfp_t mask, |
| struct extent_changeset *changeset) |
| { |
| struct extent_state *state; |
| struct extent_state *prealloc = NULL; |
| struct rb_node *node; |
| struct rb_node **p; |
| struct rb_node *parent; |
| int err = 0; |
| u64 last_start; |
| u64 last_end; |
| |
| btrfs_debug_check_extent_io_range(tree, start, end); |
| trace_btrfs_set_extent_bit(tree, start, end - start + 1, bits); |
| |
| if (exclusive_bits) |
| ASSERT(failed_start); |
| else |
| ASSERT(failed_start == NULL); |
| again: |
| if (!prealloc && gfpflags_allow_blocking(mask)) { |
| /* |
| * Don't care for allocation failure here because we might end |
| * up not needing the pre-allocated extent state at all, which |
| * is the case if we only have in the tree extent states that |
| * cover our input range and don't cover any other range. |
| * If we end up needing a new extent state we allocate it later. |
| */ |
| prealloc = alloc_extent_state(mask); |
| } |
| |
| spin_lock(&tree->lock); |
| if (cached_state && *cached_state) { |
| state = *cached_state; |
| if (state->start <= start && state->end > start && |
| extent_state_in_tree(state)) { |
| node = &state->rb_node; |
| goto hit_next; |
| } |
| } |
| /* |
| * this search will find all the extents that end after |
| * our range starts. |
| */ |
| node = tree_search_for_insert(tree, start, &p, &parent); |
| if (!node) { |
| prealloc = alloc_extent_state_atomic(prealloc); |
| BUG_ON(!prealloc); |
| err = insert_state(tree, prealloc, start, end, |
| &p, &parent, &bits, changeset); |
| if (err) |
| extent_io_tree_panic(tree, err); |
| |
| cache_state(prealloc, cached_state); |
| prealloc = NULL; |
| goto out; |
| } |
| state = rb_entry(node, struct extent_state, rb_node); |
| hit_next: |
| last_start = state->start; |
| last_end = state->end; |
| |
| /* |
| * | ---- desired range ---- | |
| * | state | |
| * |
| * Just lock what we found and keep going |
| */ |
| if (state->start == start && state->end <= end) { |
| if (state->state & exclusive_bits) { |
| *failed_start = state->start; |
| err = -EEXIST; |
| goto out; |
| } |
| |
| set_state_bits(tree, state, &bits, changeset); |
| cache_state(state, cached_state); |
| merge_state(tree, state); |
| if (last_end == (u64)-1) |
| goto out; |
| start = last_end + 1; |
| state = next_state(state); |
| if (start < end && state && state->start == start && |
| !need_resched()) |
| goto hit_next; |
| goto search_again; |
| } |
| |
| /* |
| * | ---- desired range ---- | |
| * | state | |
| * or |
| * | ------------- state -------------- | |
| * |
| * We need to split the extent we found, and may flip bits on |
| * second half. |
| * |
| * If the extent we found extends past our |
| * range, we just split and search again. It'll get split |
| * again the next time though. |
| * |
| * If the extent we found is inside our range, we set the |
| * desired bit on it. |
| */ |
| if (state->start < start) { |
| if (state->state & exclusive_bits) { |
| *failed_start = start; |
| err = -EEXIST; |
| goto out; |
| } |
| |
| /* |
| * If this extent already has all the bits we want set, then |
| * skip it, not necessary to split it or do anything with it. |
| */ |
| if ((state->state & bits) == bits) { |
| start = state->end + 1; |
| cache_state(state, cached_state); |
| goto search_again; |
| } |
| |
| prealloc = alloc_extent_state_atomic(prealloc); |
| BUG_ON(!prealloc); |
| err = split_state(tree, state, prealloc, start); |
| if (err) |
| extent_io_tree_panic(tree, err); |
| |
| prealloc = NULL; |
| if (err) |
| goto out; |
| if (state->end <= end) { |
| set_state_bits(tree, state, &bits, changeset); |
| cache_state(state, cached_state); |
| merge_state(tree, state); |
| if (last_end == (u64)-1) |
| goto out; |
| start = last_end + 1; |
| state = next_state(state); |
| if (start < end && state && state->start == start && |
| !need_resched()) |
| goto hit_next; |
| } |
| goto search_again; |
| } |
| /* |
| * | ---- desired range ---- | |
| * | state | or | state | |
| * |
| * There's a hole, we need to insert something in it and |
| * ignore the extent we found. |
| */ |
| if (state->start > start) { |
| u64 this_end; |
| if (end < last_start) |
| this_end = end; |
| else |
| this_end = last_start - 1; |
| |
| prealloc = alloc_extent_state_atomic(prealloc); |
| BUG_ON(!prealloc); |
| |
| /* |
| * Avoid freeing 'prealloc' if it can be merged with |
| * the later extent. |
| */ |
| err = insert_state(tree, prealloc, start, this_end, |
| NULL, NULL, &bits, changeset); |
| if (err) |
| extent_io_tree_panic(tree, err); |
| |
| cache_state(prealloc, cached_state); |
| prealloc = NULL; |
| start = this_end + 1; |
| goto search_again; |
| } |
| /* |
| * | ---- desired range ---- | |
| * | state | |
| * We need to split the extent, and set the bit |
| * on the first half |
| */ |
| if (state->start <= end && state->end > end) { |
| if (state->state & exclusive_bits) { |
| *failed_start = start; |
| err = -EEXIST; |
| goto out; |
| } |
| |
| prealloc = alloc_extent_state_atomic(prealloc); |
| BUG_ON(!prealloc); |
| err = split_state(tree, state, prealloc, end + 1); |
| if (err) |
| extent_io_tree_panic(tree, err); |
| |
| set_state_bits(tree, prealloc, &bits, changeset); |
| cache_state(prealloc, cached_state); |
| merge_state(tree, prealloc); |
| prealloc = NULL; |
| goto out; |
| } |
| |
| search_again: |
| if (start > end) |
| goto out; |
| spin_unlock(&tree->lock); |
| if (gfpflags_allow_blocking(mask)) |
| cond_resched(); |
| goto again; |
| |
| out: |
| spin_unlock(&tree->lock); |
| if (prealloc) |
| free_extent_state(prealloc); |
| |
| return err; |
| |
| } |
| |
| /** |
| * convert_extent_bit - convert all bits in a given range from one bit to |
| * another |
| * @tree: the io tree to search |
| * @start: the start offset in bytes |
| * @end: the end offset in bytes (inclusive) |
| * @bits: the bits to set in this range |
| * @clear_bits: the bits to clear in this range |
| * @cached_state: state that we're going to cache |
| * |
| * This will go through and set bits for the given range. If any states exist |
| * already in this range they are set with the given bit and cleared of the |
| * clear_bits. This is only meant to be used by things that are mergeable, ie |
| * converting from say DELALLOC to DIRTY. This is not meant to be used with |
| * boundary bits like LOCK. |
| * |
| * All allocations are done with GFP_NOFS. |
| */ |
| int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, |
| u32 bits, u32 clear_bits, |
| struct extent_state **cached_state) |
| { |
| struct extent_state *state; |
| struct extent_state *prealloc = NULL; |
| struct rb_node *node; |
| struct rb_node **p; |
| struct rb_node *parent; |
| int err = 0; |
| u64 last_start; |
| u64 last_end; |
| bool first_iteration = true; |
| |
| btrfs_debug_check_extent_io_range(tree, start, end); |
| trace_btrfs_convert_extent_bit(tree, start, end - start + 1, bits, |
| clear_bits); |
| |
| again: |
| if (!prealloc) { |
| /* |
| * Best effort, don't worry if extent state allocation fails |
| * here for the first iteration. We might have a cached state |
| * that matches exactly the target range, in which case no |
| * extent state allocations are needed. We'll only know this |
| * after locking the tree. |
| */ |
| prealloc = alloc_extent_state(GFP_NOFS); |
| if (!prealloc && !first_iteration) |
| return -ENOMEM; |
| } |
| |
| spin_lock(&tree->lock); |
| if (cached_state && *cached_state) { |
| state = *cached_state; |
| if (state->start <= start && state->end > start && |
| extent_state_in_tree(state)) { |
| node = &state->rb_node; |
| goto hit_next; |
| } |
| } |
| |
| /* |
| * this search will find all the extents that end after |
| * our range starts. |
| */ |
| node = tree_search_for_insert(tree, start, &p, &parent); |
| if (!node) { |
| prealloc = alloc_extent_state_atomic(prealloc); |
| if (!prealloc) { |
| err = -ENOMEM; |
| goto out; |
| } |
| err = insert_state(tree, prealloc, start, end, |
| &p, &parent, &bits, NULL); |
| if (err) |
| extent_io_tree_panic(tree, err); |
| cache_state(prealloc, cached_state); |
| prealloc = NULL; |
| goto out; |
| } |
| state = rb_entry(node, struct extent_state, rb_node); |
| hit_next: |
| last_start = state->start; |
| last_end = state->end; |
| |
| /* |
| * | ---- desired range ---- | |
| * | state | |
| * |
| * Just lock what we found and keep going |
| */ |
| if (state->start == start && state->end <= end) { |
| set_state_bits(tree, state, &bits, NULL); |
| cache_state(state, cached_state); |
| state = clear_state_bit(tree, state, &clear_bits, 0, NULL); |
| if (last_end == (u64)-1) |
| goto out; |
| start = last_end + 1; |
| if (start < end && state && state->start == start && |
| !need_resched()) |
| goto hit_next; |
| goto search_again; |
| } |
| |
| /* |
| * | ---- desired range ---- | |
| * | state | |
| * or |
| * | ------------- state -------------- | |
| * |
| * We need to split the extent we found, and may flip bits on |
| * second half. |
| * |
| * If the extent we found extends past our |
| * range, we just split and search again. It'll get split |
| * again the next time though. |
| * |
| * If the extent we found is inside our range, we set the |
| * desired bit on it. |
| */ |
| if (state->start < start) { |
| prealloc = alloc_extent_state_atomic(prealloc); |
| if (!prealloc) { |
| err = -ENOMEM; |
| goto out; |
| } |
| err = split_state(tree, state, prealloc, start); |
| if (err) |
| extent_io_tree_panic(tree, err); |
| prealloc = NULL; |
| if (err) |
| goto out; |
| if (state->end <= end) { |
| set_state_bits(tree, state, &bits, NULL); |
| cache_state(state, cached_state); |
| state = clear_state_bit(tree, state, &clear_bits, 0, |
| NULL); |
| if (last_end == (u64)-1) |
| goto out; |
| start = last_end + 1; |
| if (start < end && state && state->start == start && |
| !need_resched()) |
| goto hit_next; |
| } |
| goto search_again; |
| } |
| /* |
| * | ---- desired range ---- | |
| * | state | or | state | |
| * |
| * There's a hole, we need to insert something in it and |
| * ignore the extent we found. |
| */ |
| if (state->start > start) { |
| u64 this_end; |
| if (end < last_start) |
| this_end = end; |
| else |
| this_end = last_start - 1; |
| |
| prealloc = alloc_extent_state_atomic(prealloc); |
| if (!prealloc) { |
| err = -ENOMEM; |
| goto out; |
| } |
| |
| /* |
| * Avoid freeing 'prealloc' if it can be merged with |
| * the later extent. |
| */ |
| err = insert_state(tree, prealloc, start, this_end, |
| NULL, NULL, &bits, NULL); |
| if (err) |
| extent_io_tree_panic(tree, err); |
| cache_state(prealloc, cached_state); |
| prealloc = NULL; |
| start = this_end + 1; |
| goto search_again; |
| } |
| /* |
| * | ---- desired range ---- | |
| * | state | |
| * We need to split the extent, and set the bit |
| * on the first half |
| */ |
| if (state->start <= end && state->end > end) { |
| prealloc = alloc_extent_state_atomic(prealloc); |
| if (!prealloc) { |
| err = -ENOMEM; |
| goto out; |
| } |
| |
| err = split_state(tree, state, prealloc, end + 1); |
| if (err) |
| extent_io_tree_panic(tree, err); |
| |
| set_state_bits(tree, prealloc, &bits, NULL); |
| cache_state(prealloc, cached_state); |
| clear_state_bit(tree, prealloc, &clear_bits, 0, NULL); |
| prealloc = NULL; |
| goto out; |
| } |
| |
| search_again: |
| if (start > end) |
| goto out; |
| spin_unlock(&tree->lock); |
| cond_resched(); |
| first_iteration = false; |
| goto again; |
| |
| out: |
| spin_unlock(&tree->lock); |
| if (prealloc) |
| free_extent_state(prealloc); |
| |
| return err; |
| } |
| |
| /* wrappers around set/clear extent bit */ |
| int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, |
| u32 bits, struct extent_changeset *changeset) |
| { |
| /* |
| * We don't support EXTENT_LOCKED yet, as the current changeset will |
| * record any bits changed, so for the EXTENT_LOCKED case it will |
| * either fail with -EEXIST or the changeset will record the whole |
| * range. |
| */ |
| BUG_ON(bits & EXTENT_LOCKED); |
| |
| return set_extent_bit(tree, start, end, bits, 0, NULL, NULL, GFP_NOFS, |
| changeset); |
| } |
| |
| int set_extent_bits_nowait(struct extent_io_tree *tree, u64 start, u64 end, |
| u32 bits) |
| { |
| return set_extent_bit(tree, start, end, bits, 0, NULL, NULL, |
| GFP_NOWAIT, NULL); |
| } |
| |
| int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, |
| u32 bits, int wake, int delete, |
| struct extent_state **cached) |
| { |
| return __clear_extent_bit(tree, start, end, bits, wake, delete, |
| cached, GFP_NOFS, NULL); |
| } |
| |
| int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, |
| u32 bits, struct extent_changeset *changeset) |
| { |
| /* |
| * Don't support EXTENT_LOCKED case, same reason as |
| * set_record_extent_bits(). |
| */ |
| BUG_ON(bits & EXTENT_LOCKED); |
| |
| return __clear_extent_bit(tree, start, end, bits, 0, 0, NULL, GFP_NOFS, |
| changeset); |
| } |
| |
| /* |
| * Either insert or lock the state struct between start and end, waiting until |
| * any conflicting EXTENT_LOCKED range has been cleared before retrying. |
| */ |
| int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, |
| struct extent_state **cached_state) |
| { |
| int err; |
| u64 failed_start; |
| |
| while (1) { |
| err = set_extent_bit(tree, start, end, EXTENT_LOCKED, |
| EXTENT_LOCKED, &failed_start, |
| cached_state, GFP_NOFS, NULL); |
| if (err == -EEXIST) { |
| wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED); |
| start = failed_start; |
| } else |
| break; |
| WARN_ON(start > end); |
| } |
| return err; |
| } |
| |
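| /* |
| * Try to lock the range without waiting. Returns 1 if the whole range was |
| * locked, or 0 (after undoing any partial lock) if part of it was already |
| * locked. |
| */ |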
| int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end) |
| { |
| int err; |
| u64 failed_start; |
| |
| err = set_extent_bit(tree, start, end, EXTENT_LOCKED, EXTENT_LOCKED, |
| &failed_start, NULL, GFP_NOFS, NULL); |
| if (err == -EEXIST) { |
| if (failed_start > start) |
| clear_extent_bit(tree, start, failed_start - 1, |
| EXTENT_LOCKED, 1, 0, NULL); |
| return 0; |
| } |
| return 1; |
| } |
| |
| void extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end) |
| { |
| unsigned long index = start >> PAGE_SHIFT; |
| unsigned long end_index = end >> PAGE_SHIFT; |
| struct page *page; |
| |
| while (index <= end_index) { |
| page = find_get_page(inode->i_mapping, index); |
| BUG_ON(!page); /* Pages should be in the extent_io_tree */ |
| clear_page_dirty_for_io(page); |
| put_page(page); |
| index++; |
| } |
| } |
| |
| void extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end) |
| { |
| unsigned long index = start >> PAGE_SHIFT; |
| unsigned long end_index = end >> PAGE_SHIFT; |
| struct page *page; |
| |
| while (index <= end_index) { |
| page = find_get_page(inode->i_mapping, index); |
| BUG_ON(!page); /* Pages should be in the extent_io_tree */ |
| __set_page_dirty_nobuffers(page); |
| account_page_redirty(page); |
| put_page(page); |
| index++; |
| } |
| } |
| |
| /* find the first state struct with 'bits' set after 'start', and |
| * return it. tree->lock must be held. NULL is returned if |
| * nothing was found after 'start'. |
| */ |
| static struct extent_state * |
| find_first_extent_bit_state(struct extent_io_tree *tree, u64 start, u32 bits) |
| { |
| struct rb_node *node; |
| struct extent_state *state; |
| |
| /* |
| * this search will find all the extents that end after |
| * our range starts. |
| */ |
| node = tree_search(tree, start); |
| if (!node) |
| goto out; |
| |
| while (1) { |
| state = rb_entry(node, struct extent_state, rb_node); |
| if (state->end >= start && (state->state & bits)) |
| return state; |
| |
| node = rb_next(node); |
| if (!node) |
| break; |
| } |
| out: |
| return NULL; |
| } |
| |
| /* |
| * Find the first offset in the io tree with one or more @bits set. |
| * |
| * Note: If there are multiple bits set in @bits, any of them will match. |
| * |
| * Return 0 if we find something, and update @start_ret and @end_ret. |
| * Return 1 if we found nothing. |
| */ |
| int find_first_extent_bit(struct extent_io_tree *tree, u64 start, |
| u64 *start_ret, u64 *end_ret, u32 bits, |
| struct extent_state **cached_state) |
| { |
| struct extent_state *state; |
| int ret = 1; |
| |
| spin_lock(&tree->lock); |
| if (cached_state && *cached_state) { |
| state = *cached_state; |
| if (state->end == start - 1 && extent_state_in_tree(state)) { |
| while ((state = next_state(state)) != NULL) { |
| if (state->state & bits) |
| goto got_it; |
| } |
| free_extent_state(*cached_state); |
| *cached_state = NULL; |
| goto out; |
| } |
| free_extent_state(*cached_state); |
| *cached_state = NULL; |
| } |
| |
| state = find_first_extent_bit_state(tree, start, bits); |
| got_it: |
| if (state) { |
| cache_state_if_flags(state, cached_state, 0); |
| *start_ret = state->start; |
| *end_ret = state->end; |
| ret = 0; |
| } |
| out: |
| spin_unlock(&tree->lock); |
| return ret; |
| } |
| |
| /** |
| * Find a contiguous area of bits |
| * |
| * @tree: io tree to check |
| * @start: offset to start the search from |
| * @start_ret: the first offset we found with the bits set |
| * @end_ret: the final contiguous range of the bits that were set |
| * @bits: bits to look for |
| * |
| * set_extent_bit and clear_extent_bit can temporarily split contiguous ranges |
| * to set bits appropriately, and then merge them again. During this time it |
| * will drop the tree->lock, so use this helper if you want to find the actual |
| * contiguous area for given bits. We will search to the first bit we find, and |
| * then walk down the tree until we find a non-contiguous area. The area |
| * returned will be the full contiguous area with the bits set. |
| */ |
| int find_contiguous_extent_bit(struct extent_io_tree *tree, u64 start, |
| u64 *start_ret, u64 *end_ret, u32 bits) |
| { |
| struct extent_state *state; |
| int ret = 1; |
| |
| spin_lock(&tree->lock); |
| state = find_first_extent_bit_state(tree, start, bits); |
| if (state) { |
| *start_ret = state->start; |
| *end_ret = state->end; |
| while ((state = next_state(state)) != NULL) { |
| if (state->start > (*end_ret + 1)) |
| break; |
| *end_ret = state->end; |
| } |
| ret = 0; |
| } |
| spin_unlock(&tree->lock); |
| return ret; |
| } |
| |
| /** |
| * Find the first range that has @bits not set. This range could start before |
| * @start. |
| * |
| * @tree: the tree to search |
| * @start: offset at/after which the found extent should start |
| * @start_ret: records the beginning of the range |
| * @end_ret: records the end of the range (inclusive) |
| * @bits: the set of bits which must be unset |
| * |
| * Since an unallocated range is also considered one which doesn't have the bits |
| * set, it's possible that @end_ret contains -1; this happens when the range |
| * spans (last_range_end, end of device]. In this case it's up to the caller to |
| * trim @end_ret to the appropriate size. |
| */ |
| void find_first_clear_extent_bit(struct extent_io_tree *tree, u64 start, |
| u64 *start_ret, u64 *end_ret, u32 bits) |
| { |
| struct extent_state *state; |
| struct rb_node *node, *prev = NULL, *next; |
| |
| spin_lock(&tree->lock); |
| |
| /* Find first extent with bits cleared */ |
| while (1) { |
| node = __etree_search(tree, start, &next, &prev, NULL, NULL); |
| if (!node && !next && !prev) { |
| /* |
| * Tree is completely empty, send full range and let |
| * caller deal with it |
| */ |
| *start_ret = 0; |
| *end_ret = -1; |
| goto out; |
| } else if (!node && !next) { |
| /* |
| * We are past the last allocated chunk, set start at |
| * the end of the last extent. |
| */ |
| state = rb_entry(prev, struct extent_state, rb_node); |
| *start_ret = state->end + 1; |
| *end_ret = -1; |
| goto out; |
| } else if (!node) { |
| node = next; |
| } |
| /* |
| * At this point 'node' either contains 'start' or start is |
| * before 'node' |
| */ |
| state = rb_entry(node, struct extent_state, rb_node); |
| |
| if (in_range(start, state->start, state->end - state->start + 1)) { |
| if (state->state & bits) { |
| /* |
| * |--range with bits sets--| |
| * | |
| * start |
| */ |
| start = state->end + 1; |
| } else { |
| /* |
| * 'start' falls within a range that doesn't |
| * have the bits set, so take its start as |
| * the beginning of the desired range |
| * |
| * |--range with bits cleared----| |
| * | |
| * start |
| */ |
| *start_ret = state->start; |
| break; |
| } |
| } else { |
| /* |
| * |---prev range---|---hole/unset---|---node range---| |
| * | |
| * start |
| * |
| * or |
| * |
| * |---hole/unset--||--first node--| |
| * 0 | |
| * start |
| */ |
| if (prev) { |
| state = rb_entry(prev, struct extent_state, |
| rb_node); |
| *start_ret = state->end + 1; |
| } else { |
| *start_ret = 0; |
| } |
| break; |
| } |
| } |
| |
| /* |
| * Find the longest stretch from start until an entry which has the |
| * bits set |
| */ |
| while (1) { |
| state = rb_entry(node, struct extent_state, rb_node); |
| if (state->end >= start && !(state->state & bits)) { |
| *end_ret = state->end; |
| } else { |
| *end_ret = state->start - 1; |
| break; |
| } |
| |
| node = rb_next(node); |
| if (!node) |
| break; |
| } |
| out: |
| spin_unlock(&tree->lock); |
| } |
| |
| /* |
| * Find a contiguous range of bytes in the file marked as delalloc, not |
| * more than 'max_bytes'. 'start' and 'end' are used to return the range. |
| * |
| * True is returned if we find something, false if nothing was in the tree. |
| */ |
| bool btrfs_find_delalloc_range(struct extent_io_tree *tree, u64 *start, |
| u64 *end, u64 max_bytes, |
| struct extent_state **cached_state) |
| { |
| struct rb_node *node; |
| struct extent_state *state; |
| u64 cur_start = *start; |
| bool found = false; |
| u64 total_bytes = 0; |
| |
| spin_lock(&tree->lock); |
| |
| /* |
| * this search will find all the extents that end after |
| * our range starts. |
| */ |
| node = tree_search(tree, cur_start); |
| if (!node) { |
| *end = (u64)-1; |
| goto out; |
| } |
| |
| while (1) { |
| state = rb_entry(node, struct extent_state, rb_node); |
| if (found && (state->start != cur_start || |
| (state->state & EXTENT_BOUNDARY))) { |
| goto out; |
| } |
| if (!(state->state & EXTENT_DELALLOC)) { |
| if (!found) |
| *end = state->end; |
| goto out; |
| } |
| if (!found) { |
| *start = state->start; |
| *cached_state = state; |
| refcount_inc(&state->refs); |
| } |
| found = true; |
| *end = state->end; |
| cur_start = state->end + 1; |
| node = rb_next(node); |
| total_bytes += state->end - state->start + 1; |
| if (total_bytes >= max_bytes) |
| break; |
| if (!node) |
| break; |
| } |
| out: |
| spin_unlock(&tree->lock); |
| return found; |
| } |
| |
| /* |
| * Process one page for __process_pages_contig(). |
| * |
| * Return >0 if we hit @page == @locked_page. |
| * Return 0 if we updated the page status. |
| * Return -EAGAIN if we need to try again. |
| * (For the PAGE_LOCK case, when the page is no longer dirty or no longer |
| * belongs to the mapping.) |
| */ |
| static int process_one_page(struct btrfs_fs_info *fs_info, |
| struct address_space *mapping, |
| struct page *page, struct page *locked_page, |
| unsigned long page_ops, u64 start, u64 end) |
| { |
| u32 len; |
| |
| ASSERT(end + 1 - start != 0 && end + 1 - start < U32_MAX); |
| len = end + 1 - start; |
| |
| if (page_ops & PAGE_SET_ORDERED) |
| btrfs_page_clamp_set_ordered(fs_info, page, start, len); |
| if (page_ops & PAGE_SET_ERROR) |
| btrfs_page_clamp_set_error(fs_info, page, start, len); |
| if (page_ops & PAGE_START_WRITEBACK) { |
| btrfs_page_clamp_clear_dirty(fs_info, page, start, len); |
| btrfs_page_clamp_set_writeback(fs_info, page, start, len); |
| } |
| if (page_ops & PAGE_END_WRITEBACK) |
| btrfs_page_clamp_clear_writeback(fs_info, page, start, len); |
| |
| if (page == locked_page) |
| return 1; |
| |
| if (page_ops & PAGE_LOCK) { |
| int ret; |
| |
| ret = btrfs_page_start_writer_lock(fs_info, page, start, len); |
| if (ret) |
| return ret; |
| if (!PageDirty(page) || page->mapping != mapping) { |
| btrfs_page_end_writer_lock(fs_info, page, start, len); |
| return -EAGAIN; |
| } |
| } |
| if (page_ops & PAGE_UNLOCK) |
| btrfs_page_end_writer_lock(fs_info, page, start, len); |
| return 0; |
| } |
| |
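| /* |
| * Walk all pages in the range [@start, @end] and apply @page_ops to each of |
| * them via process_one_page(), skipping @locked_page. |
| * |
| * For the PAGE_LOCK case, @processed_end reports how far we got before a |
| * page could not be locked, so the caller can unlock what was already done. |
| */ |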
| static int __process_pages_contig(struct address_space *mapping, |
| struct page *locked_page, |
| u64 start, u64 end, unsigned long page_ops, |
| u64 *processed_end) |
| { |
| struct btrfs_fs_info *fs_info = btrfs_sb(mapping->host->i_sb); |
| pgoff_t start_index = start >> PAGE_SHIFT; |
| pgoff_t end_index = end >> PAGE_SHIFT; |
| pgoff_t index = start_index; |
| unsigned long nr_pages = end_index - start_index + 1; |
| unsigned long pages_processed = 0; |
| struct page *pages[16]; |
| int err = 0; |
| int i; |
| |
| if (page_ops & PAGE_LOCK) { |
| ASSERT(page_ops == PAGE_LOCK); |
| ASSERT(processed_end && *processed_end == start); |
| } |
| |
| if ((page_ops & PAGE_SET_ERROR) && nr_pages > 0) |
| mapping_set_error(mapping, -EIO); |
| |
| while (nr_pages > 0) { |
| int found_pages; |
| |
| found_pages = find_get_pages_contig(mapping, index, |
| min_t(unsigned long, |
| nr_pages, ARRAY_SIZE(pages)), pages); |
| if (found_pages == 0) { |
| /* |
| * Only when we're going to lock these pages can we |
| * find nothing at @index. |
| */ |
| ASSERT(page_ops & PAGE_LOCK); |
| err = -EAGAIN; |
| goto out; |
| } |
| |
| for (i = 0; i < found_pages; i++) { |
| int process_ret; |
| |
| process_ret = process_one_page(fs_info, mapping, |
| pages[i], locked_page, page_ops, |
| start, end); |
| if (process_ret < 0) { |
| for (; i < found_pages; i++) |
| put_page(pages[i]); |
| err = -EAGAIN; |
| goto out; |
| } |
| put_page(pages[i]); |
| pages_processed++; |
| } |
| nr_pages -= found_pages; |
| index += found_pages; |
| cond_resched(); |
| } |
| out: |
| if (err && processed_end) { |
| /* |
| * Update @processed_end. I know this is awful since it has |
| * two different return value patterns (inclusive vs exclusive). |
| * |
| * But the exclusive pattern is necessary if @start is 0, or we |
| * would underflow and the check against processed_end wouldn't work as |
| * expected. |
| */ |
| if (pages_processed) |
| *processed_end = min(end, |
| ((u64)(start_index + pages_processed) << PAGE_SHIFT) - 1); |
| else |
| *processed_end = start; |
| } |
| return err; |
| } |
| |
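| /* Unlock all pages in the range, except @locked_page. */ |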
| static noinline void __unlock_for_delalloc(struct inode *inode, |
| struct page *locked_page, |
| u64 start, u64 end) |
| { |
| unsigned long index = start >> PAGE_SHIFT; |
| unsigned long end_index = end >> PAGE_SHIFT; |
| |
| ASSERT(locked_page); |
| if (index == locked_page->index && end_index == index) |
| return; |
| |
| __process_pages_contig(inode->i_mapping, locked_page, start, end, |
| PAGE_UNLOCK, NULL); |
| } |
| |
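| /* |
| * Lock all pages covering the delalloc range, except @locked_page. On |
| * -EAGAIN, any pages we managed to lock are unlocked again before returning. |
| */ |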
| static noinline int lock_delalloc_pages(struct inode *inode, |
| struct page *locked_page, |
| u64 delalloc_start, |
| u64 delalloc_end) |
| { |
| unsigned long index = delalloc_start >> PAGE_SHIFT; |
| unsigned long end_index = delalloc_end >> PAGE_SHIFT; |
| u64 processed_end = delalloc_start; |
| int ret; |
| |
| ASSERT(locked_page); |
| if (index == locked_page->index && index == end_index) |
| return 0; |
| |
| ret = __process_pages_contig(inode->i_mapping, locked_page, delalloc_start, |
| delalloc_end, PAGE_LOCK, &processed_end); |
| if (ret == -EAGAIN && processed_end > delalloc_start) |
| __unlock_for_delalloc(inode, locked_page, delalloc_start, |
| processed_end); |
| return ret; |
| } |
| |
| /* |
| * Find and lock a contiguous range of bytes in the file marked as delalloc, no |
| * more than @max_bytes. |
| * |
| * @start: The original start bytenr to search. |
| * Will store the extent range start bytenr. |
| * @end: The original end bytenr of the search range |
| * Will store the extent range end bytenr. |
| * |
| * Return true if we find a delalloc range which starts inside the original |
| * range, and @start/@end will store the delalloc range start/end. |
| * |
| * Return false if we can't find any delalloc range which starts inside the |
| * original range, and @start/@end will be the non-delalloc range start/end. |
| */ |
| EXPORT_FOR_TESTS |
| noinline_for_stack bool find_lock_delalloc_range(struct inode *inode, |
| struct page *locked_page, u64 *start, |
| u64 *end) |
| { |
| struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree; |
| const u64 orig_start = *start; |
| const u64 orig_end = *end; |
| u64 max_bytes = BTRFS_MAX_EXTENT_SIZE; |
| u64 delalloc_start; |
| u64 delalloc_end; |
| bool found; |
| struct extent_state *cached_state = NULL; |
| int ret; |
| int loops = 0; |
| |
| /* Caller should pass a valid @end to indicate the search range end */ |
| ASSERT(orig_end > orig_start); |
| |
| /* The range should at least cover part of the page */ |
| ASSERT(!(orig_start >= page_offset(locked_page) + PAGE_SIZE || |
| orig_end <= page_offset(locked_page))); |
| again: |
| /* step one, find a bunch of delalloc bytes starting at start */ |
| delalloc_start = *start; |
| delalloc_end = 0; |
| found = btrfs_find_delalloc_range(tree, &delalloc_start, &delalloc_end, |
| max_bytes, &cached_state); |
| if (!found || delalloc_end <= *start || delalloc_start > orig_end) { |
| *start = delalloc_start; |
| |
| /* @delalloc_end can be -1, never go beyond @orig_end */ |
| *end = min(delalloc_end, orig_end); |
| free_extent_state(cached_state); |
| return false; |
| } |
| |
| /* |
| * start comes from the offset of locked_page. We have to lock |
| * pages in order, so we can't process delalloc bytes before |
| * locked_page |
| */ |
| if (delalloc_start < *start) |
| delalloc_start = *start; |
| |
| /* Make sure to limit the number of pages we try to lock down. */ |
| if (delalloc_end + 1 - delalloc_start > max_bytes) |
| delalloc_end = delalloc_start + max_bytes - 1; |
| |
| /* step two, lock all the pages after the page that has start */ |
| ret = lock_delalloc_pages(inode, locked_page, |
| delalloc_start, delalloc_end); |
| ASSERT(!ret || ret == -EAGAIN); |
| if (ret == -EAGAIN) { |
| /* |
| * Some of the pages are gone, let's avoid looping by |
| * shortening the size of the delalloc range we're searching. |
| */ |
| free_extent_state(cached_state); |
| cached_state = NULL; |
| if (!loops) { |
| max_bytes = PAGE_SIZE; |
| loops = 1; |
| goto again; |
| } else { |
| found = false; |
| goto out_failed; |
| } |
| } |
| |
| /* step three, lock the state bits for the whole range */ |
| lock_extent_bits(tree, delalloc_start, delalloc_end, &cached_state); |
| |
| /* then test to make sure it is all still delalloc */ |
| ret = test_range_bit(tree, delalloc_start, delalloc_end, |
| EXTENT_DELALLOC, 1, cached_state); |
| if (!ret) { |
| unlock_extent_cached(tree, delalloc_start, delalloc_end, |
| &cached_state); |
| __unlock_for_delalloc(inode, locked_page, |
| delalloc_start, delalloc_end); |
| cond_resched(); |
| goto again; |
| } |
| free_extent_state(cached_state); |
| *start = delalloc_start; |
| *end = delalloc_end; |
| out_failed: |
| return found; |
| } |
| |
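| /* |
| * Clear @clear_bits from the io tree for the byte range [@start, @end] and |
| * apply @page_ops to the pages covering that range, with @locked_page |
| * handled according to __process_pages_contig(). |
| */ |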
| void extent_clear_unlock_delalloc(struct btrfs_inode *inode, u64 start, u64 end, |
| struct page *locked_page, |
| u32 clear_bits, unsigned long page_ops) |
| { |
| clear_extent_bit(&inode->io_tree, start, end, clear_bits, 1, 0, NULL); |
| |
| __process_pages_contig(inode->vfs_inode.i_mapping, locked_page, |
| start, end, page_ops, NULL); |
| } |
| |
| /* |
| * Count the number of bytes in the tree that have the given bit(s) set. |
| * This can be fairly slow, except for EXTENT_DIRTY which is cached. |
| * The total number of bytes found is returned. |
| */ |
| u64 count_range_bits(struct extent_io_tree *tree, |
| u64 *start, u64 search_end, u64 max_bytes, |
| u32 bits, int contig) |
| { |
| struct rb_node *node; |
| struct extent_state *state; |
| u64 cur_start = *start; |
| u64 total_bytes = 0; |
| u64 last = 0; |
| int found = 0; |
| |
| if (WARN_ON(search_end <= cur_start)) |
| return 0; |
| |
| spin_lock(&tree->lock); |
| if (cur_start == 0 && bits == EXTENT_DIRTY) { |
| total_bytes = tree->dirty_bytes; |
| goto out; |
| } |
| /* |
| * this search will find all the extents that end after |
| * our range starts. |
| */ |
| node = tree_search(tree, cur_start); |
| if (!node) |
| goto out; |
| |
| while (1) { |
| state = rb_entry(node, struct extent_state, rb_node); |
| if (state->start > search_end) |
| break; |
| if (contig && found && state->start > last + 1) |
| break; |
| if (state->end >= cur_start && (state->state & bits) == bits) { |
| total_bytes += min(search_end, state->end) + 1 - |
| max(cur_start, state->start); |
| if (total_bytes >= max_bytes) |
| break; |
| if (!found) { |
| *start = max(cur_start, state->start); |
| found = 1; |
| } |
| last = state->end; |
| } else if (contig && found) { |
| break; |
| } |
| node = rb_next(node); |
| if (!node) |
| break; |
| } |
| out: |
| spin_unlock(&tree->lock); |
| return total_bytes; |
| } |
| |
| /* |
| * Set the failure record for a given byte offset in the tree. If there isn't |
| * an extent_state starting exactly at that offset, -ENOENT is returned and |
| * nothing is changed. |
| */ |
| int set_state_failrec(struct extent_io_tree *tree, u64 start, |
| struct io_failure_record *failrec) |
| { |
| struct rb_node *node; |
| struct extent_state *state; |
| int ret = 0; |
| |
| spin_lock(&tree->lock); |
| /* |
| * this search will find all the extents that end after |
| * our range starts. |
| */ |
| node = tree_search(tree, start); |
| if (!node) { |
| ret = -ENOENT; |
| goto out; |
| } |
| state = rb_entry(node, struct extent_state, rb_node); |
| if (state->start != start) { |
| ret = -ENOENT; |
| goto out; |
| } |
| state->failrec = failrec; |
| out: |
| spin_unlock(&tree->lock); |
| return ret; |
| } |
| |
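| /* |
| * Get the failure record stored for a given byte offset in the tree. Returns |
| * ERR_PTR(-ENOENT) if there is no extent_state starting exactly at @start. |
| */ |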
| struct io_failure_record *get_state_failrec(struct extent_io_tree *tree, u64 start) |
| { |
| struct rb_node *node; |
| struct extent_state *state; |
| struct io_failure_record *failrec; |
| |
| spin_lock(&tree->lock); |
| /* |
| * this search will find all the extents that end after |
| * our range starts. |
| */ |
| node = tree_search(tree, start); |
| if (!node) { |
| failrec = ERR_PTR(-ENOENT); |
| goto out; |
| } |
| state = rb_entry(node, struct extent_state, rb_node); |
| if (state->start != start) { |
| failrec = ERR_PTR(-ENOENT); |
| goto out; |
| } |
| |
| failrec = state->failrec; |
| out: |
| spin_unlock(&tree->lock); |
| return failrec; |
| } |
| |
| /* |
| * Search a range in the state tree for a given mask. |
| * If 'filled' == 1, this returns 1 only if every extent in the range |
| * has the bits set. Otherwise, 1 is returned if any bit in the |
| * range is found set. |
| */ |
| int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end, |
| u32 bits, int filled, struct extent_state *cached) |
| { |
| struct extent_state *state = NULL; |
| struct rb_node *node; |
| int bitset = 0; |
| |
| spin_lock(&tree->lock); |
| if (cached && extent_state_in_tree(cached) && cached->start <= start && |
| cached->end > start) |
| node = &cached->rb_node; |
| else |
| node = tree_search(tree, start); |
| while (node && start <= end) { |
| state = rb_entry(node, struct extent_state, rb_node); |
| |
| if (filled && state->start > start) { |
| bitset = 0; |
| break; |
| } |
| |
| if (state->start > end) |
| break; |
| |
| if (state->state & bits) { |
| bitset = 1; |
| if (!filled) |
| break; |
| } else if (filled) { |
| bitset = 0; |
| break; |
| } |
| |
| if (state->end == (u64)-1) |
| break; |
| |
| start = state->end + 1; |
| if (start > end) |
| break; |
| node = rb_next(node); |
| if (!node) { |
| if (filled) |
| bitset = 0; |
| break; |
| } |
| } |
| spin_unlock(&tree->lock); |
| return bitset; |
| } |
| |
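| /* |
| * Release an io failure record: clear its bits in the failure tree, clear |
| * EXTENT_DAMAGED in the io tree for its range and free the record. Returns |
| * the first error hit while clearing bits, or 0 on success. |
| */ |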
| int free_io_failure(struct extent_io_tree *failure_tree, |
| struct extent_io_tree *io_tree, |
| struct io_failure_record *rec) |
| { |
| int ret; |
| int err = 0; |
| |
| set_state_failrec(failure_tree, rec->start, NULL); |
| ret = clear_extent_bits(failure_tree, rec->start, |
| rec->start + rec->len - 1, |
| EXTENT_LOCKED | EXTENT_DIRTY); |
| if (ret) |
| err = ret; |
| |
| ret = clear_extent_bits(io_tree, rec->start, |
| rec->start + rec->len - 1, |
| EXTENT_DAMAGED); |
| if (ret && !err) |
| err = ret; |
| |
| kfree(rec); |
| return err; |
| } |
| |
| /* |
| * This bypasses the standard btrfs submit functions deliberately, as the |
| * standard behavior is to write all copies in a raid setup. Here we only |
| * want to write the one bad copy, so we do the mapping ourselves and issue |
| * submit_bio directly. |
| * To avoid any synchronization issues, wait for the data after writing, which |
| * actually prevents the read that triggered the error from finishing. |
| * Currently, there can be no more than two copies of every data bit, thus |
| * exactly one rewrite is required. |
| */ |
| static int repair_io_failure(struct btrfs_fs_info *fs_info, u64 ino, u64 start, |
| u64 length, u64 logical, struct page *page, |
| unsigned int pg_offset, int mirror_num) |
| { |
| struct bio *bio; |
| struct btrfs_device *dev; |
| u64 map_length = 0; |
| u64 sector; |
| struct btrfs_io_context *bioc = NULL; |
| int ret; |
| |
| ASSERT(!(fs_info->sb->s_flags & SB_RDONLY)); |
| BUG_ON(!mirror_num); |
| |
| if (btrfs_is_zoned(fs_info)) |
| return btrfs_repair_one_zone(fs_info, logical); |
| |
| bio = btrfs_bio_alloc(1); |
| bio->bi_iter.bi_size = 0; |
| map_length = length; |
| |
| /* |
| * Avoid races with device replace and make sure our bioc has devices |
| * associated to its stripes that don't go away while we are doing the |
| * read repair operation. |
| */ |
| btrfs_bio_counter_inc_blocked(fs_info); |
| if (btrfs_is_parity_mirror(fs_info, logical, length)) { |
| /* |
| * Note that we don't use BTRFS_MAP_WRITE because it's supposed |
| * to update all raid stripes, but here we just want to correct |
| * bad stripe, thus BTRFS_MAP_READ is abused to only get the bad |
| * stripe's dev and sector. |
| */ |
| ret = btrfs_map_block(fs_info, BTRFS_MAP_READ, logical, |
| &map_length, &bioc, 0); |
| if (ret) { |
| btrfs_bio_counter_dec(fs_info); |
| bio_put(bio); |
| return -EIO; |
| } |
| ASSERT(bioc->mirror_num == 1); |
| } else { |
| ret = btrfs_map_block(fs_info, BTRFS_MAP_WRITE, logical, |
| &map_length, &bioc, mirror_num); |
| if (ret) { |
| btrfs_bio_counter_dec(fs_info); |
| bio_put(bio); |
| return -EIO; |
| } |
| BUG_ON(mirror_num != bioc->mirror_num); |
| } |
| |
| sector = bioc->stripes[bioc->mirror_num - 1].physical >> 9; |
| bio->bi_iter.bi_sector = sector; |
| dev = bioc->stripes[bioc->mirror_num - 1].dev; |
| btrfs_put_bioc(bioc); |
| if (!dev || !dev->bdev || |
| !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state)) { |
| btrfs_bio_counter_dec(fs_info); |
| bio_put(bio); |
| return -EIO; |
| } |
| bio_set_dev(bio, dev->bdev); |
| bio->bi_opf = REQ_OP_WRITE | REQ_SYNC; |
| bio_add_page(bio, page, length, pg_offset); |
| |
| if (btrfsic_submit_bio_wait(bio)) { |
| /* try to remap that extent elsewhere? */ |
| btrfs_bio_counter_dec(fs_info); |
| bio_put(bio); |
| btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS); |
| return -EIO; |
| } |
| |
| btrfs_info_rl_in_rcu(fs_info, |
| "read error corrected: ino %llu off %llu (dev %s sector %llu)", |
| ino, start, |
| rcu_str_deref(dev->name), sector); |
| btrfs_bio_counter_dec(fs_info); |
| bio_put(bio); |
| return 0; |
| } |
| |
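| /* |
| * Repair a bad metadata copy by writing the in-memory pages of @eb back to |
| * the on-disk copy at @mirror_num, one page at a time. Returns -EROFS if the |
| * filesystem is read-only. |
| */ |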
| int btrfs_repair_eb_io_failure(const struct extent_buffer *eb, int mirror_num) |
| { |
| struct btrfs_fs_info *fs_info = eb->fs_info; |
| u64 start = eb->start; |
| int i, num_pages = num_extent_pages(eb); |
| int ret = 0; |
| |
| if (sb_rdonly(fs_info->sb)) |
| return -EROFS; |
| |
| for (i = 0; i < num_pages; i++) { |
| struct page *p = eb->pages[i]; |
| |
| ret = repair_io_failure(fs_info, 0, start, PAGE_SIZE, start, p, |
| start - page_offset(p), mirror_num); |
| if (ret) |
| break; |
| start += PAGE_SIZE; |
| } |
| |
| return ret; |
| } |
| |
| /* |
| * each time an IO finishes, we do a fast check in the IO failure tree |
| * to see if we need to process or clean up an io_failure_record |
| */ |
| int clean_io_failure(struct btrfs_fs_info *fs_info, |
| struct extent_io_tree *failure_tree, |
| struct extent_io_tree *io_tree, u64 start, |
| struct page *page, u64 ino, unsigned int pg_offset) |
| { |
| u64 private; |
| struct io_failure_record *failrec; |
| struct extent_state *state; |
| int num_copies; |
| int ret; |
| |
| private = 0; |
| ret = count_range_bits(failure_tree, &private, (u64)-1, 1, |
| EXTENT_DIRTY, 0); |
| if (!ret) |
| return 0; |
| |
| failrec = get_state_failrec(failure_tree, start); |
| if (IS_ERR(failrec)) |
| return 0; |
| |
| BUG_ON(!failrec->this_mirror); |
| |
| if (sb_rdonly(fs_info->sb)) |
| goto out; |
| |
| spin_lock(&io_tree->lock); |
| state = find_first_extent_bit_state(io_tree, |
| failrec->start, |
| EXTENT_LOCKED); |
| spin_unlock(&io_tree->lock); |
| |
| if (state && state->start <= failrec->start && |
| state->end >= failrec->start + failrec->len - 1) { |
| num_copies = btrfs_num_copies(fs_info, failrec->logical, |
| failrec->len); |
| if (num_copies > 1) { |
| repair_io_failure(fs_info, ino, start, failrec->len, |
| failrec->logical, page, pg_offset, |
| failrec->failed_mirror); |
| } |
| } |
| |
| out: |
| free_io_failure(failure_tree, io_tree, failrec); |
| |
| return 0; |
| } |
| |
| /* |
| * Can be called when |
| * - holding the extent lock |
| * - under an ordered extent |
| * - the inode is being freed |
| */ |
| void btrfs_free_io_failure_record(struct btrfs_inode *inode, u64 start, u64 end) |
| { |
| struct extent_io_tree *failure_tree = &inode->io_failure_tree; |
| struct io_failure_record *failrec; |
| struct extent_state *state, *next; |
| |
| if (RB_EMPTY_ROOT(&failure_tree->state)) |
| return; |
| |
| spin_lock(&failure_tree->lock); |
| state = find_first_extent_bit_state(failure_tree, start, EXTENT_DIRTY); |
| while (state) { |
| if (state->start > end) |
| break; |
| |
| ASSERT(state->end <= end); |
| |
| next = next_state(state); |
| |
| failrec = state->failrec; |
| free_extent_state(state); |
| kfree(failrec); |
| |
| state = next; |
| } |
| spin_unlock(&failure_tree->lock); |
| } |
| |
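| /* |
| * Look up the io failure record for @start, or create a new one covering a |
| * single sector if none exists yet. A new record resolves the logical bytenr |
| * through the extent map (handling compressed extents) and is tracked in the |
| * inode's io failure tree. |
| */ |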
| static struct io_failure_record *btrfs_get_io_failure_record(struct inode *inode, |
| u64 start) |
| { |
| struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); |
| struct io_failure_record *failrec; |
| struct extent_map *em; |
| struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree; |
| struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree; |
| struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; |
| const u32 sectorsize = fs_info->sectorsize; |
| int ret; |
| u64 logical; |
| |
| failrec = get_state_failrec(failure_tree, start); |
| if (!IS_ERR(failrec)) { |
| btrfs_debug(fs_info, |
| "Get IO Failure Record: (found) logical=%llu, start=%llu, len=%llu", |
| failrec->logical, failrec->start, failrec->len); |
| /* |
| * when data can be on disk more than twice, add to failrec here |
| * (e.g. with a list for failed_mirror) to make |
| * clean_io_failure() clean all those errors at once. |
| */ |
| |
| return failrec; |
| } |
| |
| failrec = kzalloc(sizeof(*failrec), GFP_NOFS); |
| if (!failrec) |
| return ERR_PTR(-ENOMEM); |
| |
| failrec->start = start; |
| failrec->len = sectorsize; |
| failrec->this_mirror = 0; |
| failrec->bio_flags = 0; |
| |
| read_lock(&em_tree->lock); |
| em = lookup_extent_mapping(em_tree, start, failrec->len); |
| if (!em) { |
| read_unlock(&em_tree->lock); |
| kfree(failrec); |
| return ERR_PTR(-EIO); |
| } |
| |
| if (em->start > start || em->start + em->len <= start) { |
| free_extent_map(em); |
| em = NULL; |
| } |
| read_unlock(&em_tree->lock); |
| if (!em) { |
| kfree(failrec); |
| return ERR_PTR(-EIO); |
| } |
| |
| logical = start - em->start; |
| logical = em->block_start + logical; |
| if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) { |
| logical = em->block_start; |
| failrec->bio_flags = EXTENT_BIO_COMPRESSED; |
| extent_set_compress_type(&failrec->bio_flags, em->compress_type); |
| } |
| |
| btrfs_debug(fs_info, |
| "Get IO Failure Record: (new) logical=%llu, start=%llu, len=%llu", |
| logical, start, failrec->len); |
| |
| failrec->logical = logical; |
| free_extent_map(em); |
| |
| /* Set the bits in the private failure tree */ |
| ret = set_extent_bits(failure_tree, start, start + sectorsize - 1, |
| EXTENT_LOCKED | EXTENT_DIRTY); |
| if (ret >= 0) { |
| ret = set_state_failrec(failure_tree, start, failrec); |
| /* Set the bits in the inode's tree */ |
| ret = set_extent_bits(tree, start, start + sectorsize - 1, |
| EXTENT_DAMAGED); |
| } else { |
| kfree(failrec); |
| return ERR_PTR(ret); |
| } |
| |
| return failrec; |
| } |
| |
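| /* |
| * Decide whether a failed read is worth repairing and pick the next mirror to |
| * read from. Returns false if there is only a single copy of the data or all |
| * mirrors have been tried already, true otherwise with failrec->this_mirror |
| * set to the next mirror to try. |
| */ |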
| static bool btrfs_check_repairable(struct inode *inode, |
| struct io_failure_record *failrec, |
| int failed_mirror) |
| { |
| struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); |
| int num_copies; |
| |
| num_copies = btrfs_num_copies(fs_info, failrec->logical, failrec->len); |
| if (num_copies == 1) { |
| /* |
| * We only have a single copy of the data, so don't bother with |
| * all the retry and error correction code that follows. No |
| * matter what the error is, it is very likely to persist. |
| */ |
| btrfs_debug(fs_info, |
| "Check Repairable: cannot repair, num_copies=%d, next_mirror %d, failed_mirror %d", |
| num_copies, failrec->this_mirror, failed_mirror); |
| return false; |
| } |
| |
| /* The failure record should only contain one sector */ |
| ASSERT(failrec->len == fs_info->sectorsize); |
| |
| /* |
| * There are two premises: |
| * a) deliver good data to the caller |
| * b) correct the bad sectors on disk |
| * |
| * Since we're only doing repair for one sector, we only need to get |
| * a good copy of the failed sector and if we succeed, we have setup |
| * everything for repair_io_failure to do the rest for us. |
| */ |
| failrec->failed_mirror = failed_mirror; |
| failrec->this_mirror++; |
| if (failrec->this_mirror == failed_mirror) |
| failrec->this_mirror++; |
| |
| if (failrec->this_mirror > num_copies) { |
| btrfs_debug(fs_info, |
| "Check Repairable: (fail) num_copies=%d, next_mirror %d, failed_mirror %d", |
| num_copies, failrec->this_mirror, failed_mirror); |
| return false; |
| } |
| |
| return true; |
| } |
| |
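| /* |
| * Submit a repair read for a single failed sector. |
| * |
| * Build a new read bio for the sector at @start, reusing the csum of the |
| * failed bio when present, and submit it to the next mirror through |
| * @submit_bio_hook. Returns 0 if the repair read was submitted, a negative |
| * errno otherwise. |
| */ |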
| int btrfs_repair_one_sector(struct inode *inode, |
| struct bio *failed_bio, u32 bio_offset, |
| struct page *page, unsigned int pgoff, |
| u64 start, int failed_mirror, |
| submit_bio_hook_t *submit_bio_hook) |
| { |
| struct io_failure_record *failrec; |
| struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); |
| struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree; |
| struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree; |
| struct btrfs_bio *failed_bbio = btrfs_bio(failed_bio); |
| const int icsum = bio_offset >> fs_info->sectorsize_bits; |
| struct bio *repair_bio; |
| struct btrfs_bio *repair_bbio; |
| blk_status_t status; |
| |
| btrfs_debug(fs_info, |
| "repair read error: read error at %llu", start); |
| |
| BUG_ON(bio_op(failed_bio) == REQ_OP_WRITE); |
| |
| failrec = btrfs_get_io_failure_record(inode, start); |
| if (IS_ERR(failrec)) |
| return PTR_ERR(failrec); |
| |
| |
| if (!btrfs_check_repairable(inode, failrec, failed_mirror)) { |
| free_io_failure(failure_tree, tree, failrec); |
| return -EIO; |
| } |
| |
| repair_bio = btrfs_bio_alloc(1); |
| repair_bbio = btrfs_bio(repair_bio); |
| repair_bio->bi_opf = REQ_OP_READ; |
| repair_bio->bi_end_io = failed_bio->bi_end_io; |
| repair_bio->bi_iter.bi_sector = failrec->logical >> 9; |
| repair_bio->bi_private = failed_bio->bi_private; |
| |
| if (failed_bbio->csum) { |
| const u32 csum_size = fs_info->csum_size; |
| |
| repair_bbio->csum = repair_bbio->csum_inline; |
| memcpy(repair_bbio->csum, |
| failed_bbio->csum + csum_size * icsum, csum_size); |
| } |
| |
| bio_add_page(repair_bio, page, failrec->len, pgoff); |
| repair_bbio->iter = repair_bio->bi_iter; |
| |
| btrfs_debug(btrfs_sb(inode->i_sb), |
| "repair read error: submitting new read to mirror %d", |
| failrec->this_mirror); |
| |
| status = submit_bio_hook(inode, repair_bio, failrec->this_mirror, |
| failrec->bio_flags); |
| if (status) { |
| free_io_failure(failure_tree, tree, failrec); |
| bio_put(repair_bio); |
| } |
| return blk_status_to_errno(status); |
| } |
| |
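| /* |
| * Finish the read of a sector range inside a page: update the uptodate or |
| * error status (including fs-verity verification for data pages), then unlock |
| * the page or, for subpage filesystems, end the subpage reader range. |
| */ |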
| static void end_page_read(struct page *page, bool uptodate, u64 start, u32 len) |
| { |
| struct btrfs_fs_info *fs_info = btrfs_sb(page->mapping->host->i_sb); |
| |
| ASSERT(page_offset(page) <= start && |
| start + len <= page_offset(page) + PAGE_SIZE); |
| |
| if (uptodate) { |
| if (fsverity_active(page->mapping->host) && |
| !PageError(page) && |
| !PageUptodate(page) && |
| start < i_size_read(page->mapping->host) && |
| !fsverity_verify_page(page)) { |
| btrfs_page_set_error(fs_info, page, start, len); |
| } else { |
| btrfs_page_set_uptodate(fs_info, page, start, len); |
| } |
| } else { |
| btrfs_page_clear_uptodate(fs_info, page, start, len); |
| btrfs_page_set_error(fs_info, page, start, len); |
| } |
| |
| if (fs_info->sectorsize == PAGE_SIZE) |
| unlock_page(page); |
| else |
| btrfs_subpage_end_reader(fs_info, page, start, len); |
| } |
| |
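| /* |
| * Handle the read errors for the range [@start, @end] of a page. |
| * |
| * For each sector flagged in @error_bitmap a repair read is submitted via |
| * btrfs_repair_one_sector(). Sectors without errors, and sectors whose repair |
| * submission failed, are ended and their extent range is unlocked right here. |
| * Returns the first repair submission error, if any, as a blk_status_t. |
| */ |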
| static blk_status_t submit_read_repair(struct inode *inode, |
| struct bio *failed_bio, u32 bio_offset, |
| struct page *page, unsigned int pgoff, |
| u64 start, u64 end, int failed_mirror, |
| unsigned int error_bitmap, |
| submit_bio_hook_t *submit_bio_hook) |
| { |
| struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); |
| const u32 sectorsize = fs_info->sectorsize; |
| const int nr_bits = (end + 1 - start) >> fs_info->sectorsize_bits; |
| int error = 0; |
| int i; |
| |
| BUG_ON(bio_op(failed_bio) == REQ_OP_WRITE); |
| |
| /* We're here because we had some read errors or csum mismatch */ |
| ASSERT(error_bitmap); |
| |
| /* |
| * We only get called on buffered IO, thus page must be mapped and bio |
| * must not be cloned. |
| */ |
| ASSERT(page->mapping && !bio_flagged(failed_bio, BIO_CLONED)); |
| |
| /* Iterate through all the sectors in the range */ |
| for (i = 0; i < nr_bits; i++) { |
| const unsigned int offset = i * sectorsize; |
| struct extent_state *cached = NULL; |
| bool uptodate = false; |
| int ret; |
| |
| if (!(error_bitmap & (1U << i))) { |
| /* |
| * This sector has no error, just end the page read |
| * and unlock the range. |
| */ |
| uptodate = true; |
| goto next; |
| } |
| |
| ret = btrfs_repair_one_sector(inode, failed_bio, |
| bio_offset + offset, |
| page, pgoff + offset, start + offset, |
| failed_mirror, submit_bio_hook); |
| if (!ret) { |
| /* |
| * We have submitted the read repair, the page release |
| * will be handled by the endio function of the |
| * submitted repair bio. |
| * Thus we don't need to do anything here. |
| */ |
| continue; |
| } |
| /* |
| * Repair failed, just record the error but still continue, |
| * otherwise the remaining sectors will not be properly unlocked. |
| */ |
| if (!error) |
| error = ret; |
| next: |
| end_page_read(page, uptodate, start + offset, sectorsize); |
| if (uptodate) |
| set_extent_uptodate(&BTRFS_I(inode)->io_tree, |
| start + offset, |
| start + offset + sectorsize - 1, |
| &cached, GFP_ATOMIC); |
| unlock_extent_cached_atomic(&BTRFS_I(inode)->io_tree, |
| start + offset, |
| start + offset + sectorsize - 1, |
| &cached); |
| } |
| return errno_to_blk_status(error); |
| } |
| |
| /* lots and lots of room for performance fixes in the end_bio funcs */ |
| |
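| /* |
| * Finish the ordered extent accounting for the range [@start, @end] of a page |
| * after writeback. On error, clear the uptodate status, mark the range as |
| * errored and record the error on the mapping. |
| */ |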
| void end_extent_writepage(struct page *page, int err, u64 start, u64 end) |
| { |
| struct btrfs_inode *inode; |
| const bool uptodate = (err == 0); |
| int ret = 0; |
| |
| ASSERT(page && page->mapping); |
| inode = BTRFS_I(page->mapping->host); |
| btrfs_writepage_endio_finish_ordered(inode, page, start, end, uptodate); |
| |
| if (!uptodate) { |
| const struct btrfs_fs_info *fs_info = inode->root->fs_info; |
| u32 len; |
| |
| ASSERT(end + 1 - start <= U32_MAX); |
| len = end + 1 - start; |
| |
| btrfs_page_clear_uptodate(fs_info, page, start, len); |
| btrfs_page_set_error(fs_info, page, start, len); |
| ret = err < 0 ? err : -EIO; |
| mapping_set_error(page->mapping, ret); |
| } |
| } |
| |
| /* |
| * after a writepage IO is done, we need to: |
| * clear the uptodate bits on error |
| * clear the writeback bits in the extent tree for this IO |
| * end_page_writeback if the page has no more pending IO |
| * |
| * Scheduling is not allowed, so the extent state tree is expected |
| * to have one and only one object corresponding to this IO. |
| */ |
| static void end_bio_extent_writepage(struct bio *bio) |
| { |
| int error = blk_status_to_errno(bio->bi_status); |
| struct bio_vec *bvec; |
| u64 start; |
| u64 end; |
| struct bvec_iter_all iter_all; |
| bool first_bvec = true; |
| |
| ASSERT(!bio_flagged(bio, BIO_CLONED)); |
| bio_for_each_segment_all(bvec, bio, iter_all) { |
| struct page *page = bvec->bv_page; |
| struct inode *inode = page->mapping->host; |
| struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); |
| const u32 sectorsize = fs_info->sectorsize; |
| |
| /* Our read/write should always be sector aligned. */ |
| if (!IS_ALIGNED(bvec->bv_offset, sectorsize)) |
| btrfs_err(fs_info, |
| "partial page write in btrfs with offset %u and length %u", |
| bvec->bv_offset, bvec->bv_len); |
| else if (!IS_ALIGNED(bvec->bv_len, sectorsize)) |
| btrfs_info(fs_info, |
| "incomplete page write with offset %u and length %u", |
| bvec->bv_offset, bvec->bv_len); |
| |
| start = page_offset(page) + bvec->bv_offset; |
| end = start + bvec->bv_len - 1; |
| |
| if (first_bvec) { |
| btrfs_record_physical_zoned(inode, start, bio); |
| first_bvec = false; |
| } |
| |
| end_extent_writepage(page, error, start, end); |
| |
| btrfs_page_clear_writeback(fs_info, page, start, bvec->bv_len); |
| } |
| |
| bio_put(bio); |
| } |
| |
| /* |
| * Record a previously processed extent range |
| * |
| * This lets endio_readpage_release_extent() handle a full, merged extent |
| * range at once, reducing the number of extent io tree operations. |
| */ |
| struct processed_extent { |
| struct btrfs_inode *inode; |
| /* Start of the range in @inode */ |
| u64 start; |
| /* End of the range in @inode */ |
| u64 end; |
| bool uptodate; |
| }; |
| |
| /* |
| * Try to release the processed extent range |
| * |
| * May not release the extent range right now if the current range is |
| * contiguous with the processed extent. |
| * |
| * Will release the processed extent when @inode or @uptodate differs from |
| * the processed range, or when the new range is no longer contiguous with |
| * it. |
| * |
| * Passing @inode == NULL will force the processed extent to be released. |
| */ |
| static void endio_readpage_release_extent(struct processed_extent *processed, |
| struct btrfs_inode *inode, u64 start, u64 end, |
| bool uptodate) |
| { |
| struct extent_state *cached = NULL; |
| struct extent_io_tree *tree; |
| |
| /* The first extent, initialize @processed */ |
| if (!processed->inode) |
| goto update; |
| |
| /* |
| * Contiguous with the processed extent, just update the end. |
| * |
| * Several things to notice: |
| * |
| * - a bio can be merged as long as the on-disk bytenr is contiguous |
| * This means we can have pages belonging to other inodes, thus we need |
| * to check if the inode still matches. |
| * - a bvec can contain a range beyond the current page for multi-page bvecs |
| * Thus we need to check that processed->end + 1 >= start |
| */ |
| if (processed->inode == inode && processed->uptodate == uptodate && |
| processed->end + 1 >= start && end >= processed->end) { |
| processed->end = end; |
| return; |
| } |
| |
| tree = &processed->inode->io_tree; |
| /* |
| * Now we don't have range contiguous to the processed range, release |
| * the processed range now. |
| */ |
| if (processed->uptodate && tree->track_uptodate) |
| set_extent_uptodate(tree, processed->start, processed->end, |
| &cached, GFP_ATOMIC); |
| unlock_extent_cached_atomic(tree, processed->start, processed->end, |
| &cached); |
| |
| update: |
| /* Update processed to current range */ |
| processed->inode = inode; |
| processed->start = start; |
| processed->end = end; |
| processed->uptodate = uptodate; |
| } |
| |
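| /* |
| * Mark the start of a read on a page. For subpage filesystems this registers |
| * the full page range with the subpage reader accounting; for regular |
| * sectorsize there is nothing extra to do. |
| */ |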
| static void begin_page_read(struct btrfs_fs_info *fs_info, struct page *page) |
| { |
| ASSERT(PageLocked(page)); |
| if (fs_info->sectorsize == PAGE_SIZE) |
| return; |
| |
| ASSERT(PagePrivate(page)); |
| btrfs_subpage_start_reader(fs_info, page, page_offset(page), PAGE_SIZE); |
| } |
| |
| /* |
| * Find the extent buffer for a given bytenr. |
| * |
| * This is for end_bio_extent_readpage(), thus we can't do any unsafe locking |
| * in endio context. |
| */ |
| static struct extent_buffer *find_extent_buffer_readpage( |
| struct btrfs_fs_info *fs_info, struct page *page, u64 bytenr) |
| { |
| struct extent_buffer *eb; |
| |
| /* |
| * For regular sectorsize, we can use page->private to grab extent |
| * buffer |
| */ |
| if (fs_info->sectorsize == PAGE_SIZE) { |
| ASSERT(PagePrivate(page) && page->private); |
| return (struct extent_buffer *)page->private; |
| } |
| |
| /* For subpage case, we need to lookup buffer radix tree */ |
| rcu_read_lock(); |
| eb = radix_tree_lookup(&fs_info->buffer_radix, |
| bytenr >> fs_info->sectorsize_bits); |
| rcu_read_unlock(); |
| ASSERT(eb); |
| return eb; |
| } |
| |
| /* |
| * after a readpage IO is done, we need to: |
| * clear the uptodate bits on error |
| * set the uptodate bits if things worked |
| * set the page up to date if all extents in the tree are uptodate |
| * clear the lock bit in the extent tree |
| * unlock the page if there are no other extents locked for it |
| * |
| * Scheduling is not allowed, so the extent state tree is expected |
| * to have one and only one object corresponding to this IO. |
| */ |
| static void end_bio_extent_readpage(struct bio *bio) |
| { |
| struct bio_vec *bvec; |
| struct btrfs_bio *bbio = btrfs_bio(bio); |
| struct extent_io_tree *tree, *failure_tree; |
| struct processed_extent processed = { 0 }; |
| /* |
| * The offset from the beginning of the bio. Since one bio can never be |
| * larger than UINT_MAX, u32 is enough here. |
| */ |
| u32 bio_offset = 0; |
| int mirror; |
| int ret; |
| struct bvec_iter_all iter_all; |
| |
| ASSERT(!bio_flagged(bio, BIO_CLONED)); |
| bio_for_each_segment_all(bvec, bio, iter_all) { |
| bool uptodate = !bio->bi_status; |
| struct page *page = bvec->bv_page; |
| struct inode *inode = page->mapping->host; |
| struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); |
| const u32 sectorsize = fs_info->sectorsize; |
| unsigned int error_bitmap = (unsigned int)-1; |
| u64 start; |
| u64 end; |
| u32 len; |
| |
| btrfs_debug(fs_info, |
| "end_bio_extent_readpage: bi_sector=%llu, err=%d, mirror=%u", |
| bio->bi_iter.bi_sector, bio->bi_status, |
| bbio->mirror_num); |
| tree = &BTRFS_I(inode)->io_tree; |
| failure_tree = &BTRFS_I(inode)->io_failure_tree; |
| |
| /* |
| * We always issue full-sector reads, but if some block in a |
| * page fails to read, blk_update_request() will advance |
| * bv_offset and adjust bv_len to compensate. Print a warning |
| * for unaligned offsets, and an error if they don't add up to |
| * a full sector. |
| */ |
| if (!IS_ALIGNED(bvec->bv_offset, sectorsize)) |
| btrfs_err(fs_info, |
| "partial page read in btrfs with offset %u and length %u", |
| bvec->bv_offset, bvec->bv_len); |
| else if (!IS_ALIGNED(bvec->bv_offset + bvec->bv_len, |
| sectorsize)) |
| btrfs_info(fs_info, |
| "incomplete page read with offset %u and length %u", |
| bvec->bv_offset, bvec->bv_len); |
| |
| start = page_offset(page) + bvec->bv_offset; |
| end = start + bvec->bv_len - 1; |
| len = bvec->bv_len; |
| |
| mirror = bbio->mirror_num; |
| if (likely(uptodate)) { |
| if (is_data_inode(inode)) { |
| error_bitmap = btrfs_verify_data_csum(bbio, |
| bio_offset, page, start, end); |
| ret = error_bitmap; |
| } else { |
| ret = btrfs_validate_metadata_buffer(bbio, |
| page, start, end, mirror); |
| } |
| if (ret) |
| uptodate = false; |
| else |
| clean_io_failure(BTRFS_I(inode)->root->fs_info, |
| failure_tree, tree, start, |
| page, |
| btrfs_ino(BTRFS_I(inode)), 0); |
| } |
| |
| if (likely(uptodate)) |
| goto readpage_ok; |
| |
| if (is_data_inode(inode)) { |
| /* |
| * submit_read_repair() will handle all the good |
| * and bad sectors, we just continue to the next bvec. |
| */ |
| submit_read_repair(inode, bio, bio_offset, page, |
| start - page_offset(page), start, |
| end, mirror, error_bitmap, |
| btrfs_submit_data_bio); |
| |
| ASSERT(bio_offset + len > bio_offset); |
| bio_offset += len; |
| continue; |
| } else { |
| struct extent_buffer *eb; |
| |
| eb = find_extent_buffer_readpage(fs_info, page, start); |
| set_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags); |
| eb->read_mirror = mirror; |
| atomic_dec(&eb->io_pages); |
| if (test_and_clear_bit(EXTENT_BUFFER_READAHEAD, |
| &eb->bflags)) |
| btree_readahead_hook(eb, -EIO); |
| } |
| readpage_ok: |
| if (likely(uptodate)) { |
| loff_t i_size = i_size_read(inode); |
| pgoff_t end_index = i_size >> PAGE_SHIFT; |
| |
| /* |
| * Zero out the remaining part if this range straddles |
| * i_size. |
| * |
| * Here we should only zero the range inside the bvec, |
| * not touch anything else. |
| * |
| * NOTE: i_size is exclusive while end is inclusive. |
| */ |
| if (page->index == end_index && i_size <= end) { |
| u32 zero_start = max(offset_in_page(i_size), |
| offset_in_page(start)); |
| |
| zero_user_segment(page, zero_start, |
| offset_in_page(end) + 1); |
| } |
| } |
| ASSERT(bio_offset + len > bio_offset); |
| bio_offset += len; |
| |
| /* Update page status and unlock */ |
| end_page_read(page, uptodate, start, len); |
| endio_readpage_release_extent(&processed, BTRFS_I(inode), |
| start, end, PageUptodate(page)); |
| } |
| /* Release the last extent */ |
| endio_readpage_release_extent(&processed, NULL, 0, 0, false); |
| btrfs_bio_free_csum(bbio); |
| bio_put(bio); |
| } |
| |
| /* |
| * Initialize the members up to but not including 'bio'. Use after allocating a |
| * new bio with bio_alloc_bioset, as it does not initialize the bytes outside |
| * of 'bio' (use of __GFP_ZERO is not supported). |
| */ |
| static inline void btrfs_bio_init(struct btrfs_bio *bbio) |
| { |
| memset(bbio, 0, offsetof(
|