/* SPDX-License-Identifier: GPL-2.0 */

#ifndef BTRFS_SUBPAGE_H
#define BTRFS_SUBPAGE_H

#include <linux/spinlock.h>
/*
 * Extra info for subpage bitmap.
 *
 * For subpage we pack all uptodate/dirty/writeback/ordered bitmaps into
 * one larger bitmap.
 *
 * This structure records how they are organized in the bitmap:
 *
 * /- uptodate_offset	/- dirty_offset	/- ordered_offset
 * |			|		|
 * v			v		v
 * |u|u|u|u|........|u|u|d|d|.......|d|d|o|o|.......|o|o|
 * |<- bitmap_nr_bits ->|
 * |<----------------- total_nr_bits ------------------>|
 */
struct btrfs_subpage_info {
	/* Number of bits for each bitmap */
	unsigned int bitmap_nr_bits;

	/* Total number of bits for the whole bitmap */
	unsigned int total_nr_bits;

	/*
	 * *_offset indicates where each sub-bitmap starts, the length is
	 * always @bitmap_nr_bits, which is calculated from
	 * PAGE_SIZE / sectorsize.
	 */
	unsigned int uptodate_offset;
	unsigned int dirty_offset;
	unsigned int writeback_offset;
	unsigned int ordered_offset;
	unsigned int checked_offset;
};
40
41/*
Qu Wenruocac06d82021-01-26 16:33:47 +080042 * Structure to trace status of each sector inside a page, attached to
43 * page::private for both data and metadata inodes.
44 */
45struct btrfs_subpage {
46 /* Common members for both data and metadata pages */
47 spinlock_t lock;
Qu Wenruo3d078ef2021-06-07 17:02:58 +080048 /*
49 * Both data and metadata needs to track how many readers are for the
50 * page.
51 * Data relies on @readers to unlock the page when last reader finished.
52 * While metadata doesn't need page unlock, it needs to prevent
53 * page::private get cleared before the last end_page_read().
54 */
55 atomic_t readers;
Qu Wenruo760f9912021-01-26 16:33:48 +080056 union {
Qu Wenruo8ff84662021-01-26 16:33:50 +080057 /*
58 * Structures only used by metadata
59 *
60 * @eb_refs should only be operated under private_lock, as it
61 * manages whether the subpage can be detached.
62 */
63 atomic_t eb_refs;
Qu Wenruo6f174002021-05-31 16:50:45 +080064
Qu Wenruo72a69cd2021-08-17 17:38:52 +080065 /* Structures only used by data */
66 atomic_t writers;
Qu Wenruo760f9912021-01-26 16:33:48 +080067 };
Qu Wenruo72a69cd2021-08-17 17:38:52 +080068 unsigned long bitmaps[];
Qu Wenruocac06d82021-01-26 16:33:47 +080069};
70
/* Whether a subpage structure is attached for metadata or for data usage. */
enum btrfs_subpage_type {
	BTRFS_SUBPAGE_METADATA,
	BTRFS_SUBPAGE_DATA,
};
75
Qu Wenruofbca46e2022-01-13 13:22:09 +080076bool btrfs_is_subpage(const struct btrfs_fs_info *fs_info, struct page *page);
77
Qu Wenruo8481dd82021-08-17 17:38:51 +080078void btrfs_init_subpage_info(struct btrfs_subpage_info *subpage_info, u32 sectorsize);
Qu Wenruocac06d82021-01-26 16:33:47 +080079int btrfs_attach_subpage(const struct btrfs_fs_info *fs_info,
80 struct page *page, enum btrfs_subpage_type type);
81void btrfs_detach_subpage(const struct btrfs_fs_info *fs_info,
82 struct page *page);
83
Qu Wenruo760f9912021-01-26 16:33:48 +080084/* Allocate additional data where page represents more than one sector */
Qu Wenruo651fb412021-08-17 17:38:50 +080085struct btrfs_subpage *btrfs_alloc_subpage(const struct btrfs_fs_info *fs_info,
86 enum btrfs_subpage_type type);
Qu Wenruo760f9912021-01-26 16:33:48 +080087void btrfs_free_subpage(struct btrfs_subpage *subpage);
88
Qu Wenruo8ff84662021-01-26 16:33:50 +080089void btrfs_page_inc_eb_refs(const struct btrfs_fs_info *fs_info,
90 struct page *page);
91void btrfs_page_dec_eb_refs(const struct btrfs_fs_info *fs_info,
92 struct page *page);
93
Qu Wenruo92082d42021-02-02 10:28:36 +080094void btrfs_subpage_start_reader(const struct btrfs_fs_info *fs_info,
95 struct page *page, u64 start, u32 len);
96void btrfs_subpage_end_reader(const struct btrfs_fs_info *fs_info,
97 struct page *page, u64 start, u32 len);
98
Qu Wenruo1e1de382021-05-31 16:50:44 +080099void btrfs_subpage_start_writer(const struct btrfs_fs_info *fs_info,
100 struct page *page, u64 start, u32 len);
101bool btrfs_subpage_end_and_test_writer(const struct btrfs_fs_info *fs_info,
102 struct page *page, u64 start, u32 len);
103int btrfs_page_start_writer_lock(const struct btrfs_fs_info *fs_info,
104 struct page *page, u64 start, u32 len);
105void btrfs_page_end_writer_lock(const struct btrfs_fs_info *fs_info,
106 struct page *page, u64 start, u32 len);
107
Qu Wenruoa1d767c2021-01-26 16:33:52 +0800108/*
109 * Template for subpage related operations.
110 *
111 * btrfs_subpage_*() are for call sites where the page has subpage attached and
112 * the range is ensured to be inside the page.
113 *
114 * btrfs_page_*() are for call sites where the page can either be subpage
115 * specific or regular page. The function will handle both cases.
116 * But the range still needs to be inside the page.
Qu Wenruo60e2d252021-05-31 16:50:39 +0800117 *
118 * btrfs_page_clamp_*() are similar to btrfs_page_*(), except the range doesn't
119 * need to be inside the page. Those functions will truncate the range
120 * automatically.
Qu Wenruoa1d767c2021-01-26 16:33:52 +0800121 */
122#define DECLARE_BTRFS_SUBPAGE_OPS(name) \
123void btrfs_subpage_set_##name(const struct btrfs_fs_info *fs_info, \
124 struct page *page, u64 start, u32 len); \
125void btrfs_subpage_clear_##name(const struct btrfs_fs_info *fs_info, \
126 struct page *page, u64 start, u32 len); \
127bool btrfs_subpage_test_##name(const struct btrfs_fs_info *fs_info, \
128 struct page *page, u64 start, u32 len); \
129void btrfs_page_set_##name(const struct btrfs_fs_info *fs_info, \
130 struct page *page, u64 start, u32 len); \
131void btrfs_page_clear_##name(const struct btrfs_fs_info *fs_info, \
132 struct page *page, u64 start, u32 len); \
133bool btrfs_page_test_##name(const struct btrfs_fs_info *fs_info, \
Qu Wenruo60e2d252021-05-31 16:50:39 +0800134 struct page *page, u64 start, u32 len); \
135void btrfs_page_clamp_set_##name(const struct btrfs_fs_info *fs_info, \
136 struct page *page, u64 start, u32 len); \
137void btrfs_page_clamp_clear_##name(const struct btrfs_fs_info *fs_info, \
138 struct page *page, u64 start, u32 len); \
139bool btrfs_page_clamp_test_##name(const struct btrfs_fs_info *fs_info, \
Qu Wenruoa1d767c2021-01-26 16:33:52 +0800140 struct page *page, u64 start, u32 len);
141
142DECLARE_BTRFS_SUBPAGE_OPS(uptodate);
Qu Wenruod8a57132021-03-25 15:14:37 +0800143DECLARE_BTRFS_SUBPAGE_OPS(dirty);
Qu Wenruo3470da32021-03-25 15:14:38 +0800144DECLARE_BTRFS_SUBPAGE_OPS(writeback);
Qu Wenruo6f174002021-05-31 16:50:45 +0800145DECLARE_BTRFS_SUBPAGE_OPS(ordered);
Qu Wenruoe4f94342021-09-27 15:21:49 +0800146DECLARE_BTRFS_SUBPAGE_OPS(checked);
Qu Wenruod8a57132021-03-25 15:14:37 +0800147
148bool btrfs_subpage_clear_and_test_dirty(const struct btrfs_fs_info *fs_info,
149 struct page *page, u64 start, u32 len);
Qu Wenruoa1d767c2021-01-26 16:33:52 +0800150
Qu Wenruocc1d0d92021-07-26 14:34:58 +0800151void btrfs_page_assert_not_dirty(const struct btrfs_fs_info *fs_info,
152 struct page *page);
Qu Wenruoe55a0de2021-09-27 15:22:05 +0800153void btrfs_page_unlock_writer(struct btrfs_fs_info *fs_info, struct page *page,
154 u64 start, u32 len);
Qu Wenruo75258f22023-05-26 20:30:53 +0800155void __cold btrfs_subpage_dump_bitmap(const struct btrfs_fs_info *fs_info,
156 struct page *page, u64 start, u32 len);
Qu Wenruocc1d0d92021-07-26 14:34:58 +0800157
Qu Wenruocac06d82021-01-26 16:33:47 +0800158#endif