// SPDX-License-Identifier: GPL-2.0-only
/*
 * bitmap.c two-level bitmap (C) Peter T. Breuer (ptb@ot.uc3m.es) 2003
 *
 * bitmap_create  - sets up the bitmap structure
 * bitmap_destroy - destroys the bitmap structure
 *
 * additions, Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.:
 * - added disk storage for bitmap
 * - changes to allow various bitmap chunk sizes
 */

/*
 * Still to do:
 *
 * flush after percent set rather than just time based. (maybe both).
 */

#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/buffer_head.h>
#include <linux/seq_file.h>
#include <trace/events/block.h>
#include "md.h"
#include "md-bitmap.h"

static inline char *bmname(struct bitmap *bitmap)
{
	return bitmap->mddev ? mdname(bitmap->mddev) : "mdX";
}

/*
 * check a page and, if necessary, allocate it (or hijack it if the alloc fails)
 *
 * 1) check to see if this page is allocated, if it's not then try to alloc
 * 2) if the alloc fails, set the page's hijacked flag so we'll use the
 *    page pointer directly as a counter
 *
 * if we find our page, we increment the page's refcount so that it stays
 * allocated while we're using it
 */
static int md_bitmap_checkpage(struct bitmap_counts *bitmap,
			       unsigned long page, int create, int no_hijack)
__releases(bitmap->lock)
__acquires(bitmap->lock)
{
	unsigned char *mappage;

	if (page >= bitmap->pages) {
		/* This can happen if bitmap_start_sync goes beyond
		 * End-of-device while looking for a whole page.
		 * It is harmless.
		 */
		return -EINVAL;
	}

	if (bitmap->bp[page].hijacked) /* it's hijacked, don't try to alloc */
		return 0;

	if (bitmap->bp[page].map) /* page is already allocated, just return */
		return 0;

	if (!create)
		return -ENOENT;

	/* this page has not been allocated yet */

	spin_unlock_irq(&bitmap->lock);
	/* It is possible that this is being called inside a
	 * prepare_to_wait/finish_wait loop from raid5.c:make_request().
	 * In general it is not permitted to sleep in that context as it
	 * can cause the loop to spin freely.
	 * That doesn't apply here as we can only reach this point
	 * once with any loop.
	 * When this function completes, either bp[page].map or
	 * bp[page].hijacked will be set.  In either case, this function will
	 * abort before getting to this point again.  So there is
	 * no risk of a free-spin, and so it is safe to assert
	 * that sleeping here is allowed.
	 */
	sched_annotate_sleep();
	mappage = kzalloc(PAGE_SIZE, GFP_NOIO);
	spin_lock_irq(&bitmap->lock);

	if (mappage == NULL) {
		pr_debug("md/bitmap: map page allocation failed, hijacking\n");
		/* We don't support hijack for cluster raid */
		if (no_hijack)
			return -ENOMEM;
		/* failed - set the hijacked flag so that we can use the
		 * pointer as a counter */
		if (!bitmap->bp[page].map)
			bitmap->bp[page].hijacked = 1;
	} else if (bitmap->bp[page].map ||
		   bitmap->bp[page].hijacked) {
		/* somebody beat us to getting the page */
		kfree(mappage);
	} else {

		/* no page was in place and we have one, so install it */

		bitmap->bp[page].map = mappage;
		bitmap->missing_pages--;
	}
	return 0;
}

/* if page is completely empty, put it back on the free list, or dealloc it */
/* if page was hijacked, unmark the flag so it might get alloced next time */
/* Note: lock should be held when calling this */
static void md_bitmap_checkfree(struct bitmap_counts *bitmap, unsigned long page)
{
	char *ptr;

	if (bitmap->bp[page].count) /* page is still busy */
		return;

	/* page is no longer in use, it can be released */

	if (bitmap->bp[page].hijacked) { /* page was hijacked, undo this now */
		bitmap->bp[page].hijacked = 0;
		bitmap->bp[page].map = NULL;
	} else {
		/* normal case, free the page */
		ptr = bitmap->bp[page].map;
		bitmap->bp[page].map = NULL;
		bitmap->missing_pages++;
		kfree(ptr);
	}
}

/*
 * bitmap file handling - read and write the bitmap file and its superblock
 */

/*
 * basic page I/O operations
 */

/* IO operations when bitmap is stored near all superblocks */
static int read_sb_page(struct mddev *mddev, loff_t offset,
			struct page *page,
			unsigned long index, int size)
{
	/* choose a good rdev and read the page from there */

	struct md_rdev *rdev;
	sector_t target;

	rdev_for_each(rdev, mddev) {
		if (! test_bit(In_sync, &rdev->flags)
		    || test_bit(Faulty, &rdev->flags)
		    || test_bit(Bitmap_sync, &rdev->flags))
			continue;

		target = offset + index * (PAGE_SIZE/512);

		if (sync_page_io(rdev, target,
				 roundup(size, bdev_logical_block_size(rdev->bdev)),
				 page, REQ_OP_READ, 0, true)) {
			page->index = index;
			return 0;
		}
	}
	return -EIO;
}

static struct md_rdev *next_active_rdev(struct md_rdev *rdev, struct mddev *mddev)
{
	/* Iterate the disks of an mddev, using rcu to protect access to the
	 * linked list, and raising the refcount of devices we return to ensure
	 * they don't disappear while in use.
	 * As devices are only added or removed when raid_disk is < 0 and
	 * nr_pending is 0 and In_sync is clear, the entries we return will
	 * still be in the same position on the list when we re-enter
	 * list_for_each_entry_continue_rcu.
	 *
	 * Note that if entered with 'rdev == NULL' to start at the
	 * beginning, we temporarily assign 'rdev' to an address which
	 * isn't really an rdev, but which can be used by
	 * list_for_each_entry_continue_rcu() to find the first entry.
	 */
	rcu_read_lock();
	if (rdev == NULL)
		/* start at the beginning */
		rdev = list_entry(&mddev->disks, struct md_rdev, same_set);
	else {
		/* release the previous rdev and start from there. */
		rdev_dec_pending(rdev, mddev);
	}
	list_for_each_entry_continue_rcu(rdev, &mddev->disks, same_set) {
		if (rdev->raid_disk >= 0 &&
		    !test_bit(Faulty, &rdev->flags)) {
			/* this is a usable device */
			atomic_inc(&rdev->nr_pending);
			rcu_read_unlock();
			return rdev;
		}
	}
	rcu_read_unlock();
	return NULL;
}

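/*
 * Write a bitmap page to every active rdev, placing it relative to each
 * device's superblock at mddev->bitmap_info.offset.  Before each write we
 * check that the bitmap page cannot overlap the data or the metadata, and
 * bail out with -EINVAL if the layout looks unsafe.  If 'wait' is set,
 * block until all writes complete, restarting from the top if any failed.
 */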
static int write_sb_page(struct bitmap *bitmap, struct page *page, int wait)
{
	struct md_rdev *rdev;
	struct block_device *bdev;
	struct mddev *mddev = bitmap->mddev;
	struct bitmap_storage *store = &bitmap->storage;

restart:
	rdev = NULL;
	while ((rdev = next_active_rdev(rdev, mddev)) != NULL) {
		int size = PAGE_SIZE;
		loff_t offset = mddev->bitmap_info.offset;

		bdev = (rdev->meta_bdev) ? rdev->meta_bdev : rdev->bdev;

		if (page->index == store->file_pages-1) {
			int last_page_size = store->bytes & (PAGE_SIZE-1);
			if (last_page_size == 0)
				last_page_size = PAGE_SIZE;
			size = roundup(last_page_size,
				       bdev_logical_block_size(bdev));
		}
		/* Just make sure we aren't corrupting data or
		 * metadata
		 */
		if (mddev->external) {
			/* Bitmap could be anywhere. */
			if (rdev->sb_start + offset + (page->index
						       * (PAGE_SIZE/512))
			    > rdev->data_offset
			    &&
			    rdev->sb_start + offset
			    < (rdev->data_offset + mddev->dev_sectors
			     + (PAGE_SIZE/512)))
				goto bad_alignment;
		} else if (offset < 0) {
			/* DATA  BITMAP METADATA  */
			if (offset
			    + (long)(page->index * (PAGE_SIZE/512))
			    + size/512 > 0)
				/* bitmap runs in to metadata */
				goto bad_alignment;
			if (rdev->data_offset + mddev->dev_sectors
			    > rdev->sb_start + offset)
				/* data runs in to bitmap */
				goto bad_alignment;
		} else if (rdev->sb_start < rdev->data_offset) {
			/* METADATA BITMAP DATA */
			if (rdev->sb_start
			    + offset
			    + page->index*(PAGE_SIZE/512) + size/512
			    > rdev->data_offset)
				/* bitmap runs in to data */
				goto bad_alignment;
		} else {
			/* DATA METADATA BITMAP - no problems */
		}
		md_super_write(mddev, rdev,
			       rdev->sb_start + offset
			       + page->index * (PAGE_SIZE/512),
			       size,
			       page);
	}

	if (wait && md_super_wait(mddev) < 0)
		goto restart;
	return 0;

 bad_alignment:
	return -EINVAL;
}

static void md_bitmap_file_kick(struct bitmap *bitmap);
/*
 * write out a page to a file
 */
static void write_page(struct bitmap *bitmap, struct page *page, int wait)
{
	struct buffer_head *bh;

	if (bitmap->storage.file == NULL) {
		switch (write_sb_page(bitmap, page, wait)) {
		case -EINVAL:
			set_bit(BITMAP_WRITE_ERROR, &bitmap->flags);
		}
	} else {

		bh = page_buffers(page);

		while (bh && bh->b_blocknr) {
			atomic_inc(&bitmap->pending_writes);
			set_buffer_locked(bh);
			set_buffer_mapped(bh);
			submit_bh(REQ_OP_WRITE, REQ_SYNC, bh);
			bh = bh->b_this_page;
		}

		if (wait)
			wait_event(bitmap->write_wait,
				   atomic_read(&bitmap->pending_writes)==0);
	}
	if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags))
		md_bitmap_file_kick(bitmap);
}

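/*
 * Completion handler for the buffer_head I/O submitted by write_page() and
 * read_page(): record any failure in the bitmap flags and wake up anyone
 * waiting for the pending-write count to reach zero.
 */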
static void end_bitmap_write(struct buffer_head *bh, int uptodate)
{
	struct bitmap *bitmap = bh->b_private;

	if (!uptodate)
		set_bit(BITMAP_WRITE_ERROR, &bitmap->flags);
	if (atomic_dec_and_test(&bitmap->pending_writes))
		wake_up(&bitmap->write_wait);
}

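/*
 * Free the buffer_heads that read_page() attached to a filemap page, then
 * drop the page itself.
 */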
static void free_buffers(struct page *page)
{
	struct buffer_head *bh;

	if (!PagePrivate(page))
		return;

	bh = page_buffers(page);
	while (bh) {
		struct buffer_head *next = bh->b_this_page;
		free_buffer_head(bh);
		bh = next;
	}
	detach_page_private(page);
	put_page(page);
}

/* read a page from a file.
 * We both read the page, and attach buffers to the page to record the
 * address of each block (using bmap).  These addresses will be used
 * to write the block later, completely bypassing the filesystem.
 * This usage is similar to how swap files are handled, and allows us
 * to write to a file with no concerns of memory allocation failing.
 */
static int read_page(struct file *file, unsigned long index,
		     struct bitmap *bitmap,
		     unsigned long count,
		     struct page *page)
{
	int ret = 0;
	struct inode *inode = file_inode(file);
	struct buffer_head *bh;
	sector_t block, blk_cur;
	unsigned long blocksize = i_blocksize(inode);

	pr_debug("read bitmap file (%dB @ %llu)\n", (int)PAGE_SIZE,
		 (unsigned long long)index << PAGE_SHIFT);

	bh = alloc_page_buffers(page, blocksize, false);
	if (!bh) {
		ret = -ENOMEM;
		goto out;
	}
	attach_page_private(page, bh);
	blk_cur = index << (PAGE_SHIFT - inode->i_blkbits);
	while (bh) {
		block = blk_cur;

		if (count == 0)
			bh->b_blocknr = 0;
		else {
			ret = bmap(inode, &block);
			if (ret || !block) {
				ret = -EINVAL;
				bh->b_blocknr = 0;
				goto out;
			}

			bh->b_blocknr = block;
			bh->b_bdev = inode->i_sb->s_bdev;
			if (count < blocksize)
				count = 0;
			else
				count -= blocksize;

			bh->b_end_io = end_bitmap_write;
			bh->b_private = bitmap;
			atomic_inc(&bitmap->pending_writes);
			set_buffer_locked(bh);
			set_buffer_mapped(bh);
			submit_bh(REQ_OP_READ, 0, bh);
		}
		blk_cur++;
		bh = bh->b_this_page;
	}
	page->index = index;

	wait_event(bitmap->write_wait,
		   atomic_read(&bitmap->pending_writes)==0);
	if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags))
		ret = -EIO;
out:
	if (ret)
		pr_err("md: bitmap read error: (%dB @ %llu): %d\n",
		       (int)PAGE_SIZE,
		       (unsigned long long)index << PAGE_SHIFT,
		       ret);
	return ret;
}

/*
 * bitmap file superblock operations
 */

/*
 * md_bitmap_wait_writes() should be called before writing any bitmap
 * blocks, to ensure previous writes, particularly from
 * md_bitmap_daemon_work(), have completed.
 */
static void md_bitmap_wait_writes(struct bitmap *bitmap)
{
	if (bitmap->storage.file)
		wait_event(bitmap->write_wait,
			   atomic_read(&bitmap->pending_writes)==0);
	else
		/* Note that we ignore the return value.  The writes
		 * might have failed, but that would just mean that
		 * some bits which should be cleared haven't been,
		 * which is safe.  The relevant bitmap blocks will
		 * probably get written again, but there is no great
		 * loss if they aren't.
		 */
		md_super_wait(bitmap->mddev);
}

/* update the event counter and sync the superblock to disk */
void md_bitmap_update_sb(struct bitmap *bitmap)
{
	bitmap_super_t *sb;

	if (!bitmap || !bitmap->mddev) /* no bitmap for this array */
		return;
	if (bitmap->mddev->bitmap_info.external)
		return;
	if (!bitmap->storage.sb_page) /* no superblock */
		return;
	sb = kmap_atomic(bitmap->storage.sb_page);
	sb->events = cpu_to_le64(bitmap->mddev->events);
	if (bitmap->mddev->events < bitmap->events_cleared)
		/* rocking back to read-only */
		bitmap->events_cleared = bitmap->mddev->events;
	sb->events_cleared = cpu_to_le64(bitmap->events_cleared);
	/*
	 * clear BITMAP_WRITE_ERROR bit to protect against the case that
	 * a bitmap write error occurred but the later writes succeeded.
	 */
	sb->state = cpu_to_le32(bitmap->flags & ~BIT(BITMAP_WRITE_ERROR));
	/* Just in case these have been changed via sysfs: */
	sb->daemon_sleep = cpu_to_le32(bitmap->mddev->bitmap_info.daemon_sleep/HZ);
	sb->write_behind = cpu_to_le32(bitmap->mddev->bitmap_info.max_write_behind);
	/* This might have been changed by a reshape */
	sb->sync_size = cpu_to_le64(bitmap->mddev->resync_max_sectors);
	sb->chunksize = cpu_to_le32(bitmap->mddev->bitmap_info.chunksize);
	sb->nodes = cpu_to_le32(bitmap->mddev->bitmap_info.nodes);
	sb->sectors_reserved = cpu_to_le32(bitmap->mddev->
					   bitmap_info.space);
	kunmap_atomic(sb);
	write_page(bitmap, bitmap->storage.sb_page, 1);
}
EXPORT_SYMBOL(md_bitmap_update_sb);

/* print out the bitmap file superblock */
void md_bitmap_print_sb(struct bitmap *bitmap)
{
	bitmap_super_t *sb;

	if (!bitmap || !bitmap->storage.sb_page)
		return;
	sb = kmap_atomic(bitmap->storage.sb_page);
	pr_debug("%s: bitmap file superblock:\n", bmname(bitmap));
	pr_debug("         magic: %08x\n", le32_to_cpu(sb->magic));
	pr_debug("       version: %d\n", le32_to_cpu(sb->version));
	pr_debug("          uuid: %08x.%08x.%08x.%08x\n",
		 le32_to_cpu(*(__le32 *)(sb->uuid+0)),
		 le32_to_cpu(*(__le32 *)(sb->uuid+4)),
		 le32_to_cpu(*(__le32 *)(sb->uuid+8)),
		 le32_to_cpu(*(__le32 *)(sb->uuid+12)));
	pr_debug("        events: %llu\n",
		 (unsigned long long) le64_to_cpu(sb->events));
	pr_debug("events cleared: %llu\n",
		 (unsigned long long) le64_to_cpu(sb->events_cleared));
	pr_debug("         state: %08x\n", le32_to_cpu(sb->state));
	pr_debug("     chunksize: %d B\n", le32_to_cpu(sb->chunksize));
	pr_debug("  daemon sleep: %ds\n", le32_to_cpu(sb->daemon_sleep));
	pr_debug("     sync size: %llu KB\n",
		 (unsigned long long)le64_to_cpu(sb->sync_size)/2);
	pr_debug("max write behind: %d\n", le32_to_cpu(sb->write_behind));
	kunmap_atomic(sb);
}

/*
 * bitmap_new_disk_sb
 * @bitmap
 *
 * This function is somewhat the reverse of bitmap_read_sb.  bitmap_read_sb
 * reads and verifies the on-disk bitmap superblock and populates bitmap_info.
 * This function verifies 'bitmap_info' and populates the on-disk bitmap
 * structure, which is to be written to disk.
 *
 * Returns: 0 on success, -Exxx on error
 */
static int md_bitmap_new_disk_sb(struct bitmap *bitmap)
{
	bitmap_super_t *sb;
	unsigned long chunksize, daemon_sleep, write_behind;

	bitmap->storage.sb_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (bitmap->storage.sb_page == NULL)
		return -ENOMEM;
	bitmap->storage.sb_page->index = 0;

	sb = kmap_atomic(bitmap->storage.sb_page);

	sb->magic = cpu_to_le32(BITMAP_MAGIC);
	sb->version = cpu_to_le32(BITMAP_MAJOR_HI);

	chunksize = bitmap->mddev->bitmap_info.chunksize;
	BUG_ON(!chunksize);
	if (!is_power_of_2(chunksize)) {
		kunmap_atomic(sb);
		pr_warn("bitmap chunksize not a power of 2\n");
		return -EINVAL;
	}
	sb->chunksize = cpu_to_le32(chunksize);

	daemon_sleep = bitmap->mddev->bitmap_info.daemon_sleep;
	if (!daemon_sleep || (daemon_sleep > MAX_SCHEDULE_TIMEOUT)) {
		pr_debug("Choosing daemon_sleep default (5 sec)\n");
		daemon_sleep = 5 * HZ;
	}
	sb->daemon_sleep = cpu_to_le32(daemon_sleep);
	bitmap->mddev->bitmap_info.daemon_sleep = daemon_sleep;

	/*
	 * FIXME: write_behind for RAID1.  If not specified, what
	 * is a good choice?  We choose COUNTER_MAX / 2 arbitrarily.
	 */
	write_behind = bitmap->mddev->bitmap_info.max_write_behind;
	if (write_behind > COUNTER_MAX)
		write_behind = COUNTER_MAX / 2;
	sb->write_behind = cpu_to_le32(write_behind);
	bitmap->mddev->bitmap_info.max_write_behind = write_behind;

	/* keep the array size field of the bitmap superblock up to date */
	sb->sync_size = cpu_to_le64(bitmap->mddev->resync_max_sectors);

	memcpy(sb->uuid, bitmap->mddev->uuid, 16);

	set_bit(BITMAP_STALE, &bitmap->flags);
	sb->state = cpu_to_le32(bitmap->flags);
	bitmap->events_cleared = bitmap->mddev->events;
	sb->events_cleared = cpu_to_le64(bitmap->mddev->events);
	bitmap->mddev->bitmap_info.nodes = 0;

	kunmap_atomic(sb);

	return 0;
}

/* read the superblock from the bitmap file and initialize some bitmap fields */
static int md_bitmap_read_sb(struct bitmap *bitmap)
{
	char *reason = NULL;
	bitmap_super_t *sb;
	unsigned long chunksize, daemon_sleep, write_behind;
	unsigned long long events;
	int nodes = 0;
	unsigned long sectors_reserved = 0;
	int err = -EINVAL;
	struct page *sb_page;
	loff_t offset = bitmap->mddev->bitmap_info.offset;

	if (!bitmap->storage.file && !bitmap->mddev->bitmap_info.offset) {
		chunksize = 128 * 1024 * 1024;
		daemon_sleep = 5 * HZ;
		write_behind = 0;
		set_bit(BITMAP_STALE, &bitmap->flags);
		err = 0;
		goto out_no_sb;
	}
	/* page 0 is the superblock, read it... */
	sb_page = alloc_page(GFP_KERNEL);
	if (!sb_page)
		return -ENOMEM;
	bitmap->storage.sb_page = sb_page;

re_read:
	/* If cluster_slot is set, the cluster is setup */
	if (bitmap->cluster_slot >= 0) {
		sector_t bm_blocks = bitmap->mddev->resync_max_sectors;

		bm_blocks = DIV_ROUND_UP_SECTOR_T(bm_blocks,
					(bitmap->mddev->bitmap_info.chunksize >> 9));
		/* bits to bytes */
		bm_blocks = ((bm_blocks+7) >> 3) + sizeof(bitmap_super_t);
		/* to 4k blocks */
		bm_blocks = DIV_ROUND_UP_SECTOR_T(bm_blocks, 4096);
		offset = bitmap->mddev->bitmap_info.offset + (bitmap->cluster_slot * (bm_blocks << 3));
		pr_debug("%s:%d bm slot: %d offset: %llu\n", __func__, __LINE__,
			bitmap->cluster_slot, offset);
	}

	if (bitmap->storage.file) {
		loff_t isize = i_size_read(bitmap->storage.file->f_mapping->host);
		int bytes = isize > PAGE_SIZE ? PAGE_SIZE : isize;

		err = read_page(bitmap->storage.file, 0,
				bitmap, bytes, sb_page);
	} else {
		err = read_sb_page(bitmap->mddev,
				   offset,
				   sb_page,
				   0, sizeof(bitmap_super_t));
	}
	if (err)
		return err;

	err = -EINVAL;
	sb = kmap_atomic(sb_page);

	chunksize = le32_to_cpu(sb->chunksize);
	daemon_sleep = le32_to_cpu(sb->daemon_sleep) * HZ;
	write_behind = le32_to_cpu(sb->write_behind);
	sectors_reserved = le32_to_cpu(sb->sectors_reserved);

	/* verify that the bitmap-specific fields are valid */
	if (sb->magic != cpu_to_le32(BITMAP_MAGIC))
		reason = "bad magic";
	else if (le32_to_cpu(sb->version) < BITMAP_MAJOR_LO ||
		 le32_to_cpu(sb->version) > BITMAP_MAJOR_CLUSTERED)
		reason = "unrecognized superblock version";
	else if (chunksize < 512)
		reason = "bitmap chunksize too small";
	else if (!is_power_of_2(chunksize))
		reason = "bitmap chunksize not a power of 2";
	else if (daemon_sleep < 1 || daemon_sleep > MAX_SCHEDULE_TIMEOUT)
		reason = "daemon sleep period out of range";
	else if (write_behind > COUNTER_MAX)
		reason = "write-behind limit out of range (0 - 16383)";
	if (reason) {
		pr_warn("%s: invalid bitmap file superblock: %s\n",
			bmname(bitmap), reason);
		goto out;
	}

	/*
	 * Setup nodes/clustername only if bitmap version is
	 * cluster-compatible
	 */
	if (sb->version == cpu_to_le32(BITMAP_MAJOR_CLUSTERED)) {
		nodes = le32_to_cpu(sb->nodes);
		strscpy(bitmap->mddev->bitmap_info.cluster_name,
				sb->cluster_name, 64);
	}

	/* keep the array size field of the bitmap superblock up to date */
	sb->sync_size = cpu_to_le64(bitmap->mddev->resync_max_sectors);

	if (bitmap->mddev->persistent) {
		/*
		 * We have a persistent array superblock, so compare the
		 * bitmap's UUID and event counter to the mddev's
		 */
		if (memcmp(sb->uuid, bitmap->mddev->uuid, 16)) {
			pr_warn("%s: bitmap superblock UUID mismatch\n",
				bmname(bitmap));
			goto out;
		}
		events = le64_to_cpu(sb->events);
		if (!nodes && (events < bitmap->mddev->events)) {
			pr_warn("%s: bitmap file is out of date (%llu < %llu) -- forcing full recovery\n",
				bmname(bitmap), events,
				(unsigned long long) bitmap->mddev->events);
			set_bit(BITMAP_STALE, &bitmap->flags);
		}
	}

	/* assign fields using values from superblock */
	bitmap->flags |= le32_to_cpu(sb->state);
	if (le32_to_cpu(sb->version) == BITMAP_MAJOR_HOSTENDIAN)
		set_bit(BITMAP_HOSTENDIAN, &bitmap->flags);
	bitmap->events_cleared = le64_to_cpu(sb->events_cleared);
	err = 0;

out:
	kunmap_atomic(sb);
	if (err == 0 && nodes && (bitmap->cluster_slot < 0)) {
		/* Assigning chunksize is required for "re_read" */
		bitmap->mddev->bitmap_info.chunksize = chunksize;
		err = md_setup_cluster(bitmap->mddev, nodes);
		if (err) {
			pr_warn("%s: Could not setup cluster service (%d)\n",
				bmname(bitmap), err);
			goto out_no_sb;
		}
		bitmap->cluster_slot = md_cluster_ops->slot_number(bitmap->mddev);
		goto re_read;
	}

out_no_sb:
	if (err == 0) {
		if (test_bit(BITMAP_STALE, &bitmap->flags))
			bitmap->events_cleared = bitmap->mddev->events;
		bitmap->mddev->bitmap_info.chunksize = chunksize;
		bitmap->mddev->bitmap_info.daemon_sleep = daemon_sleep;
		bitmap->mddev->bitmap_info.max_write_behind = write_behind;
		bitmap->mddev->bitmap_info.nodes = nodes;
		if (bitmap->mddev->bitmap_info.space == 0 ||
		    bitmap->mddev->bitmap_info.space > sectors_reserved)
			bitmap->mddev->bitmap_info.space = sectors_reserved;
	} else {
		md_bitmap_print_sb(bitmap);
		if (bitmap->cluster_slot < 0)
			md_cluster_stop(bitmap->mddev);
	}
	return err;
}

/*
 * general bitmap file operations
 */

/*
 * on-disk bitmap:
 *
 * Use one bit per "chunk" (block set). We do the disk I/O on the bitmap
 * file a page at a time. There's a superblock at the start of the file.
 */
/* calculate the index of the page that contains this bit */
static inline unsigned long file_page_index(struct bitmap_storage *store,
					    unsigned long chunk)
{
	if (store->sb_page)
		chunk += sizeof(bitmap_super_t) << 3;
	return chunk >> PAGE_BIT_SHIFT;
}

/* calculate the (bit) offset of this bit within a page */
static inline unsigned long file_page_offset(struct bitmap_storage *store,
					     unsigned long chunk)
{
	if (store->sb_page)
		chunk += sizeof(bitmap_super_t) << 3;
	return chunk & (PAGE_BITS - 1);
}

/*
 * return a pointer to the page in the filemap that contains the given bit
 */
static inline struct page *filemap_get_page(struct bitmap_storage *store,
					    unsigned long chunk)
{
	if (file_page_index(store, chunk) >= store->file_pages)
		return NULL;
	return store->filemap[file_page_index(store, chunk)];
}

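/*
 * Allocate the array of pages that back the bitmap file in memory, plus the
 * per-page attribute bits.  'chunks' determines how many bytes (and so how
 * many pages) are needed; 'with_super' reserves room for a superblock at the
 * start; for clustered arrays, 'slot_number' offsets the page indexes so
 * that each node uses its own region of the bitmap area.
 */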
static int md_bitmap_storage_alloc(struct bitmap_storage *store,
				   unsigned long chunks, int with_super,
				   int slot_number)
{
	int pnum, offset = 0;
	unsigned long num_pages;
	unsigned long bytes;

	bytes = DIV_ROUND_UP(chunks, 8);
	if (with_super)
		bytes += sizeof(bitmap_super_t);

	num_pages = DIV_ROUND_UP(bytes, PAGE_SIZE);
	offset = slot_number * num_pages;

	store->filemap = kmalloc_array(num_pages, sizeof(struct page *),
				       GFP_KERNEL);
	if (!store->filemap)
		return -ENOMEM;

	if (with_super && !store->sb_page) {
		store->sb_page = alloc_page(GFP_KERNEL|__GFP_ZERO);
		if (store->sb_page == NULL)
			return -ENOMEM;
	}

	pnum = 0;
	if (store->sb_page) {
		store->filemap[0] = store->sb_page;
		pnum = 1;
		store->sb_page->index = offset;
	}

	for ( ; pnum < num_pages; pnum++) {
		store->filemap[pnum] = alloc_page(GFP_KERNEL|__GFP_ZERO);
		if (!store->filemap[pnum]) {
			store->file_pages = pnum;
			return -ENOMEM;
		}
		store->filemap[pnum]->index = pnum + offset;
	}
	store->file_pages = pnum;

	/* We need 4 bits per page, rounded up to a multiple
	 * of sizeof(unsigned long) */
	store->filemap_attr = kzalloc(
		roundup(DIV_ROUND_UP(num_pages*4, 8), sizeof(unsigned long)),
		GFP_KERNEL);
	if (!store->filemap_attr)
		return -ENOMEM;

	store->bytes = bytes;

	return 0;
}

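/*
 * Release everything md_bitmap_storage_alloc() and read_page() set up: the
 * filemap pages and their buffers, the attribute array, the superblock page,
 * and our reference on the bitmap file, if any.
 */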
static void md_bitmap_file_unmap(struct bitmap_storage *store)
{
	struct page **map, *sb_page;
	int pages;
	struct file *file;

	file = store->file;
	map = store->filemap;
	pages = store->file_pages;
	sb_page = store->sb_page;

	while (pages--)
		if (map[pages] != sb_page) /* 0 is sb_page, release it below */
			free_buffers(map[pages]);
	kfree(map);
	kfree(store->filemap_attr);

	if (sb_page)
		free_buffers(sb_page);

	if (file) {
		struct inode *inode = file_inode(file);
		invalidate_mapping_pages(inode->i_mapping, 0, -1);
		fput(file);
	}
}

/*
 * bitmap_file_kick - if an error occurs while manipulating the bitmap file
 * then it is no longer reliable, so we stop using it and we mark the file
 * as failed in the superblock
 */
static void md_bitmap_file_kick(struct bitmap *bitmap)
{
	char *path, *ptr = NULL;

	if (!test_and_set_bit(BITMAP_STALE, &bitmap->flags)) {
		md_bitmap_update_sb(bitmap);

		if (bitmap->storage.file) {
			path = kmalloc(PAGE_SIZE, GFP_KERNEL);
			if (path)
				ptr = file_path(bitmap->storage.file,
						path, PAGE_SIZE);

			pr_warn("%s: kicking failed bitmap file %s from array!\n",
				bmname(bitmap), IS_ERR(ptr) ? "" : ptr);

			kfree(path);
		} else
			pr_warn("%s: disabling internal bitmap due to errors\n",
				bmname(bitmap));
	}
}

enum bitmap_page_attr {
	BITMAP_PAGE_DIRTY = 0,     /* there are set bits that need to be synced */
	BITMAP_PAGE_PENDING = 1,   /* there are bits that are being cleaned.
				    * i.e. counter is 1 or 2. */
	BITMAP_PAGE_NEEDWRITE = 2, /* there are cleared bits that need to be synced */
};

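/*
 * Each filemap page owns four consecutive bits in filemap_attr, so the
 * attribute 'attr' for page 'pnum' lives at bit (pnum << 2) + attr.  Only
 * the three attributes above are defined here; the fourth bit per page is
 * currently spare.
 */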
static inline void set_page_attr(struct bitmap *bitmap, int pnum,
				 enum bitmap_page_attr attr)
{
	set_bit((pnum<<2) + attr, bitmap->storage.filemap_attr);
}

static inline void clear_page_attr(struct bitmap *bitmap, int pnum,
				   enum bitmap_page_attr attr)
{
	clear_bit((pnum<<2) + attr, bitmap->storage.filemap_attr);
}

static inline int test_page_attr(struct bitmap *bitmap, int pnum,
				 enum bitmap_page_attr attr)
{
	return test_bit((pnum<<2) + attr, bitmap->storage.filemap_attr);
}

static inline int test_and_clear_page_attr(struct bitmap *bitmap, int pnum,
					   enum bitmap_page_attr attr)
{
	return test_and_clear_bit((pnum<<2) + attr,
				  bitmap->storage.filemap_attr);
}

/*
 * bitmap_file_set_bit -- called before performing a write to the md device
 * to set (and eventually sync) a particular bit in the bitmap file
 *
 * we set the bit immediately, then we record the page number so that
 * when an unplug occurs, we can flush the dirty pages out to disk
 */
static void md_bitmap_file_set_bit(struct bitmap *bitmap, sector_t block)
{
	unsigned long bit;
	struct page *page;
	void *kaddr;
	unsigned long chunk = block >> bitmap->counts.chunkshift;
	struct bitmap_storage *store = &bitmap->storage;
	unsigned long node_offset = 0;

	if (mddev_is_clustered(bitmap->mddev))
		node_offset = bitmap->cluster_slot * store->file_pages;

	page = filemap_get_page(&bitmap->storage, chunk);
	if (!page)
		return;
	bit = file_page_offset(&bitmap->storage, chunk);

	/* set the bit */
	kaddr = kmap_atomic(page);
	if (test_bit(BITMAP_HOSTENDIAN, &bitmap->flags))
		set_bit(bit, kaddr);
	else
		set_bit_le(bit, kaddr);
	kunmap_atomic(kaddr);
	pr_debug("set file bit %lu page %lu\n", bit, page->index);
	/* record page number so it gets flushed to disk when unplug occurs */
	set_page_attr(bitmap, page->index - node_offset, BITMAP_PAGE_DIRTY);
}

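/*
 * Counterpart of md_bitmap_file_set_bit(): clear the on-disk bit for 'block'
 * and, unless a write of that page is already queued, mark the page
 * BITMAP_PAGE_PENDING so the daemon will write it out later.
 */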
static void md_bitmap_file_clear_bit(struct bitmap *bitmap, sector_t block)
{
	unsigned long bit;
	struct page *page;
	void *paddr;
	unsigned long chunk = block >> bitmap->counts.chunkshift;
	struct bitmap_storage *store = &bitmap->storage;
	unsigned long node_offset = 0;

	if (mddev_is_clustered(bitmap->mddev))
		node_offset = bitmap->cluster_slot * store->file_pages;

	page = filemap_get_page(&bitmap->storage, chunk);
	if (!page)
		return;
	bit = file_page_offset(&bitmap->storage, chunk);
	paddr = kmap_atomic(page);
	if (test_bit(BITMAP_HOSTENDIAN, &bitmap->flags))
		clear_bit(bit, paddr);
	else
		clear_bit_le(bit, paddr);
	kunmap_atomic(paddr);
	if (!test_page_attr(bitmap, page->index - node_offset, BITMAP_PAGE_NEEDWRITE)) {
		set_page_attr(bitmap, page->index - node_offset, BITMAP_PAGE_PENDING);
		bitmap->allclean = 0;
	}
}

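/*
 * Return the current value of the on-disk bit covering 'block', honouring
 * BITMAP_HOSTENDIAN, or -EINVAL if no filemap page covers that chunk.
 */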
Andy Shevchenkoe64e40182018-08-01 15:20:50 -0700982static int md_bitmap_file_test_bit(struct bitmap *bitmap, sector_t block)
Goldwyn Rodrigues11dd35d2014-06-07 00:36:26 -0500983{
984 unsigned long bit;
985 struct page *page;
986 void *paddr;
987 unsigned long chunk = block >> bitmap->counts.chunkshift;
988 int set = 0;
989
990 page = filemap_get_page(&bitmap->storage, chunk);
991 if (!page)
992 return -EINVAL;
993 bit = file_page_offset(&bitmap->storage, chunk);
994 paddr = kmap_atomic(page);
995 if (test_bit(BITMAP_HOSTENDIAN, &bitmap->flags))
996 set = test_bit(bit, paddr);
997 else
998 set = test_bit_le(bit, paddr);
999 kunmap_atomic(paddr);
1000 return set;
1001}
1002
1003
NeilBrown32a76272005-06-21 17:17:14 -07001004/* this gets called when the md device is ready to unplug its underlying
1005 * (slave) device queues -- before we let any writes go down, we need to
1006 * sync the dirty pages of the bitmap file to disk */
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001007void md_bitmap_unplug(struct bitmap *bitmap)
NeilBrown32a76272005-06-21 17:17:14 -07001008{
NeilBrown74667122012-05-22 13:55:19 +10001009 unsigned long i;
NeilBrownec7a3192006-06-26 00:27:45 -07001010 int dirty, need_write;
NeilBrown85c9ccd2016-11-04 16:46:03 +11001011 int writing = 0;
NeilBrown32a76272005-06-21 17:17:14 -07001012
NeilBrown62f82fa2012-05-22 13:55:21 +10001013 if (!bitmap || !bitmap->storage.filemap ||
1014 test_bit(BITMAP_STALE, &bitmap->flags))
NeilBrown4ad13662007-07-17 04:06:13 -07001015 return;
NeilBrown32a76272005-06-21 17:17:14 -07001016
1017 /* look at each page to see if there are any set bits that need to be
1018 * flushed out to disk */
NeilBrown1ec885c2012-05-22 13:55:10 +10001019 for (i = 0; i < bitmap->storage.file_pages; i++) {
NeilBrownbdfd1142012-05-22 13:55:22 +10001020 dirty = test_and_clear_page_attr(bitmap, i, BITMAP_PAGE_DIRTY);
1021 need_write = test_and_clear_page_attr(bitmap, i,
1022 BITMAP_PAGE_NEEDWRITE);
1023 if (dirty || need_write) {
NeilBrown581dbd92016-11-14 16:30:21 +11001024 if (!writing) {
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001025 md_bitmap_wait_writes(bitmap);
NeilBrown581dbd92016-11-14 16:30:21 +11001026 if (bitmap->mddev->queue)
1027 blk_add_trace_msg(bitmap->mddev->queue,
1028 "md bitmap_unplug");
1029 }
NeilBrownd1891222012-05-22 13:55:09 +10001030 clear_page_attr(bitmap, i, BITMAP_PAGE_PENDING);
NeilBrownbdfd1142012-05-22 13:55:22 +10001031 write_page(bitmap, bitmap->storage.filemap[i], 0);
NeilBrown85c9ccd2016-11-04 16:46:03 +11001032 writing = 1;
NeilBrownbdfd1142012-05-22 13:55:22 +10001033 }
NeilBrown32a76272005-06-21 17:17:14 -07001034 }
NeilBrown85c9ccd2016-11-04 16:46:03 +11001035 if (writing)
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001036 md_bitmap_wait_writes(bitmap);
NeilBrown4b5060d2014-09-09 14:13:51 +10001037
NeilBrownb405fe92012-05-22 13:55:15 +10001038 if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags))
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001039 md_bitmap_file_kick(bitmap);
NeilBrown32a76272005-06-21 17:17:14 -07001040}
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001041EXPORT_SYMBOL(md_bitmap_unplug);
NeilBrown32a76272005-06-21 17:17:14 -07001042
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001043static void md_bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int needed);
NeilBrown32a76272005-06-21 17:17:14 -07001044/* * bitmap_init_from_disk -- called at bitmap_create time to initialize
1045 * the in-memory bitmap from the on-disk bitmap -- also, sets up the
1046 * memory mapping of the bitmap file
1047 * Special cases:
1048 * if there's no bitmap file, or if the bitmap file had been
1049 * previously kicked from the array, we mark all the bits as
1050 * 1's in order to cause a full resync.
NeilBrown6a079972005-09-09 16:23:44 -07001051 *
1052 * We ignore all bits for sectors that end earlier than 'start'.
1053 * This is used when reading an out-of-date bitmap...
NeilBrown32a76272005-06-21 17:17:14 -07001054 */
static int md_bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
{
	unsigned long i, chunks, index, oldindex, bit, node_offset = 0;
	struct page *page = NULL;
	unsigned long bit_cnt = 0;
	struct file *file;
	unsigned long offset;
	int outofdate;
	int ret = -ENOSPC;
	void *paddr;
	struct bitmap_storage *store = &bitmap->storage;

	chunks = bitmap->counts.chunks;
	file = store->file;

	if (!file && !bitmap->mddev->bitmap_info.offset) {
		/* No permanent bitmap - fill with '1s'. */
		store->filemap = NULL;
		store->file_pages = 0;
		for (i = 0; i < chunks ; i++) {
			/* if the disk bit is set, set the memory bit */
			int needed = ((sector_t)(i+1) << (bitmap->counts.chunkshift)
				      >= start);
			md_bitmap_set_memory_bits(bitmap,
						  (sector_t)i << bitmap->counts.chunkshift,
						  needed);
		}
		return 0;
	}

	outofdate = test_bit(BITMAP_STALE, &bitmap->flags);
	if (outofdate)
		pr_warn("%s: bitmap file is out of date, doing full recovery\n", bmname(bitmap));

	if (file && i_size_read(file->f_mapping->host) < store->bytes) {
		pr_warn("%s: bitmap file too short %lu < %lu\n",
			bmname(bitmap),
			(unsigned long) i_size_read(file->f_mapping->host),
			store->bytes);
		goto err;
	}

	oldindex = ~0L;
	offset = 0;
	if (!bitmap->mddev->bitmap_info.external)
		offset = sizeof(bitmap_super_t);

	if (mddev_is_clustered(bitmap->mddev))
		node_offset = bitmap->cluster_slot * (DIV_ROUND_UP(store->bytes, PAGE_SIZE));

	for (i = 0; i < chunks; i++) {
		int b;
		index = file_page_index(&bitmap->storage, i);
		bit = file_page_offset(&bitmap->storage, i);
		if (index != oldindex) { /* this is a new page, read it in */
			int count;
			/* unmap the old page, we're done with it */
			if (index == store->file_pages-1)
				count = store->bytes - index * PAGE_SIZE;
			else
				count = PAGE_SIZE;
			page = store->filemap[index];
			if (file)
				ret = read_page(file, index, bitmap,
						count, page);
			else
				ret = read_sb_page(
					bitmap->mddev,
					bitmap->mddev->bitmap_info.offset,
					page,
					index + node_offset, count);

			if (ret)
				goto err;

			oldindex = index;

			if (outofdate) {
				/*
				 * if bitmap is out of date, dirty the
				 * whole page and write it out
				 */
				paddr = kmap_atomic(page);
				memset(paddr + offset, 0xff,
				       PAGE_SIZE - offset);
				kunmap_atomic(paddr);
				write_page(bitmap, page, 1);

				ret = -EIO;
				if (test_bit(BITMAP_WRITE_ERROR,
					     &bitmap->flags))
					goto err;
			}
		}
		paddr = kmap_atomic(page);
		if (test_bit(BITMAP_HOSTENDIAN, &bitmap->flags))
			b = test_bit(bit, paddr);
		else
			b = test_bit_le(bit, paddr);
		kunmap_atomic(paddr);
		if (b) {
			/* if the disk bit is set, set the memory bit */
			int needed = ((sector_t)(i+1) << bitmap->counts.chunkshift
				      >= start);
			md_bitmap_set_memory_bits(bitmap,
						  (sector_t)i << bitmap->counts.chunkshift,
						  needed);
			bit_cnt++;
		}
		offset = 0;
	}

	pr_debug("%s: bitmap initialized from disk: read %lu pages, set %lu of %lu bits\n",
		 bmname(bitmap), store->file_pages,
		 bit_cnt, chunks);

	return 0;

 err:
	pr_warn("%s: bitmap initialisation failed: %d\n",
		bmname(bitmap), ret);
	return ret;
}

void md_bitmap_write_all(struct bitmap *bitmap)
{
	/* We don't actually write all bitmap blocks here,
	 * just flag them as needing to be written
	 */
	int i;

	if (!bitmap || !bitmap->storage.filemap)
		return;
	if (bitmap->storage.file)
		/* Only one copy, so nothing needed */
		return;

	for (i = 0; i < bitmap->storage.file_pages; i++)
		set_page_attr(bitmap, i,
			      BITMAP_PAGE_NEEDWRITE);
	bitmap->allclean = 0;
}

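/*
 * Note on the two-level counter scheme used by the helpers below: each
 * chunk has a 16-bit counter stored in bitmap->bp[page].map, where
 * 'chunk >> PAGE_COUNTER_SHIFT' selects the page and the low bits select
 * the counter within it.  Illustrative numbers only: with 4KB pages and
 * 2-byte counters there are 2048 counters per page, so chunk 5000 lands
 * in page 2.  bp[page].count tracks how many counters on the page are
 * non-zero, letting md_bitmap_checkfree() release all-zero pages.
 */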
static void md_bitmap_count_page(struct bitmap_counts *bitmap,
				 sector_t offset, int inc)
{
	sector_t chunk = offset >> bitmap->chunkshift;
	unsigned long page = chunk >> PAGE_COUNTER_SHIFT;
	bitmap->bp[page].count += inc;
	md_bitmap_checkfree(bitmap, page);
}

static void md_bitmap_set_pending(struct bitmap_counts *bitmap, sector_t offset)
{
	sector_t chunk = offset >> bitmap->chunkshift;
	unsigned long page = chunk >> PAGE_COUNTER_SHIFT;
	struct bitmap_page *bp = &bitmap->bp[page];

	if (!bp->pending)
		bp->pending = 1;
}

static bitmap_counter_t *md_bitmap_get_counter(struct bitmap_counts *bitmap,
					       sector_t offset, sector_t *blocks,
					       int create);

/*
 * bitmap daemon -- periodically wakes up to clean bits and flush pages
 * out to disk
 */

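/*
 * One pass of the daemon, roughly: (1) promote file pages from PENDING
 * to NEEDWRITE, (2) sweep the counters, decaying idle chunks 2 -> 1 -> 0
 * and clearing the file bit once a counter reaches 0, then (3) start
 * writeout of NEEDWRITE pages, stopping at the first DIRTY page so that
 * md_bitmap_unplug() keeps the superblock ordering intact.
 */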
void md_bitmap_daemon_work(struct mddev *mddev)
{
	struct bitmap *bitmap;
	unsigned long j;
	unsigned long nextpage;
	sector_t blocks;
	struct bitmap_counts *counts;

	/* Use a mutex to guard daemon_work against
	 * bitmap_destroy.
	 */
	mutex_lock(&mddev->bitmap_info.mutex);
	bitmap = mddev->bitmap;
	if (bitmap == NULL) {
		mutex_unlock(&mddev->bitmap_info.mutex);
		return;
	}
	if (time_before(jiffies, bitmap->daemon_lastrun
			+ mddev->bitmap_info.daemon_sleep))
		goto done;

	bitmap->daemon_lastrun = jiffies;
	if (bitmap->allclean) {
		mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT;
		goto done;
	}
	bitmap->allclean = 1;

	if (bitmap->mddev->queue)
		blk_add_trace_msg(bitmap->mddev->queue,
				  "md bitmap_daemon_work");

	/* Any file-page which is PENDING now needs to be written.
	 * So set NEEDWRITE now, then after we make any last-minute changes
	 * we will write it.
	 */
	for (j = 0; j < bitmap->storage.file_pages; j++)
		if (test_and_clear_page_attr(bitmap, j,
					     BITMAP_PAGE_PENDING))
			set_page_attr(bitmap, j,
				      BITMAP_PAGE_NEEDWRITE);

	if (bitmap->need_sync &&
	    mddev->bitmap_info.external == 0) {
		/* Arrange for superblock update as well as
		 * other changes */
		bitmap_super_t *sb;
		bitmap->need_sync = 0;
		if (bitmap->storage.filemap) {
			sb = kmap_atomic(bitmap->storage.sb_page);
			sb->events_cleared =
				cpu_to_le64(bitmap->events_cleared);
			kunmap_atomic(sb);
			set_page_attr(bitmap, 0,
				      BITMAP_PAGE_NEEDWRITE);
		}
	}
	/* Now look at the bitmap counters and if any are '2' or '1',
	 * decrement and handle accordingly.
	 */
	counts = &bitmap->counts;
	spin_lock_irq(&counts->lock);
	nextpage = 0;
	for (j = 0; j < counts->chunks; j++) {
		bitmap_counter_t *bmc;
		sector_t block = (sector_t)j << counts->chunkshift;

		if (j == nextpage) {
			nextpage += PAGE_COUNTER_RATIO;
			if (!counts->bp[j >> PAGE_COUNTER_SHIFT].pending) {
				j |= PAGE_COUNTER_MASK;
				continue;
			}
			counts->bp[j >> PAGE_COUNTER_SHIFT].pending = 0;
		}

		bmc = md_bitmap_get_counter(counts, block, &blocks, 0);
		if (!bmc) {
			j |= PAGE_COUNTER_MASK;
			continue;
		}
		if (*bmc == 1 && !bitmap->need_sync) {
			/* We can clear the bit */
			*bmc = 0;
			md_bitmap_count_page(counts, block, -1);
			md_bitmap_file_clear_bit(bitmap, block);
		} else if (*bmc && *bmc <= 2) {
			*bmc = 1;
			md_bitmap_set_pending(counts, block);
			bitmap->allclean = 0;
		}
	}
	spin_unlock_irq(&counts->lock);

	md_bitmap_wait_writes(bitmap);
	/* Now start writeout on any page in NEEDWRITE that isn't DIRTY.
	 * DIRTY pages need to be written by bitmap_unplug so it can wait
	 * for them.
	 * If we find any DIRTY page we stop there and let bitmap_unplug
	 * handle all the rest.  This is important in the case where
	 * the first blocking page holds the superblock and it has been
	 * updated.  We mustn't write any other blocks before the superblock.
	 */
	for (j = 0;
	     j < bitmap->storage.file_pages
		     && !test_bit(BITMAP_STALE, &bitmap->flags);
	     j++) {
		if (test_page_attr(bitmap, j,
				   BITMAP_PAGE_DIRTY))
			/* bitmap_unplug will handle the rest */
			break;
		if (bitmap->storage.filemap &&
		    test_and_clear_page_attr(bitmap, j,
					     BITMAP_PAGE_NEEDWRITE)) {
			write_page(bitmap, bitmap->storage.filemap[j], 0);
		}
	}

 done:
	if (bitmap->allclean == 0)
		mddev->thread->timeout =
			mddev->bitmap_info.daemon_sleep;
	mutex_unlock(&mddev->bitmap_info.mutex);
}

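/*
 * Reminder (see md_bitmap_checkpage): when allocating a counter page
 * fails, the page is "hijacked" - the pointer word itself holds two
 * counters, each covering a span PAGE_COUNTER_SHIFT larger than normal.
 * That is why hijacked and unallocated pages report a larger 'csize'
 * below.
 */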
static bitmap_counter_t *md_bitmap_get_counter(struct bitmap_counts *bitmap,
					       sector_t offset, sector_t *blocks,
					       int create)
__releases(bitmap->lock)
__acquires(bitmap->lock)
{
	/* If 'create', we might release the lock and reclaim it.
	 * The lock must have been taken with interrupts enabled.
	 * If !create, we don't release the lock.
	 */
	sector_t chunk = offset >> bitmap->chunkshift;
	unsigned long page = chunk >> PAGE_COUNTER_SHIFT;
	unsigned long pageoff = (chunk & PAGE_COUNTER_MASK) << COUNTER_BYTE_SHIFT;
	sector_t csize;
	int err;

	err = md_bitmap_checkpage(bitmap, page, create, 0);

	if (bitmap->bp[page].hijacked ||
	    bitmap->bp[page].map == NULL)
		csize = ((sector_t)1) << (bitmap->chunkshift +
					  PAGE_COUNTER_SHIFT);
	else
		csize = ((sector_t)1) << bitmap->chunkshift;
	*blocks = csize - (offset & (csize - 1));

	if (err < 0)
		return NULL;

	/* now locked ... */

	if (bitmap->bp[page].hijacked) { /* hijacked pointer */
		/* should we use the first or second counter field
		 * of the hijacked pointer? */
		int hi = (pageoff > PAGE_COUNTER_MASK);
		return &((bitmap_counter_t *)
			 &bitmap->bp[page].map)[hi];
	} else /* page is allocated */
		return (bitmap_counter_t *)
			&(bitmap->bp[page].map[pageoff]);
}

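/*
 * Counter encoding used by startwrite/endwrite: the top two bits of each
 * 16-bit counter are the NEEDED and RESYNC flags; the low bits hold the
 * number of in-flight writes plus a bias of 2 while the chunk is dirty.
 * A writer that would push COUNTER() past COUNTER_MAX sleeps on
 * overflow_wait until a concurrent md_bitmap_endwrite() makes room.
 */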
int md_bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sectors, int behind)
{
	if (!bitmap)
		return 0;

	if (behind) {
		int bw;
		atomic_inc(&bitmap->behind_writes);
		bw = atomic_read(&bitmap->behind_writes);
		if (bw > bitmap->behind_writes_used)
			bitmap->behind_writes_used = bw;

		pr_debug("inc write-behind count %d/%lu\n",
			 bw, bitmap->mddev->bitmap_info.max_write_behind);
	}

	while (sectors) {
		sector_t blocks;
		bitmap_counter_t *bmc;

		spin_lock_irq(&bitmap->counts.lock);
		bmc = md_bitmap_get_counter(&bitmap->counts, offset, &blocks, 1);
		if (!bmc) {
			spin_unlock_irq(&bitmap->counts.lock);
			return 0;
		}

		if (unlikely(COUNTER(*bmc) == COUNTER_MAX)) {
			DEFINE_WAIT(__wait);
			/* note that it is safe to do the prepare_to_wait
			 * after the test as long as we do it before dropping
			 * the spinlock.
			 */
			prepare_to_wait(&bitmap->overflow_wait, &__wait,
					TASK_UNINTERRUPTIBLE);
			spin_unlock_irq(&bitmap->counts.lock);
			schedule();
			finish_wait(&bitmap->overflow_wait, &__wait);
			continue;
		}

		switch (*bmc) {
		case 0:
			md_bitmap_file_set_bit(bitmap, offset);
			md_bitmap_count_page(&bitmap->counts, offset, 1);
			fallthrough;
		case 1:
			*bmc = 2;
		}

		(*bmc)++;

		spin_unlock_irq(&bitmap->counts.lock);

		offset += blocks;
		if (sectors > blocks)
			sectors -= blocks;
		else
			sectors = 0;
	}
	return 0;
}
EXPORT_SYMBOL(md_bitmap_startwrite);

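/*
 * On a successful write to a non-degraded array, md_bitmap_endwrite()
 * advances events_cleared and sets need_sync, so the daemon publishes
 * the new value to the superblock before clearing any on-disk bits.
 */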
void md_bitmap_endwrite(struct bitmap *bitmap, sector_t offset,
			unsigned long sectors, int success, int behind)
{
	if (!bitmap)
		return;
	if (behind) {
		if (atomic_dec_and_test(&bitmap->behind_writes))
			wake_up(&bitmap->behind_wait);
		pr_debug("dec write-behind count %d/%lu\n",
			 atomic_read(&bitmap->behind_writes),
			 bitmap->mddev->bitmap_info.max_write_behind);
	}

	while (sectors) {
		sector_t blocks;
		unsigned long flags;
		bitmap_counter_t *bmc;

		spin_lock_irqsave(&bitmap->counts.lock, flags);
		bmc = md_bitmap_get_counter(&bitmap->counts, offset, &blocks, 0);
		if (!bmc) {
			spin_unlock_irqrestore(&bitmap->counts.lock, flags);
			return;
		}

		if (success && !bitmap->mddev->degraded &&
		    bitmap->events_cleared < bitmap->mddev->events) {
			bitmap->events_cleared = bitmap->mddev->events;
			bitmap->need_sync = 1;
			sysfs_notify_dirent_safe(bitmap->sysfs_can_clear);
		}

		if (!success && !NEEDED(*bmc))
			*bmc |= NEEDED_MASK;

		if (COUNTER(*bmc) == COUNTER_MAX)
			wake_up(&bitmap->overflow_wait);

		(*bmc)--;
		if (*bmc <= 2) {
			md_bitmap_set_pending(&bitmap->counts, offset);
			bitmap->allclean = 0;
		}
		spin_unlock_irqrestore(&bitmap->counts.lock, flags);
		offset += blocks;
		if (sectors > blocks)
			sectors -= blocks;
		else
			sectors = 0;
	}
}
EXPORT_SYMBOL(md_bitmap_endwrite);

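/*
 * Resync helpers: __bitmap_start_sync() reports whether the chunk at
 * 'offset' needs resync and how many sectors that answer covers via
 * '*blocks'; unless the array is degraded it also moves the chunk from
 * NEEDED to RESYNC state.
 */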
static int __bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks,
			       int degraded)
{
	bitmap_counter_t *bmc;
	int rv;
	if (bitmap == NULL) { /* FIXME or bitmap set as 'failed' */
		*blocks = 1024;
		return 1; /* always resync if no bitmap */
	}
	spin_lock_irq(&bitmap->counts.lock);
	bmc = md_bitmap_get_counter(&bitmap->counts, offset, blocks, 0);
	rv = 0;
	if (bmc) {
		/* locked */
		if (RESYNC(*bmc))
			rv = 1;
		else if (NEEDED(*bmc)) {
			rv = 1;
			if (!degraded) { /* don't set/clear bits if degraded */
				*bmc |= RESYNC_MASK;
				*bmc &= ~NEEDED_MASK;
			}
		}
	}
	spin_unlock_irq(&bitmap->counts.lock);
	return rv;
}

int md_bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks,
			 int degraded)
{
	/* bitmap_start_sync must always report on multiples of whole
	 * pages, otherwise resync (which is very PAGE_SIZE based) will
	 * get confused.
	 * So call __bitmap_start_sync repeatedly (if needed) until
	 * at least PAGE_SIZE>>9 blocks are covered.
	 * Return the 'or' of the result.
	 */
	int rv = 0;
	sector_t blocks1;

	*blocks = 0;
	while (*blocks < (PAGE_SIZE>>9)) {
		rv |= __bitmap_start_sync(bitmap, offset,
					  &blocks1, degraded);
		offset += blocks1;
		*blocks += blocks1;
	}
	return rv;
}
EXPORT_SYMBOL(md_bitmap_start_sync);

void md_bitmap_end_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks, int aborted)
{
	bitmap_counter_t *bmc;
	unsigned long flags;

	if (bitmap == NULL) {
		*blocks = 1024;
		return;
	}
	spin_lock_irqsave(&bitmap->counts.lock, flags);
	bmc = md_bitmap_get_counter(&bitmap->counts, offset, blocks, 0);
	if (bmc == NULL)
		goto unlock;
	/* locked */
	if (RESYNC(*bmc)) {
		*bmc &= ~RESYNC_MASK;

		if (!NEEDED(*bmc) && aborted)
			*bmc |= NEEDED_MASK;
		else {
			if (*bmc <= 2) {
				md_bitmap_set_pending(&bitmap->counts, offset);
				bitmap->allclean = 0;
			}
		}
	}
 unlock:
	spin_unlock_irqrestore(&bitmap->counts.lock, flags);
}
EXPORT_SYMBOL(md_bitmap_end_sync);

void md_bitmap_close_sync(struct bitmap *bitmap)
{
	/* Sync has finished, and any bitmap chunks that weren't synced
	 * properly have been aborted.  It remains to us to clear the
	 * RESYNC bit wherever it is still on.
	 */
	sector_t sector = 0;
	sector_t blocks;
	if (!bitmap)
		return;
	while (sector < bitmap->mddev->resync_max_sectors) {
		md_bitmap_end_sync(bitmap, sector, &blocks, 0);
		sector += blocks;
	}
}
EXPORT_SYMBOL(md_bitmap_close_sync);

void md_bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector, bool force)
{
	sector_t s = 0;
	sector_t blocks;

	if (!bitmap)
		return;
	if (sector == 0) {
		bitmap->last_end_sync = jiffies;
		return;
	}
	if (!force && time_before(jiffies, (bitmap->last_end_sync
					    + bitmap->mddev->bitmap_info.daemon_sleep)))
		return;
	wait_event(bitmap->mddev->recovery_wait,
		   atomic_read(&bitmap->mddev->recovery_active) == 0);

	bitmap->mddev->curr_resync_completed = sector;
	set_bit(MD_SB_CHANGE_CLEAN, &bitmap->mddev->sb_flags);
	sector &= ~((1ULL << bitmap->counts.chunkshift) - 1);
	s = 0;
	while (s < sector && s < bitmap->mddev->resync_max_sectors) {
		md_bitmap_end_sync(bitmap, s, &blocks, 0);
		s += blocks;
	}
	bitmap->last_end_sync = jiffies;
	sysfs_notify_dirent_safe(bitmap->mddev->sysfs_completed);
}
EXPORT_SYMBOL(md_bitmap_cond_end_sync);

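/*
 * For clustered MD each node tracks a [lo, hi) resync window.  The
 * helper below slides the local window to match a peer's progress:
 * chunks in [old_lo, new_lo) are closed out with md_bitmap_end_sync()
 * and [old_hi, new_hi) is (re)opened with md_bitmap_start_sync().
 */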
void md_bitmap_sync_with_cluster(struct mddev *mddev,
				 sector_t old_lo, sector_t old_hi,
				 sector_t new_lo, sector_t new_hi)
{
	struct bitmap *bitmap = mddev->bitmap;
	sector_t sector, blocks = 0;

	for (sector = old_lo; sector < new_lo; ) {
		md_bitmap_end_sync(bitmap, sector, &blocks, 0);
		sector += blocks;
	}
	WARN((blocks > new_lo) && old_lo, "alignment is not correct for lo\n");

	for (sector = old_hi; sector < new_hi; ) {
		md_bitmap_start_sync(bitmap, sector, &blocks, 0);
		sector += blocks;
	}
	WARN((blocks > new_hi) && old_hi, "alignment is not correct for hi\n");
}
EXPORT_SYMBOL(md_bitmap_sync_with_cluster);

static void md_bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int needed)
{
	/* For each chunk covered by any of these sectors, set the
	 * counter to 2 and possibly set resync_needed.  They should all
	 * be 0 at this point.
	 */

	sector_t secs;
	bitmap_counter_t *bmc;
	spin_lock_irq(&bitmap->counts.lock);
	bmc = md_bitmap_get_counter(&bitmap->counts, offset, &secs, 1);
	if (!bmc) {
		spin_unlock_irq(&bitmap->counts.lock);
		return;
	}
	if (!*bmc) {
		*bmc = 2;
		md_bitmap_count_page(&bitmap->counts, offset, 1);
		md_bitmap_set_pending(&bitmap->counts, offset);
		bitmap->allclean = 0;
	}
	if (needed)
		*bmc |= NEEDED_MASK;
	spin_unlock_irq(&bitmap->counts.lock);
}

/* dirty the memory and file bits for bitmap chunks "s" to "e" */
void md_bitmap_dirty_bits(struct bitmap *bitmap, unsigned long s, unsigned long e)
{
	unsigned long chunk;

	for (chunk = s; chunk <= e; chunk++) {
		sector_t sec = (sector_t)chunk << bitmap->counts.chunkshift;
		md_bitmap_set_memory_bits(bitmap, sec, 1);
		md_bitmap_file_set_bit(bitmap, sec);
		if (sec < bitmap->mddev->recovery_cp)
			/* We are asserting that the array is dirty,
			 * so move the recovery_cp address back so
			 * that it is obvious that it is dirty
			 */
			bitmap->mddev->recovery_cp = sec;
	}
}

/*
 * flush out any pending updates
 */
void md_bitmap_flush(struct mddev *mddev)
{
	struct bitmap *bitmap = mddev->bitmap;
	long sleep;

	if (!bitmap) /* there was no bitmap */
		return;

	/* run the daemon_work three times to ensure everything is flushed
	 * that can be
	 */
	sleep = mddev->bitmap_info.daemon_sleep * 2;
	bitmap->daemon_lastrun -= sleep;
	md_bitmap_daemon_work(mddev);
	bitmap->daemon_lastrun -= sleep;
	md_bitmap_daemon_work(mddev);
	bitmap->daemon_lastrun -= sleep;
	md_bitmap_daemon_work(mddev);
	if (mddev->bitmap_info.external)
		md_super_wait(mddev);
	md_bitmap_update_sb(bitmap);
}

/*
 * free memory that was allocated
 */
void md_bitmap_free(struct bitmap *bitmap)
{
	unsigned long k, pages;
	struct bitmap_page *bp;

	if (!bitmap) /* there was no bitmap */
		return;

	if (bitmap->sysfs_can_clear)
		sysfs_put(bitmap->sysfs_can_clear);

	if (mddev_is_clustered(bitmap->mddev) && bitmap->mddev->cluster_info &&
	    bitmap->cluster_slot == md_cluster_ops->slot_number(bitmap->mddev))
		md_cluster_stop(bitmap->mddev);

	/* Shouldn't be needed - but just in case.... */
	wait_event(bitmap->write_wait,
		   atomic_read(&bitmap->pending_writes) == 0);

	/* release the bitmap file */
	md_bitmap_file_unmap(&bitmap->storage);

	bp = bitmap->counts.bp;
	pages = bitmap->counts.pages;

	/* free all allocated memory */

	if (bp) /* deallocate the page memory */
		for (k = 0; k < pages; k++)
			if (bp[k].map && !bp[k].hijacked)
				kfree(bp[k].map);
	kfree(bp);
	kfree(bitmap);
}
EXPORT_SYMBOL(md_bitmap_free);

void md_bitmap_wait_behind_writes(struct mddev *mddev)
{
	struct bitmap *bitmap = mddev->bitmap;

	/* wait for behind writes to complete */
	if (bitmap && atomic_read(&bitmap->behind_writes) > 0) {
		pr_debug("md:%s: behind writes in progress - waiting to stop.\n",
			 mdname(mddev));
		/* need to kick something here to make sure I/O goes? */
		wait_event(bitmap->behind_wait,
			   atomic_read(&bitmap->behind_writes) == 0);
	}
}

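/*
 * Teardown ordering note: mddev->bitmap is detached under
 * bitmap_info.mutex and mddev->lock before the structure is freed, so
 * the daemon and sysfs readers never observe a half-freed bitmap.
 */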
void md_bitmap_destroy(struct mddev *mddev)
{
	struct bitmap *bitmap = mddev->bitmap;

	if (!bitmap) /* there was no bitmap */
		return;

	md_bitmap_wait_behind_writes(mddev);
	if (!mddev->serialize_policy)
		mddev_destroy_serial_pool(mddev, NULL, true);

	mutex_lock(&mddev->bitmap_info.mutex);
	spin_lock(&mddev->lock);
	mddev->bitmap = NULL; /* disconnect from the md device */
	spin_unlock(&mddev->lock);
	mutex_unlock(&mddev->bitmap_info.mutex);
	if (mddev->thread)
		mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT;

	md_bitmap_free(bitmap);
}

/*
 * initialize the bitmap structure
 * if this returns an error, bitmap_destroy must be called to do clean up
 * once mddev->bitmap is set
 */
struct bitmap *md_bitmap_create(struct mddev *mddev, int slot)
{
	struct bitmap *bitmap;
	sector_t blocks = mddev->resync_max_sectors;
	struct file *file = mddev->bitmap_info.file;
	int err;
	struct kernfs_node *bm = NULL;

	BUILD_BUG_ON(sizeof(bitmap_super_t) != 256);

	BUG_ON(file && mddev->bitmap_info.offset);

	if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) {
		pr_notice("md/raid:%s: array with journal cannot have bitmap\n",
			  mdname(mddev));
		return ERR_PTR(-EBUSY);
	}

	bitmap = kzalloc(sizeof(*bitmap), GFP_KERNEL);
	if (!bitmap)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&bitmap->counts.lock);
	atomic_set(&bitmap->pending_writes, 0);
	init_waitqueue_head(&bitmap->write_wait);
	init_waitqueue_head(&bitmap->overflow_wait);
	init_waitqueue_head(&bitmap->behind_wait);

	bitmap->mddev = mddev;
	bitmap->cluster_slot = slot;

	if (mddev->kobj.sd)
		bm = sysfs_get_dirent(mddev->kobj.sd, "bitmap");
	if (bm) {
		bitmap->sysfs_can_clear = sysfs_get_dirent(bm, "can_clear");
		sysfs_put(bm);
	} else
		bitmap->sysfs_can_clear = NULL;

	bitmap->storage.file = file;
	if (file) {
		get_file(file);
		/* As future accesses to this file will use bmap,
		 * and bypass the page cache, we must sync the file
		 * first.
		 */
		vfs_fsync(file, 1);
	}
	/* read superblock from bitmap file (this sets mddev->bitmap_info.chunksize) */
	if (!mddev->bitmap_info.external) {
		/*
		 * If 'MD_ARRAY_FIRST_USE' is set, then device-mapper is
		 * instructing us to create a new on-disk bitmap instance.
		 */
		if (test_and_clear_bit(MD_ARRAY_FIRST_USE, &mddev->flags))
			err = md_bitmap_new_disk_sb(bitmap);
		else
			err = md_bitmap_read_sb(bitmap);
	} else {
		err = 0;
		if (mddev->bitmap_info.chunksize == 0 ||
		    mddev->bitmap_info.daemon_sleep == 0)
			/* chunksize and time_base need to be
			 * set first. */
			err = -EINVAL;
	}
	if (err)
		goto error;

	bitmap->daemon_lastrun = jiffies;
	err = md_bitmap_resize(bitmap, blocks, mddev->bitmap_info.chunksize, 1);
	if (err)
		goto error;

	pr_debug("created bitmap (%lu pages) for device %s\n",
		 bitmap->counts.pages, bmname(bitmap));

	err = test_bit(BITMAP_WRITE_ERROR, &bitmap->flags) ? -EIO : 0;
	if (err)
		goto error;

	return bitmap;
 error:
	md_bitmap_free(bitmap);
	return ERR_PTR(err);
}

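/*
 * Typical startup sequence (sketch): md_bitmap_create() allocates the
 * structure and reads or creates the superblock; md_bitmap_load() then
 * fills the in-memory counters from the on-disk bits and kicks recovery
 * if any were set.  On failure after mddev->bitmap is assigned, callers
 * are expected to invoke md_bitmap_destroy() to clean up.
 */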
int md_bitmap_load(struct mddev *mddev)
{
	int err = 0;
	sector_t start = 0;
	sector_t sector = 0;
	struct bitmap *bitmap = mddev->bitmap;
	struct md_rdev *rdev;

	if (!bitmap)
		goto out;

	rdev_for_each(rdev, mddev)
		mddev_create_serial_pool(mddev, rdev, true);

	if (mddev_is_clustered(mddev))
		md_cluster_ops->load_bitmaps(mddev, mddev->bitmap_info.nodes);

	/* Clear out old bitmap info first: Either there is none, or we
	 * are resuming after someone else has possibly changed things,
	 * so we should forget old cached info.
	 * All chunks should be clean, but some might need_sync.
	 */
	while (sector < mddev->resync_max_sectors) {
		sector_t blocks;
		md_bitmap_start_sync(bitmap, sector, &blocks, 0);
		sector += blocks;
	}
	md_bitmap_close_sync(bitmap);

	if (mddev->degraded == 0
	    || bitmap->events_cleared == mddev->events)
		/* no need to keep dirty bits to optimise a
		 * re-add of a missing device */
		start = mddev->recovery_cp;

	mutex_lock(&mddev->bitmap_info.mutex);
	err = md_bitmap_init_from_disk(bitmap, start);
	mutex_unlock(&mddev->bitmap_info.mutex);

	if (err)
		goto out;
	clear_bit(BITMAP_STALE, &bitmap->flags);

	/* Kick recovery in case any bits were set */
	set_bit(MD_RECOVERY_NEEDED, &bitmap->mddev->recovery);

	mddev->thread->timeout = mddev->bitmap_info.daemon_sleep;
	md_wakeup_thread(mddev->thread);

	md_bitmap_update_sb(bitmap);

	if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags))
		err = -EIO;
out:
	return err;
}
EXPORT_SYMBOL_GPL(md_bitmap_load);

/* The caller must free the returned bitmap with md_bitmap_free() */
struct bitmap *get_bitmap_from_slot(struct mddev *mddev, int slot)
{
	int rv = 0;
	struct bitmap *bitmap;

	bitmap = md_bitmap_create(mddev, slot);
	if (IS_ERR(bitmap)) {
		rv = PTR_ERR(bitmap);
		return ERR_PTR(rv);
	}

	rv = md_bitmap_init_from_disk(bitmap, 0);
	if (rv) {
		md_bitmap_free(bitmap);
		return ERR_PTR(rv);
	}

	return bitmap;
}
EXPORT_SYMBOL(get_bitmap_from_slot);

/* Loads the bitmap associated with slot and copies the resync information
 * to our bitmap
 */
int md_bitmap_copy_from_slot(struct mddev *mddev, int slot,
			     sector_t *low, sector_t *high, bool clear_bits)
{
	int rv = 0, i, j;
	sector_t block, lo = 0, hi = 0;
	struct bitmap_counts *counts;
	struct bitmap *bitmap;

	bitmap = get_bitmap_from_slot(mddev, slot);
	if (IS_ERR(bitmap)) {
		pr_err("%s can't get bitmap from slot %d\n", __func__, slot);
		return -1;
	}

	counts = &bitmap->counts;
	for (j = 0; j < counts->chunks; j++) {
		block = (sector_t)j << counts->chunkshift;
		if (md_bitmap_file_test_bit(bitmap, block)) {
			if (!lo)
				lo = block;
			hi = block;
			md_bitmap_file_clear_bit(bitmap, block);
			md_bitmap_set_memory_bits(mddev->bitmap, block, 1);
			md_bitmap_file_set_bit(mddev->bitmap, block);
		}
	}

	if (clear_bits) {
		md_bitmap_update_sb(bitmap);
		/* BITMAP_PAGE_PENDING is set, but bitmap_unplug needs
		 * BITMAP_PAGE_DIRTY or _NEEDWRITE to write ... */
		for (i = 0; i < bitmap->storage.file_pages; i++)
			if (test_page_attr(bitmap, i, BITMAP_PAGE_PENDING))
				set_page_attr(bitmap, i, BITMAP_PAGE_NEEDWRITE);
		md_bitmap_unplug(bitmap);
	}
	md_bitmap_unplug(mddev->bitmap);
	*low = lo;
	*high = hi;
	md_bitmap_free(bitmap);

	return rv;
}
EXPORT_SYMBOL_GPL(md_bitmap_copy_from_slot);

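/*
 * Emits the bitmap line shown in /proc/mdstat, e.g. (illustrative
 * values only):
 *
 *   bitmap: 3/8 pages [12KB], 65536KB chunk, file: /bitmap.bin
 */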
void md_bitmap_status(struct seq_file *seq, struct bitmap *bitmap)
{
	unsigned long chunk_kb;
	struct bitmap_counts *counts;

	if (!bitmap)
		return;

	counts = &bitmap->counts;

	chunk_kb = bitmap->mddev->bitmap_info.chunksize >> 10;
	seq_printf(seq, "bitmap: %lu/%lu pages [%luKB], "
		   "%lu%s chunk",
		   counts->pages - counts->missing_pages,
		   counts->pages,
		   (counts->pages - counts->missing_pages)
		   << (PAGE_SHIFT - 10),
		   chunk_kb ? chunk_kb : bitmap->mddev->bitmap_info.chunksize,
		   chunk_kb ? "KB" : "B");
	if (bitmap->storage.file) {
		seq_printf(seq, ", file: ");
		seq_file_path(seq, bitmap->storage.file, " \t\n");
	}

	seq_printf(seq, "\n");
}

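/*
 * Chunk sizing sketch for the chunksize == 0 case below: the do/while
 * loop keeps doubling the chunk size (incrementing chunkshift) until
 * the bitmap - chunks/8 bytes, plus the superblock for internal
 * bitmaps - fits within the 'space << 9' bytes available on disk.
 */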
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07002054int md_bitmap_resize(struct bitmap *bitmap, sector_t blocks,
NeilBrownd60b4792012-05-22 13:55:25 +10002055 int chunksize, int init)
2056{
2057 /* If chunk_size is 0, choose an appropriate chunk size.
2058 * Then possibly allocate new storage space.
2059 * Then quiesce, copy bits, replace bitmap, and re-start
2060 *
2061 * This function is called both to set up the initial bitmap
2062 * and to resize the bitmap while the array is active.
2063 * If this happens as a result of the array being resized,
2064 * chunksize will be zero, and we need to choose a suitable
2065 * chunksize, otherwise we use what we are given.
2066 */
2067 struct bitmap_storage store;
2068 struct bitmap_counts old_counts;
2069 unsigned long chunks;
2070 sector_t block;
2071 sector_t old_blocks, new_blocks;
2072 int chunkshift;
2073 int ret = 0;
2074 long pages;
2075 struct bitmap_page *new_bp;
2076
NeilBrowne8a27f82017-08-31 10:23:25 +10002077 if (bitmap->storage.file && !init) {
2078 pr_info("md: cannot resize file-based bitmap\n");
2079 return -EINVAL;
2080 }
2081
NeilBrownd60b4792012-05-22 13:55:25 +10002082 if (chunksize == 0) {
2083 /* If there is enough space, leave the chunk size unchanged,
2084 * else increase by factor of two until there is enough space.
2085 */
2086 long bytes;
2087 long space = bitmap->mddev->bitmap_info.space;
2088
2089 if (space == 0) {
2090 /* We don't know how much space there is, so limit
2091 * to current size - in sectors.
2092 */
2093 bytes = DIV_ROUND_UP(bitmap->counts.chunks, 8);
2094 if (!bitmap->mddev->bitmap_info.external)
2095 bytes += sizeof(bitmap_super_t);
2096 space = DIV_ROUND_UP(bytes, 512);
2097 bitmap->mddev->bitmap_info.space = space;
2098 }
2099 chunkshift = bitmap->counts.chunkshift;
2100 chunkshift--;
2101 do {
2102 /* 'chunkshift' is shift from block size to chunk size */
2103 chunkshift++;
2104 chunks = DIV_ROUND_UP_SECTOR_T(blocks, 1 << chunkshift);
2105 bytes = DIV_ROUND_UP(chunks, 8);
2106 if (!bitmap->mddev->bitmap_info.external)
2107 bytes += sizeof(bitmap_super_t);
2108 } while (bytes > (space << 9));
2109 } else
2110 chunkshift = ffz(~chunksize) - BITMAP_BLOCK_SHIFT;
2111
2112 chunks = DIV_ROUND_UP_SECTOR_T(blocks, 1 << chunkshift);
2113 memset(&store, 0, sizeof(store));
2114 if (bitmap->mddev->bitmap_info.offset || bitmap->mddev->bitmap_info.file)
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07002115 ret = md_bitmap_storage_alloc(&store, chunks,
2116 !bitmap->mddev->bitmap_info.external,
2117 mddev_is_clustered(bitmap->mddev)
2118 ? bitmap->cluster_slot : 0);
Guoqing Jiangcbb38732016-10-31 10:19:00 +08002119 if (ret) {
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07002120 md_bitmap_file_unmap(&store);
NeilBrownd60b4792012-05-22 13:55:25 +10002121 goto err;
Guoqing Jiangcbb38732016-10-31 10:19:00 +08002122 }
NeilBrownd60b4792012-05-22 13:55:25 +10002123
2124 pages = DIV_ROUND_UP(chunks, PAGE_COUNTER_RATIO);
2125
Kees Cook6396bb22018-06-12 14:03:40 -07002126 new_bp = kcalloc(pages, sizeof(*new_bp), GFP_KERNEL);
NeilBrownd60b4792012-05-22 13:55:25 +10002127 ret = -ENOMEM;
2128 if (!new_bp) {
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07002129 md_bitmap_file_unmap(&store);
NeilBrownd60b4792012-05-22 13:55:25 +10002130 goto err;
2131 }
2132
2133 if (!init)
2134 bitmap->mddev->pers->quiesce(bitmap->mddev, 1);
2135
2136 store.file = bitmap->storage.file;
2137 bitmap->storage.file = NULL;
2138
2139 if (store.sb_page && bitmap->storage.sb_page)
2140 memcpy(page_address(store.sb_page),
2141 page_address(bitmap->storage.sb_page),
Shaohua Li938b5332017-10-16 19:03:44 -07002142 sizeof(bitmap_super_t));
Guoqing Jiangfadcbd22019-09-26 13:53:50 +02002143 spin_lock_irq(&bitmap->counts.lock);
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07002144 md_bitmap_file_unmap(&bitmap->storage);
NeilBrownd60b4792012-05-22 13:55:25 +10002145 bitmap->storage = store;
2146
2147 old_counts = bitmap->counts;
2148 bitmap->counts.bp = new_bp;
2149 bitmap->counts.pages = pages;
2150 bitmap->counts.missing_pages = pages;
2151 bitmap->counts.chunkshift = chunkshift;
2152 bitmap->counts.chunks = chunks;
2153 bitmap->mddev->bitmap_info.chunksize = 1 << (chunkshift +
2154 BITMAP_BLOCK_SHIFT);
2155
2156 blocks = min(old_counts.chunks << old_counts.chunkshift,
2157 chunks << chunkshift);
2158
Guoqing Jiangc9d65032016-05-02 11:50:11 -04002159 /* For cluster raid, need to pre-allocate bitmap */
2160 if (mddev_is_clustered(bitmap->mddev)) {
2161 unsigned long page;
2162 for (page = 0; page < pages; page++) {
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07002163 ret = md_bitmap_checkpage(&bitmap->counts, page, 1, 1);
Guoqing Jiangc9d65032016-05-02 11:50:11 -04002164 if (ret) {
2165 unsigned long k;
2166
2167 /* deallocate the page memory */
2168 for (k = 0; k < page; k++) {
kbuild test robotbc47e842016-05-02 11:50:16 -04002169 kfree(new_bp[k].map);
Guoqing Jiangc9d65032016-05-02 11:50:11 -04002170 }
Zdenek Kabelac0868b992017-11-08 13:44:56 +01002171 kfree(new_bp);
Guoqing Jiangc9d65032016-05-02 11:50:11 -04002172
2173 /* restore some fields from old_counts */
2174 bitmap->counts.bp = old_counts.bp;
2175 bitmap->counts.pages = old_counts.pages;
2176 bitmap->counts.missing_pages = old_counts.pages;
2177 bitmap->counts.chunkshift = old_counts.chunkshift;
2178 bitmap->counts.chunks = old_counts.chunks;
2179 bitmap->mddev->bitmap_info.chunksize = 1 << (old_counts.chunkshift +
2180 BITMAP_BLOCK_SHIFT);
2181 blocks = old_counts.chunks << old_counts.chunkshift;
NeilBrownec0cc222016-11-02 14:16:49 +11002182 pr_warn("Could not pre-allocate in-memory bitmap for cluster raid\n");
Guoqing Jiangc9d65032016-05-02 11:50:11 -04002183 break;
2184 } else
2185 bitmap->counts.bp[page].count += 1;
2186 }
2187 }

	for (block = 0; block < blocks; ) {
		bitmap_counter_t *bmc_old, *bmc_new;
		int set;

		bmc_old = md_bitmap_get_counter(&old_counts, block, &old_blocks, 0);
		set = bmc_old && NEEDED(*bmc_old);

		if (set) {
			bmc_new = md_bitmap_get_counter(&bitmap->counts, block, &new_blocks, 1);
			if (*bmc_new == 0) {
				/* need to set on-disk bits too. */
				sector_t end = block + new_blocks;
				sector_t start = block >> chunkshift;
				start <<= chunkshift;
				while (start < end) {
					md_bitmap_file_set_bit(bitmap, block);
					start += 1 << chunkshift;
				}
				*bmc_new = 2;
				md_bitmap_count_page(&bitmap->counts, block, 1);
				md_bitmap_set_pending(&bitmap->counts, block);
			}
			*bmc_new |= NEEDED_MASK;
			if (new_blocks < old_blocks)
				old_blocks = new_blocks;
		}
		block += old_blocks;
	}
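	/* NOTE: the loop above walks in counter granularity:
	 * md_bitmap_get_counter() reports how many blocks each counter
	 * spans via old_blocks/new_blocks, and 'block' advances by the
	 * smaller of the two spans so no dirty region is skipped.  For
	 * example (illustrative figures), when growing the chunk size
	 * from 64KiB to 256KiB, each dirty old counter marks, and sets
	 * the on-disk bit of, the single larger chunk that contains it.
	 */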

	if (bitmap->counts.bp != old_counts.bp) {
		unsigned long k;
		for (k = 0; k < old_counts.pages; k++)
			if (!old_counts.bp[k].hijacked)
				kfree(old_counts.bp[k].map);
		kfree(old_counts.bp);
	}

	if (!init) {
		int i;
		while (block < (chunks << chunkshift)) {
			bitmap_counter_t *bmc;
			bmc = md_bitmap_get_counter(&bitmap->counts, block, &new_blocks, 1);
			if (bmc) {
				/* new space.  It needs to be resynced, so
				 * we set NEEDED_MASK.
				 */
				if (*bmc == 0) {
					*bmc = NEEDED_MASK | 2;
					md_bitmap_count_page(&bitmap->counts, block, 1);
					md_bitmap_set_pending(&bitmap->counts, block);
				}
			}
			block += new_blocks;
		}
		for (i = 0; i < bitmap->storage.file_pages; i++)
			set_page_attr(bitmap, i, BITMAP_PAGE_DIRTY);
	}
	spin_unlock_irq(&bitmap->counts.lock);

	if (!init) {
		md_bitmap_unplug(bitmap);
		bitmap->mddev->pers->quiesce(bitmap->mddev, 0);
	}
	ret = 0;
err:
	return ret;
}
EXPORT_SYMBOL_GPL(md_bitmap_resize);
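/*
 * Example caller (illustrative, not part of this file): a personality
 * growing an array typically resizes the bitmap before announcing the
 * new size, keeping the chunk size and asking for a resync of the added
 * space, roughly as raid5's resize path does:
 *
 *	int err = md_bitmap_resize(mddev->bitmap, sectors, 0, 0);
 *	if (err)
 *		return err;
 *
 * 'sectors' is the new per-device sync size, chunksize == 0 keeps the
 * current chunk size, and init == 0 marks the new region NEEDED.
 */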

static ssize_t
location_show(struct mddev *mddev, char *page)
{
	ssize_t len;
	if (mddev->bitmap_info.file)
		len = sprintf(page, "file");
	else if (mddev->bitmap_info.offset)
		len = sprintf(page, "%+lld", (long long)mddev->bitmap_info.offset);
	else
		len = sprintf(page, "none");
	len += sprintf(page+len, "\n");
	return len;
}

static ssize_t
location_store(struct mddev *mddev, const char *buf, size_t len)
{
	int rv;

	rv = mddev_lock(mddev);
	if (rv)
		return rv;
	if (mddev->pers) {
		if (!mddev->pers->quiesce) {
			rv = -EBUSY;
			goto out;
		}
		if (mddev->recovery || mddev->sync_thread) {
			rv = -EBUSY;
			goto out;
		}
	}

	if (mddev->bitmap || mddev->bitmap_info.file ||
	    mddev->bitmap_info.offset) {
		/* bitmap already configured.  Only option is to clear it */
		if (strncmp(buf, "none", 4) != 0) {
			rv = -EBUSY;
			goto out;
		}
		if (mddev->pers) {
			mddev_suspend(mddev);
			md_bitmap_destroy(mddev);
			mddev_resume(mddev);
		}
		mddev->bitmap_info.offset = 0;
		if (mddev->bitmap_info.file) {
			struct file *f = mddev->bitmap_info.file;
			mddev->bitmap_info.file = NULL;
			fput(f);
		}
	} else {
		/* No bitmap, OK to set a location */
		long long offset;
		if (strncmp(buf, "none", 4) == 0)
			/* nothing to be done */;
		else if (strncmp(buf, "file:", 5) == 0) {
			/* Not supported yet */
			rv = -EINVAL;
			goto out;
		} else {
			if (buf[0] == '+')
				rv = kstrtoll(buf+1, 10, &offset);
			else
				rv = kstrtoll(buf, 10, &offset);
			if (rv)
				goto out;
			if (offset == 0) {
				rv = -EINVAL;
				goto out;
			}
			if (mddev->bitmap_info.external == 0 &&
			    mddev->major_version == 0 &&
			    offset != mddev->bitmap_info.default_offset) {
				rv = -EINVAL;
				goto out;
			}
			mddev->bitmap_info.offset = offset;
			if (mddev->pers) {
				struct bitmap *bitmap;
				bitmap = md_bitmap_create(mddev, -1);
				mddev_suspend(mddev);
				if (IS_ERR(bitmap))
					rv = PTR_ERR(bitmap);
				else {
					mddev->bitmap = bitmap;
					rv = md_bitmap_load(mddev);
					if (rv)
						mddev->bitmap_info.offset = 0;
				}
				if (rv) {
					md_bitmap_destroy(mddev);
					mddev_resume(mddev);
					goto out;
				}
				mddev_resume(mddev);
			}
		}
	}
	if (!mddev->external) {
		/* Ensure new bitmap info is stored in
		 * metadata promptly.
		 */
		set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
		md_wakeup_thread(mddev->thread);
	}
	rv = 0;
out:
	mddev_unlock(mddev);
	if (rv)
		return rv;
	return len;
}

static struct md_sysfs_entry bitmap_location =
__ATTR(location, S_IRUGO|S_IWUSR, location_show, location_store);
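/*
 * Example usage (illustrative; device name assumed, md sysfs lives
 * under /sys/block/<dev>/md/): with no bitmap configured, an internal
 * bitmap can be placed at an offset from the superblock, or torn down:
 *
 *	echo +8 > /sys/block/md0/md/bitmap/location
 *	echo none > /sys/block/md0/md/bitmap/location
 *
 * The offset is in 512-byte sectors relative to the superblock.  Note
 * that "file:..." is rejected above, so file-backed bitmaps must be
 * configured at create/assemble time instead.
 */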

/* 'bitmap/space' is the space available at 'location' for the
 * bitmap.  This allows the kernel to know when it is safe to
 * resize the bitmap to match a resized array.
 */
static ssize_t
space_show(struct mddev *mddev, char *page)
{
	return sprintf(page, "%lu\n", mddev->bitmap_info.space);
}

static ssize_t
space_store(struct mddev *mddev, const char *buf, size_t len)
{
	unsigned long sectors;
	int rv;

	rv = kstrtoul(buf, 10, &sectors);
	if (rv)
		return rv;

	if (sectors == 0)
		return -EINVAL;

	if (mddev->bitmap &&
	    sectors < (mddev->bitmap->storage.bytes + 511) >> 9)
		return -EFBIG; /* Bitmap is too big for this small space */

	/* could make sure it isn't too big, but that isn't really
	 * needed - user-space should be careful.
	 */
	mddev->bitmap_info.space = sectors;
	return len;
}

static struct md_sysfs_entry bitmap_space =
__ATTR(space, S_IRUGO|S_IWUSR, space_show, space_store);
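/*
 * Example usage (illustrative; device name assumed): reserving 16
 * sectors at 'location' so a later md_bitmap_resize() can grow the
 * on-disk bitmap without colliding with data or metadata:
 *
 *	echo 16 > /sys/block/md0/md/bitmap/space
 *
 * The unit is 512-byte sectors; a value smaller than the current
 * bitmap (storage.bytes rounded up to sectors) fails with -EFBIG.
 */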

static ssize_t
timeout_show(struct mddev *mddev, char *page)
{
	ssize_t len;
	unsigned long secs = mddev->bitmap_info.daemon_sleep / HZ;
	unsigned long jifs = mddev->bitmap_info.daemon_sleep % HZ;

	len = sprintf(page, "%lu", secs);
	if (jifs)
		len += sprintf(page+len, ".%03u", jiffies_to_msecs(jifs));
	len += sprintf(page+len, "\n");
	return len;
}

static ssize_t
timeout_store(struct mddev *mddev, const char *buf, size_t len)
{
	/* timeout can be set at any time */
	unsigned long timeout;
	int rv = strict_strtoul_scaled(buf, &timeout, 4);
	if (rv)
		return rv;

	/* just to make sure we don't overflow... */
	if (timeout >= LONG_MAX / HZ)
		return -EINVAL;

	timeout = timeout * HZ / 10000;

	if (timeout >= MAX_SCHEDULE_TIMEOUT)
		timeout = MAX_SCHEDULE_TIMEOUT-1;
	if (timeout < 1)
		timeout = 1;
	mddev->bitmap_info.daemon_sleep = timeout;
	if (mddev->thread) {
		/* if thread->timeout is MAX_SCHEDULE_TIMEOUT, then
		 * the bitmap is all clean and we don't need to
		 * adjust the timeout right now
		 */
		if (mddev->thread->timeout < MAX_SCHEDULE_TIMEOUT) {
			mddev->thread->timeout = timeout;
			md_wakeup_thread(mddev->thread);
		}
	}
	return len;
}

static struct md_sysfs_entry bitmap_timeout =
__ATTR(time_base, S_IRUGO|S_IWUSR, timeout_show, timeout_store);
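/*
 * Example usage (illustrative; device name assumed): the delay is
 * written in seconds with up to four decimal places; the scaled parse
 * (10^4) plus the '* HZ / 10000' above converts it to jiffies:
 *
 *	echo 5.5 > /sys/block/md0/md/bitmap/time_base
 *
 * parses as 55000, and 55000 * HZ / 10000 == 5.5 * HZ jiffies.
 */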

static ssize_t
backlog_show(struct mddev *mddev, char *page)
{
	return sprintf(page, "%lu\n", mddev->bitmap_info.max_write_behind);
}

static ssize_t
backlog_store(struct mddev *mddev, const char *buf, size_t len)
{
	unsigned long backlog;
	unsigned long old_mwb = mddev->bitmap_info.max_write_behind;
	struct md_rdev *rdev;
	bool has_write_mostly = false;
	int rv = kstrtoul(buf, 10, &backlog);
	if (rv)
		return rv;
	if (backlog > COUNTER_MAX)
		return -EINVAL;

	/*
	 * Without a write-mostly device, it doesn't make sense to set
	 * a backlog for max_write_behind.
	 */
	rdev_for_each(rdev, mddev) {
		if (test_bit(WriteMostly, &rdev->flags)) {
			has_write_mostly = true;
			break;
		}
	}
	if (!has_write_mostly) {
		pr_warn_ratelimited("%s: can't set backlog, no write mostly device available\n",
				    mdname(mddev));
		return -EINVAL;
	}

	mddev->bitmap_info.max_write_behind = backlog;
	if (!backlog && mddev->serial_info_pool) {
		/* serial_info_pool is not needed if backlog is zero */
		if (!mddev->serialize_policy)
			mddev_destroy_serial_pool(mddev, NULL, false);
	} else if (backlog && !mddev->serial_info_pool) {
		/* serial_info_pool is needed since backlog is not zero;
		 * reuse the rdev cursor declared above
		 */
		rdev_for_each(rdev, mddev)
			mddev_create_serial_pool(mddev, rdev, false);
	}
	if (old_mwb != backlog)
		md_bitmap_update_sb(mddev->bitmap);
	return len;
}

static struct md_sysfs_entry bitmap_backlog =
__ATTR(backlog, S_IRUGO|S_IWUSR, backlog_show, backlog_store);
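/*
 * Example usage (illustrative; device name assumed): with at least one
 * write-mostly member present, allow up to 256 in-flight write-behind
 * requests:
 *
 *	echo 256 > /sys/block/md0/md/bitmap/backlog
 *
 * Values above COUNTER_MAX are rejected; writing 0 disables
 * write-behind and tears down the serial_info_pool unless
 * serialize_policy still needs it.
 */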

static ssize_t
chunksize_show(struct mddev *mddev, char *page)
{
	return sprintf(page, "%lu\n", mddev->bitmap_info.chunksize);
}

static ssize_t
chunksize_store(struct mddev *mddev, const char *buf, size_t len)
{
	/* Can only be changed when no bitmap is active */
	int rv;
	unsigned long csize;
	if (mddev->bitmap)
		return -EBUSY;
	rv = kstrtoul(buf, 10, &csize);
	if (rv)
		return rv;
	if (csize < 512 ||
	    !is_power_of_2(csize))
		return -EINVAL;
	mddev->bitmap_info.chunksize = csize;
	return len;
}

static struct md_sysfs_entry bitmap_chunksize =
__ATTR(chunksize, S_IRUGO|S_IWUSR, chunksize_show, chunksize_store);
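/*
 * Example usage (illustrative; device name assumed): the chunk size is
 * in bytes and must be a power of two, at least 512.  It can only be
 * changed while no bitmap is active, e.g. before writing 'location':
 *
 *	echo 65536 > /sys/block/md0/md/bitmap/chunksize
 *	echo +8 > /sys/block/md0/md/bitmap/location
 */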

static ssize_t metadata_show(struct mddev *mddev, char *page)
{
	if (mddev_is_clustered(mddev))
		return sprintf(page, "clustered\n");
	return sprintf(page, "%s\n", (mddev->bitmap_info.external
				      ? "external" : "internal"));
}

static ssize_t metadata_store(struct mddev *mddev, const char *buf, size_t len)
{
	if (mddev->bitmap ||
	    mddev->bitmap_info.file ||
	    mddev->bitmap_info.offset)
		return -EBUSY;
	if (strncmp(buf, "external", 8) == 0)
		mddev->bitmap_info.external = 1;
	else if ((strncmp(buf, "internal", 8) == 0) ||
		 (strncmp(buf, "clustered", 9) == 0))
		mddev->bitmap_info.external = 0;
	else
		return -EINVAL;
	return len;
}

static struct md_sysfs_entry bitmap_metadata =
__ATTR(metadata, S_IRUGO|S_IWUSR, metadata_show, metadata_store);
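/*
 * Example usage (illustrative; device name assumed): declaring the
 * bitmap superblock as externally managed before placing the bitmap,
 * so the kernel treats it as maintained by userspace:
 *
 *	echo external > /sys/block/md0/md/bitmap/metadata
 *
 * Once a bitmap, file or offset exists, writes fail with -EBUSY.
 */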

static ssize_t can_clear_show(struct mddev *mddev, char *page)
{
	int len;
	spin_lock(&mddev->lock);
	if (mddev->bitmap)
		len = sprintf(page, "%s\n", (mddev->bitmap->need_sync ?
					     "false" : "true"));
	else
		len = sprintf(page, "\n");
	spin_unlock(&mddev->lock);
	return len;
}

static ssize_t can_clear_store(struct mddev *mddev, const char *buf, size_t len)
{
	if (mddev->bitmap == NULL)
		return -ENOENT;
	if (strncmp(buf, "false", 5) == 0)
		mddev->bitmap->need_sync = 1;
	else if (strncmp(buf, "true", 4) == 0) {
		if (mddev->degraded)
			return -EBUSY;
		mddev->bitmap->need_sync = 0;
	} else
		return -EINVAL;
	return len;
}

static struct md_sysfs_entry bitmap_can_clear =
__ATTR(can_clear, S_IRUGO|S_IWUSR, can_clear_show, can_clear_store);
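/*
 * Example usage (illustrative; device name assumed): an external
 * metadata manager such as mdmon writes 'false' while it still needs
 * the dirty bits, then 'true' once the array is fully in sync again;
 * 'true' is refused with -EBUSY while the array is degraded:
 *
 *	echo false > /sys/block/md0/md/bitmap/can_clear
 *	echo true > /sys/block/md0/md/bitmap/can_clear
 */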

static ssize_t
behind_writes_used_show(struct mddev *mddev, char *page)
{
	ssize_t ret;
	spin_lock(&mddev->lock);
	if (mddev->bitmap == NULL)
		ret = sprintf(page, "0\n");
	else
		ret = sprintf(page, "%lu\n",
			      mddev->bitmap->behind_writes_used);
	spin_unlock(&mddev->lock);
	return ret;
}

static ssize_t
behind_writes_used_reset(struct mddev *mddev, const char *buf, size_t len)
{
	if (mddev->bitmap)
		mddev->bitmap->behind_writes_used = 0;
	return len;
}

static struct md_sysfs_entry max_backlog_used =
__ATTR(max_backlog_used, S_IRUGO | S_IWUSR,
       behind_writes_used_show, behind_writes_used_reset);

static struct attribute *md_bitmap_attrs[] = {
	&bitmap_location.attr,
	&bitmap_space.attr,
	&bitmap_timeout.attr,
	&bitmap_backlog.attr,
	&bitmap_chunksize.attr,
	&bitmap_metadata.attr,
	&bitmap_can_clear.attr,
	&max_backlog_used.attr,
	NULL
};
const struct attribute_group md_bitmap_group = {
	.name = "bitmap",
	.attrs = md_bitmap_attrs,
};
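/*
 * NOTE: the group's .name places these attributes in a 'bitmap'
 * subdirectory of the md kobject, so on a typical system the files
 * defined above appear as, e.g. (device name assumed):
 *
 *	/sys/block/md0/md/bitmap/location
 *	/sys/block/md0/md/bitmap/chunksize
 */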