// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2003 Sistina Software Limited.
 * Copyright (C) 2005-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-bio-record.h"

#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/dm-dirty-log.h>
#include <linux/dm-kcopyd.h>
#include <linux/dm-region-hash.h>

static struct workqueue_struct *dm_raid1_wq;

#define DM_MSG_PREFIX "raid1"

#define MAX_RECOVERY 1	/* Maximum number of regions recovered in parallel. */

#define MAX_NR_MIRRORS	(DM_KCOPYD_MAX_REGIONS + 1)

#define DM_RAID1_HANDLE_ERRORS	0x01
#define DM_RAID1_KEEP_LOG	0x02
#define errors_handled(p)	((p)->features & DM_RAID1_HANDLE_ERRORS)
#define keep_log(p)		((p)->features & DM_RAID1_KEEP_LOG)

static DECLARE_WAIT_QUEUE_HEAD(_kmirrord_recovery_stopped);

/*
 *---------------------------------------------------------------
 * Mirror set structures.
 *---------------------------------------------------------------
 */
enum dm_raid1_error {
	DM_RAID1_WRITE_ERROR,
	DM_RAID1_FLUSH_ERROR,
	DM_RAID1_SYNC_ERROR,
	DM_RAID1_READ_ERROR
};

struct mirror {
	struct mirror_set *ms;
	atomic_t error_count;
	unsigned long error_type;
	struct dm_dev *dev;
	sector_t offset;
};

struct mirror_set {
	struct dm_target *ti;
	struct list_head list;

	uint64_t features;

	spinlock_t lock;	/* protects the lists */
	struct bio_list reads;
	struct bio_list writes;
	struct bio_list failures;
	struct bio_list holds;	/* bios are waiting until suspend */

	struct dm_region_hash *rh;
	struct dm_kcopyd_client *kcopyd_client;
	struct dm_io_client *io_client;

	/* recovery */
	region_t nr_regions;
	int in_sync;
	int log_failure;
	int leg_failure;
	atomic_t suspend;

	atomic_t default_mirror;	/* Default mirror */

	struct workqueue_struct *kmirrord_wq;
	struct work_struct kmirrord_work;
	struct timer_list timer;
	unsigned long timer_pending;

	struct work_struct trigger_event;

	unsigned int nr_mirrors;
	struct mirror mirror[];
};

DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(raid1_resync_throttle,
		"A percentage of time allocated for raid resynchronization");

static void wakeup_mirrord(void *context)
{
	struct mirror_set *ms = context;

	queue_work(ms->kmirrord_wq, &ms->kmirrord_work);
}

static void delayed_wake_fn(struct timer_list *t)
{
	struct mirror_set *ms = from_timer(ms, t, timer);

	clear_bit(0, &ms->timer_pending);
	wakeup_mirrord(ms);
}

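/*
 * Wake kmirrord after a short delay (HZ / 5), coalescing repeated
 * requests: the timer_pending bit ensures only one timer is armed at
 * a time; delayed_wake_fn() above clears it and performs the wakeup.
 */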
static void delayed_wake(struct mirror_set *ms)
{
	if (test_and_set_bit(0, &ms->timer_pending))
		return;

	ms->timer.expires = jiffies + HZ / 5;
	add_timer(&ms->timer);
}

static void wakeup_all_recovery_waiters(void *context)
{
	wake_up_all(&_kmirrord_recovery_stopped);
}

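/*
 * Add a bio to the appropriate read or write list and wake kmirrord,
 * but only when the list goes from empty to non-empty; a non-empty
 * list means a wakeup is already on its way.
 */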
static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw)
{
	unsigned long flags;
	int should_wake = 0;
	struct bio_list *bl;

	bl = (rw == WRITE) ? &ms->writes : &ms->reads;
	spin_lock_irqsave(&ms->lock, flags);
	should_wake = !(bl->head);
	bio_list_add(bl, bio);
	spin_unlock_irqrestore(&ms->lock, flags);

	if (should_wake)
		wakeup_mirrord(ms);
}

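/*
 * Callback handed to dm_region_hash_create() below; the region hash
 * uses it to re-queue writes that were delayed while their region
 * was recovering.
 */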
static void dispatch_bios(void *context, struct bio_list *bio_list)
{
	struct mirror_set *ms = context;
	struct bio *bio;

	while ((bio = bio_list_pop(bio_list)))
		queue_bio(ms, bio, WRITE);
}

struct dm_raid1_bio_record {
	struct mirror *m;
	/* if details->bi_bdev == NULL, details were not saved */
	struct dm_bio_details details;
	region_t write_region;
};

/*
 * Every mirror should look like this one.
 */
#define DEFAULT_MIRROR 0

/*
 * This is yucky.  We squirrel the mirror struct away inside
 * bi_next for read/write buffers.  This is safe since the bh
 * doesn't get submitted to the lower levels of block layer.
 */
static struct mirror *bio_get_m(struct bio *bio)
{
	return (struct mirror *) bio->bi_next;
}

static void bio_set_m(struct bio *bio, struct mirror *m)
{
	bio->bi_next = (struct bio *) m;
}

static struct mirror *get_default_mirror(struct mirror_set *ms)
{
	return &ms->mirror[atomic_read(&ms->default_mirror)];
}

static void set_default_mirror(struct mirror *m)
{
	struct mirror_set *ms = m->ms;
	struct mirror *m0 = &(ms->mirror[0]);

	atomic_set(&ms->default_mirror, m - m0);
}

static struct mirror *get_valid_mirror(struct mirror_set *ms)
{
	struct mirror *m;

	for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
		if (!atomic_read(&m->error_count))
			return m;

	return NULL;
}

/*
 * fail_mirror
 * @m: mirror device to fail
 * @error_type: one of the enum values, DM_RAID1_*_ERROR
 *
 * If errors are being handled, record the type of
 * error encountered for this device.  If this type
 * of error has already been recorded, we can return;
 * otherwise, we must signal userspace by triggering
 * an event.  Additionally, if the device is the
 * primary device, we must choose a new primary, but
 * only if the mirror is in-sync.
 *
 * This function must not block.
 */
static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
{
	struct mirror_set *ms = m->ms;
	struct mirror *new;

	ms->leg_failure = 1;

	/*
	 * error_count is used for nothing more than a
	 * simple way to tell if a device has encountered
	 * errors.
	 */
	atomic_inc(&m->error_count);

	if (test_and_set_bit(error_type, &m->error_type))
		return;

	if (!errors_handled(ms))
		return;

	if (m != get_default_mirror(ms))
		goto out;

	if (!ms->in_sync && !keep_log(ms)) {
		/*
		 * Better to issue requests to same failing device
		 * than to risk returning corrupt data.
		 */
		DMERR("Primary mirror (%s) failed while out-of-sync: Reads may fail.",
		      m->dev->name);
		goto out;
	}

	new = get_valid_mirror(ms);
	if (new)
		set_default_mirror(new);
	else
		DMWARN("All sides of mirror have failed.");

out:
	queue_work(dm_raid1_wq, &ms->trigger_event);
}

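/*
 * Issue an empty REQ_PREFLUSH to every leg with a single dm_io()
 * call; each bit set in error_bits identifies a leg whose flush
 * failed, and that leg is marked with DM_RAID1_FLUSH_ERROR.
 */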
static int mirror_flush(struct dm_target *ti)
{
	struct mirror_set *ms = ti->private;
	unsigned long error_bits;

	unsigned int i;
	struct dm_io_region io[MAX_NR_MIRRORS];
	struct mirror *m;
	struct dm_io_request io_req = {
		.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC,
		.mem.type = DM_IO_KMEM,
		.mem.ptr.addr = NULL,
		.client = ms->io_client,
	};

	for (i = 0, m = ms->mirror; i < ms->nr_mirrors; i++, m++) {
		io[i].bdev = m->dev->bdev;
		io[i].sector = 0;
		io[i].count = 0;
	}

	error_bits = -1;
	dm_io(&io_req, ms->nr_mirrors, io, &error_bits);
	if (unlikely(error_bits != 0)) {
		for (i = 0; i < ms->nr_mirrors; i++)
			if (test_bit(i, &error_bits))
				fail_mirror(ms->mirror + i,
					    DM_RAID1_FLUSH_ERROR);
		return -EIO;
	}

	return 0;
}

/*
 *---------------------------------------------------------------
 * Recovery.
 *
 * When a mirror is first activated we may find that some regions
 * are in the no-sync state.  We have to recover these by
 * recopying from the default mirror to all the others.
 *---------------------------------------------------------------
 */
static void recovery_complete(int read_err, unsigned long write_err,
			      void *context)
{
	struct dm_region *reg = context;
	struct mirror_set *ms = dm_rh_region_context(reg);
	int m, bit = 0;

	if (read_err) {
		/* Read error means the failure of default mirror. */
		DMERR_LIMIT("Unable to read primary mirror during recovery");
		fail_mirror(get_default_mirror(ms), DM_RAID1_SYNC_ERROR);
	}

	if (write_err) {
		DMERR_LIMIT("Write error during recovery (error = 0x%lx)",
			    write_err);
		/*
		 * Bits correspond to devices (excluding default mirror).
		 * The default mirror cannot change during recovery.
		 */
		for (m = 0; m < ms->nr_mirrors; m++) {
			if (&ms->mirror[m] == get_default_mirror(ms))
				continue;
			if (test_bit(bit, &write_err))
				fail_mirror(ms->mirror + m,
					    DM_RAID1_SYNC_ERROR);
			bit++;
		}
	}

	dm_rh_recovery_end(reg, !(read_err || write_err));
}

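/*
 * Kick off an asynchronous kcopyd copy of one region from the
 * default mirror to every other leg; recovery_complete() above
 * marks the region recovered (or fails legs) when the copy ends.
 */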
static void recover(struct mirror_set *ms, struct dm_region *reg)
{
	unsigned int i;
	struct dm_io_region from, to[DM_KCOPYD_MAX_REGIONS], *dest;
	struct mirror *m;
	unsigned long flags = 0;
	region_t key = dm_rh_get_region_key(reg);
	sector_t region_size = dm_rh_get_region_size(ms->rh);

	/* fill in the source */
	m = get_default_mirror(ms);
	from.bdev = m->dev->bdev;
	from.sector = m->offset + dm_rh_region_to_sector(ms->rh, key);
	if (key == (ms->nr_regions - 1)) {
		/*
		 * The final region may be smaller than
		 * region_size.
		 */
		from.count = ms->ti->len & (region_size - 1);
		if (!from.count)
			from.count = region_size;
	} else
		from.count = region_size;

	/* fill in the destinations */
	for (i = 0, dest = to; i < ms->nr_mirrors; i++) {
		if (&ms->mirror[i] == get_default_mirror(ms))
			continue;

		m = ms->mirror + i;
		dest->bdev = m->dev->bdev;
		dest->sector = m->offset + dm_rh_region_to_sector(ms->rh, key);
		dest->count = from.count;
		dest++;
	}

	/* hand to kcopyd */
	if (!errors_handled(ms))
		flags |= BIT(DM_KCOPYD_IGNORE_ERROR);

	dm_kcopyd_copy(ms->kcopyd_client, &from, ms->nr_mirrors - 1, to,
		       flags, recovery_complete, reg);
}

static void reset_ms_flags(struct mirror_set *ms)
{
	unsigned int m;

	ms->leg_failure = 0;
	for (m = 0; m < ms->nr_mirrors; m++) {
		atomic_set(&(ms->mirror[m].error_count), 0);
		ms->mirror[m].error_type = 0;
	}
}

static void do_recovery(struct mirror_set *ms)
{
	struct dm_region *reg;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);

	/*
	 * Start quiescing some regions.
	 */
	dm_rh_recovery_prepare(ms->rh);

	/*
	 * Copy any already quiesced regions.
	 */
	while ((reg = dm_rh_recovery_start(ms->rh)))
		recover(ms, reg);

	/*
	 * Update the in sync flag.
	 */
	if (!ms->in_sync &&
	    (log->type->get_sync_count(log) == ms->nr_regions)) {
		/* the sync is complete */
		dm_table_event(ms->ti->table);
		ms->in_sync = 1;
		reset_ms_flags(ms);
	}
}

/*
 *---------------------------------------------------------------
 * Reads
 *---------------------------------------------------------------
 */
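/*
 * Scan backwards from the default mirror, wrapping around, for the
 * first leg with a zero error_count; returns NULL when every leg
 * has recorded errors.
 */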
static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
{
	struct mirror *m = get_default_mirror(ms);

	do {
		if (likely(!atomic_read(&m->error_count)))
			return m;

		if (m-- == ms->mirror)
			m += ms->nr_mirrors;
	} while (m != get_default_mirror(ms));

	return NULL;
}

static int default_ok(struct mirror *m)
{
	struct mirror *default_mirror = get_default_mirror(m->ms);

	return !atomic_read(&default_mirror->error_count);
}

static int mirror_available(struct mirror_set *ms, struct bio *bio)
{
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
	region_t region = dm_rh_bio_to_region(ms->rh, bio);

	if (log->type->in_sync(log, region, 0))
		return choose_mirror(ms, bio->bi_iter.bi_sector) ? 1 : 0;

	return 0;
}

/*
 * remap a buffer to a particular mirror.
 */
static sector_t map_sector(struct mirror *m, struct bio *bio)
{
	if (unlikely(!bio->bi_iter.bi_size))
		return 0;
	return m->offset + dm_target_offset(m->ms->ti, bio->bi_iter.bi_sector);
}

static void map_bio(struct mirror *m, struct bio *bio)
{
	bio_set_dev(bio, m->dev->bdev);
	bio->bi_iter.bi_sector = map_sector(m, bio);
}

static void map_region(struct dm_io_region *io, struct mirror *m,
		       struct bio *bio)
{
	io->bdev = m->dev->bdev;
	io->sector = map_sector(m, bio);
	io->count = bio_sectors(bio);
}

static void hold_bio(struct mirror_set *ms, struct bio *bio)
{
	/*
	 * Lock is required to avoid race condition during suspend
	 * process.
	 */
	spin_lock_irq(&ms->lock);

	if (atomic_read(&ms->suspend)) {
		spin_unlock_irq(&ms->lock);

		/*
		 * If device is suspended, complete the bio.
		 */
		if (dm_noflush_suspending(ms->ti))
			bio->bi_status = BLK_STS_DM_REQUEUE;
		else
			bio->bi_status = BLK_STS_IOERR;

		bio_endio(bio);
		return;
	}

	/*
	 * Hold bio until the suspend is complete.
	 */
	bio_list_add(&ms->holds, bio);
	spin_unlock_irq(&ms->lock);
}

/*
 *---------------------------------------------------------------
 * Reads
 *---------------------------------------------------------------
 */
static void read_callback(unsigned long error, void *context)
{
	struct bio *bio = context;
	struct mirror *m;

	m = bio_get_m(bio);
	bio_set_m(bio, NULL);

	if (likely(!error)) {
		bio_endio(bio);
		return;
	}

	fail_mirror(m, DM_RAID1_READ_ERROR);

	if (likely(default_ok(m)) || mirror_available(m->ms, bio)) {
		DMWARN_LIMIT("Read failure on mirror device %s. Trying alternative device.",
			     m->dev->name);
		queue_bio(m->ms, bio, bio_data_dir(bio));
		return;
	}

	DMERR_LIMIT("Read failure on mirror device %s. Failing I/O.",
		    m->dev->name);
	bio_io_error(bio);
}

/* Asynchronous read. */
static void read_async_bio(struct mirror *m, struct bio *bio)
{
	struct dm_io_region io;
	struct dm_io_request io_req = {
		.bi_opf = REQ_OP_READ,
		.mem.type = DM_IO_BIO,
		.mem.ptr.bio = bio,
		.notify.fn = read_callback,
		.notify.context = bio,
		.client = m->ms->io_client,
	};

	map_region(&io, m, bio);
	bio_set_m(bio, m);
	BUG_ON(dm_io(&io_req, 1, &io, NULL));
}

static inline int region_in_sync(struct mirror_set *ms, region_t region,
				 int may_block)
{
	int state = dm_rh_get_state(ms->rh, region, may_block);

	return state == DM_RH_CLEAN || state == DM_RH_DIRTY;
}

static void do_reads(struct mirror_set *ms, struct bio_list *reads)
{
	region_t region;
	struct bio *bio;
	struct mirror *m;

	while ((bio = bio_list_pop(reads))) {
		region = dm_rh_bio_to_region(ms->rh, bio);
		m = get_default_mirror(ms);

		/*
		 * We can only read balance if the region is in sync.
		 */
		if (likely(region_in_sync(ms, region, 1)))
			m = choose_mirror(ms, bio->bi_iter.bi_sector);
		else if (m && atomic_read(&m->error_count))
			m = NULL;

		if (likely(m))
			read_async_bio(m, bio);
		else
			bio_io_error(bio);
	}
}

/*
 *---------------------------------------------------------------------
 * Writes.
 *
 * We do different things with the write io depending on the
 * state of the region that it's in:
 *
 * SYNC:	increment pending, use kcopyd to write to *all* mirrors
 * RECOVERING:	delay the io until recovery completes
 * NOSYNC:	increment pending, just write to the default mirror
 *---------------------------------------------------------------------
 */
static void write_callback(unsigned long error, void *context)
{
	unsigned int i;
	struct bio *bio = context;
	struct mirror_set *ms;
	int should_wake = 0;
	unsigned long flags;

	ms = bio_get_m(bio)->ms;
	bio_set_m(bio, NULL);

	/*
	 * NOTE: We don't decrement the pending count here;
	 * instead it is done by the target's endio function.
	 * This way we handle both writes to SYNC and NOSYNC
	 * regions with the same code.
	 */
	if (likely(!error)) {
		bio_endio(bio);
		return;
	}

	/*
	 * If the bio is discard, return an error, but do not
	 * degrade the array.
	 */
	if (bio_op(bio) == REQ_OP_DISCARD) {
		bio->bi_status = BLK_STS_NOTSUPP;
		bio_endio(bio);
		return;
	}

	for (i = 0; i < ms->nr_mirrors; i++)
		if (test_bit(i, &error))
			fail_mirror(ms->mirror + i, DM_RAID1_WRITE_ERROR);

	/*
	 * Need to raise event.  Since raising
	 * events can block, we need to do it in
	 * the main thread.
	 */
	spin_lock_irqsave(&ms->lock, flags);
	if (!ms->failures.head)
		should_wake = 1;
	bio_list_add(&ms->failures, bio);
	spin_unlock_irqrestore(&ms->lock, flags);
	if (should_wake)
		wakeup_mirrord(ms);
}

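/*
 * Mirror one write to every leg with a single asynchronous dm_io()
 * request; write_callback() above collects the per-leg result bits.
 */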
static void do_write(struct mirror_set *ms, struct bio *bio)
{
	unsigned int i;
	struct dm_io_region io[MAX_NR_MIRRORS], *dest = io;
	struct mirror *m;
	blk_opf_t op_flags = bio->bi_opf & (REQ_FUA | REQ_PREFLUSH);
	struct dm_io_request io_req = {
		.bi_opf = REQ_OP_WRITE | op_flags,
		.mem.type = DM_IO_BIO,
		.mem.ptr.bio = bio,
		.notify.fn = write_callback,
		.notify.context = bio,
		.client = ms->io_client,
	};

	if (bio_op(bio) == REQ_OP_DISCARD) {
		io_req.bi_opf = REQ_OP_DISCARD | op_flags;
		io_req.mem.type = DM_IO_KMEM;
		io_req.mem.ptr.addr = NULL;
	}

	for (i = 0, m = ms->mirror; i < ms->nr_mirrors; i++, m++)
		map_region(dest++, m, bio);

	/*
	 * Use default mirror because we only need it to retrieve the reference
	 * to the mirror set in write_callback().
	 */
	bio_set_m(bio, get_default_mirror(ms));

	BUG_ON(dm_io(&io_req, ms->nr_mirrors, io, NULL));
}

static void do_writes(struct mirror_set *ms, struct bio_list *writes)
{
	int state;
	struct bio *bio;
	struct bio_list sync, nosync, recover, *this_list = NULL;
	struct bio_list requeue;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
	region_t region;

	if (!writes->head)
		return;

	/*
	 * Classify each write.
	 */
	bio_list_init(&sync);
	bio_list_init(&nosync);
	bio_list_init(&recover);
	bio_list_init(&requeue);

	while ((bio = bio_list_pop(writes))) {
		if ((bio->bi_opf & REQ_PREFLUSH) ||
		    (bio_op(bio) == REQ_OP_DISCARD)) {
			bio_list_add(&sync, bio);
			continue;
		}

		region = dm_rh_bio_to_region(ms->rh, bio);

		if (log->type->is_remote_recovering &&
		    log->type->is_remote_recovering(log, region)) {
			bio_list_add(&requeue, bio);
			continue;
		}

		state = dm_rh_get_state(ms->rh, region, 1);
		switch (state) {
		case DM_RH_CLEAN:
		case DM_RH_DIRTY:
			this_list = &sync;
			break;

		case DM_RH_NOSYNC:
			this_list = &nosync;
			break;

		case DM_RH_RECOVERING:
			this_list = &recover;
			break;
		}

		bio_list_add(this_list, bio);
	}

	/*
	 * Add bios that are delayed due to remote recovery
	 * back on to the write queue
	 */
	if (unlikely(requeue.head)) {
		spin_lock_irq(&ms->lock);
		bio_list_merge(&ms->writes, &requeue);
		spin_unlock_irq(&ms->lock);
		delayed_wake(ms);
	}

	/*
	 * Increment the pending counts for any regions that will
	 * be written to (writes to recover regions are going to
	 * be delayed).
	 */
	dm_rh_inc_pending(ms->rh, &sync);
	dm_rh_inc_pending(ms->rh, &nosync);

	/*
	 * If the flush fails on a previous call and succeeds here,
	 * we must not reset the log_failure variable. We need
	 * userspace interaction to do that.
	 */
	ms->log_failure = dm_rh_flush(ms->rh) ? 1 : ms->log_failure;

	/*
	 * Dispatch io.
	 */
	if (unlikely(ms->log_failure) && errors_handled(ms)) {
		spin_lock_irq(&ms->lock);
		bio_list_merge(&ms->failures, &sync);
		spin_unlock_irq(&ms->lock);
		wakeup_mirrord(ms);
	} else
		while ((bio = bio_list_pop(&sync)))
			do_write(ms, bio);

	while ((bio = bio_list_pop(&recover)))
		dm_rh_delay(ms->rh, bio);

	while ((bio = bio_list_pop(&nosync))) {
		if (unlikely(ms->leg_failure) && errors_handled(ms) && !keep_log(ms)) {
			spin_lock_irq(&ms->lock);
			bio_list_add(&ms->failures, bio);
			spin_unlock_irq(&ms->lock);
			wakeup_mirrord(ms);
		} else {
			map_bio(get_default_mirror(ms), bio);
			submit_bio_noacct(bio);
		}
	}
}

static void do_failures(struct mirror_set *ms, struct bio_list *failures)
{
	struct bio *bio;

	if (likely(!failures->head))
		return;

	/*
	 * If the log has failed, unattempted writes are being
	 * put on the holds list.  We can't issue those writes
	 * until a log has been marked, so we must store them.
	 *
	 * If a 'noflush' suspend is in progress, we can requeue
	 * the I/Os to the core.  This gives userspace a chance
	 * to reconfigure the mirror, at which point the core
	 * will reissue the writes.  If the 'noflush' flag is
	 * not set, we have no choice but to return errors.
	 *
	 * Some writes on the failures list may have been
	 * submitted before the log failure and represent a
	 * failure to write to one of the devices.  It is ok
	 * for us to treat them the same and requeue them
	 * as well.
	 */
	while ((bio = bio_list_pop(failures))) {
		if (!ms->log_failure) {
			ms->in_sync = 0;
			dm_rh_mark_nosync(ms->rh, bio);
		}

		/*
		 * If all the legs are dead, fail the I/O.
		 * If the device has failed and keep_log is enabled,
		 * fail the I/O.
		 *
		 * If we have been told to handle errors, and keep_log
		 * isn't enabled, hold the bio and wait for userspace to
		 * deal with the problem.
		 *
		 * Otherwise pretend that the I/O succeeded. (This would
		 * be wrong if the failed leg returned after reboot and
		 * got replicated back to the good legs.)
		 */
		if (unlikely(!get_valid_mirror(ms) || (keep_log(ms) && ms->log_failure)))
			bio_io_error(bio);
		else if (errors_handled(ms) && !keep_log(ms))
			hold_bio(ms, bio);
		else
			bio_endio(bio);
	}
}

static void trigger_event(struct work_struct *work)
{
	struct mirror_set *ms =
		container_of(work, struct mirror_set, trigger_event);

	dm_table_event(ms->ti->table);
}

/*
 *---------------------------------------------------------------
 * kmirrord
 *---------------------------------------------------------------
 */
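/*
 * Work function for kmirrord: snapshot the read/write/failure lists
 * under the lock, let the region hash flush its state, then process
 * the lists in dependency order.
 */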
static void do_mirror(struct work_struct *work)
{
	struct mirror_set *ms = container_of(work, struct mirror_set,
					     kmirrord_work);
	struct bio_list reads, writes, failures;
	unsigned long flags;

	spin_lock_irqsave(&ms->lock, flags);
	reads = ms->reads;
	writes = ms->writes;
	failures = ms->failures;
	bio_list_init(&ms->reads);
	bio_list_init(&ms->writes);
	bio_list_init(&ms->failures);
	spin_unlock_irqrestore(&ms->lock, flags);

	dm_rh_update_states(ms->rh, errors_handled(ms));
	do_recovery(ms);
	do_reads(ms, &reads);
	do_writes(ms, &writes);
	do_failures(ms, &failures);
}

/*
 *---------------------------------------------------------------
 * Target functions
 *---------------------------------------------------------------
 */
static struct mirror_set *alloc_context(unsigned int nr_mirrors,
					uint32_t region_size,
					struct dm_target *ti,
					struct dm_dirty_log *dl)
{
	struct mirror_set *ms =
		kzalloc(struct_size(ms, mirror, nr_mirrors), GFP_KERNEL);

	if (!ms) {
		ti->error = "Cannot allocate mirror context";
		return NULL;
	}

	spin_lock_init(&ms->lock);
	bio_list_init(&ms->reads);
	bio_list_init(&ms->writes);
	bio_list_init(&ms->failures);
	bio_list_init(&ms->holds);

	ms->ti = ti;
	ms->nr_mirrors = nr_mirrors;
	ms->nr_regions = dm_sector_div_up(ti->len, region_size);
	ms->in_sync = 0;
	ms->log_failure = 0;
	ms->leg_failure = 0;
	atomic_set(&ms->suspend, 0);
	atomic_set(&ms->default_mirror, DEFAULT_MIRROR);

	ms->io_client = dm_io_client_create();
	if (IS_ERR(ms->io_client)) {
		ti->error = "Error creating dm_io client";
		kfree(ms);
		return NULL;
	}

	ms->rh = dm_region_hash_create(ms, dispatch_bios, wakeup_mirrord,
				       wakeup_all_recovery_waiters,
				       ms->ti->begin, MAX_RECOVERY,
				       dl, region_size, ms->nr_regions);
	if (IS_ERR(ms->rh)) {
		ti->error = "Error creating dirty region hash";
		dm_io_client_destroy(ms->io_client);
		kfree(ms);
		return NULL;
	}

	return ms;
}

static void free_context(struct mirror_set *ms, struct dm_target *ti,
			 unsigned int m)
{
	while (m--)
		dm_put_device(ti, ms->mirror[m].dev);

	dm_io_client_destroy(ms->io_client);
	dm_region_hash_destroy(ms->rh);
	kfree(ms);
}

static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
		      unsigned int mirror, char **argv)
{
	unsigned long long offset;
	char dummy;
	int ret;

	if (sscanf(argv[1], "%llu%c", &offset, &dummy) != 1 ||
	    offset != (sector_t)offset) {
		ti->error = "Invalid offset";
		return -EINVAL;
	}

	ret = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
			    &ms->mirror[mirror].dev);
	if (ret) {
		ti->error = "Device lookup failure";
		return ret;
	}

	ms->mirror[mirror].ms = ms;
	atomic_set(&(ms->mirror[mirror].error_count), 0);
	ms->mirror[mirror].error_type = 0;
	ms->mirror[mirror].offset = offset;

	return 0;
}

/*
 * Create dirty log: log_type #log_params <log_params>
 */
static struct dm_dirty_log *create_dirty_log(struct dm_target *ti,
					     unsigned int argc, char **argv,
					     unsigned int *args_used)
{
	unsigned int param_count;
	struct dm_dirty_log *dl;
	char dummy;

	if (argc < 2) {
		ti->error = "Insufficient mirror log arguments";
		return NULL;
	}

	if (sscanf(argv[1], "%u%c", &param_count, &dummy) != 1) {
		ti->error = "Invalid mirror log argument count";
		return NULL;
	}

	*args_used = 2 + param_count;

	if (argc < *args_used) {
		ti->error = "Insufficient mirror log arguments";
		return NULL;
	}

	dl = dm_dirty_log_create(argv[0], ti, mirror_flush, param_count,
				 argv + 2);
	if (!dl) {
		ti->error = "Error creating mirror dirty log";
		return NULL;
	}

	return dl;
}

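/*
 * Parse the optional trailing "#features <feature>+" arguments; only
 * "handle_errors" and "keep_log" are recognised, and keep_log is
 * valid only together with handle_errors.
 */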
static int parse_features(struct mirror_set *ms, unsigned int argc, char **argv,
			  unsigned int *args_used)
{
	unsigned int num_features;
	struct dm_target *ti = ms->ti;
	char dummy;
	int i;

	*args_used = 0;

	if (!argc)
		return 0;

	if (sscanf(argv[0], "%u%c", &num_features, &dummy) != 1) {
		ti->error = "Invalid number of features";
		return -EINVAL;
	}

	argc--;
	argv++;
	(*args_used)++;

	if (num_features > argc) {
		ti->error = "Not enough arguments to support feature count";
		return -EINVAL;
	}

	for (i = 0; i < num_features; i++) {
		if (!strcmp("handle_errors", argv[0]))
			ms->features |= DM_RAID1_HANDLE_ERRORS;
		else if (!strcmp("keep_log", argv[0]))
			ms->features |= DM_RAID1_KEEP_LOG;
		else {
			ti->error = "Unrecognised feature requested";
			return -EINVAL;
		}

		argc--;
		argv++;
		(*args_used)++;
	}
	if (!errors_handled(ms) && keep_log(ms)) {
		ti->error = "keep_log feature requires the handle_errors feature";
		return -EINVAL;
	}

	return 0;
}

/*
 * Construct a mirror mapping:
 *
 * log_type #log_params <log_params>
 * #mirrors [mirror_path offset]{2,}
 * [#features <features>]
 *
 * log_type is "core" or "disk"
 * #log_params is between 1 and 3
 *
 * If present, supported features are "handle_errors" and "keep_log".
 */
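/*
 * For illustration only (device names and sizes here are hypothetical,
 * not part of the original documentation): a two-leg mirror with a
 * core log using a 1024-sector region size and handle_errors could be
 * built from a dmsetup table line like:
 *
 *   0 2097152 mirror core 2 1024 nosync 2 /dev/sda 0 /dev/sdb 0 1 handle_errors
 */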
static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	int r;
	unsigned int nr_mirrors, m, args_used;
	struct mirror_set *ms;
	struct dm_dirty_log *dl;
	char dummy;

	dl = create_dirty_log(ti, argc, argv, &args_used);
	if (!dl)
		return -EINVAL;

	argv += args_used;
	argc -= args_used;

	if (!argc || sscanf(argv[0], "%u%c", &nr_mirrors, &dummy) != 1 ||
	    nr_mirrors < 2 || nr_mirrors > MAX_NR_MIRRORS) {
		ti->error = "Invalid number of mirrors";
		dm_dirty_log_destroy(dl);
		return -EINVAL;
	}

	argv++, argc--;

	if (argc < nr_mirrors * 2) {
		ti->error = "Too few mirror arguments";
		dm_dirty_log_destroy(dl);
		return -EINVAL;
	}

	ms = alloc_context(nr_mirrors, dl->type->get_region_size(dl), ti, dl);
	if (!ms) {
		dm_dirty_log_destroy(dl);
		return -ENOMEM;
	}

	/* Get the mirror parameter sets */
	for (m = 0; m < nr_mirrors; m++) {
		r = get_mirror(ms, ti, m, argv);
		if (r) {
			free_context(ms, ti, m);
			return r;
		}
		argv += 2;
		argc -= 2;
	}

	ti->private = ms;

	r = dm_set_target_max_io_len(ti, dm_rh_get_region_size(ms->rh));
	if (r)
		goto err_free_context;

	ti->num_flush_bios = 1;
	ti->num_discard_bios = 1;
	ti->per_io_data_size = sizeof(struct dm_raid1_bio_record);

	ms->kmirrord_wq = alloc_workqueue("kmirrord", WQ_MEM_RECLAIM, 0);
	if (!ms->kmirrord_wq) {
		DMERR("couldn't start kmirrord");
		r = -ENOMEM;
		goto err_free_context;
	}
	INIT_WORK(&ms->kmirrord_work, do_mirror);
	timer_setup(&ms->timer, delayed_wake_fn, 0);
	ms->timer_pending = 0;
	INIT_WORK(&ms->trigger_event, trigger_event);

	r = parse_features(ms, argc, argv, &args_used);
	if (r)
		goto err_destroy_wq;

	argv += args_used;
	argc -= args_used;

	/*
	 * Any read-balancing addition depends on the
	 * DM_RAID1_HANDLE_ERRORS flag being present.
	 * This is because the decision to balance depends
	 * on the sync state of a region.  If the above
	 * flag is not present, we ignore errors; and
	 * the sync state may be inaccurate.
	 */

	if (argc) {
		ti->error = "Too many mirror arguments";
		r = -EINVAL;
		goto err_destroy_wq;
	}

	ms->kcopyd_client = dm_kcopyd_client_create(&dm_kcopyd_throttle);
	if (IS_ERR(ms->kcopyd_client)) {
		r = PTR_ERR(ms->kcopyd_client);
		goto err_destroy_wq;
	}

	wakeup_mirrord(ms);
	return 0;

err_destroy_wq:
	destroy_workqueue(ms->kmirrord_wq);
err_free_context:
	free_context(ms, ti, ms->nr_mirrors);
	return r;
}

static void mirror_dtr(struct dm_target *ti)
{
	struct mirror_set *ms = ti->private;

	del_timer_sync(&ms->timer);
	flush_workqueue(ms->kmirrord_wq);
	flush_work(&ms->trigger_event);
	dm_kcopyd_client_destroy(ms->kcopyd_client);
	destroy_workqueue(ms->kmirrord_wq);
	free_context(ms, ti, ms->nr_mirrors);
}

/*
 * Mirror mapping function
 */
static int mirror_map(struct dm_target *ti, struct bio *bio)
{
	int r, rw = bio_data_dir(bio);
	struct mirror *m;
	struct mirror_set *ms = ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
	struct dm_raid1_bio_record *bio_record =
		dm_per_bio_data(bio, sizeof(struct dm_raid1_bio_record));

	bio_record->details.bi_bdev = NULL;

	if (rw == WRITE) {
		/* Save region for mirror_end_io() handler */
		bio_record->write_region = dm_rh_bio_to_region(ms->rh, bio);
		queue_bio(ms, bio, rw);
		return DM_MAPIO_SUBMITTED;
	}

	r = log->type->in_sync(log, dm_rh_bio_to_region(ms->rh, bio), 0);
	if (r < 0 && r != -EWOULDBLOCK)
		return DM_MAPIO_KILL;

	/*
	 * If the region is not in-sync, queue the bio.
	 */
Jonathan Brassow06386bb2008-02-08 02:11:37 +00001221 if (!r || (r == -EWOULDBLOCK)) {
Jens Axboe1eff9d32016-08-05 15:35:16 -06001222 if (bio->bi_opf & REQ_RAHEAD)
Christoph Hellwig846785e2017-06-03 09:38:02 +02001223 return DM_MAPIO_KILL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001224
Linus Torvalds1da177e2005-04-16 15:20:36 -07001225 queue_bio(ms, bio, rw);
Kiyoshi Uedad2a7ad22006-12-08 02:41:06 -08001226 return DM_MAPIO_SUBMITTED;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001227 }
1228
	/*
	 * The region is in-sync and we can perform reads directly.
	 * Store enough information so we can retry if it fails.
	 */
	m = choose_mirror(ms, bio->bi_iter.bi_sector);
	if (unlikely(!m))
		return DM_MAPIO_KILL;

	dm_bio_record(&bio_record->details, bio);
	bio_record->m = m;

	map_bio(m, bio);

	return DM_MAPIO_REMAPPED;
}

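/*
 * For writes, drop the region's pending count recorded in mirror_map().
 * For failed reads, the dm_bio_details saved at map time let us restore
 * the bio and requeue it so another in-sync leg can service it.
 */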
static int mirror_end_io(struct dm_target *ti, struct bio *bio,
			 blk_status_t *error)
{
	int rw = bio_data_dir(bio);
	struct mirror_set *ms = ti->private;
	struct mirror *m = NULL;
	struct dm_bio_details *bd = NULL;
	struct dm_raid1_bio_record *bio_record =
		dm_per_bio_data(bio, sizeof(struct dm_raid1_bio_record));

	/*
	 * Decrement the region's pending count if this was a write.
	 */
	if (rw == WRITE) {
		if (!(bio->bi_opf & REQ_PREFLUSH) &&
		    bio_op(bio) != REQ_OP_DISCARD)
			dm_rh_dec(ms->rh, bio_record->write_region);
		return DM_ENDIO_DONE;
	}

	if (*error == BLK_STS_NOTSUPP)
		goto out;

	if (bio->bi_opf & REQ_RAHEAD)
		goto out;

	if (unlikely(*error)) {
		if (!bio_record->details.bi_bdev) {
			/*
			 * There wasn't enough memory to record necessary
			 * information for a retry or there was no other
			 * mirror in-sync.
			 */
			DMERR_LIMIT("Mirror read failed.");
			return DM_ENDIO_DONE;
		}

		m = bio_record->m;

		DMERR("Mirror read failed from %s. Trying alternative device.",
		      m->dev->name);

		fail_mirror(m, DM_RAID1_READ_ERROR);

		/*
		 * A failed read is requeued for another attempt using an
		 * intact mirror.
		 */
		if (default_ok(m) || mirror_available(ms, bio)) {
			bd = &bio_record->details;

			dm_bio_restore(bd, bio);
			bio_record->details.bi_bdev = NULL;
			bio->bi_status = 0;

			queue_bio(ms, bio, rw);
			return DM_ENDIO_INCOMPLETE;
		}
		DMERR("All replicated volumes dead, failing I/O");
	}

out:
	bio_record->details.bi_bdev = NULL;

	return DM_ENDIO_DONE;
}

static void mirror_presuspend(struct dm_target *ti)
{
	struct mirror_set *ms = ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);

	struct bio_list holds;
	struct bio *bio;

	atomic_set(&ms->suspend, 1);

	/*
	 * Process the bios in the hold list so that any recovery waiting
	 * on them can proceed. Afterwards no bio can be added to the hold
	 * list, because ms->suspend is set.
	 */
	spin_lock_irq(&ms->lock);
	holds = ms->holds;
	bio_list_init(&ms->holds);
	spin_unlock_irq(&ms->lock);

	while ((bio = bio_list_pop(&holds)))
		hold_bio(ms, bio);

	/*
	 * We must finish up all the work that we've
	 * generated (i.e. recovery work).
	 */
	dm_rh_stop_recovery(ms->rh);

	wait_event(_kmirrord_recovery_stopped,
		   !dm_rh_recovery_in_flight(ms->rh));

	if (log->type->presuspend && log->type->presuspend(log))
		/* FIXME: need better error handling */
		DMWARN("log presuspend failed");

	/*
	 * Now that recovery is complete/stopped and the
	 * delayed bios are queued, we need to wait for
	 * the worker thread to complete. This way,
	 * we know that all of our I/O has been pushed.
	 */
	flush_workqueue(ms->kmirrord_wq);
}

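/*
 * Suspend/resume contract: presuspend above stops recovery and flushes
 * queued I/O, postsuspend quiesces the dirty log, and resume restarts
 * the log and region recovery.
 */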
static void mirror_postsuspend(struct dm_target *ti)
{
	struct mirror_set *ms = ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);

	if (log->type->postsuspend && log->type->postsuspend(log))
		/* FIXME: need better error handling */
		DMWARN("log postsuspend failed");
}

static void mirror_resume(struct dm_target *ti)
{
	struct mirror_set *ms = ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);

	atomic_set(&ms->suspend, 0);
	if (log->type->resume && log->type->resume(log))
		/* FIXME: need better error handling */
		DMWARN("log resume failed");
	dm_rh_start_recovery(ms->rh);
}

/*
 * device_status_char
 * @m: mirror device/leg we want the status of
 *
 * We return one character representing the most severe error
 * we have encountered.
 * A => Alive - No failures
 * F => Flush - A flush failure occurred, mirror out-of-sync
 * D => Dead - A write failure occurred leaving mirror out-of-sync
 * S => Sync - A synchronization failure occurred, mirror out-of-sync
 * R => Read - A read failure occurred, mirror data unaffected
 * U => Unclassified - an error was recorded but its type is unknown
 *
 * Returns: <char>
 */
static char device_status_char(struct mirror *m)
{
	if (!atomic_read(&(m->error_count)))
		return 'A';

	return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
		(test_bit(DM_RAID1_WRITE_ERROR, &(m->error_type))) ? 'D' :
		(test_bit(DM_RAID1_SYNC_ERROR, &(m->error_type))) ? 'S' :
		(test_bit(DM_RAID1_READ_ERROR, &(m->error_type))) ? 'R' : 'U';
}

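/*
 * Example output (illustrative values; log status/table args elided):
 *
 *   STATUSTYPE_INFO:  "2 253:3 253:4 500/500 1 AA <log status>"
 *                     #mirrors, the legs, regions in-sync/total, one
 *                     health char per leg, then the dirty log's status.
 *
 *   STATUSTYPE_TABLE: "<log table args> 2 253:3 0 253:4 0 1 handle_errors"
 *                     the log's table args, #mirrors, <dev offset> pairs,
 *                     then optional feature arguments.
 */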
static void mirror_status(struct dm_target *ti, status_type_t type,
			  unsigned int status_flags, char *result, unsigned int maxlen)
{
	unsigned int m, sz = 0;
	int num_feature_args = 0;
	struct mirror_set *ms = ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
	char buffer[MAX_NR_MIRRORS + 1];

	switch (type) {
	case STATUSTYPE_INFO:
		DMEMIT("%d ", ms->nr_mirrors);
		for (m = 0; m < ms->nr_mirrors; m++) {
			DMEMIT("%s ", ms->mirror[m].dev->name);
			buffer[m] = device_status_char(&(ms->mirror[m]));
		}
		buffer[m] = '\0';

		DMEMIT("%llu/%llu 1 %s ",
		       (unsigned long long)log->type->get_sync_count(log),
		       (unsigned long long)ms->nr_regions, buffer);

		sz += log->type->status(log, type, result+sz, maxlen-sz);

		break;

	case STATUSTYPE_TABLE:
		sz = log->type->status(log, type, result, maxlen);

		DMEMIT("%d", ms->nr_mirrors);
		for (m = 0; m < ms->nr_mirrors; m++)
			DMEMIT(" %s %llu", ms->mirror[m].dev->name,
			       (unsigned long long)ms->mirror[m].offset);

		num_feature_args += !!errors_handled(ms);
		num_feature_args += !!keep_log(ms);
		if (num_feature_args) {
			DMEMIT(" %d", num_feature_args);
			if (errors_handled(ms))
				DMEMIT(" handle_errors");
			if (keep_log(ms))
				DMEMIT(" keep_log");
		}

		break;

	case STATUSTYPE_IMA:
		DMEMIT_TARGET_NAME_VERSION(ti->type);
		DMEMIT(",nr_mirrors=%d", ms->nr_mirrors);
		for (m = 0; m < ms->nr_mirrors; m++) {
			DMEMIT(",mirror_device_%d=%s", m, ms->mirror[m].dev->name);
			DMEMIT(",mirror_device_%d_status=%c",
			       m, device_status_char(&(ms->mirror[m])));
		}

		DMEMIT(",handle_errors=%c", errors_handled(ms) ? 'y' : 'n');
		DMEMIT(",keep_log=%c", keep_log(ms) ? 'y' : 'n');

		DMEMIT(",log_type_status=");
		sz += log->type->status(log, type, result+sz, maxlen-sz);
		DMEMIT(";");
		break;
	}
}

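/*
 * Called by dm core to visit each underlying device, e.g. when stacking
 * queue limits across all mirror legs.
 */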
static int mirror_iterate_devices(struct dm_target *ti,
				  iterate_devices_callout_fn fn, void *data)
{
	struct mirror_set *ms = ti->private;
	int ret = 0;
	unsigned int i;

	for (i = 0; !ret && i < ms->nr_mirrors; i++)
		ret = fn(ti, ms->mirror[i].dev,
			 ms->mirror[i].offset, ti->len, data);

	return ret;
}

static struct target_type mirror_target = {
	.name	 = "mirror",
	.version = {1, 14, 0},
	.module	 = THIS_MODULE,
	.ctr	 = mirror_ctr,
	.dtr	 = mirror_dtr,
	.map	 = mirror_map,
	.end_io	 = mirror_end_io,
	.presuspend = mirror_presuspend,
	.postsuspend = mirror_postsuspend,
	.resume	 = mirror_resume,
	.status	 = mirror_status,
	.iterate_devices = mirror_iterate_devices,
};

static int __init dm_mirror_init(void)
{
	int r;

	dm_raid1_wq = alloc_workqueue("dm_raid1_wq", 0, 0);
	if (!dm_raid1_wq) {
		DMERR("Failed to alloc workqueue");
		return -ENOMEM;
	}

	r = dm_register_target(&mirror_target);
	if (r < 0) {
		destroy_workqueue(dm_raid1_wq);
		return r;
	}

	return 0;
}

static void __exit dm_mirror_exit(void)
{
	destroy_workqueue(dm_raid1_wq);
	dm_unregister_target(&mirror_target);
}

/* Module hooks */
module_init(dm_mirror_init);
module_exit(dm_mirror_exit);

MODULE_DESCRIPTION(DM_NAME " mirror target");
MODULE_AUTHOR("Joe Thornber");
MODULE_LICENSE("GPL");