/*
 * Copyright (C) 2003 Sistina Software Limited.
 * Copyright (C) 2005-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-bio-record.h"

#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/dm-dirty-log.h>
#include <linux/dm-kcopyd.h>
#include <linux/dm-region-hash.h>

#define DM_MSG_PREFIX "raid1"

#define MAX_RECOVERY 1	/* Maximum number of regions recovered in parallel. */

#define MAX_NR_MIRRORS	(DM_KCOPYD_MAX_REGIONS + 1)

#define DM_RAID1_HANDLE_ERRORS	0x01
#define DM_RAID1_KEEP_LOG	0x02
#define errors_handled(p)	((p)->features & DM_RAID1_HANDLE_ERRORS)
#define keep_log(p)		((p)->features & DM_RAID1_KEEP_LOG)

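/*
 * Recovery waiters sleep on this queue; the region hash wakes them up
 * through the wakeup_all_recovery_waiters() callback below once region
 * recovery quiesces.
 */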
static DECLARE_WAIT_QUEUE_HEAD(_kmirrord_recovery_stopped);

/*-----------------------------------------------------------------
 * Mirror set structures.
 *---------------------------------------------------------------*/
enum dm_raid1_error {
	DM_RAID1_WRITE_ERROR,
	DM_RAID1_FLUSH_ERROR,
	DM_RAID1_SYNC_ERROR,
	DM_RAID1_READ_ERROR
};

struct mirror {
	struct mirror_set *ms;
	atomic_t error_count;
	unsigned long error_type;
	struct dm_dev *dev;
	sector_t offset;
};

struct mirror_set {
	struct dm_target *ti;
	struct list_head list;

	uint64_t features;

	spinlock_t lock;	/* protects the lists */
	struct bio_list reads;
	struct bio_list writes;
	struct bio_list failures;
	struct bio_list holds;	/* bios are waiting until suspend */

	struct dm_region_hash *rh;
	struct dm_kcopyd_client *kcopyd_client;
	struct dm_io_client *io_client;

	/* recovery */
	region_t nr_regions;
	int in_sync;
	int log_failure;
	int leg_failure;
	atomic_t suspend;

	atomic_t default_mirror;	/* Default mirror */

	struct workqueue_struct *kmirrord_wq;
	struct work_struct kmirrord_work;
	struct timer_list timer;
	unsigned long timer_pending;

	struct work_struct trigger_event;

	unsigned nr_mirrors;
	struct mirror mirror[];
};

DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(raid1_resync_throttle,
		"A percentage of time allocated for raid resynchronization");

static void wakeup_mirrord(void *context)
{
	struct mirror_set *ms = context;

	queue_work(ms->kmirrord_wq, &ms->kmirrord_work);
}

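/*
 * Debounced wakeup: delayed_wake() arms a one-shot timer so that a burst
 * of requeued bios results in a single kmirrord run roughly HZ/5 later.
 * The timer_pending bit ensures at most one timer is armed at a time.
 */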
static void delayed_wake_fn(struct timer_list *t)
{
	struct mirror_set *ms = from_timer(ms, t, timer);

	clear_bit(0, &ms->timer_pending);
	wakeup_mirrord(ms);
}

static void delayed_wake(struct mirror_set *ms)
{
	if (test_and_set_bit(0, &ms->timer_pending))
		return;

	ms->timer.expires = jiffies + HZ / 5;
	add_timer(&ms->timer);
}

static void wakeup_all_recovery_waiters(void *context)
{
	wake_up_all(&_kmirrord_recovery_stopped);
}

static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw)
{
	unsigned long flags;
	int should_wake = 0;
	struct bio_list *bl;

	bl = (rw == WRITE) ? &ms->writes : &ms->reads;
	spin_lock_irqsave(&ms->lock, flags);
	should_wake = !(bl->head);
	bio_list_add(bl, bio);
	spin_unlock_irqrestore(&ms->lock, flags);

	if (should_wake)
		wakeup_mirrord(ms);
}

static void dispatch_bios(void *context, struct bio_list *bio_list)
{
	struct mirror_set *ms = context;
	struct bio *bio;

	while ((bio = bio_list_pop(bio_list)))
		queue_bio(ms, bio, WRITE);
}

struct dm_raid1_bio_record {
	struct mirror *m;
	/* if details->bi_bdev == NULL, details were not saved */
	struct dm_bio_details details;
	region_t write_region;
};

/*
 * Every mirror should look like this one.
 */
#define DEFAULT_MIRROR 0

/*
 * This is yucky.  We squirrel the mirror struct away inside
 * bi_next for read/write buffers.  This is safe since the bio
 * doesn't get submitted to the lower levels of the block layer.
 */
static struct mirror *bio_get_m(struct bio *bio)
{
	return (struct mirror *) bio->bi_next;
}

static void bio_set_m(struct bio *bio, struct mirror *m)
{
	bio->bi_next = (struct bio *) m;
}

static struct mirror *get_default_mirror(struct mirror_set *ms)
{
	return &ms->mirror[atomic_read(&ms->default_mirror)];
}

static void set_default_mirror(struct mirror *m)
{
	struct mirror_set *ms = m->ms;
	struct mirror *m0 = &(ms->mirror[0]);

	atomic_set(&ms->default_mirror, m - m0);
}

static struct mirror *get_valid_mirror(struct mirror_set *ms)
{
	struct mirror *m;

	for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
		if (!atomic_read(&m->error_count))
			return m;

	return NULL;
}

/*
 * fail_mirror
 * @m: mirror device to fail
 * @error_type: one of the dm_raid1_error values (DM_RAID1_*_ERROR)
 *
 * If errors are being handled, record the type of
 * error encountered for this device.  If this type
 * of error has already been recorded, we can return;
 * otherwise, we must signal userspace by triggering
 * an event.  Additionally, if the device is the
 * primary device, we must choose a new primary, but
 * only if the mirror is in-sync.
 *
 * This function must not block.
 */
static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
{
	struct mirror_set *ms = m->ms;
	struct mirror *new;

	ms->leg_failure = 1;

	/*
	 * error_count is used for nothing more than a
	 * simple way to tell if a device has encountered
	 * errors.
	 */
	atomic_inc(&m->error_count);

	if (test_and_set_bit(error_type, &m->error_type))
		return;

	if (!errors_handled(ms))
		return;

	if (m != get_default_mirror(ms))
		goto out;

	if (!ms->in_sync && !keep_log(ms)) {
		/*
		 * Better to issue requests to same failing device
		 * than to risk returning corrupt data.
		 */
		DMERR("Primary mirror (%s) failed while out-of-sync: "
		      "Reads may fail.", m->dev->name);
		goto out;
	}

	new = get_valid_mirror(ms);
	if (new)
		set_default_mirror(new);
	else
		DMWARN("All sides of mirror have failed.");

out:
	schedule_work(&ms->trigger_event);
}

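/*
 * Flush all legs: issue an empty REQ_PREFLUSH to every mirror device
 * through dm_io.  On completion error_bits carries one bit per leg, and
 * any leg whose bit is set is failed with DM_RAID1_FLUSH_ERROR.
 */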
static int mirror_flush(struct dm_target *ti)
{
	struct mirror_set *ms = ti->private;
	unsigned long error_bits;

	unsigned int i;
	struct dm_io_region io[MAX_NR_MIRRORS];
	struct mirror *m;
	struct dm_io_request io_req = {
		.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC,
		.mem.type = DM_IO_KMEM,
		.mem.ptr.addr = NULL,
		.client = ms->io_client,
	};

	for (i = 0, m = ms->mirror; i < ms->nr_mirrors; i++, m++) {
		io[i].bdev = m->dev->bdev;
		io[i].sector = 0;
		io[i].count = 0;
	}

	error_bits = -1;
	dm_io(&io_req, ms->nr_mirrors, io, &error_bits);
	if (unlikely(error_bits != 0)) {
		for (i = 0; i < ms->nr_mirrors; i++)
			if (test_bit(i, &error_bits))
				fail_mirror(ms->mirror + i,
					    DM_RAID1_FLUSH_ERROR);
		return -EIO;
	}

	return 0;
}

/*-----------------------------------------------------------------
 * Recovery.
 *
 * When a mirror is first activated we may find that some regions
 * are in the no-sync state.  We have to recover these by
 * recopying from the default mirror to all the others.
 *---------------------------------------------------------------*/
static void recovery_complete(int read_err, unsigned long write_err,
			      void *context)
{
	struct dm_region *reg = context;
	struct mirror_set *ms = dm_rh_region_context(reg);
	int m, bit = 0;

	if (read_err) {
		/* Read error means the failure of default mirror. */
		DMERR_LIMIT("Unable to read primary mirror during recovery");
		fail_mirror(get_default_mirror(ms), DM_RAID1_SYNC_ERROR);
	}

	if (write_err) {
		DMERR_LIMIT("Write error during recovery (error = 0x%lx)",
			    write_err);
		/*
		 * Bits correspond to devices (excluding default mirror).
		 * The default mirror cannot change during recovery.
		 */
		for (m = 0; m < ms->nr_mirrors; m++) {
			if (&ms->mirror[m] == get_default_mirror(ms))
				continue;
			if (test_bit(bit, &write_err))
				fail_mirror(ms->mirror + m,
					    DM_RAID1_SYNC_ERROR);
			bit++;
		}
	}

	dm_rh_recovery_end(reg, !(read_err || write_err));
}

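/*
 * Copy a single region from the default mirror to every other leg with
 * one kcopyd request; recovery_complete() runs when the copy finishes.
 */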
static void recover(struct mirror_set *ms, struct dm_region *reg)
{
	unsigned i;
	struct dm_io_region from, to[DM_KCOPYD_MAX_REGIONS], *dest;
	struct mirror *m;
	unsigned long flags = 0;
	region_t key = dm_rh_get_region_key(reg);
	sector_t region_size = dm_rh_get_region_size(ms->rh);

	/* fill in the source */
	m = get_default_mirror(ms);
	from.bdev = m->dev->bdev;
	from.sector = m->offset + dm_rh_region_to_sector(ms->rh, key);
	if (key == (ms->nr_regions - 1)) {
		/*
		 * The final region may be smaller than
		 * region_size.
		 */
		from.count = ms->ti->len & (region_size - 1);
		if (!from.count)
			from.count = region_size;
	} else
		from.count = region_size;

	/* fill in the destinations */
	for (i = 0, dest = to; i < ms->nr_mirrors; i++) {
		if (&ms->mirror[i] == get_default_mirror(ms))
			continue;

		m = ms->mirror + i;
		dest->bdev = m->dev->bdev;
		dest->sector = m->offset + dm_rh_region_to_sector(ms->rh, key);
		dest->count = from.count;
		dest++;
	}

	/* hand to kcopyd */
	if (!errors_handled(ms))
		flags |= BIT(DM_KCOPYD_IGNORE_ERROR);

	dm_kcopyd_copy(ms->kcopyd_client, &from, ms->nr_mirrors - 1, to,
		       flags, recovery_complete, reg);
}

static void reset_ms_flags(struct mirror_set *ms)
{
	unsigned int m;

	ms->leg_failure = 0;
	for (m = 0; m < ms->nr_mirrors; m++) {
		atomic_set(&(ms->mirror[m].error_count), 0);
		ms->mirror[m].error_type = 0;
	}
}

static void do_recovery(struct mirror_set *ms)
{
	struct dm_region *reg;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);

	/*
	 * Start quiescing some regions.
	 */
	dm_rh_recovery_prepare(ms->rh);

	/*
	 * Copy any already quiesced regions.
	 */
	while ((reg = dm_rh_recovery_start(ms->rh)))
		recover(ms, reg);

	/*
	 * Update the in sync flag.
	 */
	if (!ms->in_sync &&
	    (log->type->get_sync_count(log) == ms->nr_regions)) {
		/* the sync is complete */
		dm_table_event(ms->ti->table);
		ms->in_sync = 1;
		reset_ms_flags(ms);
	}
}

/*-----------------------------------------------------------------
 * Reads
 *---------------------------------------------------------------*/
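/*
 * Pick a leg to read from: walk backwards (wrapping) from the default
 * mirror and return the first leg with no recorded errors, or NULL if
 * every leg has failed.
 */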
static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
{
	struct mirror *m = get_default_mirror(ms);

	do {
		if (likely(!atomic_read(&m->error_count)))
			return m;

		if (m-- == ms->mirror)
			m += ms->nr_mirrors;
	} while (m != get_default_mirror(ms));

	return NULL;
}

static int default_ok(struct mirror *m)
{
	struct mirror *default_mirror = get_default_mirror(m->ms);

	return !atomic_read(&default_mirror->error_count);
}

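/*
 * A failed read can be retried on another leg only if the region is
 * in-sync and an error-free leg exists.
 */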
static int mirror_available(struct mirror_set *ms, struct bio *bio)
{
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
	region_t region = dm_rh_bio_to_region(ms->rh, bio);

	if (log->type->in_sync(log, region, 0))
		return choose_mirror(ms, bio->bi_iter.bi_sector) ? 1 : 0;

	return 0;
}

/*
 * remap a buffer to a particular mirror.
 */
static sector_t map_sector(struct mirror *m, struct bio *bio)
{
	if (unlikely(!bio->bi_iter.bi_size))
		return 0;
	return m->offset + dm_target_offset(m->ms->ti, bio->bi_iter.bi_sector);
}

static void map_bio(struct mirror *m, struct bio *bio)
{
	bio_set_dev(bio, m->dev->bdev);
	bio->bi_iter.bi_sector = map_sector(m, bio);
}

static void map_region(struct dm_io_region *io, struct mirror *m,
		       struct bio *bio)
{
	io->bdev = m->dev->bdev;
	io->sector = map_sector(m, bio);
	io->count = bio_sectors(bio);
}

static void hold_bio(struct mirror_set *ms, struct bio *bio)
{
	/*
	 * Lock is required to avoid race condition during suspend
	 * process.
	 */
	spin_lock_irq(&ms->lock);

	if (atomic_read(&ms->suspend)) {
		spin_unlock_irq(&ms->lock);

		/*
		 * If device is suspended, complete the bio.
		 */
		if (dm_noflush_suspending(ms->ti))
			bio->bi_status = BLK_STS_DM_REQUEUE;
		else
			bio->bi_status = BLK_STS_IOERR;

		bio_endio(bio);
		return;
	}

	/*
	 * Hold bio until the suspend is complete.
	 */
	bio_list_add(&ms->holds, bio);
	spin_unlock_irq(&ms->lock);
}

/*-----------------------------------------------------------------
 * Reads
 *---------------------------------------------------------------*/
static void read_callback(unsigned long error, void *context)
{
	struct bio *bio = context;
	struct mirror *m;

	m = bio_get_m(bio);
	bio_set_m(bio, NULL);

	if (likely(!error)) {
		bio_endio(bio);
		return;
	}

	fail_mirror(m, DM_RAID1_READ_ERROR);

	if (likely(default_ok(m)) || mirror_available(m->ms, bio)) {
		DMWARN_LIMIT("Read failure on mirror device %s.  "
			     "Trying alternative device.",
			     m->dev->name);
		queue_bio(m->ms, bio, bio_data_dir(bio));
		return;
	}

	DMERR_LIMIT("Read failure on mirror device %s.  Failing I/O.",
		    m->dev->name);
	bio_io_error(bio);
}

/* Asynchronous read. */
static void read_async_bio(struct mirror *m, struct bio *bio)
{
	struct dm_io_region io;
	struct dm_io_request io_req = {
		.bi_opf = REQ_OP_READ,
		.mem.type = DM_IO_BIO,
		.mem.ptr.bio = bio,
		.notify.fn = read_callback,
		.notify.context = bio,
		.client = m->ms->io_client,
	};

	map_region(&io, m, bio);
	bio_set_m(bio, m);
	BUG_ON(dm_io(&io_req, 1, &io, NULL));
}

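/*
 * Only DM_RH_CLEAN and DM_RH_DIRTY regions are in-sync and therefore
 * safe to read from any leg; NOSYNC and RECOVERING regions are not.
 */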
static inline int region_in_sync(struct mirror_set *ms, region_t region,
				 int may_block)
{
	int state = dm_rh_get_state(ms->rh, region, may_block);
	return state == DM_RH_CLEAN || state == DM_RH_DIRTY;
}

static void do_reads(struct mirror_set *ms, struct bio_list *reads)
{
	region_t region;
	struct bio *bio;
	struct mirror *m;

	while ((bio = bio_list_pop(reads))) {
		region = dm_rh_bio_to_region(ms->rh, bio);
		m = get_default_mirror(ms);

		/*
		 * We can only read balance if the region is in sync.
		 */
		if (likely(region_in_sync(ms, region, 1)))
			m = choose_mirror(ms, bio->bi_iter.bi_sector);
		else if (m && atomic_read(&m->error_count))
			m = NULL;

		if (likely(m))
			read_async_bio(m, bio);
		else
			bio_io_error(bio);
	}
}

/*-----------------------------------------------------------------
 * Writes.
 *
 * We do different things with the write io depending on the
 * state of the region that it's in:
 *
 * SYNC:	increment pending, use kcopyd to write to *all* mirrors
 * RECOVERING:	delay the io until recovery completes
 * NOSYNC:	increment pending, just write to the default mirror
 *---------------------------------------------------------------*/
static void write_callback(unsigned long error, void *context)
{
	unsigned i;
	struct bio *bio = (struct bio *) context;
	struct mirror_set *ms;
	int should_wake = 0;
	unsigned long flags;

	ms = bio_get_m(bio)->ms;
	bio_set_m(bio, NULL);

	/*
	 * NOTE: We don't decrement the pending count here,
	 * instead it is done by the targets endio function.
	 * This way we handle both writes to SYNC and NOSYNC
	 * regions with the same code.
	 */
	if (likely(!error)) {
		bio_endio(bio);
		return;
	}

	/*
	 * If the bio is discard, return an error, but do not
	 * degrade the array.
	 */
	if (bio_op(bio) == REQ_OP_DISCARD) {
		bio->bi_status = BLK_STS_NOTSUPP;
		bio_endio(bio);
		return;
	}

	for (i = 0; i < ms->nr_mirrors; i++)
		if (test_bit(i, &error))
			fail_mirror(ms->mirror + i, DM_RAID1_WRITE_ERROR);

	/*
	 * Need to raise event.  Since raising
	 * events can block, we need to do it in
	 * the main thread.
	 */
	spin_lock_irqsave(&ms->lock, flags);
	if (!ms->failures.head)
		should_wake = 1;
	bio_list_add(&ms->failures, bio);
	spin_unlock_irqrestore(&ms->lock, flags);
	if (should_wake)
		wakeup_mirrord(ms);
}

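/*
 * Mirror a write: build one dm_io request that targets every leg and
 * let write_callback() sort out per-leg failures from the error bits.
 */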
static void do_write(struct mirror_set *ms, struct bio *bio)
{
	unsigned int i;
	struct dm_io_region io[MAX_NR_MIRRORS], *dest = io;
	struct mirror *m;
	blk_opf_t op_flags = bio->bi_opf & (REQ_FUA | REQ_PREFLUSH);
	struct dm_io_request io_req = {
		.bi_opf = REQ_OP_WRITE | op_flags,
		.mem.type = DM_IO_BIO,
		.mem.ptr.bio = bio,
		.notify.fn = write_callback,
		.notify.context = bio,
		.client = ms->io_client,
	};

	if (bio_op(bio) == REQ_OP_DISCARD) {
		io_req.bi_opf = REQ_OP_DISCARD | op_flags;
		io_req.mem.type = DM_IO_KMEM;
		io_req.mem.ptr.addr = NULL;
	}

	for (i = 0, m = ms->mirror; i < ms->nr_mirrors; i++, m++)
		map_region(dest++, m, bio);

	/*
	 * Use default mirror because we only need it to retrieve the reference
	 * to the mirror set in write_callback().
	 */
	bio_set_m(bio, get_default_mirror(ms));

	BUG_ON(dm_io(&io_req, ms->nr_mirrors, io, NULL));
}

static void do_writes(struct mirror_set *ms, struct bio_list *writes)
{
	int state;
	struct bio *bio;
	struct bio_list sync, nosync, recover, *this_list = NULL;
	struct bio_list requeue;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
	region_t region;

	if (!writes->head)
		return;

	/*
	 * Classify each write.
	 */
	bio_list_init(&sync);
	bio_list_init(&nosync);
	bio_list_init(&recover);
	bio_list_init(&requeue);

	while ((bio = bio_list_pop(writes))) {
		if ((bio->bi_opf & REQ_PREFLUSH) ||
		    (bio_op(bio) == REQ_OP_DISCARD)) {
			bio_list_add(&sync, bio);
			continue;
		}

		region = dm_rh_bio_to_region(ms->rh, bio);

		if (log->type->is_remote_recovering &&
		    log->type->is_remote_recovering(log, region)) {
			bio_list_add(&requeue, bio);
			continue;
		}

		state = dm_rh_get_state(ms->rh, region, 1);
		switch (state) {
		case DM_RH_CLEAN:
		case DM_RH_DIRTY:
			this_list = &sync;
			break;

		case DM_RH_NOSYNC:
			this_list = &nosync;
			break;

		case DM_RH_RECOVERING:
			this_list = &recover;
			break;
		}

		bio_list_add(this_list, bio);
	}

	/*
	 * Add bios that are delayed due to remote recovery
	 * back on to the write queue
	 */
	if (unlikely(requeue.head)) {
		spin_lock_irq(&ms->lock);
		bio_list_merge(&ms->writes, &requeue);
		spin_unlock_irq(&ms->lock);
		delayed_wake(ms);
	}

	/*
	 * Increment the pending counts for any regions that will
	 * be written to (writes to recover regions are going to
	 * be delayed).
	 */
	dm_rh_inc_pending(ms->rh, &sync);
	dm_rh_inc_pending(ms->rh, &nosync);

	/*
	 * If the flush fails on a previous call and succeeds here,
	 * we must not reset the log_failure variable.  We need
	 * userspace interaction to do that.
	 */
	ms->log_failure = dm_rh_flush(ms->rh) ? 1 : ms->log_failure;

	/*
	 * Dispatch io.
	 */
	if (unlikely(ms->log_failure) && errors_handled(ms)) {
		spin_lock_irq(&ms->lock);
		bio_list_merge(&ms->failures, &sync);
		spin_unlock_irq(&ms->lock);
		wakeup_mirrord(ms);
	} else
		while ((bio = bio_list_pop(&sync)))
			do_write(ms, bio);

	while ((bio = bio_list_pop(&recover)))
		dm_rh_delay(ms->rh, bio);

	while ((bio = bio_list_pop(&nosync))) {
		if (unlikely(ms->leg_failure) && errors_handled(ms) && !keep_log(ms)) {
			spin_lock_irq(&ms->lock);
			bio_list_add(&ms->failures, bio);
			spin_unlock_irq(&ms->lock);
			wakeup_mirrord(ms);
		} else {
			map_bio(get_default_mirror(ms), bio);
			submit_bio_noacct(bio);
		}
	}
}

static void do_failures(struct mirror_set *ms, struct bio_list *failures)
{
	struct bio *bio;

	if (likely(!failures->head))
		return;

	/*
	 * If the log has failed, unattempted writes are being
	 * put on the holds list.  We can't issue those writes
	 * until a log has been marked, so we must store them.
	 *
	 * If a 'noflush' suspend is in progress, we can requeue
	 * the I/Os to the core.  This gives userspace a chance
	 * to reconfigure the mirror, at which point the core
	 * will reissue the writes.  If the 'noflush' flag is
	 * not set, we have no choice but to return errors.
	 *
	 * Some writes on the failures list may have been
	 * submitted before the log failure and represent a
	 * failure to write to one of the devices.  It is ok
	 * for us to treat them the same and requeue them
	 * as well.
	 */
	while ((bio = bio_list_pop(failures))) {
		if (!ms->log_failure) {
			ms->in_sync = 0;
			dm_rh_mark_nosync(ms->rh, bio);
		}

		/*
		 * If all the legs are dead, fail the I/O.
		 * If the device has failed and keep_log is enabled,
		 * fail the I/O.
		 *
		 * If we have been told to handle errors, and keep_log
		 * isn't enabled, hold the bio and wait for userspace to
		 * deal with the problem.
		 *
		 * Otherwise pretend that the I/O succeeded. (This would
		 * be wrong if the failed leg returned after reboot and
		 * got replicated back to the good legs.)
		 */
		if (unlikely(!get_valid_mirror(ms) || (keep_log(ms) && ms->log_failure)))
			bio_io_error(bio);
		else if (errors_handled(ms) && !keep_log(ms))
			hold_bio(ms, bio);
		else
			bio_endio(bio);
	}
}

static void trigger_event(struct work_struct *work)
{
	struct mirror_set *ms =
		container_of(work, struct mirror_set, trigger_event);

	dm_table_event(ms->ti->table);
}

/*-----------------------------------------------------------------
 * kmirrord
 *---------------------------------------------------------------*/
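/*
 * Work function for kmirrord: snapshot the queued bio lists under the
 * lock, then process region-state updates, recovery, reads, writes and
 * failures in that order.
 */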
static void do_mirror(struct work_struct *work)
{
	struct mirror_set *ms = container_of(work, struct mirror_set,
					     kmirrord_work);
	struct bio_list reads, writes, failures;
	unsigned long flags;

	spin_lock_irqsave(&ms->lock, flags);
	reads = ms->reads;
	writes = ms->writes;
	failures = ms->failures;
	bio_list_init(&ms->reads);
	bio_list_init(&ms->writes);
	bio_list_init(&ms->failures);
	spin_unlock_irqrestore(&ms->lock, flags);

	dm_rh_update_states(ms->rh, errors_handled(ms));
	do_recovery(ms);
	do_reads(ms, &reads);
	do_writes(ms, &writes);
	do_failures(ms, &failures);
}

/*-----------------------------------------------------------------
 * Target functions
 *---------------------------------------------------------------*/
static struct mirror_set *alloc_context(unsigned int nr_mirrors,
					uint32_t region_size,
					struct dm_target *ti,
					struct dm_dirty_log *dl)
{
	struct mirror_set *ms =
		kzalloc(struct_size(ms, mirror, nr_mirrors), GFP_KERNEL);

	if (!ms) {
		ti->error = "Cannot allocate mirror context";
		return NULL;
	}

	spin_lock_init(&ms->lock);
	bio_list_init(&ms->reads);
	bio_list_init(&ms->writes);
	bio_list_init(&ms->failures);
	bio_list_init(&ms->holds);

	ms->ti = ti;
	ms->nr_mirrors = nr_mirrors;
	ms->nr_regions = dm_sector_div_up(ti->len, region_size);
	ms->in_sync = 0;
	ms->log_failure = 0;
	ms->leg_failure = 0;
	atomic_set(&ms->suspend, 0);
	atomic_set(&ms->default_mirror, DEFAULT_MIRROR);

	ms->io_client = dm_io_client_create();
	if (IS_ERR(ms->io_client)) {
		ti->error = "Error creating dm_io client";
		kfree(ms);
		return NULL;
	}

	ms->rh = dm_region_hash_create(ms, dispatch_bios, wakeup_mirrord,
				       wakeup_all_recovery_waiters,
				       ms->ti->begin, MAX_RECOVERY,
				       dl, region_size, ms->nr_regions);
	if (IS_ERR(ms->rh)) {
		ti->error = "Error creating dirty region hash";
		dm_io_client_destroy(ms->io_client);
		kfree(ms);
		return NULL;
	}

	return ms;
}

static void free_context(struct mirror_set *ms, struct dm_target *ti,
			 unsigned int m)
{
	while (m--)
		dm_put_device(ti, ms->mirror[m].dev);

	dm_io_client_destroy(ms->io_client);
	dm_region_hash_destroy(ms->rh);
	kfree(ms);
}

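/*
 * Parse one "<mirror_path> <offset>" pair from the table line and take
 * a reference on the underlying device.
 */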
static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
		      unsigned int mirror, char **argv)
{
	unsigned long long offset;
	char dummy;
	int ret;

	if (sscanf(argv[1], "%llu%c", &offset, &dummy) != 1 ||
	    offset != (sector_t)offset) {
		ti->error = "Invalid offset";
		return -EINVAL;
	}

	ret = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
			    &ms->mirror[mirror].dev);
	if (ret) {
		ti->error = "Device lookup failure";
		return ret;
	}

	ms->mirror[mirror].ms = ms;
	atomic_set(&(ms->mirror[mirror].error_count), 0);
	ms->mirror[mirror].error_type = 0;
	ms->mirror[mirror].offset = offset;

	return 0;
}

/*
 * Create dirty log: log_type #log_params <log_params>
 */
static struct dm_dirty_log *create_dirty_log(struct dm_target *ti,
					     unsigned argc, char **argv,
					     unsigned *args_used)
{
	unsigned param_count;
	struct dm_dirty_log *dl;
	char dummy;

	if (argc < 2) {
		ti->error = "Insufficient mirror log arguments";
		return NULL;
	}

	if (sscanf(argv[1], "%u%c", &param_count, &dummy) != 1) {
		ti->error = "Invalid mirror log argument count";
		return NULL;
	}

	*args_used = 2 + param_count;

	if (argc < *args_used) {
		ti->error = "Insufficient mirror log arguments";
		return NULL;
	}

	dl = dm_dirty_log_create(argv[0], ti, mirror_flush, param_count,
				 argv + 2);
	if (!dl) {
		ti->error = "Error creating mirror dirty log";
		return NULL;
	}

	return dl;
}

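/*
 * Parse the optional feature area: "#features <feature>...", where the
 * recognised features are "handle_errors" and "keep_log".  keep_log is
 * only valid in combination with handle_errors.
 */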
static int parse_features(struct mirror_set *ms, unsigned argc, char **argv,
			  unsigned *args_used)
{
	unsigned num_features;
	struct dm_target *ti = ms->ti;
	char dummy;
	int i;

	*args_used = 0;

	if (!argc)
		return 0;

	if (sscanf(argv[0], "%u%c", &num_features, &dummy) != 1) {
		ti->error = "Invalid number of features";
		return -EINVAL;
	}

	argc--;
	argv++;
	(*args_used)++;

	if (num_features > argc) {
		ti->error = "Not enough arguments to support feature count";
		return -EINVAL;
	}

	for (i = 0; i < num_features; i++) {
		if (!strcmp("handle_errors", argv[0]))
			ms->features |= DM_RAID1_HANDLE_ERRORS;
		else if (!strcmp("keep_log", argv[0]))
			ms->features |= DM_RAID1_KEEP_LOG;
		else {
			ti->error = "Unrecognised feature requested";
			return -EINVAL;
		}

		argc--;
		argv++;
		(*args_used)++;
	}
	if (!errors_handled(ms) && keep_log(ms)) {
		ti->error = "keep_log feature requires the handle_errors feature";
		return -EINVAL;
	}

	return 0;
}

/*
 * Construct a mirror mapping:
 *
 * log_type #log_params <log_params>
 * #mirrors [mirror_path offset]{2,}
 * [#features <features>]
 *
 * log_type is "core" or "disk"
 * #log_params is between 1 and 3
 *
 * If present, supported features are "handle_errors" and "keep_log".
 */
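/*
 * For example, a two-leg mirror with a core log and error handling might
 * use a table line like the following (a sketch only; the device paths,
 * target length and region size are hypothetical):
 *
 *   0 2097152 mirror core 2 131072 nosync 2 /dev/sda1 0 /dev/sdb1 0 \
 *       1 handle_errors
 */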
static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	int r;
	unsigned int nr_mirrors, m, args_used;
	struct mirror_set *ms;
	struct dm_dirty_log *dl;
	char dummy;

	dl = create_dirty_log(ti, argc, argv, &args_used);
	if (!dl)
		return -EINVAL;

	argv += args_used;
	argc -= args_used;

	if (!argc || sscanf(argv[0], "%u%c", &nr_mirrors, &dummy) != 1 ||
	    nr_mirrors < 2 || nr_mirrors > MAX_NR_MIRRORS) {
		ti->error = "Invalid number of mirrors";
		dm_dirty_log_destroy(dl);
		return -EINVAL;
	}

	argv++, argc--;

	if (argc < nr_mirrors * 2) {
		ti->error = "Too few mirror arguments";
		dm_dirty_log_destroy(dl);
		return -EINVAL;
	}

	ms = alloc_context(nr_mirrors, dl->type->get_region_size(dl), ti, dl);
	if (!ms) {
		dm_dirty_log_destroy(dl);
		return -ENOMEM;
	}

	/* Get the mirror parameter sets */
	for (m = 0; m < nr_mirrors; m++) {
		r = get_mirror(ms, ti, m, argv);
		if (r) {
			free_context(ms, ti, m);
			return r;
		}
		argv += 2;
		argc -= 2;
	}

	ti->private = ms;

	r = dm_set_target_max_io_len(ti, dm_rh_get_region_size(ms->rh));
	if (r)
		goto err_free_context;

	ti->num_flush_bios = 1;
	ti->num_discard_bios = 1;
	ti->per_io_data_size = sizeof(struct dm_raid1_bio_record);

	ms->kmirrord_wq = alloc_workqueue("kmirrord", WQ_MEM_RECLAIM, 0);
	if (!ms->kmirrord_wq) {
		DMERR("couldn't start kmirrord");
		r = -ENOMEM;
		goto err_free_context;
	}
	INIT_WORK(&ms->kmirrord_work, do_mirror);
	timer_setup(&ms->timer, delayed_wake_fn, 0);
	ms->timer_pending = 0;
	INIT_WORK(&ms->trigger_event, trigger_event);

	r = parse_features(ms, argc, argv, &args_used);
	if (r)
		goto err_destroy_wq;

	argv += args_used;
	argc -= args_used;

	/*
	 * Any read-balancing addition depends on the
	 * DM_RAID1_HANDLE_ERRORS flag being present.
	 * This is because the decision to balance depends
	 * on the sync state of a region.  If the above
	 * flag is not present, we ignore errors; and
	 * the sync state may be inaccurate.
	 */

	if (argc) {
		ti->error = "Too many mirror arguments";
		r = -EINVAL;
		goto err_destroy_wq;
	}

	ms->kcopyd_client = dm_kcopyd_client_create(&dm_kcopyd_throttle);
	if (IS_ERR(ms->kcopyd_client)) {
		r = PTR_ERR(ms->kcopyd_client);
		goto err_destroy_wq;
	}

	wakeup_mirrord(ms);
	return 0;

err_destroy_wq:
	destroy_workqueue(ms->kmirrord_wq);
err_free_context:
	free_context(ms, ti, ms->nr_mirrors);
	return r;
}

static void mirror_dtr(struct dm_target *ti)
{
	struct mirror_set *ms = (struct mirror_set *) ti->private;

	del_timer_sync(&ms->timer);
	flush_workqueue(ms->kmirrord_wq);
	flush_work(&ms->trigger_event);
	dm_kcopyd_client_destroy(ms->kcopyd_client);
	destroy_workqueue(ms->kmirrord_wq);
	free_context(ms, ti, ms->nr_mirrors);
}

/*
 * Mirror mapping function
 */
static int mirror_map(struct dm_target *ti, struct bio *bio)
{
	int r, rw = bio_data_dir(bio);
	struct mirror *m;
	struct mirror_set *ms = ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
	struct dm_raid1_bio_record *bio_record =
		dm_per_bio_data(bio, sizeof(struct dm_raid1_bio_record));

	bio_record->details.bi_bdev = NULL;

	if (rw == WRITE) {
		/* Save region for mirror_end_io() handler */
		bio_record->write_region = dm_rh_bio_to_region(ms->rh, bio);
		queue_bio(ms, bio, rw);
		return DM_MAPIO_SUBMITTED;
	}

	r = log->type->in_sync(log, dm_rh_bio_to_region(ms->rh, bio), 0);
	if (r < 0 && r != -EWOULDBLOCK)
		return DM_MAPIO_KILL;

	/*
	 * If the region is not in-sync, queue the bio.
	 */
Jonathan Brassow06386bb2008-02-08 02:11:37 +00001207 if (!r || (r == -EWOULDBLOCK)) {
Jens Axboe1eff9d32016-08-05 15:35:16 -06001208 if (bio->bi_opf & REQ_RAHEAD)
Christoph Hellwig846785e2017-06-03 09:38:02 +02001209 return DM_MAPIO_KILL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001210
Linus Torvalds1da177e2005-04-16 15:20:36 -07001211 queue_bio(ms, bio, rw);
Kiyoshi Uedad2a7ad22006-12-08 02:41:06 -08001212 return DM_MAPIO_SUBMITTED;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001213 }
1214
Jonathan Brassow06386bb2008-02-08 02:11:37 +00001215 /*
1216 * The region is in-sync and we can perform reads directly.
1217 * Store enough information so we can retry if it fails.
1218 */
Kent Overstreet4f024f32013-10-11 15:44:27 -07001219 m = choose_mirror(ms, bio->bi_iter.bi_sector);
Jonathan Brassow06386bb2008-02-08 02:11:37 +00001220 if (unlikely(!m))
Christoph Hellwig846785e2017-06-03 09:38:02 +02001221 return DM_MAPIO_KILL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001222
Mikulas Patocka89c7cd82012-12-21 20:23:39 +00001223 dm_bio_record(&bio_record->details, bio);
Mikulas Patocka89c7cd82012-12-21 20:23:39 +00001224 bio_record->m = m;
Jonathan Brassow06386bb2008-02-08 02:11:37 +00001225
1226 map_bio(m, bio);
1227
Kiyoshi Uedad2a7ad22006-12-08 02:41:06 -08001228 return DM_MAPIO_REMAPPED;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001229}
1230
static int mirror_end_io(struct dm_target *ti, struct bio *bio,
			 blk_status_t *error)
{
	int rw = bio_data_dir(bio);
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	struct mirror *m = NULL;
	struct dm_bio_details *bd = NULL;
	struct dm_raid1_bio_record *bio_record =
		dm_per_bio_data(bio, sizeof(struct dm_raid1_bio_record));

	/*
	 * We need to dec pending if this was a write.
	 */
	if (rw == WRITE) {
		if (!(bio->bi_opf & REQ_PREFLUSH) &&
		    bio_op(bio) != REQ_OP_DISCARD)
			dm_rh_dec(ms->rh, bio_record->write_region);
		return DM_ENDIO_DONE;
	}

	if (*error == BLK_STS_NOTSUPP)
		goto out;

	if (bio->bi_opf & REQ_RAHEAD)
		goto out;

	if (unlikely(*error)) {
		if (!bio_record->details.bi_bdev) {
			/*
			 * There wasn't enough memory to record necessary
			 * information for a retry or there was no other
			 * mirror in-sync.
			 */
			DMERR_LIMIT("Mirror read failed.");
			return DM_ENDIO_DONE;
		}

		m = bio_record->m;

		DMERR("Mirror read failed from %s. Trying alternative device.",
		      m->dev->name);

		fail_mirror(m, DM_RAID1_READ_ERROR);

		/*
		 * A failed read is requeued for another attempt using an intact
		 * mirror.
		 */
		if (default_ok(m) || mirror_available(ms, bio)) {
			bd = &bio_record->details;

			dm_bio_restore(bd, bio);
			bio_record->details.bi_bdev = NULL;
			bio->bi_status = 0;

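			/*
			 * Returning DM_ENDIO_INCOMPLETE keeps DM core from
			 * completing the bio; the worker thread will reissue
			 * it against a healthy mirror.
			 */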
			queue_bio(ms, bio, rw);
			return DM_ENDIO_INCOMPLETE;
		}
		DMERR("All replicated volumes dead, failing I/O");
	}

out:
	bio_record->details.bi_bdev = NULL;

	return DM_ENDIO_DONE;
}

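/*
 * mirror_presuspend
 *
 * Flush out everything the target has in flight: release any held
 * bios, stop and drain recovery, presuspend the dirty log and wait
 * for the worker thread so that all queued I/O has been pushed.
 */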
static void mirror_presuspend(struct dm_target *ti)
{
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);

	struct bio_list holds;
	struct bio *bio;

	atomic_set(&ms->suspend, 1);

	/*
	 * Process the bios in the hold list so that any recovery waiting
	 * on them can make progress.  Once this is done, no new bio can
	 * be added to the hold list because ms->suspend is set.
	 */
	spin_lock_irq(&ms->lock);
	holds = ms->holds;
	bio_list_init(&ms->holds);
	spin_unlock_irq(&ms->lock);

	while ((bio = bio_list_pop(&holds)))
		hold_bio(ms, bio);

	/*
	 * We must finish up all the work that we've
	 * generated (i.e. recovery work).
	 */
	dm_rh_stop_recovery(ms->rh);

	wait_event(_kmirrord_recovery_stopped,
		   !dm_rh_recovery_in_flight(ms->rh));

	if (log->type->presuspend && log->type->presuspend(log))
		/* FIXME: need better error handling */
		DMWARN("log presuspend failed");

	/*
	 * Now that recovery is complete/stopped and the
	 * delayed bios are queued, we need to wait for
	 * the worker thread to complete. This way,
	 * we know that all of our I/O has been pushed.
	 */
	flush_workqueue(ms->kmirrord_wq);
}

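/*
 * mirror_postsuspend
 *
 * Suspend the dirty log itself; by this point the worker thread has
 * been flushed, so no further log updates are generated.
 */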
static void mirror_postsuspend(struct dm_target *ti)
{
	struct mirror_set *ms = ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);

	if (log->type->postsuspend && log->type->postsuspend(log))
		/* FIXME: need better error handling */
		DMWARN("log postsuspend failed");
}

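/*
 * mirror_resume
 *
 * Undo mirror_presuspend(): clear the suspend flag, resume the dirty
 * log and restart region recovery.
 */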
static void mirror_resume(struct dm_target *ti)
{
	struct mirror_set *ms = ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);

	atomic_set(&ms->suspend, 0);
	if (log->type->resume && log->type->resume(log))
		/* FIXME: need better error handling */
		DMWARN("log resume failed");
	dm_rh_start_recovery(ms->rh);
}

/*
 * device_status_char
 * @m: mirror device/leg we want the status of
 *
 * We return one character representing the most severe error
 * we have encountered.
 * A => Alive - No failures
 * F => Flush - A flush failure occurred, mirror out-of-sync
 * D => Dead - A write failure occurred leaving mirror out-of-sync
 * S => Sync - A synchronization failure occurred, mirror out-of-sync
 * R => Read - A read failure occurred, mirror data unaffected
 * U => Unclassified - Errors were recorded, but of no known type
 *
 * Returns: <char>
 */
static char device_status_char(struct mirror *m)
{
	if (!atomic_read(&(m->error_count)))
		return 'A';

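	/*
	 * Report the most severe failure first: flush, then write,
	 * then sync, then read.
	 */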
	return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
		(test_bit(DM_RAID1_WRITE_ERROR, &(m->error_type))) ? 'D' :
		(test_bit(DM_RAID1_SYNC_ERROR, &(m->error_type))) ? 'S' :
		(test_bit(DM_RAID1_READ_ERROR, &(m->error_type))) ? 'R' : 'U';
}

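/*
 * mirror_status
 *
 * Reports the mirror's state in the three formats DM core asks for:
 * STATUSTYPE_INFO ("<#mirrors> <dev>... <in-sync>/<total regions> 1
 * <health chars>" followed by the log's status), STATUSTYPE_TABLE
 * (the constructor arguments, so the table can be reloaded) and
 * STATUSTYPE_IMA (key=value pairs for IMA measurement).
 *
 * As an illustration (device numbers and counts are hypothetical),
 * a healthy two-leg mirror with a disk log might report:
 *
 *	INFO:  2 253:4 253:5 125/125 1 AA 3 disk 253:3 A
 *	TABLE: disk 2 253:3 1024 2 253:4 0 253:5 0 1 handle_errors
 */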
static void mirror_status(struct dm_target *ti, status_type_t type,
			  unsigned status_flags, char *result, unsigned maxlen)
{
	unsigned int m, sz = 0;
	int num_feature_args = 0;
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
	char buffer[MAX_NR_MIRRORS + 1];

	switch (type) {
	case STATUSTYPE_INFO:
		DMEMIT("%d ", ms->nr_mirrors);
		for (m = 0; m < ms->nr_mirrors; m++) {
			DMEMIT("%s ", ms->mirror[m].dev->name);
			buffer[m] = device_status_char(&(ms->mirror[m]));
		}
		buffer[m] = '\0';

		DMEMIT("%llu/%llu 1 %s ",
		       (unsigned long long)log->type->get_sync_count(log),
		       (unsigned long long)ms->nr_regions, buffer);

		sz += log->type->status(log, type, result+sz, maxlen-sz);

		break;

	case STATUSTYPE_TABLE:
		sz = log->type->status(log, type, result, maxlen);

		DMEMIT("%d", ms->nr_mirrors);
		for (m = 0; m < ms->nr_mirrors; m++)
			DMEMIT(" %s %llu", ms->mirror[m].dev->name,
			       (unsigned long long)ms->mirror[m].offset);

		num_feature_args += !!errors_handled(ms);
		num_feature_args += !!keep_log(ms);
		if (num_feature_args) {
			DMEMIT(" %d", num_feature_args);
			if (errors_handled(ms))
				DMEMIT(" handle_errors");
			if (keep_log(ms))
				DMEMIT(" keep_log");
		}

		break;

	case STATUSTYPE_IMA:
		DMEMIT_TARGET_NAME_VERSION(ti->type);
		DMEMIT(",nr_mirrors=%d", ms->nr_mirrors);
		for (m = 0; m < ms->nr_mirrors; m++) {
			DMEMIT(",mirror_device_%d=%s", m, ms->mirror[m].dev->name);
			DMEMIT(",mirror_device_%d_status=%c",
			       m, device_status_char(&(ms->mirror[m])));
		}

		DMEMIT(",handle_errors=%c", errors_handled(ms) ? 'y' : 'n');
		DMEMIT(",keep_log=%c", keep_log(ms) ? 'y' : 'n');

		DMEMIT(",log_type_status=");
		sz += log->type->status(log, type, result+sz, maxlen-sz);
		DMEMIT(";");
		break;
	}
}

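/*
 * Call @fn on every mirror leg; DM core uses this to, e.g., stack
 * queue limits across all underlying devices.
 */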
static int mirror_iterate_devices(struct dm_target *ti,
				  iterate_devices_callout_fn fn, void *data)
{
	struct mirror_set *ms = ti->private;
	int ret = 0;
	unsigned i;

	for (i = 0; !ret && i < ms->nr_mirrors; i++)
		ret = fn(ti, ms->mirror[i].dev,
			 ms->mirror[i].offset, ti->len, data);

	return ret;
}

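/* Hook table wiring the "mirror" target into device-mapper core. */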
static struct target_type mirror_target = {
	.name	 = "mirror",
	.version = {1, 14, 0},
	.module	 = THIS_MODULE,
	.ctr	 = mirror_ctr,
	.dtr	 = mirror_dtr,
	.map	 = mirror_map,
	.end_io	 = mirror_end_io,
	.presuspend = mirror_presuspend,
	.postsuspend = mirror_postsuspend,
	.resume	 = mirror_resume,
	.status	 = mirror_status,
	.iterate_devices = mirror_iterate_devices,
};

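/* Register the "mirror" target with DM core on module load. */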
static int __init dm_mirror_init(void)
{
	int r;

	r = dm_register_target(&mirror_target);
	if (r < 0) {
		DMERR("Failed to register mirror target");
		goto bad_target;
	}

	return 0;

bad_target:
	return r;
}

static void __exit dm_mirror_exit(void)
{
	dm_unregister_target(&mirror_target);
}

/* Module hooks */
module_init(dm_mirror_init);
module_exit(dm_mirror_exit);

MODULE_DESCRIPTION(DM_NAME " mirror target");
MODULE_AUTHOR("Joe Thornber");
MODULE_LICENSE("GPL");