
/*
   rbd.c -- Export ceph rados objects as a Linux block device


   based on drivers/block/osdblk.c:

   Copyright 2009 Red Hat, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.



   For usage instructions, please refer to:

		 Documentation/ABI/testing/sysfs-bus-rbd

 */

#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/mon_client.h>
#include <linux/ceph/cls_lock_client.h>
#include <linux/ceph/striper.h>
#include <linux/ceph/decode.h>
#include <linux/fs_parser.h>
#include <linux/bsearch.h>

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/blk-mq.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/workqueue.h>

#include "rbd_types.h"

#define RBD_DEBUG	/* Activate rbd_assert() calls */

/*
 * Increment the given counter and return its updated value.
 * If the counter is already 0 it will not be incremented.
 * If the counter is already at its maximum value returns
 * -EINVAL without updating it.
 */
static int atomic_inc_return_safe(atomic_t *v)
{
	unsigned int counter;

	counter = (unsigned int)atomic_fetch_add_unless(v, 1, 0);
	if (counter <= (unsigned int)INT_MAX)
		return (int)counter;

	atomic_dec(v);

	return -EINVAL;
}

/* Decrement the counter.  Return the resulting value, or -EINVAL */
static int atomic_dec_return_safe(atomic_t *v)
{
	int counter;

	counter = atomic_dec_return(v);
	if (counter >= 0)
		return counter;

	atomic_inc(v);

	return -EINVAL;
}

#define RBD_DRV_NAME "rbd"

#define RBD_MINORS_PER_MAJOR		256
#define RBD_SINGLE_MAJOR_PART_SHIFT	4

#define RBD_MAX_PARENT_CHAIN_LEN	16

#define RBD_SNAP_DEV_NAME_PREFIX	"snap_"
#define RBD_MAX_SNAP_NAME_LEN	\
			(NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1))

#define RBD_MAX_SNAP_COUNT	510	/* allows max snapc to fit in 4KB */

#define RBD_SNAP_HEAD_NAME	"-"

#define	BAD_SNAP_INDEX	U32_MAX	/* invalid index into snap array */

/* This allows a single page to hold an image name sent by OSD */
#define RBD_IMAGE_NAME_LEN_MAX	(PAGE_SIZE - sizeof (__le32) - 1)
#define RBD_IMAGE_ID_LEN_MAX	64

#define RBD_OBJ_PREFIX_LEN_MAX	64

#define RBD_NOTIFY_TIMEOUT	5	/* seconds */
#define RBD_RETRY_DELAY		msecs_to_jiffies(1000)

/* Feature bits */

#define RBD_FEATURE_LAYERING		(1ULL<<0)
#define RBD_FEATURE_STRIPINGV2		(1ULL<<1)
#define RBD_FEATURE_EXCLUSIVE_LOCK	(1ULL<<2)
#define RBD_FEATURE_OBJECT_MAP		(1ULL<<3)
#define RBD_FEATURE_FAST_DIFF		(1ULL<<4)
#define RBD_FEATURE_DEEP_FLATTEN	(1ULL<<5)
#define RBD_FEATURE_DATA_POOL		(1ULL<<7)
#define RBD_FEATURE_OPERATIONS		(1ULL<<8)

#define RBD_FEATURES_ALL	(RBD_FEATURE_LAYERING |		\
				 RBD_FEATURE_STRIPINGV2 |	\
				 RBD_FEATURE_EXCLUSIVE_LOCK |	\
				 RBD_FEATURE_OBJECT_MAP |	\
				 RBD_FEATURE_FAST_DIFF |	\
				 RBD_FEATURE_DEEP_FLATTEN |	\
				 RBD_FEATURE_DATA_POOL |	\
				 RBD_FEATURE_OPERATIONS)

/* Features supported by this (client software) implementation. */

#define RBD_FEATURES_SUPPORTED	(RBD_FEATURES_ALL)

/*
 * An RBD device name will be "rbd#", where the "rbd" comes from
 * RBD_DRV_NAME above, and # is a unique integer identifier.
 */
#define DEV_NAME_LEN		32

/*
 * block device image metadata (in-memory version)
 */
struct rbd_image_header {
	/* These six fields never change for a given rbd image */
	char *object_prefix;
	__u8 obj_order;
	u64 stripe_unit;
	u64 stripe_count;
	s64 data_pool_id;
	u64 features;		/* Might be changeable someday? */

	/* The remaining fields need to be updated occasionally */
	u64 image_size;
	struct ceph_snap_context *snapc;
	char *snap_names;	/* format 1 only */
	u64 *snap_sizes;	/* format 1 only */
};

/*
 * An rbd image specification.
 *
 * The tuple (pool_id, image_id, snap_id) is sufficient to uniquely
 * identify an image.  Each rbd_dev structure includes a pointer to
 * an rbd_spec structure that encapsulates this identity.
 *
 * Each of the id's in an rbd_spec has an associated name.  For a
 * user-mapped image, the names are supplied and the id's associated
 * with them are looked up.  For a layered image, a parent image is
 * defined by the tuple, and the names are looked up.
 *
 * An rbd_dev structure contains a parent_spec pointer which is
 * non-null if the image it represents is a child in a layered
 * image.  This pointer will refer to the rbd_spec structure used
 * by the parent rbd_dev for its own identity (i.e., the structure
 * is shared between the parent and child).
 *
 * Since these structures are populated once, during the discovery
 * phase of image construction, they are effectively immutable so
 * we make no effort to synchronize access to them.
 *
 * Note that code herein does not assume the image name is known (it
 * could be a null pointer).
 */
struct rbd_spec {
	u64		pool_id;
	const char	*pool_name;
	const char	*pool_ns;	/* NULL if default, never "" */

	const char	*image_id;
	const char	*image_name;

	u64		snap_id;
	const char	*snap_name;

	struct kref	kref;
};

Alex Elderf0f8cef2012-01-29 13:57:44 -0600202 * an instance of the client. multiple devices may share an rbd client.
Yehuda Sadeh602adf42010-08-12 16:11:25 -0700203 */
204struct rbd_client {
205 struct ceph_client *client;
206 struct kref kref;
207 struct list_head node;
208};
209
Ilya Dryomov0192ce22019-05-16 15:06:56 +0200210struct pending_result {
211 int result; /* first nonzero result */
212 int num_pending;
213};
214
Alex Elderbf0d5f502012-11-22 00:00:08 -0600215struct rbd_img_request;
Alex Elderbf0d5f502012-11-22 00:00:08 -0600216
Alex Elder9969ebc2013-01-18 12:31:10 -0600217enum obj_request_type {
Ilya Dryomova1fbb5e2018-01-16 12:15:02 +0100218 OBJ_REQUEST_NODATA = 1,
Ilya Dryomov5359a172018-01-20 10:30:10 +0100219 OBJ_REQUEST_BIO, /* pointer into provided bio (list) */
Ilya Dryomov7e07efb2018-01-20 10:30:11 +0100220 OBJ_REQUEST_BVECS, /* pointer into provided bio_vec array */
Ilya Dryomovafb97882018-02-06 19:26:35 +0100221 OBJ_REQUEST_OWN_BVECS, /* private bio_vec array, doesn't own pages */
Alex Elder9969ebc2013-01-18 12:31:10 -0600222};
Alex Elderbf0d5f502012-11-22 00:00:08 -0600223
Guangliang Zhao6d2940c2014-03-13 11:21:35 +0800224enum obj_operation_type {
Ilya Dryomova1fbb5e2018-01-16 12:15:02 +0100225 OBJ_OP_READ = 1,
Guangliang Zhao6d2940c2014-03-13 11:21:35 +0800226 OBJ_OP_WRITE,
Guangliang Zhao90e98c52014-04-01 22:22:16 +0800227 OBJ_OP_DISCARD,
Ilya Dryomov6484cbe2019-01-29 12:46:25 +0100228 OBJ_OP_ZEROOUT,
Guangliang Zhao6d2940c2014-03-13 11:21:35 +0800229};
230
Ilya Dryomov0ad5d952019-05-14 20:45:38 +0200231#define RBD_OBJ_FLAG_DELETION (1U << 0)
232#define RBD_OBJ_FLAG_COPYUP_ENABLED (1U << 1)
Ilya Dryomov793333a302019-06-13 17:44:08 +0200233#define RBD_OBJ_FLAG_COPYUP_ZEROS (1U << 2)
Ilya Dryomov22e8bd52019-06-05 19:25:11 +0200234#define RBD_OBJ_FLAG_MAY_EXIST (1U << 3)
235#define RBD_OBJ_FLAG_NOOP_FOR_NONEXISTENT (1U << 4)
Ilya Dryomov0ad5d952019-05-14 20:45:38 +0200236
Ilya Dryomova9b67e62019-05-08 13:35:57 +0200237enum rbd_obj_read_state {
Ilya Dryomov85b5e6d2019-05-14 21:06:07 +0200238 RBD_OBJ_READ_START = 1,
239 RBD_OBJ_READ_OBJECT,
Ilya Dryomova9b67e62019-05-08 13:35:57 +0200240 RBD_OBJ_READ_PARENT,
241};
242
/*
 * Writes go through the following state machine to deal with
 * layering:
 *
 *            . . . . . RBD_OBJ_WRITE_GUARD. . . . . . . . . . . . . .
 *            .                 |                                    .
 *            .                 v                                    .
 *            .  RBD_OBJ_WRITE_READ_FROM_PARENT. . .                 .
 *            .                 |                    .               .
 *            .                 v          v (deep-copyup            .
 *    (image  .   RBD_OBJ_WRITE_COPYUP_EMPTY_SNAPC   .  not needed)  .
 *  flattened) v                |                    .               .
 *            .                 v                    .               .
 *            . . . .RBD_OBJ_WRITE_COPYUP_OPS. . . . .      (copyup  .
 *                              |                        not needed) v
 *                              v                                    .
 *                            done . . . . . . . . . . . . . . . . . .
 *                              ^
 *                              |
 *                     RBD_OBJ_WRITE_FLAT
 *
 * Writes start in RBD_OBJ_WRITE_GUARD or _FLAT, depending on whether
 * assert_exists guard is needed or not (in some cases it's not needed
 * even if there is a parent).
 */
enum rbd_obj_write_state {
	RBD_OBJ_WRITE_START = 1,
	RBD_OBJ_WRITE_PRE_OBJECT_MAP,
	RBD_OBJ_WRITE_OBJECT,
	__RBD_OBJ_WRITE_COPYUP,
	RBD_OBJ_WRITE_COPYUP,
	RBD_OBJ_WRITE_POST_OBJECT_MAP,
};

enum rbd_obj_copyup_state {
	RBD_OBJ_COPYUP_START = 1,
	RBD_OBJ_COPYUP_READ_PARENT,
	__RBD_OBJ_COPYUP_OBJECT_MAPS,
	RBD_OBJ_COPYUP_OBJECT_MAPS,
	__RBD_OBJ_COPYUP_WRITE_OBJECT,
	RBD_OBJ_COPYUP_WRITE_OBJECT,
};

struct rbd_obj_request {
	struct ceph_object_extent ex;
	unsigned int		flags;	/* RBD_OBJ_FLAG_* */
	union {
		enum rbd_obj_read_state	 read_state;	/* for reads */
		enum rbd_obj_write_state write_state;	/* for writes */
	};

	struct rbd_img_request	*img_request;
	struct ceph_file_extent	*img_extents;
	u32			num_img_extents;

	union {
		struct ceph_bio_iter	bio_pos;
		struct {
			struct ceph_bvec_iter	bvec_pos;
			u32			bvec_count;
			u32			bvec_idx;
		};
	};

	enum rbd_obj_copyup_state copyup_state;
	struct bio_vec		*copyup_bvecs;
	u32			copyup_bvec_count;

	struct list_head	osd_reqs;	/* w/ r_private_item */

	struct mutex		state_mutex;
	struct pending_result	pending;
	struct kref		kref;
};

enum img_req_flags {
	IMG_REQ_CHILD,		/* initiator: block = 0, child image = 1 */
	IMG_REQ_LAYERED,	/* ENOENT handling: normal = 0, layered = 1 */
};

enum rbd_img_state {
	RBD_IMG_START = 1,
	RBD_IMG_EXCLUSIVE_LOCK,
	__RBD_IMG_OBJECT_REQUESTS,
	RBD_IMG_OBJECT_REQUESTS,
};

struct rbd_img_request {
	struct rbd_device	*rbd_dev;
	enum obj_operation_type	op_type;
	enum obj_request_type	data_type;
	unsigned long		flags;
	enum rbd_img_state	state;
	union {
		u64			snap_id;	/* for reads */
		struct ceph_snap_context *snapc;	/* for writes */
	};
	struct rbd_obj_request	*obj_request;	/* obj req initiator */

	struct list_head	lock_item;
	struct list_head	object_extents;	/* obj_req.ex structs */

	struct mutex		state_mutex;
	struct pending_result	pending;
	struct work_struct	work;
	int			work_result;
};

#define for_each_obj_request(ireq, oreq) \
	list_for_each_entry(oreq, &(ireq)->object_extents, ex.oe_item)
#define for_each_obj_request_safe(ireq, oreq, n) \
	list_for_each_entry_safe(oreq, n, &(ireq)->object_extents, ex.oe_item)

enum rbd_watch_state {
	RBD_WATCH_STATE_UNREGISTERED,
	RBD_WATCH_STATE_REGISTERED,
	RBD_WATCH_STATE_ERROR,
};

enum rbd_lock_state {
	RBD_LOCK_STATE_UNLOCKED,
	RBD_LOCK_STATE_LOCKED,
	RBD_LOCK_STATE_RELEASING,
};

/* WatchNotify::ClientId */
struct rbd_client_id {
	u64 gid;
	u64 handle;
};

struct rbd_mapping {
	u64			size;
};

/*
 * a single device
 */
struct rbd_device {
	int			dev_id;		/* blkdev unique id */

	int			major;		/* blkdev assigned major */
	int			minor;
	struct gendisk		*disk;		/* blkdev's gendisk and rq */

	u32			image_format;	/* Either 1 or 2 */
	struct rbd_client	*rbd_client;

	char			name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */

	spinlock_t		lock;		/* queue, flags, open_count */

	struct rbd_image_header	header;
	unsigned long		flags;		/* possibly lock protected */
	struct rbd_spec		*spec;
	struct rbd_options	*opts;
	char			*config_info;	/* add{,_single_major} string */

	struct ceph_object_id	header_oid;
	struct ceph_object_locator header_oloc;

	struct ceph_file_layout	layout;		/* used for all rbd requests */

	struct mutex		watch_mutex;
	enum rbd_watch_state	watch_state;
	struct ceph_osd_linger_request *watch_handle;
	u64			watch_cookie;
	struct delayed_work	watch_dwork;

	struct rw_semaphore	lock_rwsem;
	enum rbd_lock_state	lock_state;
	char			lock_cookie[32];
	struct rbd_client_id	owner_cid;
	struct work_struct	acquired_lock_work;
	struct work_struct	released_lock_work;
	struct delayed_work	lock_dwork;
	struct work_struct	unlock_work;
	spinlock_t		lock_lists_lock;
	struct list_head	acquiring_list;
	struct list_head	running_list;
	struct completion	acquire_wait;
	int			acquire_err;
	struct completion	releasing_wait;

	spinlock_t		object_map_lock;
	u8			*object_map;
	u64			object_map_size;	/* in objects */
	u64			object_map_flags;

	struct workqueue_struct	*task_wq;

	struct rbd_spec		*parent_spec;
	u64			parent_overlap;
	atomic_t		parent_ref;
	struct rbd_device	*parent;

	/* Block layer tags. */
	struct blk_mq_tag_set	tag_set;

	/* protects updating the header */
	struct rw_semaphore	header_rwsem;

	struct rbd_mapping	mapping;

	struct list_head	node;

	/* sysfs related */
	struct device		dev;
	unsigned long		open_count;	/* protected by lock */
};

/*
 * Flag bits for rbd_dev->flags:
 * - REMOVING (which is coupled with rbd_dev->open_count) is protected
 *   by rbd_dev->lock
 */
enum rbd_dev_flags {
	RBD_DEV_FLAG_EXISTS,	/* rbd_dev_device_setup() ran */
	RBD_DEV_FLAG_REMOVING,	/* this mapping is being removed */
	RBD_DEV_FLAG_READONLY,	/* -o ro or snapshot */
};

static DEFINE_MUTEX(client_mutex);	/* Serialize client creation */

static LIST_HEAD(rbd_dev_list);    /* devices */
static DEFINE_SPINLOCK(rbd_dev_list_lock);

static LIST_HEAD(rbd_client_list);		/* clients */
static DEFINE_SPINLOCK(rbd_client_list_lock);

/* Slab caches for frequently-allocated structures */

static struct kmem_cache	*rbd_img_request_cache;
static struct kmem_cache	*rbd_obj_request_cache;

static int rbd_major;
static DEFINE_IDA(rbd_dev_id_ida);

static struct workqueue_struct *rbd_wq;

static struct ceph_snap_context rbd_empty_snapc = {
	.nref = REFCOUNT_INIT(1),
};

/*
 * single-major requires >= 0.75 version of userspace rbd utility.
 */
static bool single_major = true;
module_param(single_major, bool, 0444);
MODULE_PARM_DESC(single_major, "Use a single major number for all rbd devices (default: true)");

static ssize_t add_store(struct bus_type *bus, const char *buf, size_t count);
static ssize_t remove_store(struct bus_type *bus, const char *buf,
			    size_t count);
static ssize_t add_single_major_store(struct bus_type *bus, const char *buf,
				      size_t count);
static ssize_t remove_single_major_store(struct bus_type *bus, const char *buf,
					 size_t count);
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth);

static int rbd_dev_id_to_minor(int dev_id)
{
	return dev_id << RBD_SINGLE_MAJOR_PART_SHIFT;
}

static int minor_to_rbd_dev_id(int minor)
{
	return minor >> RBD_SINGLE_MAJOR_PART_SHIFT;
}

static bool rbd_is_ro(struct rbd_device *rbd_dev)
{
	return test_bit(RBD_DEV_FLAG_READONLY, &rbd_dev->flags);
}

static bool rbd_is_snap(struct rbd_device *rbd_dev)
{
	return rbd_dev->spec->snap_id != CEPH_NOSNAP;
}

static bool __rbd_is_lock_owner(struct rbd_device *rbd_dev)
{
	lockdep_assert_held(&rbd_dev->lock_rwsem);

	return rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED ||
	       rbd_dev->lock_state == RBD_LOCK_STATE_RELEASING;
}

static bool rbd_is_lock_owner(struct rbd_device *rbd_dev)
{
	bool is_lock_owner;

	down_read(&rbd_dev->lock_rwsem);
	is_lock_owner = __rbd_is_lock_owner(rbd_dev);
	up_read(&rbd_dev->lock_rwsem);
	return is_lock_owner;
}

static ssize_t supported_features_show(struct bus_type *bus, char *buf)
{
	return sprintf(buf, "0x%llx\n", RBD_FEATURES_SUPPORTED);
}

static BUS_ATTR_WO(add);
static BUS_ATTR_WO(remove);
static BUS_ATTR_WO(add_single_major);
static BUS_ATTR_WO(remove_single_major);
static BUS_ATTR_RO(supported_features);

static struct attribute *rbd_bus_attrs[] = {
	&bus_attr_add.attr,
	&bus_attr_remove.attr,
	&bus_attr_add_single_major.attr,
	&bus_attr_remove_single_major.attr,
	&bus_attr_supported_features.attr,
	NULL,
};

static umode_t rbd_bus_is_visible(struct kobject *kobj,
				  struct attribute *attr, int index)
{
	if (!single_major &&
	    (attr == &bus_attr_add_single_major.attr ||
	     attr == &bus_attr_remove_single_major.attr))
		return 0;

	return attr->mode;
}

static const struct attribute_group rbd_bus_group = {
	.attrs = rbd_bus_attrs,
	.is_visible = rbd_bus_is_visible,
};
__ATTRIBUTE_GROUPS(rbd_bus);

static struct bus_type rbd_bus_type = {
	.name		= "rbd",
	.bus_groups	= rbd_bus_groups,
};

static void rbd_root_dev_release(struct device *dev)
{
}

static struct device rbd_root_dev = {
	.init_name = "rbd",
	.release = rbd_root_dev_release,
};

static __printf(2, 3)
void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;

	if (!rbd_dev)
		printk(KERN_WARNING "%s: %pV\n", RBD_DRV_NAME, &vaf);
	else if (rbd_dev->disk)
		printk(KERN_WARNING "%s: %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->disk->disk_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_name)
		printk(KERN_WARNING "%s: image %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_id)
		printk(KERN_WARNING "%s: id %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_id, &vaf);
	else	/* punt */
		printk(KERN_WARNING "%s: rbd_dev %p: %pV\n",
			RBD_DRV_NAME, rbd_dev, &vaf);
	va_end(args);
}

#ifdef RBD_DEBUG
#define rbd_assert(expr)						\
		if (unlikely(!(expr))) {				\
			printk(KERN_ERR "\nAssertion failure in %s() "	\
						"at line %d:\n\n"	\
					"\trbd_assert(%s);\n\n",	\
					__func__, __LINE__, #expr);	\
			BUG();						\
		}
#else /* !RBD_DEBUG */
#  define rbd_assert(expr)	((void) 0)
#endif /* !RBD_DEBUG */

static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);

static int rbd_dev_refresh(struct rbd_device *rbd_dev);
static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev);
static int rbd_dev_header_info(struct rbd_device *rbd_dev);
static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev);
static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id);
static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
		u8 *order, u64 *snap_size);
static int rbd_dev_v2_get_flags(struct rbd_device *rbd_dev);

static void rbd_obj_handle_request(struct rbd_obj_request *obj_req, int result);
static void rbd_img_handle_request(struct rbd_img_request *img_req, int result);

/*
 * Return true if nothing else is pending.
 */
static bool pending_result_dec(struct pending_result *pending, int *result)
{
	rbd_assert(pending->num_pending > 0);

	if (*result && !pending->result)
		pending->result = *result;
	if (--pending->num_pending)
		return false;

	*result = pending->result;
	return true;
}

static int rbd_open(struct block_device *bdev, fmode_t mode)
{
	struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
	bool removing = false;

	spin_lock_irq(&rbd_dev->lock);
	if (test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags))
		removing = true;
	else
		rbd_dev->open_count++;
	spin_unlock_irq(&rbd_dev->lock);
	if (removing)
		return -ENOENT;

	(void) get_device(&rbd_dev->dev);

	return 0;
}

static void rbd_release(struct gendisk *disk, fmode_t mode)
{
	struct rbd_device *rbd_dev = disk->private_data;
	unsigned long open_count_before;

	spin_lock_irq(&rbd_dev->lock);
	open_count_before = rbd_dev->open_count--;
	spin_unlock_irq(&rbd_dev->lock);
	rbd_assert(open_count_before > 0);

	put_device(&rbd_dev->dev);
}

static const struct block_device_operations rbd_bd_ops = {
	.owner			= THIS_MODULE,
	.open			= rbd_open,
	.release		= rbd_release,
};

/*
 * Initialize an rbd client instance.  Success or not, this function
 * consumes ceph_opts.  Caller holds client_mutex.
 */
static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;
	int ret = -ENOMEM;

	dout("%s:\n", __func__);
	rbdc = kmalloc(sizeof(struct rbd_client), GFP_KERNEL);
	if (!rbdc)
		goto out_opt;

	kref_init(&rbdc->kref);
	INIT_LIST_HEAD(&rbdc->node);

	rbdc->client = ceph_create_client(ceph_opts, rbdc);
	if (IS_ERR(rbdc->client))
		goto out_rbdc;
	ceph_opts = NULL; /* Now rbdc->client is responsible for ceph_opts */

	ret = ceph_open_session(rbdc->client);
	if (ret < 0)
		goto out_client;

	spin_lock(&rbd_client_list_lock);
	list_add_tail(&rbdc->node, &rbd_client_list);
	spin_unlock(&rbd_client_list_lock);

	dout("%s: rbdc %p\n", __func__, rbdc);

	return rbdc;
out_client:
	ceph_destroy_client(rbdc->client);
out_rbdc:
	kfree(rbdc);
out_opt:
	if (ceph_opts)
		ceph_destroy_options(ceph_opts);
	dout("%s: error %d\n", __func__, ret);

	return ERR_PTR(ret);
}

static struct rbd_client *__rbd_get_client(struct rbd_client *rbdc)
{
	kref_get(&rbdc->kref);

	return rbdc;
}

/*
 * Find a ceph client with specific addr and configuration.  If
 * found, bump its reference count.
 */
static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc = NULL, *iter;

	if (ceph_opts->flags & CEPH_OPT_NOSHARE)
		return NULL;

	spin_lock(&rbd_client_list_lock);
	list_for_each_entry(iter, &rbd_client_list, node) {
		if (!ceph_compare_options(ceph_opts, iter->client)) {
			__rbd_get_client(iter);

			rbdc = iter;
			break;
		}
	}
	spin_unlock(&rbd_client_list_lock);

	return rbdc;
}

/*
 * (Per device) rbd map options
 */
enum {
	Opt_queue_depth,
	Opt_alloc_size,
	Opt_lock_timeout,
	/* int args above */
	Opt_pool_ns,
	Opt_compression_hint,
	/* string args above */
	Opt_read_only,
	Opt_read_write,
	Opt_lock_on_read,
	Opt_exclusive,
	Opt_notrim,
};

enum {
	Opt_compression_hint_none,
	Opt_compression_hint_compressible,
	Opt_compression_hint_incompressible,
};

static const struct constant_table rbd_param_compression_hint[] = {
	{"none",		Opt_compression_hint_none},
	{"compressible",	Opt_compression_hint_compressible},
	{"incompressible",	Opt_compression_hint_incompressible},
	{}
};

static const struct fs_parameter_spec rbd_parameters[] = {
	fsparam_u32	("alloc_size",		Opt_alloc_size),
	fsparam_enum	("compression_hint",	Opt_compression_hint,
			 rbd_param_compression_hint),
	fsparam_flag	("exclusive",		Opt_exclusive),
	fsparam_flag	("lock_on_read",	Opt_lock_on_read),
	fsparam_u32	("lock_timeout",	Opt_lock_timeout),
	fsparam_flag	("notrim",		Opt_notrim),
	fsparam_string	("_pool_ns",		Opt_pool_ns),
	fsparam_u32	("queue_depth",		Opt_queue_depth),
	fsparam_flag	("read_only",		Opt_read_only),
	fsparam_flag	("read_write",		Opt_read_write),
	fsparam_flag	("ro",			Opt_read_only),
	fsparam_flag	("rw",			Opt_read_write),
	{}
};

struct rbd_options {
	int	queue_depth;
	int	alloc_size;
	unsigned long	lock_timeout;
	bool	read_only;
	bool	lock_on_read;
	bool	exclusive;
	bool	trim;

	u32 alloc_hint_flags;  /* CEPH_OSD_OP_ALLOC_HINT_FLAG_* */
};

#define RBD_QUEUE_DEPTH_DEFAULT	BLKDEV_DEFAULT_RQ
#define RBD_ALLOC_SIZE_DEFAULT	(64 * 1024)
#define RBD_LOCK_TIMEOUT_DEFAULT 0  /* no timeout */
#define RBD_READ_ONLY_DEFAULT	false
#define RBD_LOCK_ON_READ_DEFAULT false
#define RBD_EXCLUSIVE_DEFAULT	false
#define RBD_TRIM_DEFAULT	true

struct rbd_parse_opts_ctx {
	struct rbd_spec		*spec;
	struct ceph_options	*copts;
	struct rbd_options	*opts;
};

static char* obj_op_name(enum obj_operation_type op_type)
{
	switch (op_type) {
	case OBJ_OP_READ:
		return "read";
	case OBJ_OP_WRITE:
		return "write";
	case OBJ_OP_DISCARD:
		return "discard";
	case OBJ_OP_ZEROOUT:
		return "zeroout";
	default:
		return "???";
	}
}

/*
 * Destroy ceph client
 *
 * Caller must hold rbd_client_list_lock.
 */
static void rbd_client_release(struct kref *kref)
{
	struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);

	dout("%s: rbdc %p\n", __func__, rbdc);
	spin_lock(&rbd_client_list_lock);
	list_del(&rbdc->node);
	spin_unlock(&rbd_client_list_lock);

	ceph_destroy_client(rbdc->client);
	kfree(rbdc);
}

/*
 * Drop reference to ceph client node. If it's not referenced anymore, release
 * it.
 */
static void rbd_put_client(struct rbd_client *rbdc)
{
	if (rbdc)
		kref_put(&rbdc->kref, rbd_client_release);
}

/*
 * Get a ceph client with specific addr and configuration, if one does
 * not exist create it.  Either way, ceph_opts is consumed by this
 * function.
 */
static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;
	int ret;

	mutex_lock(&client_mutex);
	rbdc = rbd_client_find(ceph_opts);
	if (rbdc) {
		ceph_destroy_options(ceph_opts);

		/*
		 * Using an existing client.  Make sure ->pg_pools is up to
		 * date before we look up the pool id in do_rbd_add().
		 */
		ret = ceph_wait_for_latest_osdmap(rbdc->client,
					rbdc->client->options->mount_timeout);
		if (ret) {
			rbd_warn(NULL, "failed to get latest osdmap: %d", ret);
			rbd_put_client(rbdc);
			rbdc = ERR_PTR(ret);
		}
	} else {
		rbdc = rbd_client_create(ceph_opts);
	}
	mutex_unlock(&client_mutex);

	return rbdc;
}

static bool rbd_image_format_valid(u32 image_format)
{
	return image_format == 1 || image_format == 2;
}

static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk)
{
	size_t size;
	u32 snap_count;

	/* The header has to start with the magic rbd header text */
	if (memcmp(&ondisk->text, RBD_HEADER_TEXT, sizeof (RBD_HEADER_TEXT)))
		return false;

	/* The bio layer requires at least sector-sized I/O */

	if (ondisk->options.order < SECTOR_SHIFT)
		return false;

	/* If we use u64 in a few spots we may be able to loosen this */

	if (ondisk->options.order > 8 * sizeof (int) - 1)
		return false;

	/*
	 * The size of a snapshot header has to fit in a size_t, and
	 * that limits the number of snapshots.
	 */
	snap_count = le32_to_cpu(ondisk->snap_count);
	size = SIZE_MAX - sizeof (struct ceph_snap_context);
	if (snap_count > size / sizeof (__le64))
		return false;

	/*
	 * Not only that, but the size of the entire the snapshot
	 * header must also be representable in a size_t.
	 */
	size -= snap_count * sizeof (__le64);
	if ((u64) size < le64_to_cpu(ondisk->snap_names_len))
		return false;

	return true;
}

/*
 * returns the size of an object in the image
 */
static u32 rbd_obj_bytes(struct rbd_image_header *header)
{
	return 1U << header->obj_order;
}

static void rbd_init_layout(struct rbd_device *rbd_dev)
{
	if (rbd_dev->header.stripe_unit == 0 ||
	    rbd_dev->header.stripe_count == 0) {
		rbd_dev->header.stripe_unit = rbd_obj_bytes(&rbd_dev->header);
		rbd_dev->header.stripe_count = 1;
	}

	rbd_dev->layout.stripe_unit = rbd_dev->header.stripe_unit;
	rbd_dev->layout.stripe_count = rbd_dev->header.stripe_count;
	rbd_dev->layout.object_size = rbd_obj_bytes(&rbd_dev->header);
	rbd_dev->layout.pool_id = rbd_dev->header.data_pool_id == CEPH_NOPOOL ?
			  rbd_dev->spec->pool_id : rbd_dev->header.data_pool_id;
	RCU_INIT_POINTER(rbd_dev->layout.pool_ns, NULL);
}

/*
 * Fill an rbd image header with information from the given format 1
 * on-disk header.
 */
static int rbd_header_from_disk(struct rbd_device *rbd_dev,
				 struct rbd_image_header_ondisk *ondisk)
{
	struct rbd_image_header *header = &rbd_dev->header;
	bool first_time = header->object_prefix == NULL;
	struct ceph_snap_context *snapc;
	char *object_prefix = NULL;
	char *snap_names = NULL;
	u64 *snap_sizes = NULL;
	u32 snap_count;
	int ret = -ENOMEM;
	u32 i;

	/* Allocate this now to avoid having to handle failure below */

	if (first_time) {
		object_prefix = kstrndup(ondisk->object_prefix,
					 sizeof(ondisk->object_prefix),
					 GFP_KERNEL);
		if (!object_prefix)
			return -ENOMEM;
	}

	/* Allocate the snapshot context and fill it in */

	snap_count = le32_to_cpu(ondisk->snap_count);
	snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
	if (!snapc)
		goto out_err;
	snapc->seq = le64_to_cpu(ondisk->snap_seq);
	if (snap_count) {
		struct rbd_image_snap_ondisk *snaps;
		u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len);

		/* We'll keep a copy of the snapshot names... */

		if (snap_names_len > (u64)SIZE_MAX)
			goto out_2big;
		snap_names = kmalloc(snap_names_len, GFP_KERNEL);
		if (!snap_names)
			goto out_err;

		/* ...as well as the array of their sizes. */
		snap_sizes = kmalloc_array(snap_count,
					   sizeof(*header->snap_sizes),
					   GFP_KERNEL);
		if (!snap_sizes)
			goto out_err;

		/*
		 * Copy the names, and fill in each snapshot's id
		 * and size.
		 *
		 * Note that rbd_dev_v1_header_info() guarantees the
		 * ondisk buffer we're working with has
		 * snap_names_len bytes beyond the end of the
		 * snapshot id array, this memcpy() is safe.
		 */
		memcpy(snap_names, &ondisk->snaps[snap_count], snap_names_len);
		snaps = ondisk->snaps;
		for (i = 0; i < snap_count; i++) {
			snapc->snaps[i] = le64_to_cpu(snaps[i].id);
			snap_sizes[i] = le64_to_cpu(snaps[i].image_size);
		}
	}

	/* We won't fail any more, fill in the header */

	if (first_time) {
		header->object_prefix = object_prefix;
		header->obj_order = ondisk->options.order;
		rbd_init_layout(rbd_dev);
	} else {
		ceph_put_snap_context(header->snapc);
		kfree(header->snap_names);
		kfree(header->snap_sizes);
	}

	/* The remaining fields always get updated (when we refresh) */

	header->image_size = le64_to_cpu(ondisk->image_size);
	header->snapc = snapc;
	header->snap_names = snap_names;
	header->snap_sizes = snap_sizes;

	return 0;
out_2big:
	ret = -EIO;
out_err:
	kfree(snap_sizes);
	kfree(snap_names);
	ceph_put_snap_context(snapc);
	kfree(object_prefix);

	return ret;
}

static const char *_rbd_dev_v1_snap_name(struct rbd_device *rbd_dev, u32 which)
{
	const char *snap_name;

	rbd_assert(which < rbd_dev->header.snapc->num_snaps);

	/* Skip over names until we find the one we are looking for */

	snap_name = rbd_dev->header.snap_names;
	while (which--)
		snap_name += strlen(snap_name) + 1;

	return kstrdup(snap_name, GFP_KERNEL);
}

/*
 * Snapshot id comparison function for use with qsort()/bsearch().
 * Note that result is for snapshots in *descending* order.
 */
static int snapid_compare_reverse(const void *s1, const void *s2)
{
	u64 snap_id1 = *(u64 *)s1;
	u64 snap_id2 = *(u64 *)s2;

	if (snap_id1 < snap_id2)
		return 1;
	return snap_id1 == snap_id2 ? 0 : -1;
}

/*
 * Search a snapshot context to see if the given snapshot id is
 * present.
 *
 * Returns the position of the snapshot id in the array if it's found,
 * or BAD_SNAP_INDEX otherwise.
 *
 * Note: The snapshot array is in kept sorted (by the osd) in
 * reverse order, highest snapshot id first.
 */
static u32 rbd_dev_snap_index(struct rbd_device *rbd_dev, u64 snap_id)
{
	struct ceph_snap_context *snapc = rbd_dev->header.snapc;
	u64 *found;

	found = bsearch(&snap_id, &snapc->snaps, snapc->num_snaps,
				sizeof (snap_id), snapid_compare_reverse);

	return found ? (u32)(found - &snapc->snaps[0]) : BAD_SNAP_INDEX;
}

static const char *rbd_dev_v1_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id)
{
	u32 which;
	const char *snap_name;

	which = rbd_dev_snap_index(rbd_dev, snap_id);
	if (which == BAD_SNAP_INDEX)
		return ERR_PTR(-ENOENT);

	snap_name = _rbd_dev_v1_snap_name(rbd_dev, which);
	return snap_name ? snap_name : ERR_PTR(-ENOMEM);
}

static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id)
{
	if (snap_id == CEPH_NOSNAP)
		return RBD_SNAP_HEAD_NAME;

	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (rbd_dev->image_format == 1)
		return rbd_dev_v1_snap_name(rbd_dev, snap_id);

	return rbd_dev_v2_snap_name(rbd_dev, snap_id);
}

static int rbd_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
			 u64 *snap_size)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (snap_id == CEPH_NOSNAP) {
		*snap_size = rbd_dev->header.image_size;
	} else if (rbd_dev->image_format == 1) {
		u32 which;

		which = rbd_dev_snap_index(rbd_dev, snap_id);
		if (which == BAD_SNAP_INDEX)
			return -ENOENT;

		*snap_size = rbd_dev->header.snap_sizes[which];
	} else {
		u64 size = 0;
		int ret;

		ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, NULL, &size);
		if (ret)
			return ret;

		*snap_size = size;
	}
	return 0;
}

static int rbd_dev_mapping_set(struct rbd_device *rbd_dev)
{
	u64 snap_id = rbd_dev->spec->snap_id;
	u64 size = 0;
	int ret;

	ret = rbd_snap_size(rbd_dev, snap_id, &size);
	if (ret)
		return ret;

	rbd_dev->mapping.size = size;
	return 0;
}

static void rbd_dev_mapping_clear(struct rbd_device *rbd_dev)
{
	rbd_dev->mapping.size = 0;
}

Ilya Dryomov5359a172018-01-20 10:30:10 +01001221static void zero_bios(struct ceph_bio_iter *bio_pos, u32 off, u32 bytes)
Alex Elderb9434c52013-04-19 15:34:50 -05001222{
Ilya Dryomov5359a172018-01-20 10:30:10 +01001223 struct ceph_bio_iter it = *bio_pos;
Alex Elderb9434c52013-04-19 15:34:50 -05001224
Ilya Dryomov5359a172018-01-20 10:30:10 +01001225 ceph_bio_iter_advance(&it, off);
1226 ceph_bio_iter_advance_step(&it, bytes, ({
Christoph Hellwig732022b2021-07-27 07:56:37 +02001227 memzero_bvec(&bv);
Ilya Dryomov5359a172018-01-20 10:30:10 +01001228 }));
Alex Elderb9434c52013-04-19 15:34:50 -05001229}
1230
Ilya Dryomov7e07efb2018-01-20 10:30:11 +01001231static void zero_bvecs(struct ceph_bvec_iter *bvec_pos, u32 off, u32 bytes)
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001232{
Ilya Dryomov7e07efb2018-01-20 10:30:11 +01001233 struct ceph_bvec_iter it = *bvec_pos;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001234
Ilya Dryomov7e07efb2018-01-20 10:30:11 +01001235 ceph_bvec_iter_advance(&it, off);
1236 ceph_bvec_iter_advance_step(&it, bytes, ({
Christoph Hellwig732022b2021-07-27 07:56:37 +02001237 memzero_bvec(&bv);
Ilya Dryomov7e07efb2018-01-20 10:30:11 +01001238 }));
Alex Elderf7760da2012-10-20 22:17:27 -05001239}
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001240
Alex Elderf7760da2012-10-20 22:17:27 -05001241/*
Ilya Dryomov3da691b2018-01-29 14:04:08 +01001242 * Zero a range in @obj_req data buffer defined by a bio (list) or
Ilya Dryomovafb97882018-02-06 19:26:35 +01001243 * (private) bio_vec array.
Alex Elderf7760da2012-10-20 22:17:27 -05001244 *
Ilya Dryomov3da691b2018-01-29 14:04:08 +01001245 * @off is relative to the start of the data buffer.
Alex Elderf7760da2012-10-20 22:17:27 -05001246 */
Ilya Dryomov3da691b2018-01-29 14:04:08 +01001247static void rbd_obj_zero_range(struct rbd_obj_request *obj_req, u32 off,
1248 u32 bytes)
Alex Elderf7760da2012-10-20 22:17:27 -05001249{
Ilya Dryomov54ab3b22019-05-11 16:21:49 +02001250 dout("%s %p data buf %u~%u\n", __func__, obj_req, off, bytes);
1251
Ilya Dryomovecc633c2018-02-01 11:50:47 +01001252 switch (obj_req->img_request->data_type) {
Ilya Dryomov3da691b2018-01-29 14:04:08 +01001253 case OBJ_REQUEST_BIO:
1254 zero_bios(&obj_req->bio_pos, off, bytes);
1255 break;
1256 case OBJ_REQUEST_BVECS:
Ilya Dryomovafb97882018-02-06 19:26:35 +01001257 case OBJ_REQUEST_OWN_BVECS:
Ilya Dryomov3da691b2018-01-29 14:04:08 +01001258 zero_bvecs(&obj_req->bvec_pos, off, bytes);
1259 break;
1260 default:
Arnd Bergmann16809372019-03-22 17:53:56 +01001261 BUG();
Yehuda Sadeh602adf42010-08-12 16:11:25 -07001262 }
Alex Elderbf0d5f502012-11-22 00:00:08 -06001263}
1264
1265static void rbd_obj_request_destroy(struct kref *kref);
1266static void rbd_obj_request_put(struct rbd_obj_request *obj_request)
1267{
1268 rbd_assert(obj_request != NULL);
Alex Elder37206ee2013-02-20 17:32:08 -06001269 dout("%s: obj %p (was %d)\n", __func__, obj_request,
Peter Zijlstra2c935bc2016-11-14 17:29:48 +01001270 kref_read(&obj_request->kref));
Alex Elderbf0d5f502012-11-22 00:00:08 -06001271 kref_put(&obj_request->kref, rbd_obj_request_destroy);
1272}
1273
Alex Elderbf0d5f502012-11-22 00:00:08 -06001274static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request,
1275 struct rbd_obj_request *obj_request)
1276{
Alex Elder25dcf952013-01-25 17:08:55 -06001277 rbd_assert(obj_request->img_request == NULL);
1278
Alex Elderb155e862013-04-15 14:50:37 -05001279 /* Image request now owns object's original reference */
Alex Elderbf0d5f502012-11-22 00:00:08 -06001280 obj_request->img_request = img_request;
Ilya Dryomov15961b42018-02-01 11:50:47 +01001281 dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001282}
1283
1284static inline void rbd_img_obj_request_del(struct rbd_img_request *img_request,
1285 struct rbd_obj_request *obj_request)
1286{
Ilya Dryomov15961b42018-02-01 11:50:47 +01001287 dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
Ilya Dryomov43df3d32018-02-02 15:23:22 +01001288 list_del(&obj_request->ex.oe_item);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001289 rbd_assert(obj_request->img_request == img_request);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001290 rbd_obj_request_put(obj_request);
1291}
1292
Ilya Dryomova086a1b2019-06-12 18:33:31 +02001293static void rbd_osd_submit(struct ceph_osd_request *osd_req)
Alex Elderbf0d5f502012-11-22 00:00:08 -06001294{
Ilya Dryomova086a1b2019-06-12 18:33:31 +02001295 struct rbd_obj_request *obj_req = osd_req->r_priv;
Ilya Dryomov980917f2016-09-12 18:59:42 +02001296
Ilya Dryomova086a1b2019-06-12 18:33:31 +02001297 dout("%s osd_req %p for obj_req %p objno %llu %llu~%llu\n",
1298 __func__, osd_req, obj_req, obj_req->ex.oe_objno,
1299 obj_req->ex.oe_off, obj_req->ex.oe_len);
Jeff Laytona8af0d62022-06-30 16:21:50 -04001300 ceph_osdc_start_request(osd_req->r_osdc, osd_req);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001301}
1302
Alex Elder0c425242013-02-08 09:55:49 -06001303/*
1304 * The default/initial value for all image request flags is 0. Each
1305 * is conditionally set to 1 at image request initialization time
1306 * and currently never changes thereafter.
1307 */
Alex Elderd0b2e942013-01-24 16:13:36 -06001308static void img_request_layered_set(struct rbd_img_request *img_request)
1309{
1310 set_bit(IMG_REQ_LAYERED, &img_request->flags);
Alex Elderd0b2e942013-01-24 16:13:36 -06001311}
1312
1313static bool img_request_layered_test(struct rbd_img_request *img_request)
1314{
Alex Elderd0b2e942013-01-24 16:13:36 -06001315 return test_bit(IMG_REQ_LAYERED, &img_request->flags) != 0;
1316}
1317
Ilya Dryomov3da691b2018-01-29 14:04:08 +01001318static bool rbd_obj_is_entire(struct rbd_obj_request *obj_req)
Josh Durgin3b434a2a2014-04-04 17:32:15 -07001319{
Ilya Dryomov3da691b2018-01-29 14:04:08 +01001320 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
1321
Ilya Dryomov43df3d32018-02-02 15:23:22 +01001322 return !obj_req->ex.oe_off &&
1323 obj_req->ex.oe_len == rbd_dev->layout.object_size;
Josh Durgin3b434a2a2014-04-04 17:32:15 -07001324}
1325
Ilya Dryomov3da691b2018-01-29 14:04:08 +01001326static bool rbd_obj_is_tail(struct rbd_obj_request *obj_req)
Alex Elder6e2a4502013-03-27 09:16:30 -05001327{
Ilya Dryomov3da691b2018-01-29 14:04:08 +01001328 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
Alex Elderb9434c52013-04-19 15:34:50 -05001329
Ilya Dryomov43df3d32018-02-02 15:23:22 +01001330 return obj_req->ex.oe_off + obj_req->ex.oe_len ==
Ilya Dryomov3da691b2018-01-29 14:04:08 +01001331 rbd_dev->layout.object_size;
1332}
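/*
 * Illustration with a hypothetical 4M object size: a request covering
 * 0~4M is "entire" (and also a tail), a request covering 3M~1M is a
 * tail but not entire, and a request covering 0~1M is neither.
 */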
1333
Ilya Dryomov13488d52019-02-25 12:37:50 +01001334/*
1335 * Must be called after rbd_obj_calc_img_extents().
1336 */
1337static bool rbd_obj_copyup_enabled(struct rbd_obj_request *obj_req)
1338{
1339 if (!obj_req->num_img_extents ||
Ilya Dryomov9b17eb22019-02-28 15:51:39 +01001340 (rbd_obj_is_entire(obj_req) &&
1341 !obj_req->img_request->snapc->num_snaps))
Ilya Dryomov13488d52019-02-25 12:37:50 +01001342 return false;
1343
1344 return true;
1345}
1346
Ilya Dryomov86bd7992018-02-06 19:26:33 +01001347static u64 rbd_obj_img_extents_bytes(struct rbd_obj_request *obj_req)
1348{
1349 return ceph_file_extents_bytes(obj_req->img_extents,
1350 obj_req->num_img_extents);
1351}
1352
Ilya Dryomov3da691b2018-01-29 14:04:08 +01001353static bool rbd_img_is_write(struct rbd_img_request *img_req)
1354{
Ilya Dryomov9bb02482018-01-30 17:52:10 +01001355 switch (img_req->op_type) {
Ilya Dryomov3da691b2018-01-29 14:04:08 +01001356 case OBJ_OP_READ:
1357 return false;
1358 case OBJ_OP_WRITE:
1359 case OBJ_OP_DISCARD:
Ilya Dryomov6484cbe2019-01-29 12:46:25 +01001360 case OBJ_OP_ZEROOUT:
Ilya Dryomov3da691b2018-01-29 14:04:08 +01001361 return true;
1362 default:
Arnd Bergmannc6244b32018-04-04 14:53:39 +02001363 BUG();
Alex Elder6e2a4502013-03-27 09:16:30 -05001364 }
Alex Elder6e2a4502013-03-27 09:16:30 -05001365}
1366
Ilya Dryomov85e084f2016-04-28 16:07:24 +02001367static void rbd_osd_req_callback(struct ceph_osd_request *osd_req)
Alex Elderbf0d5f502012-11-22 00:00:08 -06001368{
Ilya Dryomov3da691b2018-01-29 14:04:08 +01001369 struct rbd_obj_request *obj_req = osd_req->r_priv;
Ilya Dryomov54ab3b22019-05-11 16:21:49 +02001370 int result;
Alex Elderbf0d5f502012-11-22 00:00:08 -06001371
Ilya Dryomov3da691b2018-01-29 14:04:08 +01001372 dout("%s osd_req %p result %d for obj_req %p\n", __func__, osd_req,
1373 osd_req->r_result, obj_req);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001374
Ilya Dryomov54ab3b22019-05-11 16:21:49 +02001375 /*
1376 * Writes aren't allowed to return a data payload. In some
1377 * guarded write cases (e.g. stat + zero on an empty object)
1378 * a stat response makes it through, but we don't care.
1379 */
1380 if (osd_req->r_result > 0 && rbd_img_is_write(obj_req->img_request))
1381 result = 0;
Ilya Dryomov3da691b2018-01-29 14:04:08 +01001382 else
Ilya Dryomov54ab3b22019-05-11 16:21:49 +02001383 result = osd_req->r_result;
Alex Elderbf0d5f502012-11-22 00:00:08 -06001384
Ilya Dryomov54ab3b22019-05-11 16:21:49 +02001385 rbd_obj_handle_request(obj_req, result);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001386}
1387
Ilya Dryomovbcbab1d2019-05-27 11:41:36 +02001388static void rbd_osd_format_read(struct ceph_osd_request *osd_req)
Alex Elder430c28c2013-04-03 21:32:51 -05001389{
Ilya Dryomovbcbab1d2019-05-27 11:41:36 +02001390 struct rbd_obj_request *obj_request = osd_req->r_priv;
Ilya Dryomov22d2cfd2020-06-04 11:12:34 +02001391 struct rbd_device *rbd_dev = obj_request->img_request->rbd_dev;
1392 struct ceph_options *opt = rbd_dev->rbd_client->client->options;
Alex Elder430c28c2013-04-03 21:32:51 -05001393
Ilya Dryomov22d2cfd2020-06-04 11:12:34 +02001394 osd_req->r_flags = CEPH_OSD_FLAG_READ | opt->read_from_replica;
Ilya Dryomov7c848832016-09-15 17:56:39 +02001395 osd_req->r_snapid = obj_request->img_request->snap_id;
Alex Elder9d4df012013-04-19 15:34:50 -05001396}
1397
Ilya Dryomovbcbab1d2019-05-27 11:41:36 +02001398static void rbd_osd_format_write(struct ceph_osd_request *osd_req)
Alex Elder9d4df012013-04-19 15:34:50 -05001399{
Ilya Dryomovbcbab1d2019-05-27 11:41:36 +02001400 struct rbd_obj_request *obj_request = osd_req->r_priv;
Alex Elder9d4df012013-04-19 15:34:50 -05001401
Ilya Dryomova162b302018-01-30 17:52:10 +01001402 osd_req->r_flags = CEPH_OSD_FLAG_WRITE;
Arnd Bergmannfac02dd2018-07-13 22:18:37 +02001403 ktime_get_real_ts64(&osd_req->r_mtime);
Ilya Dryomov43df3d32018-02-02 15:23:22 +01001404 osd_req->r_data_offset = obj_request->ex.oe_off;
Alex Elder430c28c2013-04-03 21:32:51 -05001405}
1406
Ilya Dryomovbc812072017-01-25 18:16:23 +01001407static struct ceph_osd_request *
Ilya Dryomovbcbab1d2019-05-27 11:41:36 +02001408__rbd_obj_add_osd_request(struct rbd_obj_request *obj_req,
1409 struct ceph_snap_context *snapc, int num_ops)
Ilya Dryomovbc812072017-01-25 18:16:23 +01001410{
Ilya Dryomove28eded2019-02-25 11:42:26 +01001411 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
Ilya Dryomovbc812072017-01-25 18:16:23 +01001412 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
1413 struct ceph_osd_request *req;
Ilya Dryomova90bb0c2017-01-25 18:16:23 +01001414 const char *name_format = rbd_dev->image_format == 1 ?
1415 RBD_V1_DATA_FORMAT : RBD_V2_DATA_FORMAT;
Ilya Dryomovbcbab1d2019-05-27 11:41:36 +02001416 int ret;
Ilya Dryomovbc812072017-01-25 18:16:23 +01001417
Ilya Dryomove28eded2019-02-25 11:42:26 +01001418 req = ceph_osdc_alloc_request(osdc, snapc, num_ops, false, GFP_NOIO);
Ilya Dryomovbc812072017-01-25 18:16:23 +01001419 if (!req)
Ilya Dryomovbcbab1d2019-05-27 11:41:36 +02001420 return ERR_PTR(-ENOMEM);
Ilya Dryomovbc812072017-01-25 18:16:23 +01001421
Ilya Dryomovbcbab1d2019-05-27 11:41:36 +02001422 list_add_tail(&req->r_private_item, &obj_req->osd_reqs);
Ilya Dryomovbc812072017-01-25 18:16:23 +01001423 req->r_callback = rbd_osd_req_callback;
Ilya Dryomova162b302018-01-30 17:52:10 +01001424 req->r_priv = obj_req;
Ilya Dryomovbc812072017-01-25 18:16:23 +01001425
Ilya Dryomovb26c0472018-07-03 15:28:43 +02001426 /*
1427 * Data objects may be stored in a separate pool, but always in
1428 * the same namespace in that pool as the header in its pool.
1429 */
1430 ceph_oloc_copy(&req->r_base_oloc, &rbd_dev->header_oloc);
Ilya Dryomovbc812072017-01-25 18:16:23 +01001431 req->r_base_oloc.pool = rbd_dev->layout.pool_id;
Ilya Dryomovb26c0472018-07-03 15:28:43 +02001432
Ilya Dryomovbcbab1d2019-05-27 11:41:36 +02001433 ret = ceph_oid_aprintf(&req->r_base_oid, GFP_NOIO, name_format,
1434 rbd_dev->header.object_prefix,
1435 obj_req->ex.oe_objno);
1436 if (ret)
1437 return ERR_PTR(ret);
Ilya Dryomovbc812072017-01-25 18:16:23 +01001438
Ilya Dryomovbc812072017-01-25 18:16:23 +01001439 return req;
Ilya Dryomovbc812072017-01-25 18:16:23 +01001440}
1441
Ilya Dryomove28eded2019-02-25 11:42:26 +01001442static struct ceph_osd_request *
Ilya Dryomovbcbab1d2019-05-27 11:41:36 +02001443rbd_obj_add_osd_request(struct rbd_obj_request *obj_req, int num_ops)
Ilya Dryomove28eded2019-02-25 11:42:26 +01001444{
Ilya Dryomovbcbab1d2019-05-27 11:41:36 +02001445 return __rbd_obj_add_osd_request(obj_req, obj_req->img_request->snapc,
1446 num_ops);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001447}
1448
Ilya Dryomovecc633c2018-02-01 11:50:47 +01001449static struct rbd_obj_request *rbd_obj_request_create(void)
Alex Elderbf0d5f502012-11-22 00:00:08 -06001450{
1451 struct rbd_obj_request *obj_request;
Alex Elderbf0d5f502012-11-22 00:00:08 -06001452
Ilya Dryomov5a60e872015-06-24 17:24:33 +03001453 obj_request = kmem_cache_zalloc(rbd_obj_request_cache, GFP_NOIO);
Ilya Dryomov6c696d82017-01-25 18:16:23 +01001454 if (!obj_request)
Alex Elderf907ad52013-05-01 12:43:03 -05001455 return NULL;
Alex Elderf907ad52013-05-01 12:43:03 -05001456
Ilya Dryomov43df3d32018-02-02 15:23:22 +01001457 ceph_object_extent_init(&obj_request->ex);
Ilya Dryomovbcbab1d2019-05-27 11:41:36 +02001458 INIT_LIST_HEAD(&obj_request->osd_reqs);
Ilya Dryomov85b5e6d2019-05-14 21:06:07 +02001459 mutex_init(&obj_request->state_mutex);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001460 kref_init(&obj_request->kref);
1461
Ilya Dryomov67e2b652017-01-25 18:16:22 +01001462 dout("%s %p\n", __func__, obj_request);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001463 return obj_request;
1464}
1465
1466static void rbd_obj_request_destroy(struct kref *kref)
1467{
1468 struct rbd_obj_request *obj_request;
Ilya Dryomovbcbab1d2019-05-27 11:41:36 +02001469 struct ceph_osd_request *osd_req;
Ilya Dryomov7e07efb2018-01-20 10:30:11 +01001470 u32 i;
Alex Elderbf0d5f502012-11-22 00:00:08 -06001471
1472 obj_request = container_of(kref, struct rbd_obj_request, kref);
1473
Alex Elder37206ee2013-02-20 17:32:08 -06001474 dout("%s: obj %p\n", __func__, obj_request);
1475
Ilya Dryomovbcbab1d2019-05-27 11:41:36 +02001476 while (!list_empty(&obj_request->osd_reqs)) {
1477 osd_req = list_first_entry(&obj_request->osd_reqs,
1478 struct ceph_osd_request, r_private_item);
1479 list_del_init(&osd_req->r_private_item);
1480 ceph_osdc_put_request(osd_req);
1481 }
Alex Elderbf0d5f502012-11-22 00:00:08 -06001482
Ilya Dryomovecc633c2018-02-01 11:50:47 +01001483 switch (obj_request->img_request->data_type) {
Alex Elder9969ebc2013-01-18 12:31:10 -06001484 case OBJ_REQUEST_NODATA:
Alex Elderbf0d5f502012-11-22 00:00:08 -06001485 case OBJ_REQUEST_BIO:
Ilya Dryomov7e07efb2018-01-20 10:30:11 +01001486 case OBJ_REQUEST_BVECS:
Ilya Dryomov5359a172018-01-20 10:30:10 +01001487 break; /* Nothing to do */
Ilya Dryomovafb97882018-02-06 19:26:35 +01001488 case OBJ_REQUEST_OWN_BVECS:
1489 kfree(obj_request->bvec_pos.bvecs);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001490 break;
Ilya Dryomov7e07efb2018-01-20 10:30:11 +01001491 default:
Arnd Bergmann16809372019-03-22 17:53:56 +01001492 BUG();
Alex Elderbf0d5f502012-11-22 00:00:08 -06001493 }
1494
Ilya Dryomov86bd7992018-02-06 19:26:33 +01001495 kfree(obj_request->img_extents);
Ilya Dryomov7e07efb2018-01-20 10:30:11 +01001496 if (obj_request->copyup_bvecs) {
1497 for (i = 0; i < obj_request->copyup_bvec_count; i++) {
1498 if (obj_request->copyup_bvecs[i].bv_page)
1499 __free_page(obj_request->copyup_bvecs[i].bv_page);
1500 }
1501 kfree(obj_request->copyup_bvecs);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001502 }
1503
Alex Elder868311b2013-05-01 12:43:03 -05001504 kmem_cache_free(rbd_obj_request_cache, obj_request);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001505}
1506
Alex Elderfb65d2282013-05-08 22:50:04 -05001507/* It's OK to call this for a device with no parent */
1508
1509static void rbd_spec_put(struct rbd_spec *spec);
1510static void rbd_dev_unparent(struct rbd_device *rbd_dev)
1511{
1512 rbd_dev_remove_parent(rbd_dev);
1513 rbd_spec_put(rbd_dev->parent_spec);
1514 rbd_dev->parent_spec = NULL;
1515 rbd_dev->parent_overlap = 0;
1516}
1517
Alex Elderbf0d5f502012-11-22 00:00:08 -06001518/*
Alex Eldera2acd002013-05-08 22:50:04 -05001519 * Parent image reference counting is used to determine when an
1520 * image's parent fields can be safely torn down--after there are no
1521 * more in-flight requests to the parent image. When the last
1522 * reference is dropped, cleaning them up is safe.
1523 */
1524static void rbd_dev_parent_put(struct rbd_device *rbd_dev)
1525{
1526 int counter;
1527
1528 if (!rbd_dev->parent_spec)
1529 return;
1530
1531 counter = atomic_dec_return_safe(&rbd_dev->parent_ref);
1532 if (counter > 0)
1533 return;
1534
1535 /* Last reference; clean up parent data structures */
1536
1537 if (!counter)
1538 rbd_dev_unparent(rbd_dev);
1539 else
Ilya Dryomov9584d502014-07-11 12:11:20 +04001540 rbd_warn(rbd_dev, "parent reference underflow");
Alex Eldera2acd002013-05-08 22:50:04 -05001541}
1542
1543/*
1544 * If an image has a non-zero parent overlap, get a reference to its
1545 * parent.
1546 *
1547 * Returns true if the rbd device has a parent with a non-zero
1548 * overlap and a reference for it was successfully taken, or
1549 * false otherwise.
1550 */
1551static bool rbd_dev_parent_get(struct rbd_device *rbd_dev)
1552{
Ilya Dryomovae43e9d2015-01-19 18:13:43 +03001553 int counter = 0;
Alex Eldera2acd002013-05-08 22:50:04 -05001554
1555 if (!rbd_dev->parent_spec)
1556 return false;
1557
Ilya Dryomovae43e9d2015-01-19 18:13:43 +03001558 if (rbd_dev->parent_overlap)
1559 counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
Alex Eldera2acd002013-05-08 22:50:04 -05001560
1561 if (counter < 0)
Ilya Dryomov9584d502014-07-11 12:11:20 +04001562 rbd_warn(rbd_dev, "parent reference overflow");
Alex Eldera2acd002013-05-08 22:50:04 -05001563
Ilya Dryomovae43e9d2015-01-19 18:13:43 +03001564 return counter > 0;
Alex Eldera2acd002013-05-08 22:50:04 -05001565}
1566
Ilya Dryomov59e542c2020-02-12 15:23:58 +01001567static void rbd_img_request_init(struct rbd_img_request *img_request,
1568 struct rbd_device *rbd_dev,
1569 enum obj_operation_type op_type)
Alex Elderbf0d5f502012-11-22 00:00:08 -06001570{
Ilya Dryomov59e542c2020-02-12 15:23:58 +01001571 memset(img_request, 0, sizeof(*img_request));
Alex Elderbf0d5f502012-11-22 00:00:08 -06001572
Alex Elderbf0d5f502012-11-22 00:00:08 -06001573 img_request->rbd_dev = rbd_dev;
Ilya Dryomov9bb02482018-01-30 17:52:10 +01001574 img_request->op_type = op_type;
Ilya Dryomova0c58952018-01-22 16:03:06 +01001575
Ilya Dryomove1fddc82019-05-30 16:07:48 +02001576 INIT_LIST_HEAD(&img_request->lock_item);
Ilya Dryomov43df3d32018-02-02 15:23:22 +01001577 INIT_LIST_HEAD(&img_request->object_extents);
Ilya Dryomov0192ce22019-05-16 15:06:56 +02001578 mutex_init(&img_request->state_mutex);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001579}
1580
Ilya Dryomova52cc682020-02-12 15:08:39 +01001581static void rbd_img_capture_header(struct rbd_img_request *img_req)
1582{
1583 struct rbd_device *rbd_dev = img_req->rbd_dev;
1584
1585 lockdep_assert_held(&rbd_dev->header_rwsem);
1586
1587 if (rbd_img_is_write(img_req))
1588 img_req->snapc = ceph_get_snap_context(rbd_dev->header.snapc);
1589 else
1590 img_req->snap_id = rbd_dev->spec->snap_id;
1591
1592 if (rbd_dev_parent_get(rbd_dev))
1593 img_request_layered_set(img_req);
1594}
1595
Hannes Reinecke679a97d2020-01-31 11:37:36 +01001596static void rbd_img_request_destroy(struct rbd_img_request *img_request)
Alex Elderbf0d5f502012-11-22 00:00:08 -06001597{
Alex Elderbf0d5f502012-11-22 00:00:08 -06001598 struct rbd_obj_request *obj_request;
1599 struct rbd_obj_request *next_obj_request;
1600
Alex Elder37206ee2013-02-20 17:32:08 -06001601 dout("%s: img %p\n", __func__, img_request);
1602
Ilya Dryomove1fddc82019-05-30 16:07:48 +02001603 WARN_ON(!list_empty(&img_request->lock_item));
Alex Elderbf0d5f502012-11-22 00:00:08 -06001604 for_each_obj_request_safe(img_request, obj_request, next_obj_request)
1605 rbd_img_obj_request_del(img_request, obj_request);
1606
Ilya Dryomov78b42a82020-02-12 14:34:03 +01001607 if (img_request_layered_test(img_request))
Alex Eldera2acd002013-05-08 22:50:04 -05001608 rbd_dev_parent_put(img_request->rbd_dev);
Alex Eldera2acd002013-05-08 22:50:04 -05001609
Ilya Dryomov9bb02482018-01-30 17:52:10 +01001610 if (rbd_img_is_write(img_request))
Alex Elder812164f82013-04-30 00:44:32 -05001611 ceph_put_snap_context(img_request->snapc);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001612
Ilya Dryomov59e542c2020-02-12 15:23:58 +01001613 if (test_bit(IMG_REQ_CHILD, &img_request->flags))
1614 kmem_cache_free(rbd_img_request_cache, img_request);
Alex Elderbf0d5f502012-11-22 00:00:08 -06001615}
1616
Ilya Dryomov22e8bd52019-06-05 19:25:11 +02001617#define BITS_PER_OBJ 2
1618#define OBJS_PER_BYTE (BITS_PER_BYTE / BITS_PER_OBJ)
1619#define OBJ_MASK ((1 << BITS_PER_OBJ) - 1)
1620
1621static void __rbd_object_map_index(struct rbd_device *rbd_dev, u64 objno,
1622 u64 *index, u8 *shift)
1623{
1624 u32 off;
1625
1626 rbd_assert(objno < rbd_dev->object_map_size);
1627 *index = div_u64_rem(objno, OBJS_PER_BYTE, &off);
1628 *shift = (OBJS_PER_BYTE - off - 1) * BITS_PER_OBJ;
1629}
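/*
 * Illustration: with BITS_PER_OBJ == 2, each byte of the object map
 * holds the states of four consecutive objects, most significant bits
 * first.  For example, objno 0 maps to index 0, shift 6 (the top two
 * bits of the first byte) and objno 5 maps to index 1, shift 4 (the
 * second pair of bits in the second byte).
 */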
1630
1631static u8 __rbd_object_map_get(struct rbd_device *rbd_dev, u64 objno)
1632{
1633 u64 index;
1634 u8 shift;
1635
1636 lockdep_assert_held(&rbd_dev->object_map_lock);
1637 __rbd_object_map_index(rbd_dev, objno, &index, &shift);
1638 return (rbd_dev->object_map[index] >> shift) & OBJ_MASK;
1639}
1640
1641static void __rbd_object_map_set(struct rbd_device *rbd_dev, u64 objno, u8 val)
1642{
1643 u64 index;
1644 u8 shift;
1645 u8 *p;
1646
1647 lockdep_assert_held(&rbd_dev->object_map_lock);
1648 rbd_assert(!(val & ~OBJ_MASK));
1649
1650 __rbd_object_map_index(rbd_dev, objno, &index, &shift);
1651 p = &rbd_dev->object_map[index];
1652 *p = (*p & ~(OBJ_MASK << shift)) | (val << shift);
1653}
1654
1655static u8 rbd_object_map_get(struct rbd_device *rbd_dev, u64 objno)
1656{
1657 u8 state;
1658
1659 spin_lock(&rbd_dev->object_map_lock);
1660 state = __rbd_object_map_get(rbd_dev, objno);
1661 spin_unlock(&rbd_dev->object_map_lock);
1662 return state;
1663}
1664
1665static bool use_object_map(struct rbd_device *rbd_dev)
1666{
Ilya Dryomov3fe69922019-11-12 19:41:48 +01001667 /*
1668 * An image mapped read-only can't use the object map -- it isn't
1669 * loaded because the header lock isn't acquired. Someone else can
1670 * write to the image and update the object map behind our back.
1671 *
1672 * A snapshot can't be written to, so using the object map is always
1673 * safe.
1674 */
1675 if (!rbd_is_snap(rbd_dev) && rbd_is_ro(rbd_dev))
1676 return false;
1677
Ilya Dryomov22e8bd52019-06-05 19:25:11 +02001678 return ((rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP) &&
1679 !(rbd_dev->object_map_flags & RBD_FLAG_OBJECT_MAP_INVALID));
1680}
1681
1682static bool rbd_object_map_may_exist(struct rbd_device *rbd_dev, u64 objno)
1683{
1684 u8 state;
1685
1686 /* fall back to default logic if object map is disabled or invalid */
1687 if (!use_object_map(rbd_dev))
1688 return true;
1689
1690 state = rbd_object_map_get(rbd_dev, objno);
1691 return state != OBJECT_NONEXISTENT;
1692}
1693
1694static void rbd_object_map_name(struct rbd_device *rbd_dev, u64 snap_id,
1695 struct ceph_object_id *oid)
1696{
1697 if (snap_id == CEPH_NOSNAP)
1698 ceph_oid_printf(oid, "%s%s", RBD_OBJECT_MAP_PREFIX,
1699 rbd_dev->spec->image_id);
1700 else
1701 ceph_oid_printf(oid, "%s%s.%016llx", RBD_OBJECT_MAP_PREFIX,
1702 rbd_dev->spec->image_id, snap_id);
1703}
1704
1705static int rbd_object_map_lock(struct rbd_device *rbd_dev)
1706{
1707 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
1708 CEPH_DEFINE_OID_ONSTACK(oid);
1709 u8 lock_type;
1710 char *lock_tag;
1711 struct ceph_locker *lockers;
1712 u32 num_lockers;
1713 bool broke_lock = false;
1714 int ret;
1715
1716 rbd_object_map_name(rbd_dev, CEPH_NOSNAP, &oid);
1717
1718again:
1719 ret = ceph_cls_lock(osdc, &oid, &rbd_dev->header_oloc, RBD_LOCK_NAME,
1720 CEPH_CLS_LOCK_EXCLUSIVE, "", "", "", 0);
1721 if (ret != -EBUSY || broke_lock) {
1722 if (ret == -EEXIST)
1723 ret = 0; /* already locked by myself */
1724 if (ret)
1725 rbd_warn(rbd_dev, "failed to lock object map: %d", ret);
1726 return ret;
1727 }
1728
1729 ret = ceph_cls_lock_info(osdc, &oid, &rbd_dev->header_oloc,
1730 RBD_LOCK_NAME, &lock_type, &lock_tag,
1731 &lockers, &num_lockers);
1732 if (ret) {
1733 if (ret == -ENOENT)
1734 goto again;
1735
1736 rbd_warn(rbd_dev, "failed to get object map lockers: %d", ret);
1737 return ret;
1738 }
1739
1740 kfree(lock_tag);
1741 if (num_lockers == 0)
1742 goto again;
1743
1744 rbd_warn(rbd_dev, "breaking object map lock owned by %s%llu",
1745 ENTITY_NAME(lockers[0].id.name));
1746
1747 ret = ceph_cls_break_lock(osdc, &oid, &rbd_dev->header_oloc,
1748 RBD_LOCK_NAME, lockers[0].id.cookie,
1749 &lockers[0].id.name);
1750 ceph_free_lockers(lockers, num_lockers);
1751 if (ret) {
1752 if (ret == -ENOENT)
1753 goto again;
1754
1755 rbd_warn(rbd_dev, "failed to break object map lock: %d", ret);
1756 return ret;
1757 }
1758
1759 broke_lock = true;
1760 goto again;
1761}
1762
1763static void rbd_object_map_unlock(struct rbd_device *rbd_dev)
1764{
1765 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
1766 CEPH_DEFINE_OID_ONSTACK(oid);
1767 int ret;
1768
1769 rbd_object_map_name(rbd_dev, CEPH_NOSNAP, &oid);
1770
1771 ret = ceph_cls_unlock(osdc, &oid, &rbd_dev->header_oloc, RBD_LOCK_NAME,
1772 "");
1773 if (ret && ret != -ENOENT)
1774 rbd_warn(rbd_dev, "failed to unlock object map: %d", ret);
1775}
1776
1777static int decode_object_map_header(void **p, void *end, u64 *object_map_size)
1778{
1779 u8 struct_v;
1780 u32 struct_len;
1781 u32 header_len;
1782 void *header_end;
1783 int ret;
1784
1785 ceph_decode_32_safe(p, end, header_len, e_inval);
1786 header_end = *p + header_len;
1787
1788 ret = ceph_start_decoding(p, end, 1, "BitVector header", &struct_v,
1789 &struct_len);
1790 if (ret)
1791 return ret;
1792
1793 ceph_decode_64_safe(p, end, *object_map_size, e_inval);
1794
1795 *p = header_end;
1796 return 0;
1797
1798e_inval:
1799 return -EINVAL;
1800}
1801
1802static int __rbd_object_map_load(struct rbd_device *rbd_dev)
1803{
1804 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
1805 CEPH_DEFINE_OID_ONSTACK(oid);
1806 struct page **pages;
1807 void *p, *end;
1808 size_t reply_len;
1809 u64 num_objects;
1810 u64 object_map_bytes;
1811 u64 object_map_size;
1812 int num_pages;
1813 int ret;
1814
1815 rbd_assert(!rbd_dev->object_map && !rbd_dev->object_map_size);
1816
1817 num_objects = ceph_get_num_objects(&rbd_dev->layout,
1818 rbd_dev->mapping.size);
1819 object_map_bytes = DIV_ROUND_UP_ULL(num_objects * BITS_PER_OBJ,
1820 BITS_PER_BYTE);
1821 num_pages = calc_pages_for(0, object_map_bytes) + 1;
1822 pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
1823 if (IS_ERR(pages))
1824 return PTR_ERR(pages);
1825
1826 reply_len = num_pages * PAGE_SIZE;
1827 rbd_object_map_name(rbd_dev, rbd_dev->spec->snap_id, &oid);
1828 ret = ceph_osdc_call(osdc, &oid, &rbd_dev->header_oloc,
1829 "rbd", "object_map_load", CEPH_OSD_FLAG_READ,
1830 NULL, 0, pages, &reply_len);
1831 if (ret)
1832 goto out;
1833
1834 p = page_address(pages[0]);
1835 end = p + min(reply_len, (size_t)PAGE_SIZE);
1836 ret = decode_object_map_header(&p, end, &object_map_size);
1837 if (ret)
1838 goto out;
1839
1840 if (object_map_size != num_objects) {
1841 rbd_warn(rbd_dev, "object map size mismatch: %llu vs %llu",
1842 object_map_size, num_objects);
1843 ret = -EINVAL;
1844 goto out;
1845 }
1846
1847 if (offset_in_page(p) + object_map_bytes > reply_len) {
1848 ret = -EINVAL;
1849 goto out;
1850 }
1851
1852 rbd_dev->object_map = kvmalloc(object_map_bytes, GFP_KERNEL);
1853 if (!rbd_dev->object_map) {
1854 ret = -ENOMEM;
1855 goto out;
1856 }
1857
1858 rbd_dev->object_map_size = object_map_size;
1859 ceph_copy_from_page_vector(pages, rbd_dev->object_map,
1860 offset_in_page(p), object_map_bytes);
1861
1862out:
1863 ceph_release_page_vector(pages, num_pages);
1864 return ret;
1865}
1866
1867static void rbd_object_map_free(struct rbd_device *rbd_dev)
1868{
1869 kvfree(rbd_dev->object_map);
1870 rbd_dev->object_map = NULL;
1871 rbd_dev->object_map_size = 0;
1872}
1873
1874static int rbd_object_map_load(struct rbd_device *rbd_dev)
1875{
1876 int ret;
1877
1878 ret = __rbd_object_map_load(rbd_dev);
1879 if (ret)
1880 return ret;
1881
1882 ret = rbd_dev_v2_get_flags(rbd_dev);
1883 if (ret) {
1884 rbd_object_map_free(rbd_dev);
1885 return ret;
1886 }
1887
1888 if (rbd_dev->object_map_flags & RBD_FLAG_OBJECT_MAP_INVALID)
1889 rbd_warn(rbd_dev, "object map is invalid");
1890
1891 return 0;
1892}
1893
1894static int rbd_object_map_open(struct rbd_device *rbd_dev)
1895{
1896 int ret;
1897
1898 ret = rbd_object_map_lock(rbd_dev);
1899 if (ret)
1900 return ret;
1901
1902 ret = rbd_object_map_load(rbd_dev);
1903 if (ret) {
1904 rbd_object_map_unlock(rbd_dev);
1905 return ret;
1906 }
1907
1908 return 0;
1909}
1910
1911static void rbd_object_map_close(struct rbd_device *rbd_dev)
1912{
1913 rbd_object_map_free(rbd_dev);
1914 rbd_object_map_unlock(rbd_dev);
1915}
1916
1917/*
1918 * This function needs snap_id (or more precisely just something to
1919 * distinguish between HEAD and snapshot object maps), new_state and
1920 * current_state that were passed to rbd_object_map_update().
1921 *
1922 * To avoid allocating and stashing a context we piggyback on the OSD
1923 * request. A HEAD update has two ops (assert_locked). For new_state
1924 * and current_state we decode our own object_map_update op, encoded in
1925 * rbd_cls_object_map_update().
1926 */
1927static int rbd_object_map_update_finish(struct rbd_obj_request *obj_req,
1928 struct ceph_osd_request *osd_req)
1929{
1930 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
1931 struct ceph_osd_data *osd_data;
1932 u64 objno;
Kees Cook3f649ab2020-06-03 13:09:38 -07001933 u8 state, new_state, current_state;
Ilya Dryomov22e8bd52019-06-05 19:25:11 +02001934 bool has_current_state;
1935 void *p;
1936
1937 if (osd_req->r_result)
1938 return osd_req->r_result;
1939
1940 /*
1941 * Nothing to do for a snapshot object map.
1942 */
1943 if (osd_req->r_num_ops == 1)
1944 return 0;
1945
1946 /*
1947 * Update in-memory HEAD object map.
1948 */
1949 rbd_assert(osd_req->r_num_ops == 2);
1950 osd_data = osd_req_op_data(osd_req, 1, cls, request_data);
1951 rbd_assert(osd_data->type == CEPH_OSD_DATA_TYPE_PAGES);
1952
1953 p = page_address(osd_data->pages[0]);
1954 objno = ceph_decode_64(&p);
1955 rbd_assert(objno == obj_req->ex.oe_objno);
1956 rbd_assert(ceph_decode_64(&p) == objno + 1);
1957 new_state = ceph_decode_8(&p);
1958 has_current_state = ceph_decode_8(&p);
1959 if (has_current_state)
1960 current_state = ceph_decode_8(&p);
1961
1962 spin_lock(&rbd_dev->object_map_lock);
1963 state = __rbd_object_map_get(rbd_dev, objno);
1964 if (!has_current_state || current_state == state ||
1965 (current_state == OBJECT_EXISTS && state == OBJECT_EXISTS_CLEAN))
1966 __rbd_object_map_set(rbd_dev, objno, new_state);
1967 spin_unlock(&rbd_dev->object_map_lock);
1968
1969 return 0;
1970}
1971
1972static void rbd_object_map_callback(struct ceph_osd_request *osd_req)
1973{
1974 struct rbd_obj_request *obj_req = osd_req->r_priv;
1975 int result;
1976
1977 dout("%s osd_req %p result %d for obj_req %p\n", __func__, osd_req,
1978 osd_req->r_result, obj_req);
1979
1980 result = rbd_object_map_update_finish(obj_req, osd_req);
1981 rbd_obj_handle_request(obj_req, result);
1982}
1983
1984static bool update_needed(struct rbd_device *rbd_dev, u64 objno, u8 new_state)
1985{
1986 u8 state = rbd_object_map_get(rbd_dev, objno);
1987
1988 if (state == new_state ||
1989 (new_state == OBJECT_PENDING && state == OBJECT_NONEXISTENT) ||
1990 (new_state == OBJECT_NONEXISTENT && state != OBJECT_PENDING))
1991 return false;
1992
1993 return true;
1994}
1995
1996static int rbd_cls_object_map_update(struct ceph_osd_request *req,
1997 int which, u64 objno, u8 new_state,
1998 const u8 *current_state)
1999{
2000 struct page **pages;
2001 void *p, *start;
2002 int ret;
2003
2004 ret = osd_req_op_cls_init(req, which, "rbd", "object_map_update");
2005 if (ret)
2006 return ret;
2007
2008 pages = ceph_alloc_page_vector(1, GFP_NOIO);
2009 if (IS_ERR(pages))
2010 return PTR_ERR(pages);
2011
2012 p = start = page_address(pages[0]);
2013 ceph_encode_64(&p, objno);
2014 ceph_encode_64(&p, objno + 1);
2015 ceph_encode_8(&p, new_state);
2016 if (current_state) {
2017 ceph_encode_8(&p, 1);
2018 ceph_encode_8(&p, *current_state);
2019 } else {
2020 ceph_encode_8(&p, 0);
2021 }
2022
2023 osd_req_op_cls_request_data_pages(req, which, pages, p - start, 0,
2024 false, true);
2025 return 0;
2026}
2027
2028/*
2029 * Return:
2030 * 0 - object map update sent
2031 * 1 - object map update isn't needed
2032 * <0 - error
2033 */
2034static int rbd_object_map_update(struct rbd_obj_request *obj_req, u64 snap_id,
2035 u8 new_state, const u8 *current_state)
2036{
2037 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
2038 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2039 struct ceph_osd_request *req;
2040 int num_ops = 1;
2041 int which = 0;
2042 int ret;
2043
2044 if (snap_id == CEPH_NOSNAP) {
2045 if (!update_needed(rbd_dev, obj_req->ex.oe_objno, new_state))
2046 return 1;
2047
2048 num_ops++; /* assert_locked */
2049 }
2050
2051 req = ceph_osdc_alloc_request(osdc, NULL, num_ops, false, GFP_NOIO);
2052 if (!req)
2053 return -ENOMEM;
2054
2055 list_add_tail(&req->r_private_item, &obj_req->osd_reqs);
2056 req->r_callback = rbd_object_map_callback;
2057 req->r_priv = obj_req;
2058
2059 rbd_object_map_name(rbd_dev, snap_id, &req->r_base_oid);
2060 ceph_oloc_copy(&req->r_base_oloc, &rbd_dev->header_oloc);
2061 req->r_flags = CEPH_OSD_FLAG_WRITE;
2062 ktime_get_real_ts64(&req->r_mtime);
2063
2064 if (snap_id == CEPH_NOSNAP) {
2065 /*
2066 * Protect against possible race conditions during lock
2067 * ownership transitions.
2068 */
2069 ret = ceph_cls_assert_locked(req, which++, RBD_LOCK_NAME,
2070 CEPH_CLS_LOCK_EXCLUSIVE, "", "");
2071 if (ret)
2072 return ret;
2073 }
2074
2075 ret = rbd_cls_object_map_update(req, which, obj_req->ex.oe_objno,
2076 new_state, current_state);
2077 if (ret)
2078 return ret;
2079
2080 ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
2081 if (ret)
2082 return ret;
2083
Jeff Laytona8af0d62022-06-30 16:21:50 -04002084 ceph_osdc_start_request(osdc, req);
Ilya Dryomov22e8bd52019-06-05 19:25:11 +02002085 return 0;
2086}
2087
Ilya Dryomov86bd7992018-02-06 19:26:33 +01002088static void prune_extents(struct ceph_file_extent *img_extents,
2089 u32 *num_img_extents, u64 overlap)
Alex Eldere93f3152013-05-08 22:50:04 -05002090{
Ilya Dryomov86bd7992018-02-06 19:26:33 +01002091 u32 cnt = *num_img_extents;
Alex Eldere93f3152013-05-08 22:50:04 -05002092
Ilya Dryomov86bd7992018-02-06 19:26:33 +01002093 /* drop extents completely beyond the overlap */
2094 while (cnt && img_extents[cnt - 1].fe_off >= overlap)
2095 cnt--;
Alex Eldere93f3152013-05-08 22:50:04 -05002096
Ilya Dryomov86bd7992018-02-06 19:26:33 +01002097 if (cnt) {
2098 struct ceph_file_extent *ex = &img_extents[cnt - 1];
Alex Eldere93f3152013-05-08 22:50:04 -05002099
Ilya Dryomov86bd7992018-02-06 19:26:33 +01002100 /* trim final overlapping extent */
2101 if (ex->fe_off + ex->fe_len > overlap)
2102 ex->fe_len = overlap - ex->fe_off;
Alex Elder12178572013-02-08 09:55:49 -06002103 }
2104
Ilya Dryomov86bd7992018-02-06 19:26:33 +01002105 *num_img_extents = cnt;
Alex Elder21692382013-04-05 01:27:12 -05002106}
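/*
 * Illustration with hypothetical values: given a parent overlap of 100
 * and img_extents = { {90, 20}, {120, 10} }, the second extent starts
 * beyond the overlap and is dropped, and the first is trimmed to
 * {90, 10} so that it ends exactly at the overlap boundary.
 */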
2107
Alex Elderf1a47392013-04-19 15:34:50 -05002108/*
Ilya Dryomov86bd7992018-02-06 19:26:33 +01002109 * Determine the byte range(s) covered by either just the object extent
2110 * or the entire object in the parent image.
Josh Durgin3b434a2a2014-04-04 17:32:15 -07002111 */
Ilya Dryomov86bd7992018-02-06 19:26:33 +01002112static int rbd_obj_calc_img_extents(struct rbd_obj_request *obj_req,
2113 bool entire)
Josh Durgin3b434a2a2014-04-04 17:32:15 -07002114{
Ilya Dryomov86bd7992018-02-06 19:26:33 +01002115 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
Alex Elderc5b5ef62013-02-11 12:33:24 -06002116 int ret;
2117
Ilya Dryomov86bd7992018-02-06 19:26:33 +01002118 if (!rbd_dev->parent_overlap)
2119 return 0;
2120
2121 ret = ceph_extent_to_file(&rbd_dev->layout, obj_req->ex.oe_objno,
2122 entire ? 0 : obj_req->ex.oe_off,
2123 entire ? rbd_dev->layout.object_size :
2124 obj_req->ex.oe_len,
2125 &obj_req->img_extents,
2126 &obj_req->num_img_extents);
2127 if (ret)
2128 return ret;
2129
2130 prune_extents(obj_req->img_extents, &obj_req->num_img_extents,
2131 rbd_dev->parent_overlap);
2132 return 0;
2133}
2134
Ilya Dryomovbcbab1d2019-05-27 11:41:36 +02002135static void rbd_osd_setup_data(struct ceph_osd_request *osd_req, int which)
Ilya Dryomov3da691b2018-01-29 14:04:08 +01002136{
Ilya Dryomovbcbab1d2019-05-27 11:41:36 +02002137 struct rbd_obj_request *obj_req = osd_req->r_priv;
2138
Ilya Dryomovecc633c2018-02-01 11:50:47 +01002139 switch (obj_req->img_request->data_type) {
Ilya Dryomov3da691b2018-01-29 14:04:08 +01002140 case OBJ_REQUEST_BIO:
Ilya Dryomovbcbab1d2019-05-27 11:41:36 +02002141 osd_req_op_extent_osd_data_bio(osd_req, which,
Ilya Dryomov3da691b2018-01-29 14:04:08 +01002142 &obj_req->bio_pos,
Ilya Dryomov43df3d32018-02-02 15:23:22 +01002143 obj_req->ex.oe_len);
Ilya Dryomov3da691b2018-01-29 14:04:08 +01002144 break;
2145 case OBJ_REQUEST_BVECS:
Ilya Dryomovafb97882018-02-06 19:26:35 +01002146 case OBJ_REQUEST_OWN_BVECS:
Ilya Dryomov3da691b2018-01-29 14:04:08 +01002147 rbd_assert(obj_req->bvec_pos.iter.bi_size ==
Ilya Dryomov43df3d32018-02-02 15:23:22 +01002148 obj_req->ex.oe_len);
Ilya Dryomovafb97882018-02-06 19:26:35 +01002149 rbd_assert(obj_req->bvec_idx == obj_req->bvec_count);
Ilya Dryomovbcbab1d2019-05-27 11:41:36 +02002150 osd_req_op_extent_osd_data_bvec_pos(osd_req, which,
Ilya Dryomov3da691b2018-01-29 14:04:08 +01002151 &obj_req->bvec_pos);
2152 break;
2153 default:
Arnd Bergmann16809372019-03-22 17:53:56 +01002154 BUG();
Ilya Dryomov3da691b2018-01-29 14:04:08 +01002155 }
2156}
2157
Ilya Dryomovbcbab1d2019-05-27 11:41:36 +02002158static int rbd_osd_setup_stat(struct ceph_osd_request *osd_req, int which)
Ilya Dryomov3da691b2018-01-29 14:04:08 +01002159{
2160 struct page **pages;
Ilya Dryomov710214e2016-09-15 17:53:32 +02002161
Alex Elderc5b5ef62013-02-11 12:33:24 -06002162 /*
2163 * The response data for a STAT call consists of:
2164 * le64 length;
2165 * struct {
2166 * le32 tv_sec;
2167 * le32 tv_nsec;
2168 * } mtime;
2169 */
Ilya Dryomov3da691b2018-01-29 14:04:08 +01002170 pages = ceph_alloc_page_vector(1, GFP_NOIO);
2171 if (IS_ERR(pages))
2172 return PTR_ERR(pages);
Alex Elderc5b5ef62013-02-11 12:33:24 -06002173
Ilya Dryomovbcbab1d2019-05-27 11:41:36 +02002174 osd_req_op_init(osd_req, which, CEPH_OSD_OP_STAT, 0);
2175 osd_req_op_raw_data_in_pages(osd_req, which, pages,
Ilya Dryomov3da691b2018-01-29 14:04:08 +01002176 8 + sizeof(struct ceph_timespec),
2177 0, false, true);
Ilya Dryomov980917f2016-09-12 18:59:42 +02002178 return 0;
Alex Elderc5b5ef62013-02-11 12:33:24 -06002179}
2180
Ilya Dryomovb5ae8cb2019-05-29 16:53:14 +02002181static int rbd_osd_setup_copyup(struct ceph_osd_request *osd_req, int which,
2182 u32 bytes)
Ilya Dryomov13488d52019-02-25 12:37:50 +01002183{
Ilya Dryomovb5ae8cb2019-05-29 16:53:14 +02002184 struct rbd_obj_request *obj_req = osd_req->r_priv;
2185 int ret;
2186
2187 ret = osd_req_op_cls_init(osd_req, which, "rbd", "copyup");
2188 if (ret)
2189 return ret;
2190
2191 osd_req_op_cls_request_data_bvecs(osd_req, which, obj_req->copyup_bvecs,
2192 obj_req->copyup_bvec_count, bytes);
2193 return 0;
Ilya Dryomov13488d52019-02-25 12:37:50 +01002194}
2195
Ilya Dryomovea9b7432019-05-31 15:11:26 +02002196static int rbd_obj_init_read(struct rbd_obj_request *obj_req)
Alex Elderb454e362013-04-19 15:34:50 -05002197{
Ilya Dryomovea9b7432019-05-31 15:11:26 +02002198 obj_req->read_state = RBD_OBJ_READ_START;
2199 return 0;
2200}
2201
Ilya Dryomovbcbab1d2019-05-27 11:41:36 +02002202static void __rbd_osd_setup_write_ops(struct ceph_osd_request *osd_req,
2203 int which)
Alex Elderb454e362013-04-19 15:34:50 -05002204{
Ilya Dryomovbcbab1d2019-05-27 11:41:36 +02002205 struct rbd_obj_request *obj_req = osd_req->r_priv;
Ilya Dryomov3da691b2018-01-29 14:04:08 +01002206 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
2207 u16 opcode;
Alex Elderb454e362013-04-19 15:34:50 -05002208
Ilya Dryomov8b5bec52019-06-19 15:45:27 +02002209 if (!use_object_map(rbd_dev) ||
2210 !(obj_req->flags & RBD_OBJ_FLAG_MAY_EXIST)) {
2211 osd_req_op_alloc_hint_init(osd_req, which++,
2212 rbd_dev->layout.object_size,
Ilya Dryomovd3798ac2020-05-29 20:31:37 +02002213 rbd_dev->layout.object_size,
Ilya Dryomovdc1dad82020-05-29 20:51:23 +02002214 rbd_dev->opts->alloc_hint_flags);
Ilya Dryomov8b5bec52019-06-19 15:45:27 +02002215 }
Alex Elderb454e362013-04-19 15:34:50 -05002216
Ilya Dryomov3da691b2018-01-29 14:04:08 +01002217 if (rbd_obj_is_entire(obj_req))
2218 opcode = CEPH_OSD_OP_WRITEFULL;
2219 else
2220 opcode = CEPH_OSD_OP_WRITE;
Ilya Dryomov70d045f2014-09-12 16:02:01 +04002221
Ilya Dryomovbcbab1d2019-05-27 11:41:36 +02002222 osd_req_op_extent_init(osd_req, which, opcode,
Ilya Dryomov43df3d32018-02-02 15:23:22 +01002223 obj_req->ex.oe_off, obj_req->ex.oe_len, 0, 0);
Ilya Dryomovbcbab1d2019-05-27 11:41:36 +02002224 rbd_osd_setup_data(osd_req, which);
Ilya Dryomov70d045f2014-09-12 16:02:01 +04002225}
2226
Ilya Dryomovea9b7432019-05-31 15:11:26 +02002227static int rbd_obj_init_write(struct rbd_obj_request *obj_req)
Ilya Dryomov70d045f2014-09-12 16:02:01 +04002228{
Ilya Dryomov3da691b2018-01-29 14:04:08 +01002229 int ret;
Ilya Dryomov058aa992016-09-12 14:44:45 +02002230
Ilya Dryomov86bd7992018-02-06 19:26:33 +01002231 /* reverse map the entire object onto the parent */
2232 ret = rbd_obj_calc_img_extents(obj_req, true);
2233 if (ret)
2234 return ret;
2235
Ilya Dryomov0ad5d952019-05-14 20:45:38 +02002236 if (rbd_obj_copyup_enabled(obj_req))
2237 obj_req->flags |= RBD_OBJ_FLAG_COPYUP_ENABLED;
Ilya Dryomov3da691b2018-01-29 14:04:08 +01002238
Ilya Dryomov85b5e6d2019-05-14 21:06:07 +02002239 obj_req->write_state = RBD_OBJ_WRITE_START;
Ilya Dryomov3da691b2018-01-29 14:04:08 +01002240 return 0;
2241}
2242
Ilya Dryomov6484cbe2019-01-29 12:46:25 +01002243static u16 truncate_or_zero_opcode(struct rbd_obj_request *obj_req)
2244{
2245 return rbd_obj_is_tail(obj_req) ? CEPH_OSD_OP_TRUNCATE :
2246 CEPH_OSD_OP_ZERO;
2247}
2248
Ilya Dryomov27bbd912019-05-29 17:31:37 +02002249static void __rbd_osd_setup_discard_ops(struct ceph_osd_request *osd_req,
2250 int which)
2251{
2252 struct rbd_obj_request *obj_req = osd_req->r_priv;
2253
2254 if (rbd_obj_is_entire(obj_req) && !obj_req->num_img_extents) {
2255 rbd_assert(obj_req->flags & RBD_OBJ_FLAG_DELETION);
2256 osd_req_op_init(osd_req, which, CEPH_OSD_OP_DELETE, 0);
2257 } else {
2258 osd_req_op_extent_init(osd_req, which,
2259 truncate_or_zero_opcode(obj_req),
2260 obj_req->ex.oe_off, obj_req->ex.oe_len,
2261 0, 0);
2262 }
2263}
2264
Ilya Dryomovea9b7432019-05-31 15:11:26 +02002265static int rbd_obj_init_discard(struct rbd_obj_request *obj_req)
Ilya Dryomov6484cbe2019-01-29 12:46:25 +01002266{
Ilya Dryomov0c93e1b2019-01-30 15:14:48 +01002267 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
Ilya Dryomov27bbd912019-05-29 17:31:37 +02002268 u64 off, next_off;
Ilya Dryomov6484cbe2019-01-29 12:46:25 +01002269 int ret;
2270
Ilya Dryomov0c93e1b2019-01-30 15:14:48 +01002271 /*
2272 * Align the range to alloc_size boundary and punt on discards
2273 * that are too small to free up any space.
2274 *
2275 * alloc_size == object_size && is_tail() is a special case for
2276 * filestore with filestore_punch_hole = false, needed to allow
2277 * truncate (in addition to delete).
2278 */
2279 if (rbd_dev->opts->alloc_size != rbd_dev->layout.object_size ||
2280 !rbd_obj_is_tail(obj_req)) {
Ilya Dryomov27bbd912019-05-29 17:31:37 +02002281 off = round_up(obj_req->ex.oe_off, rbd_dev->opts->alloc_size);
2282 next_off = round_down(obj_req->ex.oe_off + obj_req->ex.oe_len,
2283 rbd_dev->opts->alloc_size);
Ilya Dryomov0c93e1b2019-01-30 15:14:48 +01002284 if (off >= next_off)
2285 return 1;
Ilya Dryomov27bbd912019-05-29 17:31:37 +02002286
2287 dout("%s %p %llu~%llu -> %llu~%llu\n", __func__,
2288 obj_req, obj_req->ex.oe_off, obj_req->ex.oe_len,
2289 off, next_off - off);
2290 obj_req->ex.oe_off = off;
2291 obj_req->ex.oe_len = next_off - off;
Ilya Dryomov0c93e1b2019-01-30 15:14:48 +01002292 }
2293
Ilya Dryomov6484cbe2019-01-29 12:46:25 +01002294 /* reverse map the entire object onto the parent */
2295 ret = rbd_obj_calc_img_extents(obj_req, true);
2296 if (ret)
2297 return ret;
2298
Ilya Dryomov22e8bd52019-06-05 19:25:11 +02002299 obj_req->flags |= RBD_OBJ_FLAG_NOOP_FOR_NONEXISTENT;
Ilya Dryomov0ad5d952019-05-14 20:45:38 +02002300 if (rbd_obj_is_entire(obj_req) && !obj_req->num_img_extents)
2301 obj_req->flags |= RBD_OBJ_FLAG_DELETION;
Ilya Dryomov6484cbe2019-01-29 12:46:25 +01002302
Ilya Dryomov85b5e6d2019-05-14 21:06:07 +02002303 obj_req->write_state = RBD_OBJ_WRITE_START;
Ilya Dryomov6484cbe2019-01-29 12:46:25 +01002304 return 0;
2305}
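/*
 * Illustration of the alignment above (off~len notation), assuming a
 * hypothetical alloc_size of 64K: a discard of 10000~200000 within an
 * object is shrunk to 65536~131072 (start rounded up, end rounded
 * down), while a discard of 4096~4096 rounds to an empty range and is
 * dropped (rbd_obj_init_discard() returns 1).
 */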
2306
Ilya Dryomovbcbab1d2019-05-27 11:41:36 +02002307static void __rbd_osd_setup_zeroout_ops(struct ceph_osd_request *osd_req,
2308 int which)
Ilya Dryomov13488d52019-02-25 12:37:50 +01002309{
Ilya Dryomovbcbab1d2019-05-27 11:41:36 +02002310 struct rbd_obj_request *obj_req = osd_req->r_priv;
Ilya Dryomov3da691b2018-01-29 14:04:08 +01002311 u16 opcode;
2312
2313 if (rbd_obj_is_entire(obj_req)) {
Ilya Dryomov86bd7992018-02-06 19:26:33 +01002314 if (obj_req->num_img_extents) {
Ilya Dryomov0ad5d952019-05-14 20:45:38 +02002315 if (!(obj_req->flags & RBD_OBJ_FLAG_COPYUP_ENABLED))
Ilya Dryomovbcbab1d2019-05-27 11:41:36 +02002316 osd_req_op_init(osd_req, which++,
Ilya Dryomov9b17eb22019-02-28 15:51:39 +01002317 CEPH_OSD_OP_CREATE, 0);
Ilya Dryomov3da691b2018-01-29 14:04:08 +01002318 opcode = CEPH_OSD_OP_TRUNCATE;
2319 } else {
Ilya Dryomov0ad5d952019-05-14 20:45:38 +02002320 rbd_assert(obj_req->flags & RBD_OBJ_FLAG_DELETION);
Ilya Dryomovbcbab1d2019-05-27 11:41:36 +02002321 osd_req_op_init(osd_req, which++,
Ilya Dryomov3da691b2018-01-29 14:04:08 +01002322 CEPH_OSD_OP_DELETE, 0);
2323 opcode = 0;
2324 }
Ilya Dryomov3da691b2018-01-29 14:04:08 +01002325 } else {
Ilya Dryomov6484cbe2019-01-29 12:46:25 +01002326 opcode = truncate_or_zero_opcode(obj_req);
Ilya Dryomov3da691b2018-01-29 14:04:08 +01002327 }
2328
2329 if (opcode)
Ilya Dryomovbcbab1d2019-05-27 11:41:36 +02002330 osd_req_op_extent_init(osd_req, which, opcode,
Ilya Dryomov43df3d32018-02-02 15:23:22 +01002331 obj_req->ex.oe_off, obj_req->ex.oe_len,
Ilya Dryomov3da691b2018-01-29 14:04:08 +01002332 0, 0);
Ilya Dryomov3da691b2018-01-29 14:04:08 +01002333}
2334
Ilya Dryomovea9b7432019-05-31 15:11:26 +02002335static int rbd_obj_init_zeroout(struct rbd_obj_request *obj_req)
Ilya Dryomov3da691b2018-01-29 14:04:08 +01002336{
Ilya Dryomov3da691b2018-01-29 14:04:08 +01002337 int ret;
2338
Ilya Dryomov86bd7992018-02-06 19:26:33 +01002339 /* reverse map the entire object onto the parent */
2340 ret = rbd_obj_calc_img_extents(obj_req, true);
2341 if (ret)
2342 return ret;
2343
Ilya Dryomov0ad5d952019-05-14 20:45:38 +02002344 if (rbd_obj_copyup_enabled(obj_req))
2345 obj_req->flags |= RBD_OBJ_FLAG_COPYUP_ENABLED;
2346 if (!obj_req->num_img_extents) {
Ilya Dryomov22e8bd52019-06-05 19:25:11 +02002347 obj_req->flags |= RBD_OBJ_FLAG_NOOP_FOR_NONEXISTENT;
Ilya Dryomov0ad5d952019-05-14 20:45:38 +02002348 if (rbd_obj_is_entire(obj_req))
2349 obj_req->flags |= RBD_OBJ_FLAG_DELETION;
Ilya Dryomov3da691b2018-01-29 14:04:08 +01002350 }
2351
Ilya Dryomov85b5e6d2019-05-14 21:06:07 +02002352 obj_req->write_state = RBD_OBJ_WRITE_START;
Ilya Dryomov3da691b2018-01-29 14:04:08 +01002353 return 0;
2354}
2355
Ilya Dryomova086a1b2019-06-12 18:33:31 +02002356static int count_write_ops(struct rbd_obj_request *obj_req)
2357{
Ilya Dryomov8b5bec52019-06-19 15:45:27 +02002358 struct rbd_img_request *img_req = obj_req->img_request;
2359
2360 switch (img_req->op_type) {
Ilya Dryomova086a1b2019-06-12 18:33:31 +02002361 case OBJ_OP_WRITE:
Ilya Dryomov8b5bec52019-06-19 15:45:27 +02002362 if (!use_object_map(img_req->rbd_dev) ||
2363 !(obj_req->flags & RBD_OBJ_FLAG_MAY_EXIST))
2364 return 2; /* setallochint + write/writefull */
2365
2366 return 1; /* write/writefull */
Ilya Dryomova086a1b2019-06-12 18:33:31 +02002367 case OBJ_OP_DISCARD:
2368 return 1; /* delete/truncate/zero */
2369 case OBJ_OP_ZEROOUT:
2370 if (rbd_obj_is_entire(obj_req) && obj_req->num_img_extents &&
2371 !(obj_req->flags & RBD_OBJ_FLAG_COPYUP_ENABLED))
2372 return 2; /* create + truncate */
2373
2374 return 1; /* delete/truncate/zero */
2375 default:
2376 BUG();
2377 }
2378}
2379
2380static void rbd_osd_setup_write_ops(struct ceph_osd_request *osd_req,
2381 int which)
2382{
2383 struct rbd_obj_request *obj_req = osd_req->r_priv;
2384
2385 switch (obj_req->img_request->op_type) {
2386 case OBJ_OP_WRITE:
2387 __rbd_osd_setup_write_ops(osd_req, which);
2388 break;
2389 case OBJ_OP_DISCARD:
2390 __rbd_osd_setup_discard_ops(osd_req, which);
2391 break;
2392 case OBJ_OP_ZEROOUT:
2393 __rbd_osd_setup_zeroout_ops(osd_req, which);
2394 break;
2395 default:
2396 BUG();
2397 }
2398}
2399
Ilya Dryomov3da691b2018-01-29 14:04:08 +01002400/*
Ilya Dryomova086a1b2019-06-12 18:33:31 +02002401 * Prune the list of object requests (adjust offset and/or length, drop
2402 * redundant requests). Prepare object request state machines and image
2403 * request state machine for execution.
Ilya Dryomov3da691b2018-01-29 14:04:08 +01002404 */
2405static int __rbd_img_fill_request(struct rbd_img_request *img_req)
2406{
Ilya Dryomov0c93e1b2019-01-30 15:14:48 +01002407 struct rbd_obj_request *obj_req, *next_obj_req;
Ilya Dryomov3da691b2018-01-29 14:04:08 +01002408 int ret;
2409
Ilya Dryomov0c93e1b2019-01-30 15:14:48 +01002410 for_each_obj_request_safe(img_req, obj_req, next_obj_req) {
Ilya Dryomov9bb02482018-01-30 17:52:10 +01002411 switch (img_req->op_type) {
Ilya Dryomov3da691b2018-01-29 14:04:08 +01002412 case OBJ_OP_READ:
Ilya Dryomovea9b7432019-05-31 15:11:26 +02002413 ret = rbd_obj_init_read(obj_req);
Ilya Dryomov3da691b2018-01-29 14:04:08 +01002414 break;
2415 case OBJ_OP_WRITE:
Ilya Dryomovea9b7432019-05-31 15:11:26 +02002416 ret = rbd_obj_init_write(obj_req);
Ilya Dryomov3da691b2018-01-29 14:04:08 +01002417 break;
2418 case OBJ_OP_DISCARD:
Ilya Dryomovea9b7432019-05-31 15:11:26 +02002419 ret = rbd_obj_init_discard(obj_req);
Ilya Dryomov3da691b2018-01-29 14:04:08 +01002420 break;
Ilya Dryomov6484cbe2019-01-29 12:46:25 +01002421 case OBJ_OP_ZEROOUT:
Ilya Dryomovea9b7432019-05-31 15:11:26 +02002422 ret = rbd_obj_init_zeroout(obj_req);
Ilya Dryomov6484cbe2019-01-29 12:46:25 +01002423 break;
Ilya Dryomov3da691b2018-01-29 14:04:08 +01002424 default:
Arnd Bergmann16809372019-03-22 17:53:56 +01002425 BUG();
Ilya Dryomov3da691b2018-01-29 14:04:08 +01002426 }
Ilya Dryomov0c93e1b2019-01-30 15:14:48 +01002427 if (ret < 0)
Ilya Dryomov3da691b2018-01-29 14:04:08 +01002428 return ret;
Ilya Dryomov0c93e1b2019-01-30 15:14:48 +01002429 if (ret > 0) {
Ilya Dryomov0c93e1b2019-01-30 15:14:48 +01002430 rbd_img_obj_request_del(img_req, obj_req);
2431 continue;
2432 }
Ilya Dryomov3da691b2018-01-29 14:04:08 +01002433 }
2434
Ilya Dryomov0192ce22019-05-16 15:06:56 +02002435 img_req->state = RBD_IMG_START;
Ilya Dryomov3da691b2018-01-29 14:04:08 +01002436 return 0;
2437}
2438
Ilya Dryomov5a237812018-02-06 19:26:34 +01002439union rbd_img_fill_iter {
2440 struct ceph_bio_iter bio_iter;
2441 struct ceph_bvec_iter bvec_iter;
2442};
2443
2444struct rbd_img_fill_ctx {
2445 enum obj_request_type pos_type;
2446 union rbd_img_fill_iter *pos;
2447 union rbd_img_fill_iter iter;
2448 ceph_object_extent_fn_t set_pos_fn;
Ilya Dryomovafb97882018-02-06 19:26:35 +01002449 ceph_object_extent_fn_t count_fn;
2450 ceph_object_extent_fn_t copy_fn;
Ilya Dryomov5a237812018-02-06 19:26:34 +01002451};
2452
2453static struct ceph_object_extent *alloc_object_extent(void *arg)
2454{
2455 struct rbd_img_request *img_req = arg;
2456 struct rbd_obj_request *obj_req;
2457
2458 obj_req = rbd_obj_request_create();
2459 if (!obj_req)
2460 return NULL;
2461
2462 rbd_img_obj_request_add(img_req, obj_req);
2463 return &obj_req->ex;
2464}
2465
2466/*
Ilya Dryomovafb97882018-02-06 19:26:35 +01002467 * While su != os && sc == 1 is technically not fancy (it's the same
2468 * layout as su == os && sc == 1), we can't use the nocopy path for it
2469 * because ->set_pos_fn() should be called only once per object.
2470 * ceph_file_to_extents() invokes action_fn once per stripe unit, so
2471 * treat su != os && sc == 1 as fancy.
Ilya Dryomov5a237812018-02-06 19:26:34 +01002472 */
Ilya Dryomovafb97882018-02-06 19:26:35 +01002473static bool rbd_layout_is_fancy(struct ceph_file_layout *l)
2474{
2475 return l->stripe_unit != l->object_size;
2476}
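/*
 * Illustration with hypothetical layouts: with su == os == 4M (the
 * plain layout), a contiguous image extent touches each object in a
 * single run, so the nocopy path below can point object requests
 * straight at the caller's bio_vecs.  With su = 1M, sc = 2, os = 4M,
 * a 2M extent at image offset 0 is split between object 0 (its first
 * 1M) and object 1 (its first 1M), so the bio_vecs must be copied and
 * rearranged per object request (OBJ_REQUEST_OWN_BVECS).
 */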
2477
2478static int rbd_img_fill_request_nocopy(struct rbd_img_request *img_req,
2479 struct ceph_file_extent *img_extents,
2480 u32 num_img_extents,
2481 struct rbd_img_fill_ctx *fctx)
Ilya Dryomov5a237812018-02-06 19:26:34 +01002482{
2483 u32 i;
2484 int ret;
2485
2486 img_req->data_type = fctx->pos_type;
2487
2488 /*
2489 * Create object requests and set each object request's starting
2490 * position in the provided bio (list) or bio_vec array.
2491 */
2492 fctx->iter = *fctx->pos;
2493 for (i = 0; i < num_img_extents; i++) {
2494 ret = ceph_file_to_extents(&img_req->rbd_dev->layout,
2495 img_extents[i].fe_off,
2496 img_extents[i].fe_len,
2497 &img_req->object_extents,
2498 alloc_object_extent, img_req,
2499 fctx->set_pos_fn, &fctx->iter);
2500 if (ret)
2501 return ret;
2502 }
2503
2504 return __rbd_img_fill_request(img_req);
2505}
2506
Ilya Dryomovafb97882018-02-06 19:26:35 +01002507/*
2508 * Map a list of image extents to a list of object extents, create the
2509 * corresponding object requests (normally each to a different object,
2510 * but not always) and add them to @img_req. For each object request,
2511 * set up its data descriptor to point to the corresponding chunk(s) of
2512 * @fctx->pos data buffer.
2513 *
2514 * Because ceph_file_to_extents() will merge adjacent object extents
2515 * together, each object request's data descriptor may point to multiple
2516 * different chunks of @fctx->pos data buffer.
2517 *
2518 * @fctx->pos data buffer is assumed to be large enough.
2519 */
2520static int rbd_img_fill_request(struct rbd_img_request *img_req,
2521 struct ceph_file_extent *img_extents,
2522 u32 num_img_extents,
2523 struct rbd_img_fill_ctx *fctx)
2524{
2525 struct rbd_device *rbd_dev = img_req->rbd_dev;
2526 struct rbd_obj_request *obj_req;
2527 u32 i;
2528 int ret;
2529
2530 if (fctx->pos_type == OBJ_REQUEST_NODATA ||
2531 !rbd_layout_is_fancy(&rbd_dev->layout))
2532 return rbd_img_fill_request_nocopy(img_req, img_extents,
2533 num_img_extents, fctx);
2534
2535 img_req->data_type = OBJ_REQUEST_OWN_BVECS;
2536
2537 /*
2538 * Create object requests and determine ->bvec_count for each object
2539 * request. Note that ->bvec_count sum over all object requests may
2540 * be greater than the number of bio_vecs in the provided bio (list)
2541 * or bio_vec array because when mapped, those bio_vecs can straddle
2542 * stripe unit boundaries.
2543 */
2544 fctx->iter = *fctx->pos;
2545 for (i = 0; i < num_img_extents; i++) {
2546 ret = ceph_file_to_extents(&rbd_dev->layout,
2547 img_extents[i].fe_off,
2548 img_extents[i].fe_len,
2549 &img_req->object_extents,
2550 alloc_object_extent, img_req,
2551 fctx->count_fn, &fctx->iter);
2552 if (ret)
2553 return ret;
2554 }
2555
2556 for_each_obj_request(img_req, obj_req) {
2557 obj_req->bvec_pos.bvecs = kmalloc_array(obj_req->bvec_count,
2558 sizeof(*obj_req->bvec_pos.bvecs),
2559 GFP_NOIO);
2560 if (!obj_req->bvec_pos.bvecs)
2561 return -ENOMEM;
Alex Elderb454e362013-04-19 15:34:50 -05002562 }
2563
2564 /*
Ilya Dryomovafb97882018-02-06 19:26:35 +01002565 * Fill in each object request's private bio_vec array, splitting and
2566 * rearranging the provided bio_vecs in stripe unit chunks as needed.
Alex Elderb454e362013-04-19 15:34:50 -05002567 */
Ilya Dryomovafb97882018-02-06 19:26:35 +01002568 fctx->iter = *fctx->pos;
2569 for (i = 0; i < num_img_extents; i++) {
2570 ret = ceph_iterate_extents(&rbd_dev->layout,
2571 img_extents[i].fe_off,
2572 img_extents[i].fe_len,
2573 &img_req->object_extents,
2574 fctx->copy_fn, &fctx->iter);
2575 if (ret)
2576 return ret;
2577 }
Alex Elder3d7efd12013-04-19 15:34:50 -05002578
Ilya Dryomovafb97882018-02-06 19:26:35 +01002579 return __rbd_img_fill_request(img_req);
Alex Elderb454e362013-04-19 15:34:50 -05002580}
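
/*
 * To recap the copy path above: pass 1 (count_fn) sizes each object
 * request's private bio_vec array, pass 2 allocates the arrays, and
 * pass 3 (copy_fn) fills them, splitting any source bio_vec that
 * straddles a stripe unit boundary.  Restarting from *fctx->pos before
 * each walk is what keeps the counting and copying passes in sync.
 */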
2581
Ilya Dryomov5a237812018-02-06 19:26:34 +01002582static int rbd_img_fill_nodata(struct rbd_img_request *img_req,
2583 u64 off, u64 len)
2584{
2585 struct ceph_file_extent ex = { off, len };
Arnd Bergmanna55e6012020-01-07 22:01:04 +01002586 union rbd_img_fill_iter dummy = {};
Ilya Dryomov5a237812018-02-06 19:26:34 +01002587 struct rbd_img_fill_ctx fctx = {
2588 .pos_type = OBJ_REQUEST_NODATA,
2589 .pos = &dummy,
2590 };
2591
2592 return rbd_img_fill_request(img_req, &ex, 1, &fctx);
2593}
2594
2595static void set_bio_pos(struct ceph_object_extent *ex, u32 bytes, void *arg)
2596{
2597 struct rbd_obj_request *obj_req =
2598 container_of(ex, struct rbd_obj_request, ex);
2599 struct ceph_bio_iter *it = arg;
2600
2601 dout("%s objno %llu bytes %u\n", __func__, ex->oe_objno, bytes);
2602 obj_req->bio_pos = *it;
2603 ceph_bio_iter_advance(it, bytes);
2604}
2605
Ilya Dryomovafb97882018-02-06 19:26:35 +01002606static void count_bio_bvecs(struct ceph_object_extent *ex, u32 bytes, void *arg)
2607{
2608 struct rbd_obj_request *obj_req =
2609 container_of(ex, struct rbd_obj_request, ex);
2610 struct ceph_bio_iter *it = arg;
2611
2612 dout("%s objno %llu bytes %u\n", __func__, ex->oe_objno, bytes);
2613 ceph_bio_iter_advance_step(it, bytes, ({
2614 obj_req->bvec_count++;
2615 }));

2617}
2618
2619static void copy_bio_bvecs(struct ceph_object_extent *ex, u32 bytes, void *arg)
2620{
2621 struct rbd_obj_request *obj_req =
2622 container_of(ex, struct rbd_obj_request, ex);
2623 struct ceph_bio_iter *it = arg;
2624
2625 dout("%s objno %llu bytes %u\n", __func__, ex->oe_objno, bytes);
2626 ceph_bio_iter_advance_step(it, bytes, ({
2627 obj_req->bvec_pos.bvecs[obj_req->bvec_idx++] = bv;
2628 obj_req->bvec_pos.iter.bi_size += bv.bv_len;
2629 }));
2630}
2631
Ilya Dryomov5a237812018-02-06 19:26:34 +01002632static int __rbd_img_fill_from_bio(struct rbd_img_request *img_req,
2633 struct ceph_file_extent *img_extents,
2634 u32 num_img_extents,
2635 struct ceph_bio_iter *bio_pos)
2636{
2637 struct rbd_img_fill_ctx fctx = {
2638 .pos_type = OBJ_REQUEST_BIO,
2639 .pos = (union rbd_img_fill_iter *)bio_pos,
2640 .set_pos_fn = set_bio_pos,
Ilya Dryomovafb97882018-02-06 19:26:35 +01002641 .count_fn = count_bio_bvecs,
2642 .copy_fn = copy_bio_bvecs,
Ilya Dryomov5a237812018-02-06 19:26:34 +01002643 };
2644
2645 return rbd_img_fill_request(img_req, img_extents, num_img_extents,
2646 &fctx);
2647}
2648
2649static int rbd_img_fill_from_bio(struct rbd_img_request *img_req,
2650 u64 off, u64 len, struct bio *bio)
2651{
2652 struct ceph_file_extent ex = { off, len };
2653 struct ceph_bio_iter it = { .bio = bio, .iter = bio->bi_iter };
2654
2655 return __rbd_img_fill_from_bio(img_req, &ex, 1, &it);
2656}
2657
2658static void set_bvec_pos(struct ceph_object_extent *ex, u32 bytes, void *arg)
2659{
2660 struct rbd_obj_request *obj_req =
2661 container_of(ex, struct rbd_obj_request, ex);
2662 struct ceph_bvec_iter *it = arg;
2663
2664 obj_req->bvec_pos = *it;
2665 ceph_bvec_iter_shorten(&obj_req->bvec_pos, bytes);
2666 ceph_bvec_iter_advance(it, bytes);
2667}
2668
Ilya Dryomovafb97882018-02-06 19:26:35 +01002669static void count_bvecs(struct ceph_object_extent *ex, u32 bytes, void *arg)
2670{
2671 struct rbd_obj_request *obj_req =
2672 container_of(ex, struct rbd_obj_request, ex);
2673 struct ceph_bvec_iter *it = arg;
2674
2675 ceph_bvec_iter_advance_step(it, bytes, ({
2676 obj_req->bvec_count++;
2677 }));
2678}
2679
2680static void copy_bvecs(struct ceph_object_extent *ex, u32 bytes, void *arg)
2681{
2682 struct rbd_obj_request *obj_req =
2683 container_of(ex, struct rbd_obj_request, ex);
2684 struct ceph_bvec_iter *it = arg;
2685
2686 ceph_bvec_iter_advance_step(it, bytes, ({
2687 obj_req->bvec_pos.bvecs[obj_req->bvec_idx++] = bv;
2688 obj_req->bvec_pos.iter.bi_size += bv.bv_len;
2689 }));
2690}
2691
Ilya Dryomov5a237812018-02-06 19:26:34 +01002692static int __rbd_img_fill_from_bvecs(struct rbd_img_request *img_req,
2693 struct ceph_file_extent *img_extents,
2694 u32 num_img_extents,
2695 struct ceph_bvec_iter *bvec_pos)
2696{
2697 struct rbd_img_fill_ctx fctx = {
2698 .pos_type = OBJ_REQUEST_BVECS,
2699 .pos = (union rbd_img_fill_iter *)bvec_pos,
2700 .set_pos_fn = set_bvec_pos,
Ilya Dryomovafb97882018-02-06 19:26:35 +01002701 .count_fn = count_bvecs,
2702 .copy_fn = copy_bvecs,
Ilya Dryomov5a237812018-02-06 19:26:34 +01002703 };
2704
2705 return rbd_img_fill_request(img_req, img_extents, num_img_extents,
2706 &fctx);
2707}
2708
2709static int rbd_img_fill_from_bvecs(struct rbd_img_request *img_req,
2710 struct ceph_file_extent *img_extents,
2711 u32 num_img_extents,
2712 struct bio_vec *bvecs)
2713{
2714 struct ceph_bvec_iter it = {
2715 .bvecs = bvecs,
2716 .iter = { .bi_size = ceph_file_extents_bytes(img_extents,
2717 num_img_extents) },
2718 };
2719
2720 return __rbd_img_fill_from_bvecs(img_req, img_extents, num_img_extents,
2721 &it);
2722}
2723
Ilya Dryomov0192ce22019-05-16 15:06:56 +02002724static void rbd_img_handle_request_work(struct work_struct *work)
Alex Elderbf0d5f502012-11-22 00:00:08 -06002725{
Ilya Dryomov0192ce22019-05-16 15:06:56 +02002726 struct rbd_img_request *img_req =
2727 container_of(work, struct rbd_img_request, work);
Alex Elderbf0d5f502012-11-22 00:00:08 -06002728
Ilya Dryomov0192ce22019-05-16 15:06:56 +02002729 rbd_img_handle_request(img_req, img_req->work_result);
2730}
Alex Elderbf0d5f502012-11-22 00:00:08 -06002731
Ilya Dryomov0192ce22019-05-16 15:06:56 +02002732static void rbd_img_schedule(struct rbd_img_request *img_req, int result)
2733{
2734 INIT_WORK(&img_req->work, rbd_img_handle_request_work);
2735 img_req->work_result = result;
2736 queue_work(rbd_wq, &img_req->work);
Alex Elderbf0d5f502012-11-22 00:00:08 -06002737}
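
/*
 * rbd_img_schedule() defers the image request state machine to rbd_wq:
 * rbd_obj_read_from_parent() uses it to avoid recursing down a long
 * chain of parent images, and wake_lock_waiters() uses it to restart
 * requests that were parked while the exclusive lock was being acquired.
 */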
2738
Ilya Dryomov22e8bd52019-06-05 19:25:11 +02002739static bool rbd_obj_may_exist(struct rbd_obj_request *obj_req)
2740{
2741 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
2742
2743 if (rbd_object_map_may_exist(rbd_dev, obj_req->ex.oe_objno)) {
2744 obj_req->flags |= RBD_OBJ_FLAG_MAY_EXIST;
2745 return true;
2746 }
2747
2748 dout("%s %p objno %llu assuming dne\n", __func__, obj_req,
2749 obj_req->ex.oe_objno);
2750 return false;
2751}
2752
Ilya Dryomov85b5e6d2019-05-14 21:06:07 +02002753static int rbd_obj_read_object(struct rbd_obj_request *obj_req)
2754{
Ilya Dryomova086a1b2019-06-12 18:33:31 +02002755 struct ceph_osd_request *osd_req;
2756 int ret;
2757
2758 osd_req = __rbd_obj_add_osd_request(obj_req, NULL, 1);
2759 if (IS_ERR(osd_req))
2760 return PTR_ERR(osd_req);
2761
2762 osd_req_op_extent_init(osd_req, 0, CEPH_OSD_OP_READ,
2763 obj_req->ex.oe_off, obj_req->ex.oe_len, 0, 0);
2764 rbd_osd_setup_data(osd_req, 0);
2765 rbd_osd_format_read(osd_req);
2766
2767 ret = ceph_osdc_alloc_messages(osd_req, GFP_NOIO);
2768 if (ret)
2769 return ret;
2770
2771 rbd_osd_submit(osd_req);
Ilya Dryomov85b5e6d2019-05-14 21:06:07 +02002772 return 0;
Alex Elderbf0d5f502012-11-22 00:00:08 -06002773}
2774
Ilya Dryomov86bd7992018-02-06 19:26:33 +01002775static int rbd_obj_read_from_parent(struct rbd_obj_request *obj_req)
Alex Elder8b3e1a52013-01-24 16:13:36 -06002776{
Ilya Dryomov3da691b2018-01-29 14:04:08 +01002777 struct rbd_img_request *img_req = obj_req->img_request;
Ilya Dryomova52cc682020-02-12 15:08:39 +01002778 struct rbd_device *parent = img_req->rbd_dev->parent;
Ilya Dryomov3da691b2018-01-29 14:04:08 +01002779 struct rbd_img_request *child_img_req;
2780 int ret;
Alex Elder8b3e1a52013-01-24 16:13:36 -06002781
Ilya Dryomov59e542c2020-02-12 15:23:58 +01002782 child_img_req = kmem_cache_alloc(rbd_img_request_cache, GFP_NOIO);
Ilya Dryomov3da691b2018-01-29 14:04:08 +01002783 if (!child_img_req)
2784 return -ENOMEM;
Alex Elder8b3e1a52013-01-24 16:13:36 -06002785
Ilya Dryomov59e542c2020-02-12 15:23:58 +01002786 rbd_img_request_init(child_img_req, parent, OBJ_OP_READ);
Ilya Dryomove93aca02018-02-06 19:26:35 +01002787 __set_bit(IMG_REQ_CHILD, &child_img_req->flags);
2788 child_img_req->obj_request = obj_req;
Alex Elder02c74fb2013-05-06 17:40:33 -05002789
Ilya Dryomova52cc682020-02-12 15:08:39 +01002790 down_read(&parent->header_rwsem);
2791 rbd_img_capture_header(child_img_req);
2792 up_read(&parent->header_rwsem);
2793
Ilya Dryomov21ed05a2019-08-30 17:31:06 +02002794 dout("%s child_img_req %p for obj_req %p\n", __func__, child_img_req,
2795 obj_req);
2796
Ilya Dryomov3da691b2018-01-29 14:04:08 +01002797 if (!rbd_img_is_write(img_req)) {
Ilya Dryomovecc633c2018-02-01 11:50:47 +01002798 switch (img_req->data_type) {
Ilya Dryomov3da691b2018-01-29 14:04:08 +01002799 case OBJ_REQUEST_BIO:
Ilya Dryomov5a237812018-02-06 19:26:34 +01002800 ret = __rbd_img_fill_from_bio(child_img_req,
2801 obj_req->img_extents,
2802 obj_req->num_img_extents,
2803 &obj_req->bio_pos);
Ilya Dryomov3da691b2018-01-29 14:04:08 +01002804 break;
2805 case OBJ_REQUEST_BVECS:
Ilya Dryomovafb97882018-02-06 19:26:35 +01002806 case OBJ_REQUEST_OWN_BVECS:
Ilya Dryomov5a237812018-02-06 19:26:34 +01002807 ret = __rbd_img_fill_from_bvecs(child_img_req,
2808 obj_req->img_extents,
2809 obj_req->num_img_extents,
2810 &obj_req->bvec_pos);
Ilya Dryomov3da691b2018-01-29 14:04:08 +01002811 break;
2812 default:
Arnd Bergmannd342a152019-03-22 15:36:37 +01002813 BUG();
Ilya Dryomov3da691b2018-01-29 14:04:08 +01002814 }
2815 } else {
Ilya Dryomov5a237812018-02-06 19:26:34 +01002816 ret = rbd_img_fill_from_bvecs(child_img_req,
2817 obj_req->img_extents,
2818 obj_req->num_img_extents,
2819 obj_req->copyup_bvecs);
Ilya Dryomov3da691b2018-01-29 14:04:08 +01002820 }
2821 if (ret) {
Hannes Reinecke679a97d2020-01-31 11:37:36 +01002822 rbd_img_request_destroy(child_img_req);
Ilya Dryomov3da691b2018-01-29 14:04:08 +01002823 return ret;
2824 }
2825
Ilya Dryomov0192ce22019-05-16 15:06:56 +02002826 /* avoid parent chain recursion */
2827 rbd_img_schedule(child_img_req, 0);
Ilya Dryomov3da691b2018-01-29 14:04:08 +01002828 return 0;
2829}
2830
Ilya Dryomov85b5e6d2019-05-14 21:06:07 +02002831static bool rbd_obj_advance_read(struct rbd_obj_request *obj_req, int *result)
Ilya Dryomov3da691b2018-01-29 14:04:08 +01002832{
2833 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
2834 int ret;
2835
Ilya Dryomov22e8bd52019-06-05 19:25:11 +02002836again:
Ilya Dryomova9b67e62019-05-08 13:35:57 +02002837 switch (obj_req->read_state) {
Ilya Dryomov85b5e6d2019-05-14 21:06:07 +02002838 case RBD_OBJ_READ_START:
2839 rbd_assert(!*result);
2840
Ilya Dryomov22e8bd52019-06-05 19:25:11 +02002841 if (!rbd_obj_may_exist(obj_req)) {
2842 *result = -ENOENT;
2843 obj_req->read_state = RBD_OBJ_READ_OBJECT;
2844 goto again;
2845 }
2846
Ilya Dryomov85b5e6d2019-05-14 21:06:07 +02002847 ret = rbd_obj_read_object(obj_req);
Ilya Dryomov3da691b2018-01-29 14:04:08 +01002848 if (ret) {
Ilya Dryomov85b5e6d2019-05-14 21:06:07 +02002849 *result = ret;
Ilya Dryomov3da691b2018-01-29 14:04:08 +01002850 return true;
2851 }
Ilya Dryomov85b5e6d2019-05-14 21:06:07 +02002852 obj_req->read_state = RBD_OBJ_READ_OBJECT;
2853 return false;
Ilya Dryomova9b67e62019-05-08 13:35:57 +02002854 case RBD_OBJ_READ_OBJECT:
2855 if (*result == -ENOENT && rbd_dev->parent_overlap) {
2856 /* reverse map this object extent onto the parent */
2857 ret = rbd_obj_calc_img_extents(obj_req, false);
Ilya Dryomov86bd7992018-02-06 19:26:33 +01002858 if (ret) {
Ilya Dryomov54ab3b22019-05-11 16:21:49 +02002859 *result = ret;
Ilya Dryomov86bd7992018-02-06 19:26:33 +01002860 return true;
2861 }
Ilya Dryomova9b67e62019-05-08 13:35:57 +02002862 if (obj_req->num_img_extents) {
2863 ret = rbd_obj_read_from_parent(obj_req);
2864 if (ret) {
2865 *result = ret;
2866 return true;
2867 }
2868 obj_req->read_state = RBD_OBJ_READ_PARENT;
2869 return false;
2870 }
Ilya Dryomov86bd7992018-02-06 19:26:33 +01002871 }
Alex Elder02c74fb2013-05-06 17:40:33 -05002872
Ilya Dryomova9b67e62019-05-08 13:35:57 +02002873 /*
2874 * -ENOENT means a hole in the image -- zero-fill the entire
2875 * length of the request. A short read also implies zero-fill
2876 * to the end of the request.
2877 */
2878 if (*result == -ENOENT) {
2879 rbd_obj_zero_range(obj_req, 0, obj_req->ex.oe_len);
2880 *result = 0;
2881 } else if (*result >= 0) {
2882 if (*result < obj_req->ex.oe_len)
2883 rbd_obj_zero_range(obj_req, *result,
2884 obj_req->ex.oe_len - *result);
2885 else
2886 rbd_assert(*result == obj_req->ex.oe_len);
2887 *result = 0;
2888 }
2889 return true;
2890 case RBD_OBJ_READ_PARENT:
Ilya Dryomovd435c9a2019-08-27 16:45:10 +02002891 /*
2892 * The parent image is read only up to the overlap -- zero-fill
2893 * from the overlap to the end of the request.
2894 */
2895 if (!*result) {
2896 u32 obj_overlap = rbd_obj_img_extents_bytes(obj_req);
2897
2898 if (obj_overlap < obj_req->ex.oe_len)
2899 rbd_obj_zero_range(obj_req, obj_overlap,
2900 obj_req->ex.oe_len - obj_overlap);
2901 }
Ilya Dryomova9b67e62019-05-08 13:35:57 +02002902 return true;
2903 default:
2904 BUG();
Ilya Dryomov3da691b2018-01-29 14:04:08 +01002905 }
Ilya Dryomov3da691b2018-01-29 14:04:08 +01002906}
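
/*
 * Reader's sketch of the read states handled above (not authoritative):
 *
 *   START:  object map says the object can't exist -> OBJECT with -ENOENT,
 *           otherwise submit the READ op -> OBJECT
 *   OBJECT: -ENOENT with parent data -> PARENT; -ENOENT without parent
 *           data -> done, zero-filled; short read -> done, tail zeroed
 *   PARENT: done, anything past the parent overlap zeroed
 */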
2907
Ilya Dryomov22e8bd52019-06-05 19:25:11 +02002908static bool rbd_obj_write_is_noop(struct rbd_obj_request *obj_req)
2909{
2910 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
2911
2912 if (rbd_object_map_may_exist(rbd_dev, obj_req->ex.oe_objno))
2913 obj_req->flags |= RBD_OBJ_FLAG_MAY_EXIST;
2914
2915 if (!(obj_req->flags & RBD_OBJ_FLAG_MAY_EXIST) &&
2916 (obj_req->flags & RBD_OBJ_FLAG_NOOP_FOR_NONEXISTENT)) {
2917 dout("%s %p noop for nonexistent\n", __func__, obj_req);
2918 return true;
Alex Elder02c74fb2013-05-06 17:40:33 -05002919 }
2920
Ilya Dryomov22e8bd52019-06-05 19:25:11 +02002921 return false;
2922}
2923
2924/*
2925 * Return:
2926 * 0 - object map update sent
2927 * 1 - object map update isn't needed
2928 * <0 - error
2929 */
2930static int rbd_obj_write_pre_object_map(struct rbd_obj_request *obj_req)
2931{
2932 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
2933 u8 new_state;
2934
2935 if (!(rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP))
2936 return 1;
2937
2938 if (obj_req->flags & RBD_OBJ_FLAG_DELETION)
2939 new_state = OBJECT_PENDING;
2940 else
2941 new_state = OBJECT_EXISTS;
2942
2943 return rbd_object_map_update(obj_req, CEPH_NOSNAP, new_state, NULL);
2944}
2945
Ilya Dryomov85b5e6d2019-05-14 21:06:07 +02002946static int rbd_obj_write_object(struct rbd_obj_request *obj_req)
2947{
Ilya Dryomova086a1b2019-06-12 18:33:31 +02002948 struct ceph_osd_request *osd_req;
2949 int num_ops = count_write_ops(obj_req);
2950 int which = 0;
2951 int ret;
2952
2953 if (obj_req->flags & RBD_OBJ_FLAG_COPYUP_ENABLED)
2954 num_ops++; /* stat */
2955
2956 osd_req = rbd_obj_add_osd_request(obj_req, num_ops);
2957 if (IS_ERR(osd_req))
2958 return PTR_ERR(osd_req);
2959
2960 if (obj_req->flags & RBD_OBJ_FLAG_COPYUP_ENABLED) {
2961 ret = rbd_osd_setup_stat(osd_req, which++);
2962 if (ret)
2963 return ret;
Ilya Dryomov3da691b2018-01-29 14:04:08 +01002964 }
2965
Ilya Dryomova086a1b2019-06-12 18:33:31 +02002966 rbd_osd_setup_write_ops(osd_req, which);
2967 rbd_osd_format_write(osd_req);
2968
2969 ret = ceph_osdc_alloc_messages(osd_req, GFP_NOIO);
2970 if (ret)
2971 return ret;
2972
2973 rbd_osd_submit(osd_req);
Ilya Dryomov85b5e6d2019-05-14 21:06:07 +02002974 return 0;
Ilya Dryomov3da691b2018-01-29 14:04:08 +01002975}
2976
2977/*
2978 * copyup_bvecs pages are never highmem pages
2979 */
2980static bool is_zero_bvecs(struct bio_vec *bvecs, u32 bytes)
2981{
2982 struct ceph_bvec_iter it = {
2983 .bvecs = bvecs,
2984 .iter = { .bi_size = bytes },
2985 };
2986
2987 ceph_bvec_iter_advance_step(&it, bytes, ({
Christoph Hellwigcf58b532021-08-04 11:56:26 +02002988 if (memchr_inv(bvec_virt(&bv), 0, bv.bv_len))
Ilya Dryomov3da691b2018-01-29 14:04:08 +01002989 return false;
2990 }));
2991 return true;
2992}
2993
Ilya Dryomov3a482502019-02-28 10:49:12 +01002994#define MODS_ONLY U32_MAX
2995
Ilya Dryomov793333a302019-06-13 17:44:08 +02002996static int rbd_obj_copyup_empty_snapc(struct rbd_obj_request *obj_req,
2997 u32 bytes)
Ilya Dryomov3da691b2018-01-29 14:04:08 +01002998{
Ilya Dryomovbcbab1d2019-05-27 11:41:36 +02002999 struct ceph_osd_request *osd_req;
Chengguang Xufe943d52018-04-12 12:04:55 +08003000 int ret;
Ilya Dryomov3da691b2018-01-29 14:04:08 +01003001
3002 dout("%s obj_req %p bytes %u\n", __func__, obj_req, bytes);
Ilya Dryomov89a59c12019-02-28 14:20:28 +01003003 rbd_assert(bytes > 0 && bytes != MODS_ONLY);
Ilya Dryomov3da691b2018-01-29 14:04:08 +01003004
Ilya Dryomovbcbab1d2019-05-27 11:41:36 +02003005 osd_req = __rbd_obj_add_osd_request(obj_req, &rbd_empty_snapc, 1);
3006 if (IS_ERR(osd_req))
3007 return PTR_ERR(osd_req);
Ilya Dryomov3da691b2018-01-29 14:04:08 +01003008
Ilya Dryomovb5ae8cb2019-05-29 16:53:14 +02003009 ret = rbd_osd_setup_copyup(osd_req, 0, bytes);
Chengguang Xufe943d52018-04-12 12:04:55 +08003010 if (ret)
3011 return ret;
3012
Ilya Dryomovbcbab1d2019-05-27 11:41:36 +02003013 rbd_osd_format_write(osd_req);
Ilya Dryomov3da691b2018-01-29 14:04:08 +01003014
Ilya Dryomovbcbab1d2019-05-27 11:41:36 +02003015 ret = ceph_osdc_alloc_messages(osd_req, GFP_NOIO);
Ilya Dryomov89a59c12019-02-28 14:20:28 +01003016 if (ret)
3017 return ret;
3018
Ilya Dryomova086a1b2019-06-12 18:33:31 +02003019 rbd_osd_submit(osd_req);
Ilya Dryomov89a59c12019-02-28 14:20:28 +01003020 return 0;
3021}
3022
Ilya Dryomov793333a302019-06-13 17:44:08 +02003023static int rbd_obj_copyup_current_snapc(struct rbd_obj_request *obj_req,
3024 u32 bytes)
Ilya Dryomov3da691b2018-01-29 14:04:08 +01003025{
Ilya Dryomovbcbab1d2019-05-27 11:41:36 +02003026 struct ceph_osd_request *osd_req;
Ilya Dryomova086a1b2019-06-12 18:33:31 +02003027 int num_ops = count_write_ops(obj_req);
3028 int which = 0;
Ilya Dryomov3da691b2018-01-29 14:04:08 +01003029 int ret;
3030
3031 dout("%s obj_req %p bytes %u\n", __func__, obj_req, bytes);
Ilya Dryomov3da691b2018-01-29 14:04:08 +01003032
Ilya Dryomova086a1b2019-06-12 18:33:31 +02003033 if (bytes != MODS_ONLY)
3034 num_ops++; /* copyup */
Ilya Dryomov13488d52019-02-25 12:37:50 +01003035
Ilya Dryomova086a1b2019-06-12 18:33:31 +02003036 osd_req = rbd_obj_add_osd_request(obj_req, num_ops);
Ilya Dryomovbcbab1d2019-05-27 11:41:36 +02003037 if (IS_ERR(osd_req))
3038 return PTR_ERR(osd_req);
Ilya Dryomov3da691b2018-01-29 14:04:08 +01003039
Ilya Dryomov3a482502019-02-28 10:49:12 +01003040 if (bytes != MODS_ONLY) {
Ilya Dryomovb5ae8cb2019-05-29 16:53:14 +02003041 ret = rbd_osd_setup_copyup(osd_req, which++, bytes);
Ilya Dryomov3a482502019-02-28 10:49:12 +01003042 if (ret)
3043 return ret;
Ilya Dryomov3da691b2018-01-29 14:04:08 +01003044 }
Ilya Dryomov3da691b2018-01-29 14:04:08 +01003045
Ilya Dryomova086a1b2019-06-12 18:33:31 +02003046 rbd_osd_setup_write_ops(osd_req, which);
3047 rbd_osd_format_write(osd_req);
Ilya Dryomov3da691b2018-01-29 14:04:08 +01003048
Ilya Dryomovbcbab1d2019-05-27 11:41:36 +02003049 ret = ceph_osdc_alloc_messages(osd_req, GFP_NOIO);
Ilya Dryomov26f887e2018-10-15 16:11:37 +02003050 if (ret)
3051 return ret;
3052
Ilya Dryomova086a1b2019-06-12 18:33:31 +02003053 rbd_osd_submit(osd_req);
Ilya Dryomov3da691b2018-01-29 14:04:08 +01003054 return 0;
3055}
3056
Ilya Dryomov7e07efb2018-01-20 10:30:11 +01003057static int setup_copyup_bvecs(struct rbd_obj_request *obj_req, u64 obj_overlap)
3058{
3059 u32 i;
3060
3061 rbd_assert(!obj_req->copyup_bvecs);
3062 obj_req->copyup_bvec_count = calc_pages_for(0, obj_overlap);
3063 obj_req->copyup_bvecs = kcalloc(obj_req->copyup_bvec_count,
3064 sizeof(*obj_req->copyup_bvecs),
3065 GFP_NOIO);
3066 if (!obj_req->copyup_bvecs)
3067 return -ENOMEM;
3068
3069 for (i = 0; i < obj_req->copyup_bvec_count; i++) {
3070 unsigned int len = min(obj_overlap, (u64)PAGE_SIZE);
3071
3072 obj_req->copyup_bvecs[i].bv_page = alloc_page(GFP_NOIO);
3073 if (!obj_req->copyup_bvecs[i].bv_page)
3074 return -ENOMEM;
3075
3076 obj_req->copyup_bvecs[i].bv_offset = 0;
3077 obj_req->copyup_bvecs[i].bv_len = len;
3078 obj_overlap -= len;
3079 }
3080
3081 rbd_assert(!obj_overlap);
3082 return 0;
3083}
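
/*
 * Worked example with made-up numbers: for obj_overlap = 10000 bytes and
 * 4K pages, calc_pages_for(0, 10000) yields 3, so three bvecs of 4096,
 * 4096 and 1808 bytes are set up, each backed by its own page.  The pages
 * are filled by the parent read issued from rbd_obj_copyup_read_parent().
 */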
3084
Ilya Dryomov0ad5d952019-05-14 20:45:38 +02003085/*
3086 * The target object doesn't exist. Read the data for the entire
3087 * target object up to the overlap point (if any) from the parent,
3088 * so we can use it for a copyup.
3089 */
Ilya Dryomov793333a302019-06-13 17:44:08 +02003090static int rbd_obj_copyup_read_parent(struct rbd_obj_request *obj_req)
Ilya Dryomov3da691b2018-01-29 14:04:08 +01003091{
3092 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
Ilya Dryomov3da691b2018-01-29 14:04:08 +01003093 int ret;
3094
Ilya Dryomov86bd7992018-02-06 19:26:33 +01003095 rbd_assert(obj_req->num_img_extents);
3096 prune_extents(obj_req->img_extents, &obj_req->num_img_extents,
3097 rbd_dev->parent_overlap);
3098 if (!obj_req->num_img_extents) {
Ilya Dryomov3da691b2018-01-29 14:04:08 +01003099 /*
3100 * The overlap has become 0 (most likely because the
Ilya Dryomov3a482502019-02-28 10:49:12 +01003101 * image has been flattened). Re-submit the original write
3102 * request -- pass MODS_ONLY since the copyup isn't needed
3103 * anymore.
Ilya Dryomov3da691b2018-01-29 14:04:08 +01003104 */
Ilya Dryomov793333a302019-06-13 17:44:08 +02003105 return rbd_obj_copyup_current_snapc(obj_req, MODS_ONLY);
Ilya Dryomov3da691b2018-01-29 14:04:08 +01003106 }
3107
Ilya Dryomov86bd7992018-02-06 19:26:33 +01003108 ret = setup_copyup_bvecs(obj_req, rbd_obj_img_extents_bytes(obj_req));
Ilya Dryomov3da691b2018-01-29 14:04:08 +01003109 if (ret)
3110 return ret;
3111
Ilya Dryomov86bd7992018-02-06 19:26:33 +01003112 return rbd_obj_read_from_parent(obj_req);
Ilya Dryomov3da691b2018-01-29 14:04:08 +01003113}
3114
Ilya Dryomov22e8bd52019-06-05 19:25:11 +02003115static void rbd_obj_copyup_object_maps(struct rbd_obj_request *obj_req)
Ilya Dryomov3da691b2018-01-29 14:04:08 +01003116{
Ilya Dryomov22e8bd52019-06-05 19:25:11 +02003117 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
3118 struct ceph_snap_context *snapc = obj_req->img_request->snapc;
3119 u8 new_state;
3120 u32 i;
Ilya Dryomov3da691b2018-01-29 14:04:08 +01003121 int ret;
3122
Ilya Dryomov22e8bd52019-06-05 19:25:11 +02003123 rbd_assert(!obj_req->pending.result && !obj_req->pending.num_pending);
3124
3125 if (!(rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP))
3126 return;
3127
3128 if (obj_req->flags & RBD_OBJ_FLAG_COPYUP_ZEROS)
3129 return;
3130
3131 for (i = 0; i < snapc->num_snaps; i++) {
3132 if ((rbd_dev->header.features & RBD_FEATURE_FAST_DIFF) &&
3133 i + 1 < snapc->num_snaps)
3134 new_state = OBJECT_EXISTS_CLEAN;
3135 else
3136 new_state = OBJECT_EXISTS;
3137
3138 ret = rbd_object_map_update(obj_req, snapc->snaps[i],
3139 new_state, NULL);
3140 if (ret < 0) {
3141 obj_req->pending.result = ret;
3142 return;
3143 }
3144
3145 rbd_assert(!ret);
3146 obj_req->pending.num_pending++;
3147 }
3148}
3149
Ilya Dryomov793333a302019-06-13 17:44:08 +02003150static void rbd_obj_copyup_write_object(struct rbd_obj_request *obj_req)
3151{
3152 u32 bytes = rbd_obj_img_extents_bytes(obj_req);
3153 int ret;
3154
3155 rbd_assert(!obj_req->pending.result && !obj_req->pending.num_pending);
3156
3157 /*
3158 * Only send non-zero copyup data to save some I/O and network
3159 * bandwidth -- zero copyup data is equivalent to the object not
3160 * existing.
3161 */
3162 if (obj_req->flags & RBD_OBJ_FLAG_COPYUP_ZEROS)
3163 bytes = 0;
3164
3165 if (obj_req->img_request->snapc->num_snaps && bytes > 0) {
3166 /*
3167 * Send a copyup request with an empty snapshot context to
3168 * deep-copyup the object through all existing snapshots.
3169 * A second request with the current snapshot context will be
3170 * sent for the actual modification.
3171 */
3172 ret = rbd_obj_copyup_empty_snapc(obj_req, bytes);
3173 if (ret) {
3174 obj_req->pending.result = ret;
3175 return;
3176 }
3177
3178 obj_req->pending.num_pending++;
3179 bytes = MODS_ONLY;
3180 }
3181
3182 ret = rbd_obj_copyup_current_snapc(obj_req, bytes);
3183 if (ret) {
3184 obj_req->pending.result = ret;
3185 return;
3186 }
3187
3188 obj_req->pending.num_pending++;
3189}
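
/*
 * Sketch of the requests issued above for an image with snapshots and
 * non-zero copyup data (illustrative, see the comments in the function):
 *
 *   1. copyup with an empty snapshot context -- deep-copyup the parent
 *      data into the clone and its existing snapshots;
 *   2. MODS_ONLY request with the current snapshot context -- the actual
 *      modification ops on top.
 *
 * With no snapshots, or with all-zero copyup data, a single request with
 * the current snapshot context is sent instead.
 */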
3190
3191static bool rbd_obj_advance_copyup(struct rbd_obj_request *obj_req, int *result)
3192{
Ilya Dryomov22e8bd52019-06-05 19:25:11 +02003193 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
Ilya Dryomov793333a302019-06-13 17:44:08 +02003194 int ret;
3195
3196again:
3197 switch (obj_req->copyup_state) {
3198 case RBD_OBJ_COPYUP_START:
3199 rbd_assert(!*result);
3200
3201 ret = rbd_obj_copyup_read_parent(obj_req);
3202 if (ret) {
3203 *result = ret;
3204 return true;
3205 }
3206 if (obj_req->num_img_extents)
3207 obj_req->copyup_state = RBD_OBJ_COPYUP_READ_PARENT;
3208 else
3209 obj_req->copyup_state = RBD_OBJ_COPYUP_WRITE_OBJECT;
3210 return false;
3211 case RBD_OBJ_COPYUP_READ_PARENT:
3212 if (*result)
3213 return true;
3214
3215 if (is_zero_bvecs(obj_req->copyup_bvecs,
3216 rbd_obj_img_extents_bytes(obj_req))) {
3217 dout("%s %p detected zeros\n", __func__, obj_req);
3218 obj_req->flags |= RBD_OBJ_FLAG_COPYUP_ZEROS;
3219 }
3220
Ilya Dryomov22e8bd52019-06-05 19:25:11 +02003221 rbd_obj_copyup_object_maps(obj_req);
3222 if (!obj_req->pending.num_pending) {
3223 *result = obj_req->pending.result;
3224 obj_req->copyup_state = RBD_OBJ_COPYUP_OBJECT_MAPS;
3225 goto again;
3226 }
3227 obj_req->copyup_state = __RBD_OBJ_COPYUP_OBJECT_MAPS;
3228 return false;
3229 case __RBD_OBJ_COPYUP_OBJECT_MAPS:
3230 if (!pending_result_dec(&obj_req->pending, result))
Ilya Dryomov3da691b2018-01-29 14:04:08 +01003231 return false;
Gustavo A. R. Silvadf561f662020-08-23 17:36:59 -05003232 fallthrough;
Ilya Dryomov22e8bd52019-06-05 19:25:11 +02003233 case RBD_OBJ_COPYUP_OBJECT_MAPS:
3234 if (*result) {
3235 rbd_warn(rbd_dev, "snap object map update failed: %d",
3236 *result);
3237 return true;
3238 }
3239
Ilya Dryomov793333a302019-06-13 17:44:08 +02003240 rbd_obj_copyup_write_object(obj_req);
3241 if (!obj_req->pending.num_pending) {
3242 *result = obj_req->pending.result;
3243 obj_req->copyup_state = RBD_OBJ_COPYUP_WRITE_OBJECT;
3244 goto again;
3245 }
3246 obj_req->copyup_state = __RBD_OBJ_COPYUP_WRITE_OBJECT;
3247 return false;
3248 case __RBD_OBJ_COPYUP_WRITE_OBJECT:
3249 if (!pending_result_dec(&obj_req->pending, result))
3250 return false;
Gustavo A. R. Silvadf561f662020-08-23 17:36:59 -05003251 fallthrough;
Ilya Dryomov793333a302019-06-13 17:44:08 +02003252 case RBD_OBJ_COPYUP_WRITE_OBJECT:
Ilya Dryomov3da691b2018-01-29 14:04:08 +01003253 return true;
Ilya Dryomov3da691b2018-01-29 14:04:08 +01003254 default:
Arnd Bergmannc6244b32018-04-04 14:53:39 +02003255 BUG();
Ilya Dryomov3da691b2018-01-29 14:04:08 +01003256 }
3257}
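
/*
 * Copyup states, roughly:
 *
 *   START -> READ_PARENT -> (__)OBJECT_MAPS -> (__)WRITE_OBJECT -> done
 *
 * The "__" variants wait for outstanding replies of the preceding step.
 * READ_PARENT and the snap object map updates are skipped entirely when
 * the parent overlap has shrunk to 0, and all-zero parent data
 * short-circuits the snap object map updates as well.
 */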
3258
3259/*
Ilya Dryomov22e8bd52019-06-05 19:25:11 +02003260 * Return:
3261 * 0 - object map update sent
3262 * 1 - object map update isn't needed
3263 * <0 - error
Ilya Dryomov3da691b2018-01-29 14:04:08 +01003264 */
Ilya Dryomov22e8bd52019-06-05 19:25:11 +02003265static int rbd_obj_write_post_object_map(struct rbd_obj_request *obj_req)
Ilya Dryomov3da691b2018-01-29 14:04:08 +01003266{
Ilya Dryomov22e8bd52019-06-05 19:25:11 +02003267 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
3268 u8 current_state = OBJECT_PENDING;
3269
3270 if (!(rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP))
3271 return 1;
3272
3273 if (!(obj_req->flags & RBD_OBJ_FLAG_DELETION))
3274 return 1;
3275
3276 return rbd_object_map_update(obj_req, CEPH_NOSNAP, OBJECT_NONEXISTENT,
3277 &current_state);
3278}
3279
Ilya Dryomov85b5e6d2019-05-14 21:06:07 +02003280static bool rbd_obj_advance_write(struct rbd_obj_request *obj_req, int *result)
Ilya Dryomov3da691b2018-01-29 14:04:08 +01003281{
Ilya Dryomov793333a302019-06-13 17:44:08 +02003282 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
Ilya Dryomov3da691b2018-01-29 14:04:08 +01003283 int ret;
3284
Ilya Dryomov793333a302019-06-13 17:44:08 +02003285again:
Ilya Dryomov3da691b2018-01-29 14:04:08 +01003286 switch (obj_req->write_state) {
Ilya Dryomov85b5e6d2019-05-14 21:06:07 +02003287 case RBD_OBJ_WRITE_START:
3288 rbd_assert(!*result);
3289
Ilya Dryomov22e8bd52019-06-05 19:25:11 +02003290 if (rbd_obj_write_is_noop(obj_req))
3291 return true;
3292
3293 ret = rbd_obj_write_pre_object_map(obj_req);
3294 if (ret < 0) {
3295 *result = ret;
Ilya Dryomov3da691b2018-01-29 14:04:08 +01003296 return true;
3297 }
Ilya Dryomov22e8bd52019-06-05 19:25:11 +02003298 obj_req->write_state = RBD_OBJ_WRITE_PRE_OBJECT_MAP;
3299 if (ret > 0)
3300 goto again;
Ilya Dryomov3da691b2018-01-29 14:04:08 +01003301 return false;
Ilya Dryomov22e8bd52019-06-05 19:25:11 +02003302 case RBD_OBJ_WRITE_PRE_OBJECT_MAP:
3303 if (*result) {
3304 rbd_warn(rbd_dev, "pre object map update failed: %d",
3305 *result);
3306 return true;
3307 }
Ilya Dryomov85b5e6d2019-05-14 21:06:07 +02003308 ret = rbd_obj_write_object(obj_req);
3309 if (ret) {
3310 *result = ret;
3311 return true;
3312 }
3313 obj_req->write_state = RBD_OBJ_WRITE_OBJECT;
3314 return false;
Ilya Dryomov0ad5d952019-05-14 20:45:38 +02003315 case RBD_OBJ_WRITE_OBJECT:
Ilya Dryomov54ab3b22019-05-11 16:21:49 +02003316 if (*result == -ENOENT) {
Ilya Dryomov0ad5d952019-05-14 20:45:38 +02003317 if (obj_req->flags & RBD_OBJ_FLAG_COPYUP_ENABLED) {
Ilya Dryomov793333a302019-06-13 17:44:08 +02003318 *result = 0;
3319 obj_req->copyup_state = RBD_OBJ_COPYUP_START;
3320 obj_req->write_state = __RBD_OBJ_WRITE_COPYUP;
3321 goto again;
Ilya Dryomov3da691b2018-01-29 14:04:08 +01003322 }
Ilya Dryomov0ad5d952019-05-14 20:45:38 +02003323 /*
3324 * On a non-existent object:
3325 * delete - -ENOENT, truncate/zero - 0
3326 */
3327 if (obj_req->flags & RBD_OBJ_FLAG_DELETION)
3328 *result = 0;
Ilya Dryomov3da691b2018-01-29 14:04:08 +01003329 }
Ilya Dryomov793333a302019-06-13 17:44:08 +02003330 if (*result)
3331 return true;
3332
3333 obj_req->write_state = RBD_OBJ_WRITE_COPYUP;
3334 goto again;
3335 case __RBD_OBJ_WRITE_COPYUP:
3336 if (!rbd_obj_advance_copyup(obj_req, result))
3337 return false;
Gustavo A. R. Silvadf561f662020-08-23 17:36:59 -05003338 fallthrough;
Ilya Dryomov793333a302019-06-13 17:44:08 +02003339 case RBD_OBJ_WRITE_COPYUP:
Ilya Dryomov22e8bd52019-06-05 19:25:11 +02003340 if (*result) {
Ilya Dryomov793333a302019-06-13 17:44:08 +02003341 rbd_warn(rbd_dev, "copyup failed: %d", *result);
Ilya Dryomov22e8bd52019-06-05 19:25:11 +02003342 return true;
3343 }
3344 ret = rbd_obj_write_post_object_map(obj_req);
3345 if (ret < 0) {
3346 *result = ret;
3347 return true;
3348 }
3349 obj_req->write_state = RBD_OBJ_WRITE_POST_OBJECT_MAP;
3350 if (ret > 0)
3351 goto again;
3352 return false;
3353 case RBD_OBJ_WRITE_POST_OBJECT_MAP:
3354 if (*result)
3355 rbd_warn(rbd_dev, "post object map update failed: %d",
3356 *result);
Ilya Dryomov3da691b2018-01-29 14:04:08 +01003357 return true;
Ilya Dryomov3da691b2018-01-29 14:04:08 +01003358 default:
Arnd Bergmannc6244b32018-04-04 14:53:39 +02003359 BUG();
Ilya Dryomov3da691b2018-01-29 14:04:08 +01003360 }
3361}
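
/*
 * Write states, roughly:
 *
 *   START -> PRE_OBJECT_MAP -> OBJECT -> (__)COPYUP -> POST_OBJECT_MAP -> done
 *
 * The object map steps turn into no-ops when the object-map feature is
 * not enabled.  The copyup sub-state machine runs only when the write
 * hits -ENOENT and RBD_OBJ_FLAG_COPYUP_ENABLED is set for the request;
 * otherwise COPYUP is passed straight through.
 */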
3362
Ilya Dryomov3da691b2018-01-29 14:04:08 +01003363/*
Ilya Dryomov0ad5d952019-05-14 20:45:38 +02003364 * Return true if @obj_req is completed.
Ilya Dryomov3da691b2018-01-29 14:04:08 +01003365 */
Ilya Dryomov54ab3b22019-05-11 16:21:49 +02003366static bool __rbd_obj_handle_request(struct rbd_obj_request *obj_req,
3367 int *result)
Ilya Dryomov7114eda2018-02-01 11:50:47 +01003368{
3369 struct rbd_img_request *img_req = obj_req->img_request;
Ilya Dryomov0192ce22019-05-16 15:06:56 +02003370 struct rbd_device *rbd_dev = img_req->rbd_dev;
Ilya Dryomov0ad5d952019-05-14 20:45:38 +02003371 bool done;
Ilya Dryomov7114eda2018-02-01 11:50:47 +01003372
Ilya Dryomov85b5e6d2019-05-14 21:06:07 +02003373 mutex_lock(&obj_req->state_mutex);
Ilya Dryomov0ad5d952019-05-14 20:45:38 +02003374 if (!rbd_img_is_write(img_req))
Ilya Dryomov85b5e6d2019-05-14 21:06:07 +02003375 done = rbd_obj_advance_read(obj_req, result);
Ilya Dryomov0ad5d952019-05-14 20:45:38 +02003376 else
Ilya Dryomov85b5e6d2019-05-14 21:06:07 +02003377 done = rbd_obj_advance_write(obj_req, result);
3378 mutex_unlock(&obj_req->state_mutex);
Alex Elder02c74fb2013-05-06 17:40:33 -05003379
Ilya Dryomov0192ce22019-05-16 15:06:56 +02003380 if (done && *result) {
3381 rbd_assert(*result < 0);
3382 rbd_warn(rbd_dev, "%s at objno %llu %llu~%llu result %d",
3383 obj_op_name(img_req->op_type), obj_req->ex.oe_objno,
3384 obj_req->ex.oe_off, obj_req->ex.oe_len, *result);
Alex Eldera9e8ba2c2013-04-21 00:32:07 -05003385 }
Ilya Dryomov0ad5d952019-05-14 20:45:38 +02003386 return done;
Alex Elder8b3e1a52013-01-24 16:13:36 -06003387}
3388
Ilya Dryomov0192ce22019-05-16 15:06:56 +02003389/*
3390 * This is open-coded in rbd_img_handle_request() to avoid parent chain
3391 * recursion.
3392 */
Ilya Dryomov54ab3b22019-05-11 16:21:49 +02003393static void rbd_obj_handle_request(struct rbd_obj_request *obj_req, int result)
Alex Elder8b3e1a52013-01-24 16:13:36 -06003394{
Ilya Dryomov0192ce22019-05-16 15:06:56 +02003395 if (__rbd_obj_handle_request(obj_req, &result))
3396 rbd_img_handle_request(obj_req->img_request, result);
Ilya Dryomov7114eda2018-02-01 11:50:47 +01003397}
Alex Elder8b3e1a52013-01-24 16:13:36 -06003398
Ilya Dryomove1fddc82019-05-30 16:07:48 +02003399static bool need_exclusive_lock(struct rbd_img_request *img_req)
Ilya Dryomov7114eda2018-02-01 11:50:47 +01003400{
Ilya Dryomove1fddc82019-05-30 16:07:48 +02003401 struct rbd_device *rbd_dev = img_req->rbd_dev;
3402
3403 if (!(rbd_dev->header.features & RBD_FEATURE_EXCLUSIVE_LOCK))
3404 return false;
3405
Ilya Dryomov3fe69922019-11-12 19:41:48 +01003406 if (rbd_is_ro(rbd_dev))
Ilya Dryomove1fddc82019-05-30 16:07:48 +02003407 return false;
3408
Ilya Dryomov7114eda2018-02-01 11:50:47 +01003409 rbd_assert(!test_bit(IMG_REQ_CHILD, &img_req->flags));
Ilya Dryomov22e8bd52019-06-05 19:25:11 +02003410 if (rbd_dev->opts->lock_on_read ||
3411 (rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP))
Ilya Dryomove1fddc82019-05-30 16:07:48 +02003412 return true;
Alex Elder8b3e1a52013-01-24 16:13:36 -06003413
Ilya Dryomove1fddc82019-05-30 16:07:48 +02003414 return rbd_img_is_write(img_req);
Ilya Dryomov3da691b2018-01-29 14:04:08 +01003415}
Alex Elder8b3e1a52013-01-24 16:13:36 -06003416
Ilya Dryomov637cd062019-06-06 17:14:49 +02003417static bool rbd_lock_add_request(struct rbd_img_request *img_req)
Ilya Dryomov3da691b2018-01-29 14:04:08 +01003418{
Ilya Dryomove1fddc82019-05-30 16:07:48 +02003419 struct rbd_device *rbd_dev = img_req->rbd_dev;
Ilya Dryomov637cd062019-06-06 17:14:49 +02003420 bool locked;
Ilya Dryomove1fddc82019-05-30 16:07:48 +02003421
3422 lockdep_assert_held(&rbd_dev->lock_rwsem);
Ilya Dryomov637cd062019-06-06 17:14:49 +02003423 locked = rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED;
Ilya Dryomove1fddc82019-05-30 16:07:48 +02003424 spin_lock(&rbd_dev->lock_lists_lock);
3425 rbd_assert(list_empty(&img_req->lock_item));
Ilya Dryomov637cd062019-06-06 17:14:49 +02003426 if (!locked)
3427 list_add_tail(&img_req->lock_item, &rbd_dev->acquiring_list);
3428 else
3429 list_add_tail(&img_req->lock_item, &rbd_dev->running_list);
Ilya Dryomove1fddc82019-05-30 16:07:48 +02003430 spin_unlock(&rbd_dev->lock_lists_lock);
Ilya Dryomov637cd062019-06-06 17:14:49 +02003431 return locked;
Ilya Dryomove1fddc82019-05-30 16:07:48 +02003432}
3433
3434static void rbd_lock_del_request(struct rbd_img_request *img_req)
3435{
3436 struct rbd_device *rbd_dev = img_req->rbd_dev;
3437 bool need_wakeup;
3438
3439 lockdep_assert_held(&rbd_dev->lock_rwsem);
3440 spin_lock(&rbd_dev->lock_lists_lock);
3441 rbd_assert(!list_empty(&img_req->lock_item));
3442 list_del_init(&img_req->lock_item);
3443 need_wakeup = (rbd_dev->lock_state == RBD_LOCK_STATE_RELEASING &&
3444 list_empty(&rbd_dev->running_list));
3445 spin_unlock(&rbd_dev->lock_lists_lock);
3446 if (need_wakeup)
3447 complete(&rbd_dev->releasing_wait);
3448}
3449
Ilya Dryomov637cd062019-06-06 17:14:49 +02003450static int rbd_img_exclusive_lock(struct rbd_img_request *img_req)
3451{
3452 struct rbd_device *rbd_dev = img_req->rbd_dev;
3453
3454 if (!need_exclusive_lock(img_req))
3455 return 1;
3456
3457 if (rbd_lock_add_request(img_req))
3458 return 1;
3459
3460 if (rbd_dev->opts->exclusive) {
3461 WARN_ON(1); /* lock got released? */
3462 return -EROFS;
3463 }
3464
3465 /*
3466 * Note the use of mod_delayed_work() in rbd_acquire_lock()
3467 * and cancel_delayed_work() in wake_lock_waiters().
3468 */
3469 dout("%s rbd_dev %p queueing lock_dwork\n", __func__, rbd_dev);
3470 queue_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, 0);
3471 return 0;
3472}
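
/*
 * Return convention above: 1 -- the request may proceed right away (no
 * exclusive lock needed, or the lock is held and the request is now on
 * the running list); 0 -- the request was parked on the acquiring list
 * and will be restarted by wake_lock_waiters() once lock_dwork settles
 * the lock; <0 -- error (e.g. -EROFS when mapped with the "exclusive"
 * option but the lock is not held).
 */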
3473
Ilya Dryomov0192ce22019-05-16 15:06:56 +02003474static void rbd_img_object_requests(struct rbd_img_request *img_req)
3475{
3476 struct rbd_obj_request *obj_req;
3477
3478 rbd_assert(!img_req->pending.result && !img_req->pending.num_pending);
3479
3480 for_each_obj_request(img_req, obj_req) {
3481 int result = 0;
3482
3483 if (__rbd_obj_handle_request(obj_req, &result)) {
3484 if (result) {
3485 img_req->pending.result = result;
3486 return;
3487 }
3488 } else {
3489 img_req->pending.num_pending++;
3490 }
3491 }
3492}
3493
3494static bool rbd_img_advance(struct rbd_img_request *img_req, int *result)
3495{
Ilya Dryomov637cd062019-06-06 17:14:49 +02003496 struct rbd_device *rbd_dev = img_req->rbd_dev;
3497 int ret;
Ilya Dryomov7114eda2018-02-01 11:50:47 +01003498
3499again:
Ilya Dryomov0192ce22019-05-16 15:06:56 +02003500 switch (img_req->state) {
3501 case RBD_IMG_START:
3502 rbd_assert(!*result);
Ilya Dryomov3da691b2018-01-29 14:04:08 +01003503
Ilya Dryomov637cd062019-06-06 17:14:49 +02003504 ret = rbd_img_exclusive_lock(img_req);
3505 if (ret < 0) {
3506 *result = ret;
3507 return true;
3508 }
3509 img_req->state = RBD_IMG_EXCLUSIVE_LOCK;
3510 if (ret > 0)
3511 goto again;
3512 return false;
3513 case RBD_IMG_EXCLUSIVE_LOCK:
3514 if (*result)
3515 return true;
3516
3517 rbd_assert(!need_exclusive_lock(img_req) ||
3518 __rbd_is_lock_owner(rbd_dev));
3519
Ilya Dryomov0192ce22019-05-16 15:06:56 +02003520 rbd_img_object_requests(img_req);
3521 if (!img_req->pending.num_pending) {
3522 *result = img_req->pending.result;
3523 img_req->state = RBD_IMG_OBJECT_REQUESTS;
3524 goto again;
3525 }
3526 img_req->state = __RBD_IMG_OBJECT_REQUESTS;
3527 return false;
3528 case __RBD_IMG_OBJECT_REQUESTS:
3529 if (!pending_result_dec(&img_req->pending, result))
3530 return false;
Gustavo A. R. Silvadf561f662020-08-23 17:36:59 -05003531 fallthrough;
Ilya Dryomov0192ce22019-05-16 15:06:56 +02003532 case RBD_IMG_OBJECT_REQUESTS:
3533 return true;
3534 default:
3535 BUG();
Ilya Dryomov7114eda2018-02-01 11:50:47 +01003536 }
Ilya Dryomov0192ce22019-05-16 15:06:56 +02003537}
Ilya Dryomov7114eda2018-02-01 11:50:47 +01003538
Ilya Dryomov0192ce22019-05-16 15:06:56 +02003539/*
3540 * Return true if @img_req is completed.
3541 */
3542static bool __rbd_img_handle_request(struct rbd_img_request *img_req,
3543 int *result)
3544{
3545 struct rbd_device *rbd_dev = img_req->rbd_dev;
3546 bool done;
3547
Ilya Dryomove1fddc82019-05-30 16:07:48 +02003548 if (need_exclusive_lock(img_req)) {
3549 down_read(&rbd_dev->lock_rwsem);
3550 mutex_lock(&img_req->state_mutex);
3551 done = rbd_img_advance(img_req, result);
3552 if (done)
3553 rbd_lock_del_request(img_req);
3554 mutex_unlock(&img_req->state_mutex);
3555 up_read(&rbd_dev->lock_rwsem);
3556 } else {
3557 mutex_lock(&img_req->state_mutex);
3558 done = rbd_img_advance(img_req, result);
3559 mutex_unlock(&img_req->state_mutex);
Ilya Dryomov7114eda2018-02-01 11:50:47 +01003560 }
3561
Ilya Dryomov0192ce22019-05-16 15:06:56 +02003562 if (done && *result) {
3563 rbd_assert(*result < 0);
3564 rbd_warn(rbd_dev, "%s%s result %d",
3565 test_bit(IMG_REQ_CHILD, &img_req->flags) ? "child " : "",
3566 obj_op_name(img_req->op_type), *result);
3567 }
3568 return done;
3569}
3570
3571static void rbd_img_handle_request(struct rbd_img_request *img_req, int result)
3572{
3573again:
3574 if (!__rbd_img_handle_request(img_req, &result))
3575 return;
3576
Ilya Dryomov7114eda2018-02-01 11:50:47 +01003577 if (test_bit(IMG_REQ_CHILD, &img_req->flags)) {
Ilya Dryomov0192ce22019-05-16 15:06:56 +02003578 struct rbd_obj_request *obj_req = img_req->obj_request;
3579
Hannes Reinecke679a97d2020-01-31 11:37:36 +01003580 rbd_img_request_destroy(img_req);
Ilya Dryomov0192ce22019-05-16 15:06:56 +02003581 if (__rbd_obj_handle_request(obj_req, &result)) {
3582 img_req = obj_req->img_request;
3583 goto again;
3584 }
3585 } else {
Ilya Dryomov59e542c2020-02-12 15:23:58 +01003586 struct request *rq = blk_mq_rq_from_pdu(img_req);
Ilya Dryomov0192ce22019-05-16 15:06:56 +02003587
Hannes Reinecke679a97d2020-01-31 11:37:36 +01003588 rbd_img_request_destroy(img_req);
Ilya Dryomov0192ce22019-05-16 15:06:56 +02003589 blk_mq_end_request(rq, errno_to_blk_status(result));
Ilya Dryomov7114eda2018-02-01 11:50:47 +01003590 }
Alex Elder8b3e1a52013-01-24 16:13:36 -06003591}
3592
Ilya Dryomoved95b212016-08-12 16:40:02 +02003593static const struct rbd_client_id rbd_empty_cid;
3594
3595static bool rbd_cid_equal(const struct rbd_client_id *lhs,
3596 const struct rbd_client_id *rhs)
3597{
3598 return lhs->gid == rhs->gid && lhs->handle == rhs->handle;
3599}
3600
3601static struct rbd_client_id rbd_get_cid(struct rbd_device *rbd_dev)
3602{
3603 struct rbd_client_id cid;
3604
3605 mutex_lock(&rbd_dev->watch_mutex);
3606 cid.gid = ceph_client_gid(rbd_dev->rbd_client->client);
3607 cid.handle = rbd_dev->watch_cookie;
3608 mutex_unlock(&rbd_dev->watch_mutex);
3609 return cid;
3610}
3611
3612/*
3613 * lock_rwsem must be held for write
3614 */
3615static void rbd_set_owner_cid(struct rbd_device *rbd_dev,
3616 const struct rbd_client_id *cid)
3617{
3618 dout("%s rbd_dev %p %llu-%llu -> %llu-%llu\n", __func__, rbd_dev,
3619 rbd_dev->owner_cid.gid, rbd_dev->owner_cid.handle,
3620 cid->gid, cid->handle);
3621 rbd_dev->owner_cid = *cid; /* struct */
3622}
3623
3624static void format_lock_cookie(struct rbd_device *rbd_dev, char *buf)
3625{
3626 mutex_lock(&rbd_dev->watch_mutex);
3627 sprintf(buf, "%s %llu", RBD_LOCK_COOKIE_PREFIX, rbd_dev->watch_cookie);
3628 mutex_unlock(&rbd_dev->watch_mutex);
3629}
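
/*
 * The resulting cookie is "<RBD_LOCK_COOKIE_PREFIX> <watch cookie>", e.g.
 * something like "auto 94843103127840" (an illustrative value).
 * find_watcher() parses it back with sscanf() using the same format.
 */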
3630
Florian Margaineedd8ca82017-12-13 16:43:59 +01003631static void __rbd_lock(struct rbd_device *rbd_dev, const char *cookie)
3632{
3633 struct rbd_client_id cid = rbd_get_cid(rbd_dev);
3634
Ilya Dryomova2b1da02019-05-30 11:15:23 +02003635 rbd_dev->lock_state = RBD_LOCK_STATE_LOCKED;
Florian Margaineedd8ca82017-12-13 16:43:59 +01003636 strcpy(rbd_dev->lock_cookie, cookie);
3637 rbd_set_owner_cid(rbd_dev, &cid);
3638 queue_work(rbd_dev->task_wq, &rbd_dev->acquired_lock_work);
3639}
3640
Ilya Dryomoved95b212016-08-12 16:40:02 +02003641/*
3642 * lock_rwsem must be held for write
3643 */
3644static int rbd_lock(struct rbd_device *rbd_dev)
3645{
3646 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
Ilya Dryomoved95b212016-08-12 16:40:02 +02003647 char cookie[32];
3648 int ret;
3649
Ilya Dryomovcbbfb0f2017-04-13 12:17:38 +02003650 WARN_ON(__rbd_is_lock_owner(rbd_dev) ||
3651 rbd_dev->lock_cookie[0] != '\0');
Ilya Dryomoved95b212016-08-12 16:40:02 +02003652
3653 format_lock_cookie(rbd_dev, cookie);
3654 ret = ceph_cls_lock(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
3655 RBD_LOCK_NAME, CEPH_CLS_LOCK_EXCLUSIVE, cookie,
3656 RBD_LOCK_TAG, "", 0);
3657 if (ret)
3658 return ret;
3659
Florian Margaineedd8ca82017-12-13 16:43:59 +01003660 __rbd_lock(rbd_dev, cookie);
Ilya Dryomoved95b212016-08-12 16:40:02 +02003661 return 0;
3662}
3663
3664/*
3665 * lock_rwsem must be held for write
3666 */
Ilya Dryomovbbead742017-04-13 12:17:38 +02003667static void rbd_unlock(struct rbd_device *rbd_dev)
Ilya Dryomoved95b212016-08-12 16:40:02 +02003668{
3669 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
Ilya Dryomoved95b212016-08-12 16:40:02 +02003670 int ret;
3671
Ilya Dryomovcbbfb0f2017-04-13 12:17:38 +02003672 WARN_ON(!__rbd_is_lock_owner(rbd_dev) ||
3673 rbd_dev->lock_cookie[0] == '\0');
Ilya Dryomoved95b212016-08-12 16:40:02 +02003674
Ilya Dryomoved95b212016-08-12 16:40:02 +02003675 ret = ceph_cls_unlock(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
Ilya Dryomovcbbfb0f2017-04-13 12:17:38 +02003676 RBD_LOCK_NAME, rbd_dev->lock_cookie);
Ilya Dryomovbbead742017-04-13 12:17:38 +02003677 if (ret && ret != -ENOENT)
Ilya Dryomov637cd062019-06-06 17:14:49 +02003678 rbd_warn(rbd_dev, "failed to unlock header: %d", ret);
Ilya Dryomoved95b212016-08-12 16:40:02 +02003679
Ilya Dryomovbbead742017-04-13 12:17:38 +02003680	/* treat errors as if the image were unlocked */
3681 rbd_dev->lock_state = RBD_LOCK_STATE_UNLOCKED;
Ilya Dryomovcbbfb0f2017-04-13 12:17:38 +02003682 rbd_dev->lock_cookie[0] = '\0';
Ilya Dryomoved95b212016-08-12 16:40:02 +02003683 rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
3684 queue_work(rbd_dev->task_wq, &rbd_dev->released_lock_work);
Ilya Dryomoved95b212016-08-12 16:40:02 +02003685}
3686
3687static int __rbd_notify_op_lock(struct rbd_device *rbd_dev,
3688 enum rbd_notify_op notify_op,
3689 struct page ***preply_pages,
3690 size_t *preply_len)
3691{
3692 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3693 struct rbd_client_id cid = rbd_get_cid(rbd_dev);
Kyle Spiers08a79102018-03-17 09:44:01 -07003694 char buf[4 + 8 + 8 + CEPH_ENCODING_START_BLK_LEN];
3695 int buf_size = sizeof(buf);
Ilya Dryomoved95b212016-08-12 16:40:02 +02003696 void *p = buf;
3697
3698 dout("%s rbd_dev %p notify_op %d\n", __func__, rbd_dev, notify_op);
3699
3700 /* encode *LockPayload NotifyMessage (op + ClientId) */
3701 ceph_start_encoding(&p, 2, 1, buf_size - CEPH_ENCODING_START_BLK_LEN);
3702 ceph_encode_32(&p, notify_op);
3703 ceph_encode_64(&p, cid.gid);
3704 ceph_encode_64(&p, cid.handle);
3705
3706 return ceph_osdc_notify(osdc, &rbd_dev->header_oid,
3707 &rbd_dev->header_oloc, buf, buf_size,
3708 RBD_NOTIFY_TIMEOUT, preply_pages, preply_len);
3709}
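
/*
 * Layout of the notify payload assembled above (buf is sized for exactly
 * this):
 *
 *   ceph_start_encoding() header            CEPH_ENCODING_START_BLK_LEN
 *   notify_op                               u32
 *   cid.gid                                 u64
 *   cid.handle                              u64
 */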
3710
3711static void rbd_notify_op_lock(struct rbd_device *rbd_dev,
3712 enum rbd_notify_op notify_op)
3713{
Ilya Dryomov8ae02992020-03-17 15:18:48 +01003714 __rbd_notify_op_lock(rbd_dev, notify_op, NULL, NULL);
Ilya Dryomoved95b212016-08-12 16:40:02 +02003715}
3716
3717static void rbd_notify_acquired_lock(struct work_struct *work)
3718{
3719 struct rbd_device *rbd_dev = container_of(work, struct rbd_device,
3720 acquired_lock_work);
3721
3722 rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_ACQUIRED_LOCK);
3723}
3724
3725static void rbd_notify_released_lock(struct work_struct *work)
3726{
3727 struct rbd_device *rbd_dev = container_of(work, struct rbd_device,
3728 released_lock_work);
3729
3730 rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_RELEASED_LOCK);
3731}
3732
3733static int rbd_request_lock(struct rbd_device *rbd_dev)
3734{
3735 struct page **reply_pages;
3736 size_t reply_len;
3737 bool lock_owner_responded = false;
3738 int ret;
3739
3740 dout("%s rbd_dev %p\n", __func__, rbd_dev);
3741
3742 ret = __rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_REQUEST_LOCK,
3743 &reply_pages, &reply_len);
3744 if (ret && ret != -ETIMEDOUT) {
3745 rbd_warn(rbd_dev, "failed to request lock: %d", ret);
3746 goto out;
3747 }
3748
3749 if (reply_len > 0 && reply_len <= PAGE_SIZE) {
3750 void *p = page_address(reply_pages[0]);
3751 void *const end = p + reply_len;
3752 u32 n;
3753
3754 ceph_decode_32_safe(&p, end, n, e_inval); /* num_acks */
3755 while (n--) {
3756 u8 struct_v;
3757 u32 len;
3758
3759 ceph_decode_need(&p, end, 8 + 8, e_inval);
3760 p += 8 + 8; /* skip gid and cookie */
3761
3762 ceph_decode_32_safe(&p, end, len, e_inval);
3763 if (!len)
3764 continue;
3765
3766 if (lock_owner_responded) {
3767 rbd_warn(rbd_dev,
3768 "duplicate lock owners detected");
3769 ret = -EIO;
3770 goto out;
3771 }
3772
3773 lock_owner_responded = true;
3774 ret = ceph_start_decoding(&p, end, 1, "ResponseMessage",
3775 &struct_v, &len);
3776 if (ret) {
3777 rbd_warn(rbd_dev,
3778 "failed to decode ResponseMessage: %d",
3779 ret);
3780 goto e_inval;
3781 }
3782
3783 ret = ceph_decode_32(&p);
3784 }
3785 }
3786
3787 if (!lock_owner_responded) {
3788 rbd_warn(rbd_dev, "no lock owners detected");
3789 ret = -ETIMEDOUT;
3790 }
3791
3792out:
3793 ceph_release_page_vector(reply_pages, calc_pages_for(0, reply_len));
3794 return ret;
3795
3796e_inval:
3797 ret = -EINVAL;
3798 goto out;
3799}
3800
Ilya Dryomov637cd062019-06-06 17:14:49 +02003801/*
3802 * Either image request state machine(s) or rbd_add_acquire_lock()
3803 * (i.e. "rbd map").
3804 */
3805static void wake_lock_waiters(struct rbd_device *rbd_dev, int result)
Ilya Dryomoved95b212016-08-12 16:40:02 +02003806{
Ilya Dryomov637cd062019-06-06 17:14:49 +02003807 struct rbd_img_request *img_req;
3808
3809 dout("%s rbd_dev %p result %d\n", __func__, rbd_dev, result);
Linus Torvaldsd9b9c892019-07-18 11:05:25 -07003810 lockdep_assert_held_write(&rbd_dev->lock_rwsem);
Ilya Dryomoved95b212016-08-12 16:40:02 +02003811
3812 cancel_delayed_work(&rbd_dev->lock_dwork);
Ilya Dryomov637cd062019-06-06 17:14:49 +02003813 if (!completion_done(&rbd_dev->acquire_wait)) {
3814 rbd_assert(list_empty(&rbd_dev->acquiring_list) &&
3815 list_empty(&rbd_dev->running_list));
3816 rbd_dev->acquire_err = result;
3817 complete_all(&rbd_dev->acquire_wait);
3818 return;
3819 }
3820
3821 list_for_each_entry(img_req, &rbd_dev->acquiring_list, lock_item) {
3822 mutex_lock(&img_req->state_mutex);
3823 rbd_assert(img_req->state == RBD_IMG_EXCLUSIVE_LOCK);
3824 rbd_img_schedule(img_req, result);
3825 mutex_unlock(&img_req->state_mutex);
3826 }
3827
3828 list_splice_tail_init(&rbd_dev->acquiring_list, &rbd_dev->running_list);
Ilya Dryomoved95b212016-08-12 16:40:02 +02003829}
3830
3831static int get_lock_owner_info(struct rbd_device *rbd_dev,
3832 struct ceph_locker **lockers, u32 *num_lockers)
3833{
3834 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3835 u8 lock_type;
3836 char *lock_tag;
3837 int ret;
3838
3839 dout("%s rbd_dev %p\n", __func__, rbd_dev);
3840
3841 ret = ceph_cls_lock_info(osdc, &rbd_dev->header_oid,
3842 &rbd_dev->header_oloc, RBD_LOCK_NAME,
3843 &lock_type, &lock_tag, lockers, num_lockers);
3844 if (ret)
3845 return ret;
3846
3847 if (*num_lockers == 0) {
3848 dout("%s rbd_dev %p no lockers detected\n", __func__, rbd_dev);
3849 goto out;
3850 }
3851
3852 if (strcmp(lock_tag, RBD_LOCK_TAG)) {
3853 rbd_warn(rbd_dev, "locked by external mechanism, tag %s",
3854 lock_tag);
3855 ret = -EBUSY;
3856 goto out;
3857 }
3858
3859 if (lock_type == CEPH_CLS_LOCK_SHARED) {
3860 rbd_warn(rbd_dev, "shared lock type detected");
3861 ret = -EBUSY;
3862 goto out;
3863 }
3864
3865 if (strncmp((*lockers)[0].id.cookie, RBD_LOCK_COOKIE_PREFIX,
3866 strlen(RBD_LOCK_COOKIE_PREFIX))) {
3867 rbd_warn(rbd_dev, "locked by external mechanism, cookie %s",
3868 (*lockers)[0].id.cookie);
3869 ret = -EBUSY;
3870 goto out;
3871 }
3872
3873out:
3874 kfree(lock_tag);
3875 return ret;
3876}
3877
3878static int find_watcher(struct rbd_device *rbd_dev,
3879 const struct ceph_locker *locker)
3880{
3881 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3882 struct ceph_watch_item *watchers;
3883 u32 num_watchers;
3884 u64 cookie;
3885 int i;
3886 int ret;
3887
3888 ret = ceph_osdc_list_watchers(osdc, &rbd_dev->header_oid,
3889 &rbd_dev->header_oloc, &watchers,
3890 &num_watchers);
3891 if (ret)
3892 return ret;
3893
3894 sscanf(locker->id.cookie, RBD_LOCK_COOKIE_PREFIX " %llu", &cookie);
3895 for (i = 0; i < num_watchers; i++) {
Ilya Dryomov313771e2020-11-25 14:41:59 +01003896 /*
3897 * Ignore addr->type while comparing. This mimics
3898 * entity_addr_t::get_legacy_str() + strcmp().
3899 */
3900 if (ceph_addr_equal_no_type(&watchers[i].addr,
3901 &locker->info.addr) &&
Ilya Dryomoved95b212016-08-12 16:40:02 +02003902 watchers[i].cookie == cookie) {
3903 struct rbd_client_id cid = {
3904 .gid = le64_to_cpu(watchers[i].name.num),
3905 .handle = cookie,
3906 };
3907
3908 dout("%s rbd_dev %p found cid %llu-%llu\n", __func__,
3909 rbd_dev, cid.gid, cid.handle);
3910 rbd_set_owner_cid(rbd_dev, &cid);
3911 ret = 1;
3912 goto out;
3913 }
3914 }
3915
3916 dout("%s rbd_dev %p no watchers\n", __func__, rbd_dev);
3917 ret = 0;
3918out:
3919 kfree(watchers);
3920 return ret;
3921}
3922
3923/*
3924 * lock_rwsem must be held for write
3925 */
3926static int rbd_try_lock(struct rbd_device *rbd_dev)
3927{
3928 struct ceph_client *client = rbd_dev->rbd_client->client;
3929 struct ceph_locker *lockers;
3930 u32 num_lockers;
3931 int ret;
3932
3933 for (;;) {
3934 ret = rbd_lock(rbd_dev);
3935 if (ret != -EBUSY)
3936 return ret;
3937
3938 /* determine if the current lock holder is still alive */
3939 ret = get_lock_owner_info(rbd_dev, &lockers, &num_lockers);
3940 if (ret)
3941 return ret;
3942
3943 if (num_lockers == 0)
3944 goto again;
3945
3946 ret = find_watcher(rbd_dev, lockers);
Ilya Dryomov637cd062019-06-06 17:14:49 +02003947 if (ret)
3948 goto out; /* request lock or error */
Ilya Dryomoved95b212016-08-12 16:40:02 +02003949
Ilya Dryomov22e8bd52019-06-05 19:25:11 +02003950 rbd_warn(rbd_dev, "breaking header lock owned by %s%llu",
Ilya Dryomoved95b212016-08-12 16:40:02 +02003951 ENTITY_NAME(lockers[0].id.name));
3952
Ilya Dryomov0b98acd2020-09-14 13:39:19 +02003953 ret = ceph_monc_blocklist_add(&client->monc,
Ilya Dryomoved95b212016-08-12 16:40:02 +02003954 &lockers[0].info.addr);
3955 if (ret) {
Ilya Dryomov0b98acd2020-09-14 13:39:19 +02003956 rbd_warn(rbd_dev, "blocklist of %s%llu failed: %d",
Ilya Dryomoved95b212016-08-12 16:40:02 +02003957 ENTITY_NAME(lockers[0].id.name), ret);
3958 goto out;
3959 }
3960
3961 ret = ceph_cls_break_lock(&client->osdc, &rbd_dev->header_oid,
3962 &rbd_dev->header_oloc, RBD_LOCK_NAME,
3963 lockers[0].id.cookie,
3964 &lockers[0].id.name);
3965 if (ret && ret != -ENOENT)
3966 goto out;
3967
3968again:
3969 ceph_free_lockers(lockers, num_lockers);
3970 }
3971
3972out:
3973 ceph_free_lockers(lockers, num_lockers);
3974 return ret;
3975}
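
/*
 * Summary of the loop above: try to take the lock; on -EBUSY, look up the
 * current holder and check whether it still has a watch on the header
 * object.  A holder without a watch is presumed dead -- blocklist it,
 * break its lock and retry.  A holder with a live watch makes us return
 * to the caller, which then asks it to release the lock via
 * rbd_request_lock().
 */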
3976
Ilya Dryomov22e8bd52019-06-05 19:25:11 +02003977static int rbd_post_acquire_action(struct rbd_device *rbd_dev)
Ilya Dryomoved95b212016-08-12 16:40:02 +02003978{
Ilya Dryomov22e8bd52019-06-05 19:25:11 +02003979 int ret;
3980
3981 if (rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP) {
3982 ret = rbd_object_map_open(rbd_dev);
3983 if (ret)
3984 return ret;
3985 }
3986
3987 return 0;
3988}
3989
Ilya Dryomoved95b212016-08-12 16:40:02 +02003990/*
Ilya Dryomov637cd062019-06-06 17:14:49 +02003991 * Return:
3992 * 0 - lock acquired
3993 * 1 - caller should call rbd_request_lock()
3994 * <0 - error
Ilya Dryomoved95b212016-08-12 16:40:02 +02003995 */
Ilya Dryomov637cd062019-06-06 17:14:49 +02003996static int rbd_try_acquire_lock(struct rbd_device *rbd_dev)
Ilya Dryomoved95b212016-08-12 16:40:02 +02003997{
Ilya Dryomov637cd062019-06-06 17:14:49 +02003998 int ret;
Ilya Dryomoved95b212016-08-12 16:40:02 +02003999
4000 down_read(&rbd_dev->lock_rwsem);
4001 dout("%s rbd_dev %p read lock_state %d\n", __func__, rbd_dev,
4002 rbd_dev->lock_state);
4003 if (__rbd_is_lock_owner(rbd_dev)) {
Ilya Dryomoved95b212016-08-12 16:40:02 +02004004 up_read(&rbd_dev->lock_rwsem);
Ilya Dryomov637cd062019-06-06 17:14:49 +02004005 return 0;
Ilya Dryomoved95b212016-08-12 16:40:02 +02004006 }
4007
4008 up_read(&rbd_dev->lock_rwsem);
4009 down_write(&rbd_dev->lock_rwsem);
4010 dout("%s rbd_dev %p write lock_state %d\n", __func__, rbd_dev,
4011 rbd_dev->lock_state);
Ilya Dryomov637cd062019-06-06 17:14:49 +02004012 if (__rbd_is_lock_owner(rbd_dev)) {
4013 up_write(&rbd_dev->lock_rwsem);
4014 return 0;
Ilya Dryomoved95b212016-08-12 16:40:02 +02004015 }
4016
Ilya Dryomov637cd062019-06-06 17:14:49 +02004017 ret = rbd_try_lock(rbd_dev);
4018 if (ret < 0) {
4019 rbd_warn(rbd_dev, "failed to lock header: %d", ret);
Ilya Dryomov0b98acd2020-09-14 13:39:19 +02004020 if (ret == -EBLOCKLISTED)
Ilya Dryomov637cd062019-06-06 17:14:49 +02004021 goto out;
4022
4023 ret = 1; /* request lock anyway */
4024 }
4025 if (ret > 0) {
4026 up_write(&rbd_dev->lock_rwsem);
4027 return ret;
4028 }
4029
4030 rbd_assert(rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED);
4031 rbd_assert(list_empty(&rbd_dev->running_list));
4032
Ilya Dryomov22e8bd52019-06-05 19:25:11 +02004033 ret = rbd_post_acquire_action(rbd_dev);
4034 if (ret) {
4035 rbd_warn(rbd_dev, "post-acquire action failed: %d", ret);
4036 /*
4037 * Can't stay in RBD_LOCK_STATE_LOCKED because
4038 * rbd_lock_add_request() would let the request through,
4039 * assuming that e.g. object map is locked and loaded.
4040 */
4041 rbd_unlock(rbd_dev);
4042 }
4043
Ilya Dryomov637cd062019-06-06 17:14:49 +02004044out:
4045 wake_lock_waiters(rbd_dev, ret);
Ilya Dryomoved95b212016-08-12 16:40:02 +02004046 up_write(&rbd_dev->lock_rwsem);
Ilya Dryomov637cd062019-06-06 17:14:49 +02004047 return ret;
Ilya Dryomoved95b212016-08-12 16:40:02 +02004048}
4049
4050static void rbd_acquire_lock(struct work_struct *work)
4051{
4052 struct rbd_device *rbd_dev = container_of(to_delayed_work(work),
4053 struct rbd_device, lock_dwork);
Ilya Dryomov637cd062019-06-06 17:14:49 +02004054 int ret;
Ilya Dryomoved95b212016-08-12 16:40:02 +02004055
4056 dout("%s rbd_dev %p\n", __func__, rbd_dev);
4057again:
Ilya Dryomov637cd062019-06-06 17:14:49 +02004058 ret = rbd_try_acquire_lock(rbd_dev);
4059 if (ret <= 0) {
4060 dout("%s rbd_dev %p ret %d - done\n", __func__, rbd_dev, ret);
Ilya Dryomoved95b212016-08-12 16:40:02 +02004061 return;
4062 }
4063
4064 ret = rbd_request_lock(rbd_dev);
4065 if (ret == -ETIMEDOUT) {
4066 goto again; /* treat this as a dead client */
Ilya Dryomove010dd02017-04-13 12:17:39 +02004067 } else if (ret == -EROFS) {
4068 rbd_warn(rbd_dev, "peer will not release lock");
Ilya Dryomov637cd062019-06-06 17:14:49 +02004069 down_write(&rbd_dev->lock_rwsem);
4070 wake_lock_waiters(rbd_dev, ret);
4071 up_write(&rbd_dev->lock_rwsem);
Ilya Dryomoved95b212016-08-12 16:40:02 +02004072 } else if (ret < 0) {
4073 rbd_warn(rbd_dev, "error requesting lock: %d", ret);
4074 mod_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork,
4075 RBD_RETRY_DELAY);
4076 } else {
4077 /*
4078 * lock owner acked, but resend if we don't see them
4079 * release the lock
4080 */
Colin Ian King6b0a8772019-11-07 22:36:46 +00004081 dout("%s rbd_dev %p requeuing lock_dwork\n", __func__,
Ilya Dryomoved95b212016-08-12 16:40:02 +02004082 rbd_dev);
4083 mod_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork,
4084 msecs_to_jiffies(2 * RBD_NOTIFY_TIMEOUT * MSEC_PER_SEC));
4085 }
4086}
4087
Ilya Dryomova2b1da02019-05-30 11:15:23 +02004088static bool rbd_quiesce_lock(struct rbd_device *rbd_dev)
Ilya Dryomoved95b212016-08-12 16:40:02 +02004089{
Ilya Dryomova2b1da02019-05-30 11:15:23 +02004090 dout("%s rbd_dev %p\n", __func__, rbd_dev);
Linus Torvaldsd9b9c892019-07-18 11:05:25 -07004091 lockdep_assert_held_write(&rbd_dev->lock_rwsem);
Ilya Dryomova2b1da02019-05-30 11:15:23 +02004092
Ilya Dryomoved95b212016-08-12 16:40:02 +02004093 if (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED)
4094 return false;
4095
Ilya Dryomoved95b212016-08-12 16:40:02 +02004096 /*
4097 * Ensure that all in-flight IO is flushed.
Ilya Dryomoved95b212016-08-12 16:40:02 +02004098 */
Ilya Dryomove1fddc82019-05-30 16:07:48 +02004099 rbd_dev->lock_state = RBD_LOCK_STATE_RELEASING;
4100 rbd_assert(!completion_done(&rbd_dev->releasing_wait));
Ilya Dryomoved9eb712021-07-03 11:31:26 +02004101 if (list_empty(&rbd_dev->running_list))
4102 return true;
4103
4104 up_write(&rbd_dev->lock_rwsem);
4105 wait_for_completion(&rbd_dev->releasing_wait);
Ilya Dryomoved95b212016-08-12 16:40:02 +02004106
4107 down_write(&rbd_dev->lock_rwsem);
Ilya Dryomoved95b212016-08-12 16:40:02 +02004108 if (rbd_dev->lock_state != RBD_LOCK_STATE_RELEASING)
4109 return false;
4110
Ilya Dryomove1fddc82019-05-30 16:07:48 +02004111 rbd_assert(list_empty(&rbd_dev->running_list));
Ilya Dryomova2b1da02019-05-30 11:15:23 +02004112 return true;
4113}
4114
Ilya Dryomov22e8bd52019-06-05 19:25:11 +02004115static void rbd_pre_release_action(struct rbd_device *rbd_dev)
4116{
4117 if (rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP)
4118 rbd_object_map_close(rbd_dev);
4119}
4120
Ilya Dryomove1fddc82019-05-30 16:07:48 +02004121static void __rbd_release_lock(struct rbd_device *rbd_dev)
4122{
4123 rbd_assert(list_empty(&rbd_dev->running_list));
4124
Ilya Dryomov22e8bd52019-06-05 19:25:11 +02004125 rbd_pre_release_action(rbd_dev);
Ilya Dryomovbbead742017-04-13 12:17:38 +02004126 rbd_unlock(rbd_dev);
Ilya Dryomove1fddc82019-05-30 16:07:48 +02004127}
4128
Ilya Dryomova2b1da02019-05-30 11:15:23 +02004129/*
4130 * lock_rwsem must be held for write
4131 */
4132static void rbd_release_lock(struct rbd_device *rbd_dev)
4133{
4134 if (!rbd_quiesce_lock(rbd_dev))
4135 return;
4136
Ilya Dryomove1fddc82019-05-30 16:07:48 +02004137 __rbd_release_lock(rbd_dev);
Ilya Dryomova2b1da02019-05-30 11:15:23 +02004138
Ilya Dryomovbbead742017-04-13 12:17:38 +02004139 /*
4140	 * Give others a chance to grab the lock - otherwise we would
Ilya Dryomov637cd062019-06-06 17:14:49 +02004141	 * re-acquire it almost immediately if new IO arrived while draining
4142	 * the running list. We need to ack our own notifications, so this
4143	 * lock_dwork will be requeued from rbd_handle_released_lock() by
4144	 * way of maybe_kick_acquire().
Ilya Dryomovbbead742017-04-13 12:17:38 +02004145 */
4146 cancel_delayed_work(&rbd_dev->lock_dwork);
Ilya Dryomoved95b212016-08-12 16:40:02 +02004147}
4148
4149static void rbd_release_lock_work(struct work_struct *work)
4150{
4151 struct rbd_device *rbd_dev = container_of(work, struct rbd_device,
4152 unlock_work);
4153
4154 down_write(&rbd_dev->lock_rwsem);
4155 rbd_release_lock(rbd_dev);
4156 up_write(&rbd_dev->lock_rwsem);
4157}
4158
Ilya Dryomov637cd062019-06-06 17:14:49 +02004159static void maybe_kick_acquire(struct rbd_device *rbd_dev)
4160{
4161 bool have_requests;
4162
4163 dout("%s rbd_dev %p\n", __func__, rbd_dev);
4164 if (__rbd_is_lock_owner(rbd_dev))
4165 return;
4166
4167 spin_lock(&rbd_dev->lock_lists_lock);
4168 have_requests = !list_empty(&rbd_dev->acquiring_list);
4169 spin_unlock(&rbd_dev->lock_lists_lock);
4170 if (have_requests || delayed_work_pending(&rbd_dev->lock_dwork)) {
4171 dout("%s rbd_dev %p kicking lock_dwork\n", __func__, rbd_dev);
4172 mod_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, 0);
4173 }
4174}
4175
Ilya Dryomoved95b212016-08-12 16:40:02 +02004176static void rbd_handle_acquired_lock(struct rbd_device *rbd_dev, u8 struct_v,
4177 void **p)
4178{
4179 struct rbd_client_id cid = { 0 };
4180
4181 if (struct_v >= 2) {
4182 cid.gid = ceph_decode_64(p);
4183 cid.handle = ceph_decode_64(p);
4184 }
4185
4186 dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid,
4187 cid.handle);
4188 if (!rbd_cid_equal(&cid, &rbd_empty_cid)) {
4189 down_write(&rbd_dev->lock_rwsem);
4190 if (rbd_cid_equal(&cid, &rbd_dev->owner_cid)) {
Ilya Dryomov8798d072021-07-03 11:56:55 +02004191 dout("%s rbd_dev %p cid %llu-%llu == owner_cid\n",
4192 __func__, rbd_dev, cid.gid, cid.handle);
4193 } else {
4194 rbd_set_owner_cid(rbd_dev, &cid);
Ilya Dryomoved95b212016-08-12 16:40:02 +02004195 }
Ilya Dryomoved95b212016-08-12 16:40:02 +02004196 downgrade_write(&rbd_dev->lock_rwsem);
4197 } else {
4198 down_read(&rbd_dev->lock_rwsem);
4199 }
4200
Ilya Dryomov637cd062019-06-06 17:14:49 +02004201 maybe_kick_acquire(rbd_dev);
Ilya Dryomoved95b212016-08-12 16:40:02 +02004202 up_read(&rbd_dev->lock_rwsem);
4203}
4204
4205static void rbd_handle_released_lock(struct rbd_device *rbd_dev, u8 struct_v,
4206 void **p)
4207{
4208 struct rbd_client_id cid = { 0 };
4209
4210 if (struct_v >= 2) {
4211 cid.gid = ceph_decode_64(p);
4212 cid.handle = ceph_decode_64(p);
4213 }
4214
4215 dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid,
4216 cid.handle);
4217 if (!rbd_cid_equal(&cid, &rbd_empty_cid)) {
4218 down_write(&rbd_dev->lock_rwsem);
4219 if (!rbd_cid_equal(&cid, &rbd_dev->owner_cid)) {
Ilya Dryomov8798d072021-07-03 11:56:55 +02004220 dout("%s rbd_dev %p cid %llu-%llu != owner_cid %llu-%llu\n",
Ilya Dryomoved95b212016-08-12 16:40:02 +02004221 __func__, rbd_dev, cid.gid, cid.handle,
4222 rbd_dev->owner_cid.gid, rbd_dev->owner_cid.handle);
Ilya Dryomov8798d072021-07-03 11:56:55 +02004223 } else {
4224 rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
Ilya Dryomoved95b212016-08-12 16:40:02 +02004225 }
Ilya Dryomoved95b212016-08-12 16:40:02 +02004226 downgrade_write(&rbd_dev->lock_rwsem);
4227 } else {
4228 down_read(&rbd_dev->lock_rwsem);
4229 }
4230
Ilya Dryomov637cd062019-06-06 17:14:49 +02004231 maybe_kick_acquire(rbd_dev);
Ilya Dryomoved95b212016-08-12 16:40:02 +02004232 up_read(&rbd_dev->lock_rwsem);
4233}
4234
Ilya Dryomov3b77faa2017-04-13 12:17:39 +02004235/*
4236 * Returns result for ResponseMessage to be encoded (<= 0), or 1 if no
4237 * ResponseMessage is needed.
4238 */
4239static int rbd_handle_request_lock(struct rbd_device *rbd_dev, u8 struct_v,
4240 void **p)
Ilya Dryomoved95b212016-08-12 16:40:02 +02004241{
4242 struct rbd_client_id my_cid = rbd_get_cid(rbd_dev);
4243 struct rbd_client_id cid = { 0 };
Ilya Dryomov3b77faa2017-04-13 12:17:39 +02004244 int result = 1;
Ilya Dryomoved95b212016-08-12 16:40:02 +02004245
4246 if (struct_v >= 2) {
4247 cid.gid = ceph_decode_64(p);
4248 cid.handle = ceph_decode_64(p);
4249 }
4250
4251 dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid,
4252 cid.handle);
4253 if (rbd_cid_equal(&cid, &my_cid))
Ilya Dryomov3b77faa2017-04-13 12:17:39 +02004254 return result;
Ilya Dryomoved95b212016-08-12 16:40:02 +02004255
4256 down_read(&rbd_dev->lock_rwsem);
Ilya Dryomov3b77faa2017-04-13 12:17:39 +02004257 if (__rbd_is_lock_owner(rbd_dev)) {
4258 if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED &&
4259 rbd_cid_equal(&rbd_dev->owner_cid, &rbd_empty_cid))
4260 goto out_unlock;
4261
4262 /*
4263 * encode ResponseMessage(0) so the peer can detect
4264 * a missing owner
4265 */
4266 result = 0;
4267
4268 if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED) {
Ilya Dryomove010dd02017-04-13 12:17:39 +02004269 if (!rbd_dev->opts->exclusive) {
4270 dout("%s rbd_dev %p queueing unlock_work\n",
4271 __func__, rbd_dev);
4272 queue_work(rbd_dev->task_wq,
4273 &rbd_dev->unlock_work);
4274 } else {
4275 /* refuse to release the lock */
4276 result = -EROFS;
4277 }
Ilya Dryomoved95b212016-08-12 16:40:02 +02004278 }
4279 }
Ilya Dryomov3b77faa2017-04-13 12:17:39 +02004280
4281out_unlock:
Ilya Dryomoved95b212016-08-12 16:40:02 +02004282 up_read(&rbd_dev->lock_rwsem);
Ilya Dryomov3b77faa2017-04-13 12:17:39 +02004283 return result;
Ilya Dryomoved95b212016-08-12 16:40:02 +02004284}
4285
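/*
 * Ack a notification.  If result is non-NULL, a ResponseMessage (a ceph
 * encoding start block followed by a 32-bit result) is encoded into the
 * ack payload; otherwise the ack carries no payload.
 */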
4286static void __rbd_acknowledge_notify(struct rbd_device *rbd_dev,
4287 u64 notify_id, u64 cookie, s32 *result)
4288{
4289 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
Kyle Spiers08a79102018-03-17 09:44:01 -07004290 char buf[4 + CEPH_ENCODING_START_BLK_LEN];
4291 int buf_size = sizeof(buf);
Ilya Dryomoved95b212016-08-12 16:40:02 +02004292 int ret;
4293
4294 if (result) {
4295 void *p = buf;
4296
4297 /* encode ResponseMessage */
4298 ceph_start_encoding(&p, 1, 1,
4299 buf_size - CEPH_ENCODING_START_BLK_LEN);
4300 ceph_encode_32(&p, *result);
4301 } else {
4302 buf_size = 0;
4303 }
4304
4305 ret = ceph_osdc_notify_ack(osdc, &rbd_dev->header_oid,
4306 &rbd_dev->header_oloc, notify_id, cookie,
4307 buf, buf_size);
4308 if (ret)
4309 rbd_warn(rbd_dev, "acknowledge_notify failed: %d", ret);
4310}
4311
4312static void rbd_acknowledge_notify(struct rbd_device *rbd_dev, u64 notify_id,
4313 u64 cookie)
4314{
4315 dout("%s rbd_dev %p\n", __func__, rbd_dev);
4316 __rbd_acknowledge_notify(rbd_dev, notify_id, cookie, NULL);
4317}
4318
4319static void rbd_acknowledge_notify_result(struct rbd_device *rbd_dev,
4320 u64 notify_id, u64 cookie, s32 result)
4321{
4322 dout("%s rbd_dev %p result %d\n", __func__, rbd_dev, result);
4323 __rbd_acknowledge_notify(rbd_dev, notify_id, cookie, &result);
4324}
Ilya Dryomov922dab62016-05-26 01:15:02 +02004325
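/*
 * Watch/notify callback.  A non-empty payload is a NotifyMessage: a ceph
 * encoding start block followed by a 32-bit notify_op and, for the
 * lock-related ops, the client id (gid + handle) of the sender.  An
 * empty payload is a legacy header update notification.
 */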
4326static void rbd_watch_cb(void *arg, u64 notify_id, u64 cookie,
4327 u64 notifier_id, void *data, size_t data_len)
Alex Elderb8d70032012-11-30 17:53:04 -06004328{
Ilya Dryomov922dab62016-05-26 01:15:02 +02004329 struct rbd_device *rbd_dev = arg;
Ilya Dryomoved95b212016-08-12 16:40:02 +02004330 void *p = data;
4331 void *const end = p + data_len;
Ilya Dryomovd4c22692016-09-06 11:15:48 +02004332 u8 struct_v = 0;
Ilya Dryomoved95b212016-08-12 16:40:02 +02004333 u32 len;
4334 u32 notify_op;
Alex Elderb8d70032012-11-30 17:53:04 -06004335 int ret;
4336
Ilya Dryomoved95b212016-08-12 16:40:02 +02004337 dout("%s rbd_dev %p cookie %llu notify_id %llu data_len %zu\n",
4338 __func__, rbd_dev, cookie, notify_id, data_len);
4339 if (data_len) {
4340 ret = ceph_start_decoding(&p, end, 1, "NotifyMessage",
4341 &struct_v, &len);
4342 if (ret) {
4343 rbd_warn(rbd_dev, "failed to decode NotifyMessage: %d",
4344 ret);
4345 return;
4346 }
Ilya Dryomov52bb1f92014-07-23 17:11:20 +04004347
Ilya Dryomoved95b212016-08-12 16:40:02 +02004348 notify_op = ceph_decode_32(&p);
4349 } else {
4350 /* legacy notification for header updates */
4351 notify_op = RBD_NOTIFY_OP_HEADER_UPDATE;
4352 len = 0;
4353 }
Alex Elderb8d70032012-11-30 17:53:04 -06004354
Ilya Dryomoved95b212016-08-12 16:40:02 +02004355 dout("%s rbd_dev %p notify_op %u\n", __func__, rbd_dev, notify_op);
4356 switch (notify_op) {
4357 case RBD_NOTIFY_OP_ACQUIRED_LOCK:
4358 rbd_handle_acquired_lock(rbd_dev, struct_v, &p);
4359 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
4360 break;
4361 case RBD_NOTIFY_OP_RELEASED_LOCK:
4362 rbd_handle_released_lock(rbd_dev, struct_v, &p);
4363 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
4364 break;
4365 case RBD_NOTIFY_OP_REQUEST_LOCK:
Ilya Dryomov3b77faa2017-04-13 12:17:39 +02004366 ret = rbd_handle_request_lock(rbd_dev, struct_v, &p);
4367 if (ret <= 0)
Ilya Dryomoved95b212016-08-12 16:40:02 +02004368 rbd_acknowledge_notify_result(rbd_dev, notify_id,
Ilya Dryomov3b77faa2017-04-13 12:17:39 +02004369 cookie, ret);
Ilya Dryomoved95b212016-08-12 16:40:02 +02004370 else
4371 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
4372 break;
4373 case RBD_NOTIFY_OP_HEADER_UPDATE:
4374 ret = rbd_dev_refresh(rbd_dev);
4375 if (ret)
4376 rbd_warn(rbd_dev, "refresh failed: %d", ret);
4377
4378 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
4379 break;
4380 default:
4381 if (rbd_is_lock_owner(rbd_dev))
4382 rbd_acknowledge_notify_result(rbd_dev, notify_id,
4383 cookie, -EOPNOTSUPP);
4384 else
4385 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
4386 break;
4387 }
Alex Elderb8d70032012-11-30 17:53:04 -06004388}
4389
Ilya Dryomov99d16942016-08-12 16:11:41 +02004390static void __rbd_unregister_watch(struct rbd_device *rbd_dev);
4391
Ilya Dryomov922dab62016-05-26 01:15:02 +02004392static void rbd_watch_errcb(void *arg, u64 cookie, int err)
Ilya Dryomovbb040aa2014-06-19 11:38:14 +04004393{
Ilya Dryomov922dab62016-05-26 01:15:02 +02004394 struct rbd_device *rbd_dev = arg;
Ilya Dryomovbb040aa2014-06-19 11:38:14 +04004395
Ilya Dryomov922dab62016-05-26 01:15:02 +02004396 rbd_warn(rbd_dev, "encountered watch error: %d", err);
Ilya Dryomovbb040aa2014-06-19 11:38:14 +04004397
Ilya Dryomoved95b212016-08-12 16:40:02 +02004398 down_write(&rbd_dev->lock_rwsem);
4399 rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
4400 up_write(&rbd_dev->lock_rwsem);
Ilya Dryomovbb040aa2014-06-19 11:38:14 +04004401
Ilya Dryomov99d16942016-08-12 16:11:41 +02004402 mutex_lock(&rbd_dev->watch_mutex);
4403 if (rbd_dev->watch_state == RBD_WATCH_STATE_REGISTERED) {
4404 __rbd_unregister_watch(rbd_dev);
4405 rbd_dev->watch_state = RBD_WATCH_STATE_ERROR;
Ilya Dryomovbb040aa2014-06-19 11:38:14 +04004406
Ilya Dryomov99d16942016-08-12 16:11:41 +02004407 queue_delayed_work(rbd_dev->task_wq, &rbd_dev->watch_dwork, 0);
Ilya Dryomovbb040aa2014-06-19 11:38:14 +04004408 }
Ilya Dryomov99d16942016-08-12 16:11:41 +02004409 mutex_unlock(&rbd_dev->watch_mutex);
Ilya Dryomovbb040aa2014-06-19 11:38:14 +04004410}
4411
4412/*
Ilya Dryomov99d16942016-08-12 16:11:41 +02004413 * watch_mutex must be locked
Alex Elder9969ebc2013-01-18 12:31:10 -06004414 */
Ilya Dryomov99d16942016-08-12 16:11:41 +02004415static int __rbd_register_watch(struct rbd_device *rbd_dev)
Alex Elder9969ebc2013-01-18 12:31:10 -06004416{
4417 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
Ilya Dryomov922dab62016-05-26 01:15:02 +02004418 struct ceph_osd_linger_request *handle;
Alex Elder9969ebc2013-01-18 12:31:10 -06004419
Ilya Dryomov922dab62016-05-26 01:15:02 +02004420 rbd_assert(!rbd_dev->watch_handle);
Ilya Dryomov99d16942016-08-12 16:11:41 +02004421 dout("%s rbd_dev %p\n", __func__, rbd_dev);
Alex Elder9969ebc2013-01-18 12:31:10 -06004422
Ilya Dryomov922dab62016-05-26 01:15:02 +02004423 handle = ceph_osdc_watch(osdc, &rbd_dev->header_oid,
4424 &rbd_dev->header_oloc, rbd_watch_cb,
4425 rbd_watch_errcb, rbd_dev);
4426 if (IS_ERR(handle))
4427 return PTR_ERR(handle);
Alex Elder9969ebc2013-01-18 12:31:10 -06004428
Ilya Dryomov922dab62016-05-26 01:15:02 +02004429 rbd_dev->watch_handle = handle;
Ilya Dryomovb30a01f2014-05-22 19:28:52 +04004430 return 0;
Alex Elder9969ebc2013-01-18 12:31:10 -06004431}
4432
Ilya Dryomov99d16942016-08-12 16:11:41 +02004433/*
4434 * watch_mutex must be locked
4435 */
4436static void __rbd_unregister_watch(struct rbd_device *rbd_dev)
Ilya Dryomovfca27062013-12-16 18:02:40 +02004437{
Ilya Dryomov922dab62016-05-26 01:15:02 +02004438 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4439 int ret;
Ilya Dryomovb30a01f2014-05-22 19:28:52 +04004440
Ilya Dryomov99d16942016-08-12 16:11:41 +02004441 rbd_assert(rbd_dev->watch_handle);
4442 dout("%s rbd_dev %p\n", __func__, rbd_dev);
Ilya Dryomovb30a01f2014-05-22 19:28:52 +04004443
Ilya Dryomov922dab62016-05-26 01:15:02 +02004444 ret = ceph_osdc_unwatch(osdc, rbd_dev->watch_handle);
4445 if (ret)
4446 rbd_warn(rbd_dev, "failed to unwatch: %d", ret);
Ilya Dryomovb30a01f2014-05-22 19:28:52 +04004447
Ilya Dryomov922dab62016-05-26 01:15:02 +02004448 rbd_dev->watch_handle = NULL;
Ilya Dryomovc525f032016-04-28 16:07:26 +02004449}
4450
Ilya Dryomov99d16942016-08-12 16:11:41 +02004451static int rbd_register_watch(struct rbd_device *rbd_dev)
Ilya Dryomovc525f032016-04-28 16:07:26 +02004452{
Ilya Dryomov99d16942016-08-12 16:11:41 +02004453 int ret;
Ilya Dryomov811c6682016-04-15 16:22:16 +02004454
Ilya Dryomov99d16942016-08-12 16:11:41 +02004455 mutex_lock(&rbd_dev->watch_mutex);
4456 rbd_assert(rbd_dev->watch_state == RBD_WATCH_STATE_UNREGISTERED);
4457 ret = __rbd_register_watch(rbd_dev);
4458 if (ret)
4459 goto out;
4460
4461 rbd_dev->watch_state = RBD_WATCH_STATE_REGISTERED;
4462 rbd_dev->watch_cookie = rbd_dev->watch_handle->linger_id;
4463
4464out:
4465 mutex_unlock(&rbd_dev->watch_mutex);
4466 return ret;
4467}
4468
4469static void cancel_tasks_sync(struct rbd_device *rbd_dev)
4470{
4471 dout("%s rbd_dev %p\n", __func__, rbd_dev);
4472
Ilya Dryomoved95b212016-08-12 16:40:02 +02004473 cancel_work_sync(&rbd_dev->acquired_lock_work);
4474 cancel_work_sync(&rbd_dev->released_lock_work);
4475 cancel_delayed_work_sync(&rbd_dev->lock_dwork);
4476 cancel_work_sync(&rbd_dev->unlock_work);
Ilya Dryomov99d16942016-08-12 16:11:41 +02004477}
4478
Ilya Dryomov0e4e1de52020-03-13 11:20:51 +01004479/*
4480 * header_rwsem must not be held to avoid a deadlock with
4481 * rbd_dev_refresh() when flushing notifies.
4482 */
Ilya Dryomov99d16942016-08-12 16:11:41 +02004483static void rbd_unregister_watch(struct rbd_device *rbd_dev)
4484{
4485 cancel_tasks_sync(rbd_dev);
4486
4487 mutex_lock(&rbd_dev->watch_mutex);
4488 if (rbd_dev->watch_state == RBD_WATCH_STATE_REGISTERED)
4489 __rbd_unregister_watch(rbd_dev);
4490 rbd_dev->watch_state = RBD_WATCH_STATE_UNREGISTERED;
4491 mutex_unlock(&rbd_dev->watch_mutex);
4492
Dongsheng Yang23edca82018-06-04 06:24:37 -04004493 cancel_delayed_work_sync(&rbd_dev->watch_dwork);
Ilya Dryomov811c6682016-04-15 16:22:16 +02004494 ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc);
Ilya Dryomovfca27062013-12-16 18:02:40 +02004495}
4496
Ilya Dryomov14bb2112017-04-13 12:17:38 +02004497/*
4498 * lock_rwsem must be held for write
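 *
 * Called from rbd_reregister_watch() after the watch has been
 * re-established.  Attempts to update the lock cookie in place; if the
 * OSD does not support that, falls back to releasing the lock and
 * queueing a fresh acquire.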
4499 */
4500static void rbd_reacquire_lock(struct rbd_device *rbd_dev)
4501{
4502 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4503 char cookie[32];
4504 int ret;
4505
Ilya Dryomova2b1da02019-05-30 11:15:23 +02004506 if (!rbd_quiesce_lock(rbd_dev))
4507 return;
Ilya Dryomov14bb2112017-04-13 12:17:38 +02004508
4509 format_lock_cookie(rbd_dev, cookie);
4510 ret = ceph_cls_set_cookie(osdc, &rbd_dev->header_oid,
4511 &rbd_dev->header_oloc, RBD_LOCK_NAME,
4512 CEPH_CLS_LOCK_EXCLUSIVE, rbd_dev->lock_cookie,
4513 RBD_LOCK_TAG, cookie);
4514 if (ret) {
4515 if (ret != -EOPNOTSUPP)
4516 rbd_warn(rbd_dev, "failed to update lock cookie: %d",
4517 ret);
4518
4519 /*
4520 * Lock cookie cannot be updated on older OSDs, so do
4521 * a manual release and queue an acquire.
4522 */
Ilya Dryomove1fddc82019-05-30 16:07:48 +02004523 __rbd_release_lock(rbd_dev);
Ilya Dryomova2b1da02019-05-30 11:15:23 +02004524 queue_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, 0);
Ilya Dryomov14bb2112017-04-13 12:17:38 +02004525 } else {
Florian Margaineedd8ca82017-12-13 16:43:59 +01004526 __rbd_lock(rbd_dev, cookie);
Ilya Dryomov637cd062019-06-06 17:14:49 +02004527 wake_lock_waiters(rbd_dev, 0);
Ilya Dryomov14bb2112017-04-13 12:17:38 +02004528 }
4529}
4530
Ilya Dryomov99d16942016-08-12 16:11:41 +02004531static void rbd_reregister_watch(struct work_struct *work)
4532{
4533 struct rbd_device *rbd_dev = container_of(to_delayed_work(work),
4534 struct rbd_device, watch_dwork);
4535 int ret;
4536
4537 dout("%s rbd_dev %p\n", __func__, rbd_dev);
4538
4539 mutex_lock(&rbd_dev->watch_mutex);
Ilya Dryomov87c0fde2016-09-29 13:41:05 +02004540 if (rbd_dev->watch_state != RBD_WATCH_STATE_ERROR) {
4541 mutex_unlock(&rbd_dev->watch_mutex);
Ilya Dryomov14bb2112017-04-13 12:17:38 +02004542 return;
Ilya Dryomov87c0fde2016-09-29 13:41:05 +02004543 }
Ilya Dryomov99d16942016-08-12 16:11:41 +02004544
4545 ret = __rbd_register_watch(rbd_dev);
4546 if (ret) {
4547 rbd_warn(rbd_dev, "failed to reregister watch: %d", ret);
Ilya Dryomov0b98acd2020-09-14 13:39:19 +02004548 if (ret != -EBLOCKLISTED && ret != -ENOENT) {
Ilya Dryomov99d16942016-08-12 16:11:41 +02004549 queue_delayed_work(rbd_dev->task_wq,
4550 &rbd_dev->watch_dwork,
4551 RBD_RETRY_DELAY);
Ilya Dryomov637cd062019-06-06 17:14:49 +02004552 mutex_unlock(&rbd_dev->watch_mutex);
4553 return;
Ilya Dryomov87c0fde2016-09-29 13:41:05 +02004554 }
Ilya Dryomov637cd062019-06-06 17:14:49 +02004555
Ilya Dryomov87c0fde2016-09-29 13:41:05 +02004556 mutex_unlock(&rbd_dev->watch_mutex);
Ilya Dryomov637cd062019-06-06 17:14:49 +02004557 down_write(&rbd_dev->lock_rwsem);
4558 wake_lock_waiters(rbd_dev, ret);
4559 up_write(&rbd_dev->lock_rwsem);
Ilya Dryomov14bb2112017-04-13 12:17:38 +02004560 return;
Ilya Dryomov99d16942016-08-12 16:11:41 +02004561 }
4562
4563 rbd_dev->watch_state = RBD_WATCH_STATE_REGISTERED;
4564 rbd_dev->watch_cookie = rbd_dev->watch_handle->linger_id;
4565 mutex_unlock(&rbd_dev->watch_mutex);
4566
Ilya Dryomov14bb2112017-04-13 12:17:38 +02004567 down_write(&rbd_dev->lock_rwsem);
4568 if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED)
4569 rbd_reacquire_lock(rbd_dev);
4570 up_write(&rbd_dev->lock_rwsem);
4571
Ilya Dryomov99d16942016-08-12 16:11:41 +02004572 ret = rbd_dev_refresh(rbd_dev);
4573 if (ret)
Colin Ian Kingf6870cc2018-03-19 13:33:10 +00004574 rbd_warn(rbd_dev, "reregistration refresh failed: %d", ret);
Ilya Dryomov99d16942016-08-12 16:11:41 +02004575}
4576
Alex Elder36be9a72013-01-19 00:30:28 -06004577/*
Alex Elderf40eb342013-04-25 15:09:42 -05004578 * Synchronous osd object method call. Returns the number of bytes
4579 * returned in the inbound buffer, or a negative error code.
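 *
 * A typical call looks like the following sketch (the method name and
 * buffers are illustrative):
 *
 *	ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
 *				  &rbd_dev->header_oloc, "get_size",
 *				  &snapid, sizeof(snapid),
 *				  &size_buf, sizeof(size_buf));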
Alex Elder36be9a72013-01-19 00:30:28 -06004580 */
4581static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
Ilya Dryomovecd4a682017-01-25 18:16:21 +01004582 struct ceph_object_id *oid,
4583 struct ceph_object_locator *oloc,
Alex Elder36be9a72013-01-19 00:30:28 -06004584 const char *method_name,
Alex Elder41579762013-04-21 12:14:45 -05004585 const void *outbound,
Alex Elder36be9a72013-01-19 00:30:28 -06004586 size_t outbound_size,
Alex Elder41579762013-04-21 12:14:45 -05004587 void *inbound,
Alex Eldere2a58ee2013-04-30 00:44:33 -05004588 size_t inbound_size)
Alex Elder36be9a72013-01-19 00:30:28 -06004589{
Ilya Dryomovecd4a682017-01-25 18:16:21 +01004590 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4591 struct page *req_page = NULL;
4592 struct page *reply_page;
Alex Elder36be9a72013-01-19 00:30:28 -06004593 int ret;
4594
4595 /*
Alex Elder6010a452013-04-05 01:27:11 -05004596 * Method calls are ultimately read operations. The result
4597 * should be placed into the inbound buffer provided.  They
4598 * also supply outbound data--parameters for the object
4599 * method. Currently if this is present it will be a
4600 * snapshot id.
Alex Elder36be9a72013-01-19 00:30:28 -06004601 */
Ilya Dryomovecd4a682017-01-25 18:16:21 +01004602 if (outbound) {
4603 if (outbound_size > PAGE_SIZE)
4604 return -E2BIG;
Alex Elder36be9a72013-01-19 00:30:28 -06004605
Ilya Dryomovecd4a682017-01-25 18:16:21 +01004606 req_page = alloc_page(GFP_KERNEL);
4607 if (!req_page)
4608 return -ENOMEM;
Alex Elder36be9a72013-01-19 00:30:28 -06004609
Ilya Dryomovecd4a682017-01-25 18:16:21 +01004610 memcpy(page_address(req_page), outbound, outbound_size);
Alex Elder04017e22013-04-05 14:46:02 -05004611 }
Alex Elder430c28c2013-04-03 21:32:51 -05004612
Ilya Dryomovecd4a682017-01-25 18:16:21 +01004613 reply_page = alloc_page(GFP_KERNEL);
4614 if (!reply_page) {
4615 if (req_page)
4616 __free_page(req_page);
4617 return -ENOMEM;
4618 }
Alex Elder36be9a72013-01-19 00:30:28 -06004619
Ilya Dryomovecd4a682017-01-25 18:16:21 +01004620 ret = ceph_osdc_call(osdc, oid, oloc, RBD_DRV_NAME, method_name,
4621 CEPH_OSD_FLAG_READ, req_page, outbound_size,
Ilya Dryomov68ada912019-06-14 18:16:51 +02004622 &reply_page, &inbound_size);
Ilya Dryomovecd4a682017-01-25 18:16:21 +01004623 if (!ret) {
4624 memcpy(inbound, page_address(reply_page), inbound_size);
4625 ret = inbound_size;
4626 }
Alex Elder57385b52013-04-21 12:14:45 -05004627
Ilya Dryomovecd4a682017-01-25 18:16:21 +01004628 if (req_page)
4629 __free_page(req_page);
4630 __free_page(reply_page);
Alex Elder36be9a72013-01-19 00:30:28 -06004631 return ret;
4632}
4633
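/*
 * Requests are queued to rbd_wq from rbd_queue_rq() and serviced here in
 * process context: filling the image request may sleep (it takes
 * header_rwsem and allocates memory).
 */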
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01004634static void rbd_queue_workfn(struct work_struct *work)
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04004635{
Ilya Dryomov59e542c2020-02-12 15:23:58 +01004636 struct rbd_img_request *img_request =
4637 container_of(work, struct rbd_img_request, work);
4638 struct rbd_device *rbd_dev = img_request->rbd_dev;
4639 enum obj_operation_type op_type = img_request->op_type;
4640 struct request *rq = blk_mq_rq_from_pdu(img_request);
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04004641 u64 offset = (u64)blk_rq_pos(rq) << SECTOR_SHIFT;
4642 u64 length = blk_rq_bytes(rq);
Josh Durgin4e752f02014-04-08 11:12:11 -07004643 u64 mapping_size;
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04004644 int result;
4645
4646 /* Ignore/skip any zero-length requests */
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04004647 if (!length) {
4648 dout("%s: zero-length request\n", __func__);
4649 result = 0;
Ilya Dryomov59e542c2020-02-12 15:23:58 +01004650 goto err_img_request;
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04004651 }
4652
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01004653 blk_mq_start_request(rq);
4654
Josh Durgin4e752f02014-04-08 11:12:11 -07004655 down_read(&rbd_dev->header_rwsem);
4656 mapping_size = rbd_dev->mapping.size;
Ilya Dryomova52cc682020-02-12 15:08:39 +01004657 rbd_img_capture_header(img_request);
Josh Durgin4e752f02014-04-08 11:12:11 -07004658 up_read(&rbd_dev->header_rwsem);
4659
4660 if (offset + length > mapping_size) {
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04004661 rbd_warn(rbd_dev, "beyond EOD (%llu~%llu > %llu)", offset,
Josh Durgin4e752f02014-04-08 11:12:11 -07004662 length, mapping_size);
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04004663 result = -EIO;
Ilya Dryomova52cc682020-02-12 15:08:39 +01004664 goto err_img_request;
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04004665 }
4666
Ilya Dryomov21ed05a2019-08-30 17:31:06 +02004667 dout("%s rbd_dev %p img_req %p %s %llu~%llu\n", __func__, rbd_dev,
4668 img_request, obj_op_name(op_type), offset, length);
4669
Ilya Dryomov6484cbe2019-01-29 12:46:25 +01004670 if (op_type == OBJ_OP_DISCARD || op_type == OBJ_OP_ZEROOUT)
Ilya Dryomov5a237812018-02-06 19:26:34 +01004671 result = rbd_img_fill_nodata(img_request, offset, length);
Guangliang Zhao90e98c52014-04-01 22:22:16 +08004672 else
Ilya Dryomov5a237812018-02-06 19:26:34 +01004673 result = rbd_img_fill_from_bio(img_request, offset, length,
4674 rq->bio);
Ilya Dryomov0192ce22019-05-16 15:06:56 +02004675 if (result)
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04004676 goto err_img_request;
4677
Ilya Dryomove1fddc82019-05-30 16:07:48 +02004678 rbd_img_handle_request(img_request, 0);
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04004679 return;
4680
4681err_img_request:
Hannes Reinecke679a97d2020-01-31 11:37:36 +01004682 rbd_img_request_destroy(img_request);
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04004683 if (result)
4684 rbd_warn(rbd_dev, "%s %llx at %llx result %d",
Guangliang Zhao6d2940c2014-03-13 11:21:35 +08004685 obj_op_name(op_type), length, offset, result);
Christoph Hellwig2a842ac2017-06-03 09:38:04 +02004686 blk_mq_end_request(rq, errno_to_blk_status(result));
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04004687}
4688
Christoph Hellwigfc17b652017-06-03 09:38:05 +02004689static blk_status_t rbd_queue_rq(struct blk_mq_hw_ctx *hctx,
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01004690 const struct blk_mq_queue_data *bd)
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04004691{
Ilya Dryomov59e542c2020-02-12 15:23:58 +01004692 struct rbd_device *rbd_dev = hctx->queue->queuedata;
4693 struct rbd_img_request *img_req = blk_mq_rq_to_pdu(bd->rq);
4694 enum obj_operation_type op_type;
Ilya Dryomovbc1ecc62014-08-04 18:04:39 +04004695
Ilya Dryomov59e542c2020-02-12 15:23:58 +01004696 switch (req_op(bd->rq)) {
4697 case REQ_OP_DISCARD:
4698 op_type = OBJ_OP_DISCARD;
4699 break;
4700 case REQ_OP_WRITE_ZEROES:
4701 op_type = OBJ_OP_ZEROOUT;
4702 break;
4703 case REQ_OP_WRITE:
4704 op_type = OBJ_OP_WRITE;
4705 break;
4706 case REQ_OP_READ:
4707 op_type = OBJ_OP_READ;
4708 break;
4709 default:
4710 rbd_warn(rbd_dev, "unknown req_op %d", req_op(bd->rq));
4711 return BLK_STS_IOERR;
4712 }
4713
4714 rbd_img_request_init(img_req, rbd_dev, op_type);
4715
4716 if (rbd_img_is_write(img_req)) {
4717 if (rbd_is_ro(rbd_dev)) {
4718 rbd_warn(rbd_dev, "%s on read-only mapping",
4719 obj_op_name(img_req->op_type));
4720 return BLK_STS_IOERR;
4721 }
4722 rbd_assert(!rbd_is_snap(rbd_dev));
4723 }
4724
4725 INIT_WORK(&img_req->work, rbd_queue_workfn);
4726 queue_work(rbd_wq, &img_req->work);
Christoph Hellwigfc17b652017-06-03 09:38:05 +02004727 return BLK_STS_OK;
Alex Elderbf0d5f502012-11-22 00:00:08 -06004728}
4729
Yehuda Sadeh602adf42010-08-12 16:11:25 -07004730static void rbd_free_disk(struct rbd_device *rbd_dev)
4731{
Christoph Hellwig8b9ab622022-06-19 08:05:52 +02004732 put_disk(rbd_dev->disk);
Ilya Dryomov5769ed02017-04-13 12:17:38 +02004733 blk_mq_free_tag_set(&rbd_dev->tag_set);
Alex Eldera0cab922013-04-25 23:15:08 -05004734 rbd_dev->disk = NULL;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07004735}
4736
Alex Elder788e2df2013-01-17 12:25:27 -06004737static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
Ilya Dryomovfe5478e2017-01-25 18:16:21 +01004738 struct ceph_object_id *oid,
4739 struct ceph_object_locator *oloc,
4740 void *buf, int buf_len)
Alex Elder788e2df2013-01-17 12:25:27 -06004741
4742{
Ilya Dryomovfe5478e2017-01-25 18:16:21 +01004743 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4744 struct ceph_osd_request *req;
4745 struct page **pages;
4746 int num_pages = calc_pages_for(0, buf_len);
Alex Elder788e2df2013-01-17 12:25:27 -06004747 int ret;
4748
Ilya Dryomovfe5478e2017-01-25 18:16:21 +01004749 req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_KERNEL);
4750 if (!req)
4751 return -ENOMEM;
Alex Elder788e2df2013-01-17 12:25:27 -06004752
Ilya Dryomovfe5478e2017-01-25 18:16:21 +01004753 ceph_oid_copy(&req->r_base_oid, oid);
4754 ceph_oloc_copy(&req->r_base_oloc, oloc);
4755 req->r_flags = CEPH_OSD_FLAG_READ;
Alex Elder788e2df2013-01-17 12:25:27 -06004756
Ilya Dryomovfe5478e2017-01-25 18:16:21 +01004757 pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
4758 if (IS_ERR(pages)) {
4759 ret = PTR_ERR(pages);
4760 goto out_req;
4761 }
Alex Elder1ceae7e2013-02-06 13:11:38 -06004762
Ilya Dryomovfe5478e2017-01-25 18:16:21 +01004763 osd_req_op_extent_init(req, 0, CEPH_OSD_OP_READ, 0, buf_len, 0, 0);
4764 osd_req_op_extent_osd_data_pages(req, 0, pages, buf_len, 0, false,
4765 true);
Alex Elder788e2df2013-01-17 12:25:27 -06004766
Ilya Dryomov26f887e2018-10-15 16:11:37 +02004767 ret = ceph_osdc_alloc_messages(req, GFP_KERNEL);
4768 if (ret)
4769 goto out_req;
4770
Jeff Laytona8af0d62022-06-30 16:21:50 -04004771 ceph_osdc_start_request(osdc, req);
Ilya Dryomovfe5478e2017-01-25 18:16:21 +01004772 ret = ceph_osdc_wait_request(osdc, req);
4773 if (ret >= 0)
4774 ceph_copy_from_page_vector(pages, buf, 0, ret);
4775
4776out_req:
4777 ceph_osdc_put_request(req);
Alex Elder788e2df2013-01-17 12:25:27 -06004778 return ret;
4779}
4780
Yehuda Sadeh602adf42010-08-12 16:11:25 -07004781/*
Alex Elder662518b2013-05-06 09:51:29 -05004782 * Read the complete header for the given rbd device. On successful
4783 * return, the rbd_dev->header field will contain up-to-date
4784 * information about the image.
Alex Elder4156d9982012-08-02 11:29:46 -05004785 */
Alex Elder99a41eb2013-05-06 09:51:30 -05004786static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev)
Alex Elder4156d9982012-08-02 11:29:46 -05004787{
4788 struct rbd_image_header_ondisk *ondisk = NULL;
4789 u32 snap_count = 0;
4790 u64 names_size = 0;
4791 u32 want_count;
4792 int ret;
4793
4794 /*
4795 * The complete header will include an array of its 64-bit
4796 * snapshot ids, followed by the names of those snapshots as
4797 * a contiguous block of NUL-terminated strings. Note that
4798 * the number of snapshots could change by the time we read
4799 * it in, in which case we re-read it.
4800 */
4801 do {
4802 size_t size;
4803
4804 kfree(ondisk);
4805
4806 size = sizeof (*ondisk);
4807 size += snap_count * sizeof (struct rbd_image_snap_ondisk);
4808 size += names_size;
4809 ondisk = kmalloc(size, GFP_KERNEL);
4810 if (!ondisk)
Alex Elder662518b2013-05-06 09:51:29 -05004811 return -ENOMEM;
Alex Elder4156d9982012-08-02 11:29:46 -05004812
Ilya Dryomovfe5478e2017-01-25 18:16:21 +01004813 ret = rbd_obj_read_sync(rbd_dev, &rbd_dev->header_oid,
4814 &rbd_dev->header_oloc, ondisk, size);
Alex Elder4156d9982012-08-02 11:29:46 -05004815 if (ret < 0)
Alex Elder662518b2013-05-06 09:51:29 -05004816 goto out;
Alex Elderc0cd10db2013-04-26 09:43:47 -05004817 if ((size_t)ret < size) {
Alex Elder4156d9982012-08-02 11:29:46 -05004818 ret = -ENXIO;
Alex Elder06ecc6c2012-11-01 10:17:15 -05004819 rbd_warn(rbd_dev, "short header read (want %zd got %d)",
4820 size, ret);
Alex Elder662518b2013-05-06 09:51:29 -05004821 goto out;
Alex Elder4156d9982012-08-02 11:29:46 -05004822 }
4823 if (!rbd_dev_ondisk_valid(ondisk)) {
4824 ret = -ENXIO;
Alex Elder06ecc6c2012-11-01 10:17:15 -05004825 rbd_warn(rbd_dev, "invalid header");
Alex Elder662518b2013-05-06 09:51:29 -05004826 goto out;
Alex Elder4156d9982012-08-02 11:29:46 -05004827 }
4828
4829 names_size = le64_to_cpu(ondisk->snap_names_len);
4830 want_count = snap_count;
4831 snap_count = le32_to_cpu(ondisk->snap_count);
4832 } while (snap_count != want_count);
4833
Alex Elder662518b2013-05-06 09:51:29 -05004834 ret = rbd_header_from_disk(rbd_dev, ondisk);
4835out:
Alex Elder4156d9982012-08-02 11:29:46 -05004836 kfree(ondisk);
4837
Yehuda Sadehdfc56062010-11-19 14:51:04 -08004838 return ret;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07004839}
4840
Josh Durgin98752012013-08-29 17:26:31 -07004841static void rbd_dev_update_size(struct rbd_device *rbd_dev)
4842{
4843 sector_t size;
Josh Durgin98752012013-08-29 17:26:31 -07004844
4845 /*
Ilya Dryomov811c6682016-04-15 16:22:16 +02004846 * If EXISTS is not set, rbd_dev->disk may be NULL, so don't
4847 * try to update its size. If REMOVING is set, updating size
4848 * is just useless work since the device can't be opened.
Josh Durgin98752012013-08-29 17:26:31 -07004849 */
Ilya Dryomov811c6682016-04-15 16:22:16 +02004850 if (test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags) &&
4851 !test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags)) {
Josh Durgin98752012013-08-29 17:26:31 -07004852 size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
4853 dout("setting size to %llu sectors", (unsigned long long)size);
Christoph Hellwige864e492020-11-16 15:57:07 +01004854 set_capacity_and_notify(rbd_dev->disk, size);
Josh Durgin98752012013-08-29 17:26:31 -07004855 }
4856}
4857
Alex Eldercc4a38bd2013-04-30 00:44:33 -05004858static int rbd_dev_refresh(struct rbd_device *rbd_dev)
Alex Elder1fe5e992012-07-25 09:32:41 -05004859{
Alex Eldere627db02013-05-06 07:40:30 -05004860 u64 mapping_size;
Alex Elder1fe5e992012-07-25 09:32:41 -05004861 int ret;
4862
Alex Eldercfbf6372013-05-31 17:40:45 -05004863 down_write(&rbd_dev->header_rwsem);
Alex Elder3b5cf2a2013-05-29 11:18:59 -05004864 mapping_size = rbd_dev->mapping.size;
Ilya Dryomova720ae02014-07-23 17:11:19 +04004865
4866 ret = rbd_dev_header_info(rbd_dev);
Ilya Dryomov52bb1f92014-07-23 17:11:20 +04004867 if (ret)
Ilya Dryomov73e39e42015-01-08 20:18:22 +03004868 goto out;
Alex Elder15228ed2013-05-01 12:43:03 -05004869
Ilya Dryomove8f59b52014-07-24 10:42:13 +04004870 /*
4871 * If there is a parent, see if it has disappeared due to the
4872 * mapped image getting flattened.
4873 */
4874 if (rbd_dev->parent) {
4875 ret = rbd_dev_v2_parent_info(rbd_dev);
4876 if (ret)
Ilya Dryomov73e39e42015-01-08 20:18:22 +03004877 goto out;
Ilya Dryomove8f59b52014-07-24 10:42:13 +04004878 }
4879
Ilya Dryomov686238b2019-11-18 12:51:02 +01004880 rbd_assert(!rbd_is_snap(rbd_dev));
4881 rbd_dev->mapping.size = rbd_dev->header.image_size;
Alex Elder15228ed2013-05-01 12:43:03 -05004882
Ilya Dryomov73e39e42015-01-08 20:18:22 +03004883out:
Alex Eldercfbf6372013-05-31 17:40:45 -05004884 up_write(&rbd_dev->header_rwsem);
Ilya Dryomov73e39e42015-01-08 20:18:22 +03004885 if (!ret && mapping_size != rbd_dev->mapping.size)
Josh Durgin98752012013-08-29 17:26:31 -07004886 rbd_dev_update_size(rbd_dev);
Alex Elder1fe5e992012-07-25 09:32:41 -05004887
Ilya Dryomov73e39e42015-01-08 20:18:22 +03004888 return ret;
Alex Elder1fe5e992012-07-25 09:32:41 -05004889}
4890
Eric Biggersf363b082017-03-30 13:39:16 -07004891static const struct blk_mq_ops rbd_mq_ops = {
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01004892 .queue_rq = rbd_queue_rq,
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01004893};
4894
Yehuda Sadeh602adf42010-08-12 16:11:25 -07004895static int rbd_init_disk(struct rbd_device *rbd_dev)
4896{
4897 struct gendisk *disk;
4898 struct request_queue *q;
Ilya Dryomov420efbd2018-04-16 09:32:18 +02004899 unsigned int objset_bytes =
4900 rbd_dev->layout.object_size * rbd_dev->layout.stripe_count;
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01004901 int err;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07004902
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01004903 memset(&rbd_dev->tag_set, 0, sizeof(rbd_dev->tag_set));
4904 rbd_dev->tag_set.ops = &rbd_mq_ops;
Ilya Dryomovb5584182015-06-23 16:21:19 +03004905 rbd_dev->tag_set.queue_depth = rbd_dev->opts->queue_depth;
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01004906 rbd_dev->tag_set.numa_node = NUMA_NO_NODE;
Ming Lei56d18f62019-02-15 19:13:24 +08004907 rbd_dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
Hannes Reineckef9b6b982020-01-31 11:37:39 +01004908 rbd_dev->tag_set.nr_hw_queues = num_present_cpus();
Ilya Dryomov59e542c2020-02-12 15:23:58 +01004909 rbd_dev->tag_set.cmd_size = sizeof(struct rbd_img_request);
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01004910
4911 err = blk_mq_alloc_tag_set(&rbd_dev->tag_set);
4912 if (err)
Christoph Hellwig195b1952021-06-02 09:53:37 +03004913 return err;
Josh Durgin029bcbd2011-07-22 11:35:23 -07004914
Christoph Hellwig195b1952021-06-02 09:53:37 +03004915 disk = blk_mq_alloc_disk(&rbd_dev->tag_set, rbd_dev);
4916 if (IS_ERR(disk)) {
4917 err = PTR_ERR(disk);
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01004918 goto out_tag_set;
4919 }
Christoph Hellwig195b1952021-06-02 09:53:37 +03004920 q = disk->queue;
4921
4922 snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d",
4923 rbd_dev->dev_id);
4924 disk->major = rbd_dev->major;
4925 disk->first_minor = rbd_dev->minor;
Christoph Hellwig1ebe2e52021-11-22 14:06:22 +01004926 if (single_major)
Christoph Hellwig195b1952021-06-02 09:53:37 +03004927 disk->minors = (1 << RBD_SINGLE_MAJOR_PART_SHIFT);
Christoph Hellwig1ebe2e52021-11-22 14:06:22 +01004928 else
Christoph Hellwig195b1952021-06-02 09:53:37 +03004929 disk->minors = RBD_MINORS_PER_MAJOR;
Christoph Hellwig195b1952021-06-02 09:53:37 +03004930 disk->fops = &rbd_bd_ops;
Ilya Dryomov0077a5002021-07-21 12:16:26 +02004931 disk->private_data = rbd_dev;
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01004932
Bart Van Assche8b904b52018-03-07 17:10:10 -08004933 blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
Ilya Dryomovd8a2c892015-03-24 16:15:17 +03004934 /* QUEUE_FLAG_ADD_RANDOM is off by default for blk-mq */
Alex Elder593a9e72012-02-07 12:03:37 -06004935
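	/*
	 * Cap a single request at one object set (object_size *
	 * stripe_count) and advertise the configured alloc_size as the
	 * preferred I/O granularity.
	 */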
Ilya Dryomov420efbd2018-04-16 09:32:18 +02004936 blk_queue_max_hw_sectors(q, objset_bytes >> SECTOR_SHIFT);
Ilya Dryomov0d9fde42015-10-07 16:09:35 +02004937 q->limits.max_sectors = queue_max_hw_sectors(q);
Ilya Dryomov21acdf42017-12-21 15:35:11 +01004938 blk_queue_max_segments(q, USHRT_MAX);
Ilya Dryomov24f1df62018-01-12 17:22:10 +01004939 blk_queue_max_segment_size(q, UINT_MAX);
Ilya Dryomov16d80c52019-03-15 14:50:04 +01004940 blk_queue_io_min(q, rbd_dev->opts->alloc_size);
4941 blk_queue_io_opt(q, rbd_dev->opts->alloc_size);
Josh Durgin029bcbd2011-07-22 11:35:23 -07004942
Ilya Dryomovd9360542018-03-23 06:14:47 +01004943 if (rbd_dev->opts->trim) {
Ilya Dryomov16d80c52019-03-15 14:50:04 +01004944 q->limits.discard_granularity = rbd_dev->opts->alloc_size;
Ilya Dryomovd9360542018-03-23 06:14:47 +01004945 blk_queue_max_discard_sectors(q, objset_bytes >> SECTOR_SHIFT);
4946 blk_queue_max_write_zeroes_sectors(q, objset_bytes >> SECTOR_SHIFT);
4947 }
Guangliang Zhao90e98c52014-04-01 22:22:16 +08004948
Ronny Hegewaldbae818ee2015-10-15 18:50:46 +00004949 if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC))
Christoph Hellwig1cb039f2020-09-24 08:51:38 +02004950 blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, q);
Ronny Hegewaldbae818ee2015-10-15 18:50:46 +00004951
Yehuda Sadeh602adf42010-08-12 16:11:25 -07004952 rbd_dev->disk = disk;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07004953
Yehuda Sadeh602adf42010-08-12 16:11:25 -07004954 return 0;
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01004955out_tag_set:
4956 blk_mq_free_tag_set(&rbd_dev->tag_set);
Christoph Hellwig7ad18af2015-01-13 17:20:04 +01004957 return err;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07004958}
4959
Yehuda Sadehdfc56062010-11-19 14:51:04 -08004960/*
4961 sysfs
4962*/
Yehuda Sadeh602adf42010-08-12 16:11:25 -07004963
Alex Elder593a9e72012-02-07 12:03:37 -06004964static struct rbd_device *dev_to_rbd_dev(struct device *dev)
4965{
4966 return container_of(dev, struct rbd_device, dev);
4967}
4968
Yehuda Sadehdfc56062010-11-19 14:51:04 -08004969static ssize_t rbd_size_show(struct device *dev,
4970 struct device_attribute *attr, char *buf)
Yehuda Sadeh602adf42010-08-12 16:11:25 -07004971{
Alex Elder593a9e72012-02-07 12:03:37 -06004972 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
Yehuda Sadehdfc56062010-11-19 14:51:04 -08004973
Alex Elderfc71d832013-04-26 15:44:36 -05004974 return sprintf(buf, "%llu\n",
4975 (unsigned long long)rbd_dev->mapping.size);
Yehuda Sadeh602adf42010-08-12 16:11:25 -07004976}
4977
Alex Elder34b13182012-07-13 20:35:12 -05004978static ssize_t rbd_features_show(struct device *dev,
4979 struct device_attribute *attr, char *buf)
4980{
4981 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4982
Ilya Dryomovfa58bca2019-11-05 13:16:52 +01004983 return sprintf(buf, "0x%016llx\n", rbd_dev->header.features);
Alex Elder34b13182012-07-13 20:35:12 -05004984}
4985
Yehuda Sadehdfc56062010-11-19 14:51:04 -08004986static ssize_t rbd_major_show(struct device *dev,
4987 struct device_attribute *attr, char *buf)
Yehuda Sadeh602adf42010-08-12 16:11:25 -07004988{
Alex Elder593a9e72012-02-07 12:03:37 -06004989 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
Yehuda Sadehdfc56062010-11-19 14:51:04 -08004990
Alex Elderfc71d832013-04-26 15:44:36 -05004991 if (rbd_dev->major)
4992 return sprintf(buf, "%d\n", rbd_dev->major);
4993
4994 return sprintf(buf, "(none)\n");
Ilya Dryomovdd82fff2013-12-13 15:28:57 +02004995}
Alex Elderfc71d832013-04-26 15:44:36 -05004996
Ilya Dryomovdd82fff2013-12-13 15:28:57 +02004997static ssize_t rbd_minor_show(struct device *dev,
4998 struct device_attribute *attr, char *buf)
4999{
5000 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5001
5002 return sprintf(buf, "%d\n", rbd_dev->minor);
Yehuda Sadehdfc56062010-11-19 14:51:04 -08005003}
5004
Ilya Dryomov005a07bf2016-08-18 18:38:43 +02005005static ssize_t rbd_client_addr_show(struct device *dev,
5006 struct device_attribute *attr, char *buf)
5007{
5008 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5009 struct ceph_entity_addr *client_addr =
5010 ceph_client_addr(rbd_dev->rbd_client->client);
5011
5012 return sprintf(buf, "%pISpc/%u\n", &client_addr->in_addr,
5013 le32_to_cpu(client_addr->nonce));
5014}
5015
Yehuda Sadehdfc56062010-11-19 14:51:04 -08005016static ssize_t rbd_client_id_show(struct device *dev,
5017 struct device_attribute *attr, char *buf)
5018{
Alex Elder593a9e72012-02-07 12:03:37 -06005019 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
Yehuda Sadehdfc56062010-11-19 14:51:04 -08005020
Alex Elder1dbb4392012-01-24 10:08:37 -06005021 return sprintf(buf, "client%lld\n",
Ilya Dryomov033268a2016-08-12 14:59:58 +02005022 ceph_client_gid(rbd_dev->rbd_client->client));
Yehuda Sadehdfc56062010-11-19 14:51:04 -08005023}
5024
Mike Christie267fb902016-08-18 18:38:43 +02005025static ssize_t rbd_cluster_fsid_show(struct device *dev,
5026 struct device_attribute *attr, char *buf)
5027{
5028 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5029
5030 return sprintf(buf, "%pU\n", &rbd_dev->rbd_client->client->fsid);
5031}
5032
Mike Christie0d6d1e9c2016-08-18 18:38:45 +02005033static ssize_t rbd_config_info_show(struct device *dev,
5034 struct device_attribute *attr, char *buf)
5035{
5036 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5037
Ilya Dryomovf44d04e2020-09-03 13:24:11 +02005038 if (!capable(CAP_SYS_ADMIN))
5039 return -EPERM;
5040
Mike Christie0d6d1e9c2016-08-18 18:38:45 +02005041 return sprintf(buf, "%s\n", rbd_dev->config_info);
Yehuda Sadehdfc56062010-11-19 14:51:04 -08005042}
5043
5044static ssize_t rbd_pool_show(struct device *dev,
5045 struct device_attribute *attr, char *buf)
5046{
Alex Elder593a9e72012-02-07 12:03:37 -06005047 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
Yehuda Sadehdfc56062010-11-19 14:51:04 -08005048
Alex Elder0d7dbfc2012-10-25 23:34:41 -05005049 return sprintf(buf, "%s\n", rbd_dev->spec->pool_name);
Yehuda Sadehdfc56062010-11-19 14:51:04 -08005050}
5051
Alex Elder9bb2f332012-07-12 10:46:35 -05005052static ssize_t rbd_pool_id_show(struct device *dev,
5053 struct device_attribute *attr, char *buf)
5054{
5055 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5056
Alex Elder0d7dbfc2012-10-25 23:34:41 -05005057 return sprintf(buf, "%llu\n",
Alex Elderfc71d832013-04-26 15:44:36 -05005058 (unsigned long long) rbd_dev->spec->pool_id);
Alex Elder9bb2f332012-07-12 10:46:35 -05005059}
5060
Ilya Dryomovb26c0472018-07-03 15:28:43 +02005061static ssize_t rbd_pool_ns_show(struct device *dev,
5062 struct device_attribute *attr, char *buf)
5063{
5064 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5065
5066 return sprintf(buf, "%s\n", rbd_dev->spec->pool_ns ?: "");
5067}
5068
Yehuda Sadehdfc56062010-11-19 14:51:04 -08005069static ssize_t rbd_name_show(struct device *dev,
5070 struct device_attribute *attr, char *buf)
5071{
Alex Elder593a9e72012-02-07 12:03:37 -06005072 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
Yehuda Sadehdfc56062010-11-19 14:51:04 -08005073
Alex Eldera92ffdf2012-10-30 19:40:33 -05005074 if (rbd_dev->spec->image_name)
5075 return sprintf(buf, "%s\n", rbd_dev->spec->image_name);
5076
5077 return sprintf(buf, "(unknown)\n");
Yehuda Sadehdfc56062010-11-19 14:51:04 -08005078}
5079
Alex Elder589d30e2012-07-10 20:30:11 -05005080static ssize_t rbd_image_id_show(struct device *dev,
5081 struct device_attribute *attr, char *buf)
5082{
5083 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5084
Alex Elder0d7dbfc2012-10-25 23:34:41 -05005085 return sprintf(buf, "%s\n", rbd_dev->spec->image_id);
Alex Elder589d30e2012-07-10 20:30:11 -05005086}
5087
Alex Elder34b13182012-07-13 20:35:12 -05005088/*
5089 * Shows the name of the currently-mapped snapshot (or
5090 * RBD_SNAP_HEAD_NAME for the base image).
5091 */
Yehuda Sadehdfc56062010-11-19 14:51:04 -08005092static ssize_t rbd_snap_show(struct device *dev,
5093 struct device_attribute *attr,
5094 char *buf)
5095{
Alex Elder593a9e72012-02-07 12:03:37 -06005096 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
Yehuda Sadehdfc56062010-11-19 14:51:04 -08005097
Alex Elder0d7dbfc2012-10-25 23:34:41 -05005098 return sprintf(buf, "%s\n", rbd_dev->spec->snap_name);
Yehuda Sadehdfc56062010-11-19 14:51:04 -08005099}
5100
Mike Christie92a58672016-08-18 18:38:44 +02005101static ssize_t rbd_snap_id_show(struct device *dev,
5102 struct device_attribute *attr, char *buf)
5103{
5104 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5105
5106 return sprintf(buf, "%llu\n", rbd_dev->spec->snap_id);
5107}
5108
Alex Elder86b00e02012-10-25 23:34:42 -05005109/*
Ilya Dryomovff961282014-07-22 21:53:07 +04005110 * For a v2 image, shows the chain of parent images, separated by empty
5111 * lines. For v1 images or if there is no parent, shows "(no parent
5112 * image)".
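 *
 * Each entry in the chain consists of pool_id, pool_name, pool_ns,
 * image_id, image_name, snap_id, snap_name and overlap lines, emitted
 * in that order.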
Alex Elder86b00e02012-10-25 23:34:42 -05005113 */
5114static ssize_t rbd_parent_show(struct device *dev,
Ilya Dryomovff961282014-07-22 21:53:07 +04005115 struct device_attribute *attr,
5116 char *buf)
Alex Elder86b00e02012-10-25 23:34:42 -05005117{
5118 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
Ilya Dryomovff961282014-07-22 21:53:07 +04005119 ssize_t count = 0;
Alex Elder86b00e02012-10-25 23:34:42 -05005120
Ilya Dryomovff961282014-07-22 21:53:07 +04005121 if (!rbd_dev->parent)
Alex Elder86b00e02012-10-25 23:34:42 -05005122 return sprintf(buf, "(no parent image)\n");
5123
Ilya Dryomovff961282014-07-22 21:53:07 +04005124 for ( ; rbd_dev->parent; rbd_dev = rbd_dev->parent) {
5125 struct rbd_spec *spec = rbd_dev->parent_spec;
Alex Elder86b00e02012-10-25 23:34:42 -05005126
Ilya Dryomovff961282014-07-22 21:53:07 +04005127 count += sprintf(&buf[count], "%s"
5128 "pool_id %llu\npool_name %s\n"
Ilya Dryomove92c0ea2018-08-22 17:26:10 +02005129 "pool_ns %s\n"
Ilya Dryomovff961282014-07-22 21:53:07 +04005130 "image_id %s\nimage_name %s\n"
5131 "snap_id %llu\nsnap_name %s\n"
5132 "overlap %llu\n",
5133 !count ? "" : "\n", /* first? */
5134 spec->pool_id, spec->pool_name,
Ilya Dryomove92c0ea2018-08-22 17:26:10 +02005135 spec->pool_ns ?: "",
Ilya Dryomovff961282014-07-22 21:53:07 +04005136 spec->image_id, spec->image_name ?: "(unknown)",
5137 spec->snap_id, spec->snap_name,
5138 rbd_dev->parent_overlap);
5139 }
Alex Elder86b00e02012-10-25 23:34:42 -05005140
Ilya Dryomovff961282014-07-22 21:53:07 +04005141 return count;
Alex Elder86b00e02012-10-25 23:34:42 -05005142}
5143
Yehuda Sadehdfc56062010-11-19 14:51:04 -08005144static ssize_t rbd_image_refresh(struct device *dev,
5145 struct device_attribute *attr,
5146 const char *buf,
5147 size_t size)
5148{
Alex Elder593a9e72012-02-07 12:03:37 -06005149 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
Alex Elderb8136232012-07-25 09:32:41 -05005150 int ret;
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005151
Ilya Dryomovf44d04e2020-09-03 13:24:11 +02005152 if (!capable(CAP_SYS_ADMIN))
5153 return -EPERM;
5154
Alex Eldercc4a38bd2013-04-30 00:44:33 -05005155 ret = rbd_dev_refresh(rbd_dev);
Alex Eldere627db02013-05-06 07:40:30 -05005156 if (ret)
Ilya Dryomov52bb1f92014-07-23 17:11:20 +04005157 return ret;
Alex Elderb8136232012-07-25 09:32:41 -05005158
Ilya Dryomov52bb1f92014-07-23 17:11:20 +04005159 return size;
Yehuda Sadehdfc56062010-11-19 14:51:04 -08005160}
Yehuda Sadeh602adf42010-08-12 16:11:25 -07005161
Joe Perches5657a812018-05-24 13:38:59 -06005162static DEVICE_ATTR(size, 0444, rbd_size_show, NULL);
5163static DEVICE_ATTR(features, 0444, rbd_features_show, NULL);
5164static DEVICE_ATTR(major, 0444, rbd_major_show, NULL);
5165static DEVICE_ATTR(minor, 0444, rbd_minor_show, NULL);
5166static DEVICE_ATTR(client_addr, 0444, rbd_client_addr_show, NULL);
5167static DEVICE_ATTR(client_id, 0444, rbd_client_id_show, NULL);
5168static DEVICE_ATTR(cluster_fsid, 0444, rbd_cluster_fsid_show, NULL);
5169static DEVICE_ATTR(config_info, 0400, rbd_config_info_show, NULL);
5170static DEVICE_ATTR(pool, 0444, rbd_pool_show, NULL);
5171static DEVICE_ATTR(pool_id, 0444, rbd_pool_id_show, NULL);
Ilya Dryomovb26c0472018-07-03 15:28:43 +02005172static DEVICE_ATTR(pool_ns, 0444, rbd_pool_ns_show, NULL);
Joe Perches5657a812018-05-24 13:38:59 -06005173static DEVICE_ATTR(name, 0444, rbd_name_show, NULL);
5174static DEVICE_ATTR(image_id, 0444, rbd_image_id_show, NULL);
5175static DEVICE_ATTR(refresh, 0200, NULL, rbd_image_refresh);
5176static DEVICE_ATTR(current_snap, 0444, rbd_snap_show, NULL);
5177static DEVICE_ATTR(snap_id, 0444, rbd_snap_id_show, NULL);
5178static DEVICE_ATTR(parent, 0444, rbd_parent_show, NULL);
Yehuda Sadehdfc56062010-11-19 14:51:04 -08005179
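/*
 * These attributes are exported under /sys/bus/rbd/devices/<dev-id>/.
 * For example (illustrative device id):
 *
 *	cat /sys/bus/rbd/devices/0/size
 *	cat /sys/bus/rbd/devices/0/current_snap
 *	echo 1 > /sys/bus/rbd/devices/0/refresh
 */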
5180static struct attribute *rbd_attrs[] = {
5181 &dev_attr_size.attr,
Alex Elder34b13182012-07-13 20:35:12 -05005182 &dev_attr_features.attr,
Yehuda Sadehdfc56062010-11-19 14:51:04 -08005183 &dev_attr_major.attr,
Ilya Dryomovdd82fff2013-12-13 15:28:57 +02005184 &dev_attr_minor.attr,
Ilya Dryomov005a07bf2016-08-18 18:38:43 +02005185 &dev_attr_client_addr.attr,
Yehuda Sadehdfc56062010-11-19 14:51:04 -08005186 &dev_attr_client_id.attr,
Mike Christie267fb902016-08-18 18:38:43 +02005187 &dev_attr_cluster_fsid.attr,
Mike Christie0d6d1e9c2016-08-18 18:38:45 +02005188 &dev_attr_config_info.attr,
Yehuda Sadehdfc56062010-11-19 14:51:04 -08005189 &dev_attr_pool.attr,
Alex Elder9bb2f332012-07-12 10:46:35 -05005190 &dev_attr_pool_id.attr,
Ilya Dryomovb26c0472018-07-03 15:28:43 +02005191 &dev_attr_pool_ns.attr,
Yehuda Sadehdfc56062010-11-19 14:51:04 -08005192 &dev_attr_name.attr,
Alex Elder589d30e2012-07-10 20:30:11 -05005193 &dev_attr_image_id.attr,
Yehuda Sadehdfc56062010-11-19 14:51:04 -08005194 &dev_attr_current_snap.attr,
Mike Christie92a58672016-08-18 18:38:44 +02005195 &dev_attr_snap_id.attr,
Alex Elder86b00e02012-10-25 23:34:42 -05005196 &dev_attr_parent.attr,
Yehuda Sadehdfc56062010-11-19 14:51:04 -08005197 &dev_attr_refresh.attr,
Yehuda Sadehdfc56062010-11-19 14:51:04 -08005198 NULL
5199};
5200
5201static struct attribute_group rbd_attr_group = {
5202 .attrs = rbd_attrs,
5203};
5204
5205static const struct attribute_group *rbd_attr_groups[] = {
5206 &rbd_attr_group,
5207 NULL
5208};
5209
Ilya Dryomov6cac4692015-10-16 20:11:25 +02005210static void rbd_dev_release(struct device *dev);
Yehuda Sadehdfc56062010-11-19 14:51:04 -08005211
Bhumika Goyalb9942bc2017-02-11 12:14:38 +05305212static const struct device_type rbd_device_type = {
Yehuda Sadehdfc56062010-11-19 14:51:04 -08005213 .name = "rbd",
5214 .groups = rbd_attr_groups,
Ilya Dryomov6cac4692015-10-16 20:11:25 +02005215 .release = rbd_dev_release,
Yehuda Sadehdfc56062010-11-19 14:51:04 -08005216};
5217
Alex Elder8b8fb992012-10-26 17:25:24 -05005218static struct rbd_spec *rbd_spec_get(struct rbd_spec *spec)
5219{
5220 kref_get(&spec->kref);
5221
5222 return spec;
5223}
5224
5225static void rbd_spec_free(struct kref *kref);
5226static void rbd_spec_put(struct rbd_spec *spec)
5227{
5228 if (spec)
5229 kref_put(&spec->kref, rbd_spec_free);
5230}
5231
5232static struct rbd_spec *rbd_spec_alloc(void)
5233{
5234 struct rbd_spec *spec;
5235
5236 spec = kzalloc(sizeof (*spec), GFP_KERNEL);
5237 if (!spec)
5238 return NULL;
Ilya Dryomov04077592014-07-23 17:11:20 +04005239
5240 spec->pool_id = CEPH_NOPOOL;
5241 spec->snap_id = CEPH_NOSNAP;
Alex Elder8b8fb992012-10-26 17:25:24 -05005242 kref_init(&spec->kref);
5243
Alex Elder8b8fb992012-10-26 17:25:24 -05005244 return spec;
5245}
5246
5247static void rbd_spec_free(struct kref *kref)
5248{
5249 struct rbd_spec *spec = container_of(kref, struct rbd_spec, kref);
5250
5251 kfree(spec->pool_name);
Ilya Dryomovb26c0472018-07-03 15:28:43 +02005252 kfree(spec->pool_ns);
Alex Elder8b8fb992012-10-26 17:25:24 -05005253 kfree(spec->image_id);
5254 kfree(spec->image_name);
5255 kfree(spec->snap_name);
5256 kfree(spec);
5257}
5258
Ilya Dryomov1643dfa2016-08-12 15:45:52 +02005259static void rbd_dev_free(struct rbd_device *rbd_dev)
Ilya Dryomovdd5ac322015-10-16 17:09:24 +02005260{
Ilya Dryomov99d16942016-08-12 16:11:41 +02005261 WARN_ON(rbd_dev->watch_state != RBD_WATCH_STATE_UNREGISTERED);
Ilya Dryomoved95b212016-08-12 16:40:02 +02005262 WARN_ON(rbd_dev->lock_state != RBD_LOCK_STATE_UNLOCKED);
Ilya Dryomovdd5ac322015-10-16 17:09:24 +02005263
Ilya Dryomovc41d13a2016-04-29 20:01:25 +02005264 ceph_oid_destroy(&rbd_dev->header_oid);
Ilya Dryomov6b6dddb2016-08-05 16:15:38 +02005265 ceph_oloc_destroy(&rbd_dev->header_oloc);
Mike Christie0d6d1e9c2016-08-18 18:38:45 +02005266 kfree(rbd_dev->config_info);
Ilya Dryomovc41d13a2016-04-29 20:01:25 +02005267
Ilya Dryomovdd5ac322015-10-16 17:09:24 +02005268 rbd_put_client(rbd_dev->rbd_client);
5269 rbd_spec_put(rbd_dev->spec);
5270 kfree(rbd_dev->opts);
5271 kfree(rbd_dev);
Ilya Dryomov1643dfa2016-08-12 15:45:52 +02005272}
5273
5274static void rbd_dev_release(struct device *dev)
5275{
5276 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5277 bool need_put = !!rbd_dev->opts;
5278
5279 if (need_put) {
5280 destroy_workqueue(rbd_dev->task_wq);
5281 ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);
5282 }
5283
5284 rbd_dev_free(rbd_dev);
Ilya Dryomovdd5ac322015-10-16 17:09:24 +02005285
5286 /*
5287 * This is racy, but way better than putting the module_put() outside of
5288 * the release callback. The race window is pretty small, so
5289 * doing something similar to dm (dm-builtin.c) is overkill.
5290 */
5291 if (need_put)
5292 module_put(THIS_MODULE);
5293}
5294
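/*
 * Allocate and initialize the common part of an rbd_device.  The device
 * id, name and task workqueue are assigned separately by rbd_dev_create()
 * for mapping devices.
 */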
Ilya Dryomov1643dfa2016-08-12 15:45:52 +02005295static struct rbd_device *__rbd_dev_create(struct rbd_client *rbdc,
5296 struct rbd_spec *spec)
Alex Elderc53d5892012-10-25 23:34:42 -05005297{
5298 struct rbd_device *rbd_dev;
5299
Ilya Dryomov1643dfa2016-08-12 15:45:52 +02005300 rbd_dev = kzalloc(sizeof(*rbd_dev), GFP_KERNEL);
Alex Elderc53d5892012-10-25 23:34:42 -05005301 if (!rbd_dev)
5302 return NULL;
5303
5304 spin_lock_init(&rbd_dev->lock);
5305 INIT_LIST_HEAD(&rbd_dev->node);
Alex Elderc53d5892012-10-25 23:34:42 -05005306 init_rwsem(&rbd_dev->header_rwsem);
5307
Ilya Dryomov7e973322017-01-25 18:16:22 +01005308 rbd_dev->header.data_pool_id = CEPH_NOPOOL;
Ilya Dryomovc41d13a2016-04-29 20:01:25 +02005309 ceph_oid_init(&rbd_dev->header_oid);
Ilya Dryomov431a02c2017-01-25 18:16:21 +01005310 rbd_dev->header_oloc.pool = spec->pool_id;
Ilya Dryomovb26c0472018-07-03 15:28:43 +02005311 if (spec->pool_ns) {
5312 WARN_ON(!*spec->pool_ns);
5313 rbd_dev->header_oloc.pool_ns =
5314 ceph_find_or_create_string(spec->pool_ns,
5315 strlen(spec->pool_ns));
5316 }
Ilya Dryomovc41d13a2016-04-29 20:01:25 +02005317
Ilya Dryomov99d16942016-08-12 16:11:41 +02005318 mutex_init(&rbd_dev->watch_mutex);
5319 rbd_dev->watch_state = RBD_WATCH_STATE_UNREGISTERED;
5320 INIT_DELAYED_WORK(&rbd_dev->watch_dwork, rbd_reregister_watch);
5321
Ilya Dryomoved95b212016-08-12 16:40:02 +02005322 init_rwsem(&rbd_dev->lock_rwsem);
5323 rbd_dev->lock_state = RBD_LOCK_STATE_UNLOCKED;
5324 INIT_WORK(&rbd_dev->acquired_lock_work, rbd_notify_acquired_lock);
5325 INIT_WORK(&rbd_dev->released_lock_work, rbd_notify_released_lock);
5326 INIT_DELAYED_WORK(&rbd_dev->lock_dwork, rbd_acquire_lock);
5327 INIT_WORK(&rbd_dev->unlock_work, rbd_release_lock_work);
Ilya Dryomove1fddc82019-05-30 16:07:48 +02005328 spin_lock_init(&rbd_dev->lock_lists_lock);
Ilya Dryomov637cd062019-06-06 17:14:49 +02005329 INIT_LIST_HEAD(&rbd_dev->acquiring_list);
Ilya Dryomove1fddc82019-05-30 16:07:48 +02005330 INIT_LIST_HEAD(&rbd_dev->running_list);
Ilya Dryomov637cd062019-06-06 17:14:49 +02005331 init_completion(&rbd_dev->acquire_wait);
Ilya Dryomove1fddc82019-05-30 16:07:48 +02005332 init_completion(&rbd_dev->releasing_wait);
Ilya Dryomoved95b212016-08-12 16:40:02 +02005333
Ilya Dryomov22e8bd52019-06-05 19:25:11 +02005334 spin_lock_init(&rbd_dev->object_map_lock);
Alex Elderc53d5892012-10-25 23:34:42 -05005335
Ilya Dryomovdd5ac322015-10-16 17:09:24 +02005336 rbd_dev->dev.bus = &rbd_bus_type;
5337 rbd_dev->dev.type = &rbd_device_type;
5338 rbd_dev->dev.parent = &rbd_root_dev;
Ilya Dryomovdd5ac322015-10-16 17:09:24 +02005339 device_initialize(&rbd_dev->dev);
5340
Alex Elderc53d5892012-10-25 23:34:42 -05005341 rbd_dev->rbd_client = rbdc;
Ilya Dryomovd1475432015-06-22 13:24:48 +03005342 rbd_dev->spec = spec;
Alex Elder0903e872012-11-14 12:25:19 -06005343
Alex Elderc53d5892012-10-25 23:34:42 -05005344 return rbd_dev;
5345}
5346
Ilya Dryomov1643dfa2016-08-12 15:45:52 +02005347/*
5348 * Create a mapping rbd_dev, i.e. one that is exposed as a block device.
5349 */
5350static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
5351 struct rbd_spec *spec,
5352 struct rbd_options *opts)
5353{
5354 struct rbd_device *rbd_dev;
5355
5356 rbd_dev = __rbd_dev_create(rbdc, spec);
5357 if (!rbd_dev)
5358 return NULL;
5359
5360 rbd_dev->opts = opts;
5361
5362 /* get an id and fill in device name */
5363 rbd_dev->dev_id = ida_simple_get(&rbd_dev_id_ida, 0,
5364 minor_to_rbd_dev_id(1 << MINORBITS),
5365 GFP_KERNEL);
5366 if (rbd_dev->dev_id < 0)
5367 goto fail_rbd_dev;
5368
5369 sprintf(rbd_dev->name, RBD_DRV_NAME "%d", rbd_dev->dev_id);
5370 rbd_dev->task_wq = alloc_ordered_workqueue("%s-tasks", WQ_MEM_RECLAIM,
5371 rbd_dev->name);
5372 if (!rbd_dev->task_wq)
5373 goto fail_dev_id;
5374
5375 /* we have a ref from do_rbd_add() */
5376 __module_get(THIS_MODULE);
5377
5378 dout("%s rbd_dev %p dev_id %d\n", __func__, rbd_dev, rbd_dev->dev_id);
5379 return rbd_dev;
5380
5381fail_dev_id:
5382 ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);
5383fail_rbd_dev:
5384 rbd_dev_free(rbd_dev);
5385 return NULL;
5386}
5387
Alex Elderc53d5892012-10-25 23:34:42 -05005388static void rbd_dev_destroy(struct rbd_device *rbd_dev)
5389{
Ilya Dryomovdd5ac322015-10-16 17:09:24 +02005390 if (rbd_dev)
5391 put_device(&rbd_dev->dev);
Alex Elderc53d5892012-10-25 23:34:42 -05005392}
5393
Yehuda Sadehdfc56062010-11-19 14:51:04 -08005394/*
Alex Elder9d475de52012-07-03 16:01:19 -05005395 * Get the size and object order for an image snapshot or, if
5396 * snap_id is CEPH_NOSNAP, for the base image.
5398 */
5399static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
5400 u8 *order, u64 *snap_size)
5401{
5402 __le64 snapid = cpu_to_le64(snap_id);
5403 int ret;
5404 struct {
5405 u8 order;
5406 __le64 size;
5407 } __attribute__ ((packed)) size_buf = { 0 };
5408
Ilya Dryomovecd4a682017-01-25 18:16:21 +01005409 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
5410 &rbd_dev->header_oloc, "get_size",
5411 &snapid, sizeof(snapid),
5412 &size_buf, sizeof(size_buf));
Alex Elder36be9a72013-01-19 00:30:28 -06005413 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
Alex Elder9d475de52012-07-03 16:01:19 -05005414 if (ret < 0)
5415 return ret;
Alex Elder57385b52013-04-21 12:14:45 -05005416 if (ret < sizeof (size_buf))
5417 return -ERANGE;
Alex Elder9d475de52012-07-03 16:01:19 -05005418
Josh Durginc3545572013-08-28 17:08:10 -07005419 if (order) {
Alex Elderc86f86e2013-04-25 15:09:41 -05005420 *order = size_buf.order;
Josh Durginc3545572013-08-28 17:08:10 -07005421 dout(" order %u", (unsigned int)*order);
5422 }
Alex Elder9d475de52012-07-03 16:01:19 -05005423 *snap_size = le64_to_cpu(size_buf.size);
5424
Josh Durginc3545572013-08-28 17:08:10 -07005425 dout(" snap_id 0x%016llx snap_size = %llu\n",
5426 (unsigned long long)snap_id,
Alex Elder57385b52013-04-21 12:14:45 -05005427 (unsigned long long)*snap_size);
Alex Elder9d475de52012-07-03 16:01:19 -05005428
5429 return 0;
5430}
5431
5432static int rbd_dev_v2_image_size(struct rbd_device *rbd_dev)
5433{
5434 return _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
5435 &rbd_dev->header.obj_order,
5436 &rbd_dev->header.image_size);
5437}
5438
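/*
 * Fetch the object name prefix used for the image's data objects via
 * the "get_object_prefix" class method and record it in the header.
 */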
Alex Elder1e130192012-07-03 16:01:19 -05005439static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
5440{
Dongsheng Yang5435d2062019-08-09 07:05:27 +00005441 size_t size;
Alex Elder1e130192012-07-03 16:01:19 -05005442 void *reply_buf;
5443 int ret;
5444 void *p;
5445
Dongsheng Yang5435d2062019-08-09 07:05:27 +00005446 /* Response will be an encoded string, which includes a length */
5447 size = sizeof(__le32) + RBD_OBJ_PREFIX_LEN_MAX;
5448 reply_buf = kzalloc(size, GFP_KERNEL);
Alex Elder1e130192012-07-03 16:01:19 -05005449 if (!reply_buf)
5450 return -ENOMEM;
5451
Ilya Dryomovecd4a682017-01-25 18:16:21 +01005452 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
5453 &rbd_dev->header_oloc, "get_object_prefix",
Dongsheng Yang5435d2062019-08-09 07:05:27 +00005454 NULL, 0, reply_buf, size);
Alex Elder36be9a72013-01-19 00:30:28 -06005455 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
Alex Elder1e130192012-07-03 16:01:19 -05005456 if (ret < 0)
5457 goto out;
5458
5459 p = reply_buf;
5460 rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p,
Alex Elder57385b52013-04-21 12:14:45 -05005461 p + ret, NULL, GFP_NOIO);
5462 ret = 0;
Alex Elder1e130192012-07-03 16:01:19 -05005463
5464 if (IS_ERR(rbd_dev->header.object_prefix)) {
5465 ret = PTR_ERR(rbd_dev->header.object_prefix);
5466 rbd_dev->header.object_prefix = NULL;
5467 } else {
5468 dout(" object_prefix = %s\n", rbd_dev->header.object_prefix);
5469 }
Alex Elder1e130192012-07-03 16:01:19 -05005470out:
5471 kfree(reply_buf);
5472
5473 return ret;
5474}
5475
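/*
 * Fetch the feature bits for an image snapshot (or for the base image
 * if snap_id is CEPH_NOSNAP) via the "get_features" class method.
 * Fails with -ENXIO if the image uses incompatible features that this
 * driver does not support.
 */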
Alex Elderb1b54022012-07-03 16:01:19 -05005476static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
Ilya Dryomov196e2d62019-11-05 15:38:46 +01005477 bool read_only, u64 *snap_features)
Alex Elderb1b54022012-07-03 16:01:19 -05005478{
Ilya Dryomov196e2d62019-11-05 15:38:46 +01005479 struct {
5480 __le64 snap_id;
5481 u8 read_only;
5482 } features_in;
Alex Elderb1b54022012-07-03 16:01:19 -05005483 struct {
5484 __le64 features;
5485 __le64 incompat;
Alex Elder41579762013-04-21 12:14:45 -05005486 } __attribute__ ((packed)) features_buf = { 0 };
Ilya Dryomovd3767f02016-04-13 14:15:50 +02005487 u64 unsup;
Alex Elderb1b54022012-07-03 16:01:19 -05005488 int ret;
5489
Ilya Dryomov196e2d62019-11-05 15:38:46 +01005490 features_in.snap_id = cpu_to_le64(snap_id);
5491 features_in.read_only = read_only;
5492
Ilya Dryomovecd4a682017-01-25 18:16:21 +01005493 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
5494 &rbd_dev->header_oloc, "get_features",
Ilya Dryomov196e2d62019-11-05 15:38:46 +01005495 &features_in, sizeof(features_in),
Ilya Dryomovecd4a682017-01-25 18:16:21 +01005496 &features_buf, sizeof(features_buf));
Alex Elder36be9a72013-01-19 00:30:28 -06005497 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
Alex Elderb1b54022012-07-03 16:01:19 -05005498 if (ret < 0)
5499 return ret;
Alex Elder57385b52013-04-21 12:14:45 -05005500 if (ret < sizeof (features_buf))
5501 return -ERANGE;
Alex Elderd8891402012-10-09 13:50:17 -07005502
Ilya Dryomovd3767f02016-04-13 14:15:50 +02005503 unsup = le64_to_cpu(features_buf.incompat) & ~RBD_FEATURES_SUPPORTED;
5504 if (unsup) {
5505 rbd_warn(rbd_dev, "image uses unsupported features: 0x%llx",
5506 unsup);
Alex Elderb8f5c6e2012-11-01 08:39:26 -05005507 return -ENXIO;
Ilya Dryomovd3767f02016-04-13 14:15:50 +02005508 }
Alex Elderd8891402012-10-09 13:50:17 -07005509
Alex Elderb1b54022012-07-03 16:01:19 -05005510 *snap_features = le64_to_cpu(features_buf.features);
5511
5512 dout(" snap_id 0x%016llx features = 0x%016llx incompat = 0x%016llx\n",
Alex Elder57385b52013-04-21 12:14:45 -05005513 (unsigned long long)snap_id,
5514 (unsigned long long)*snap_features,
5515 (unsigned long long)le64_to_cpu(features_buf.incompat));
Alex Elderb1b54022012-07-03 16:01:19 -05005516
5517 return 0;
5518}
5519
5520static int rbd_dev_v2_features(struct rbd_device *rbd_dev)
5521{
5522 return _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
Ilya Dryomov196e2d62019-11-05 15:38:46 +01005523 rbd_is_ro(rbd_dev),
5524 &rbd_dev->header.features);
Alex Elderb1b54022012-07-03 16:01:19 -05005525}
5526
Ilya Dryomov22e8bd52019-06-05 19:25:11 +02005527/*
5528 * These are generic image flags, but since they are used only for
5529 * object map, store them in rbd_dev->object_map_flags.
5530 *
5531 * For the same reason, this function is called only on object map
5532 * (re)load and not on header refresh.
5533 */
5534static int rbd_dev_v2_get_flags(struct rbd_device *rbd_dev)
5535{
5536 __le64 snapid = cpu_to_le64(rbd_dev->spec->snap_id);
5537 __le64 flags;
5538 int ret;
5539
5540 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
5541 &rbd_dev->header_oloc, "get_flags",
5542 &snapid, sizeof(snapid),
5543 &flags, sizeof(flags));
5544 if (ret < 0)
5545 return ret;
5546 if (ret < sizeof(flags))
5547 return -EBADMSG;
5548
5549 rbd_dev->object_map_flags = le64_to_cpu(flags);
5550 return 0;
5551}
5552
Ilya Dryomoveb3b2d62018-08-22 17:11:27 +02005553struct parent_image_info {
5554 u64 pool_id;
Ilya Dryomove92c0ea2018-08-22 17:26:10 +02005555 const char *pool_ns;
Ilya Dryomoveb3b2d62018-08-22 17:11:27 +02005556 const char *image_id;
5557 u64 snap_id;
5558
Ilya Dryomove92c0ea2018-08-22 17:26:10 +02005559 bool has_overlap;
Ilya Dryomoveb3b2d62018-08-22 17:11:27 +02005560 u64 overlap;
5561};
5562
5563/*
5564 * The caller is responsible for freeing the strings in @pii.
5565 */
Ilya Dryomove92c0ea2018-08-22 17:26:10 +02005566static int decode_parent_image_spec(void **p, void *end,
5567 struct parent_image_info *pii)
5568{
5569 u8 struct_v;
5570 u32 struct_len;
5571 int ret;
5572
5573 ret = ceph_start_decoding(p, end, 1, "ParentImageSpec",
5574 &struct_v, &struct_len);
5575 if (ret)
5576 return ret;
5577
5578 ceph_decode_64_safe(p, end, pii->pool_id, e_inval);
5579 pii->pool_ns = ceph_extract_encoded_string(p, end, NULL, GFP_KERNEL);
5580 if (IS_ERR(pii->pool_ns)) {
5581 ret = PTR_ERR(pii->pool_ns);
5582 pii->pool_ns = NULL;
5583 return ret;
5584 }
5585 pii->image_id = ceph_extract_encoded_string(p, end, NULL, GFP_KERNEL);
5586 if (IS_ERR(pii->image_id)) {
5587 ret = PTR_ERR(pii->image_id);
5588 pii->image_id = NULL;
5589 return ret;
5590 }
5591 ceph_decode_64_safe(p, end, pii->snap_id, e_inval);
5592 return 0;
5593
5594e_inval:
5595 return -EINVAL;
5596}
5597
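/*
 * Fetch the parent spec and overlap using the "parent_get" and
 * "parent_overlap_get" class methods.  Returns 1 if the OSD does not
 * support them so that the caller can fall back to the legacy
 * "get_parent" method.
 */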
5598static int __get_parent_info(struct rbd_device *rbd_dev,
5599 struct page *req_page,
5600 struct page *reply_page,
5601 struct parent_image_info *pii)
5602{
5603 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
5604 size_t reply_len = PAGE_SIZE;
5605 void *p, *end;
5606 int ret;
5607
5608 ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
5609 "rbd", "parent_get", CEPH_OSD_FLAG_READ,
Ilya Dryomov68ada912019-06-14 18:16:51 +02005610 req_page, sizeof(u64), &reply_page, &reply_len);
Ilya Dryomove92c0ea2018-08-22 17:26:10 +02005611 if (ret)
5612 return ret == -EOPNOTSUPP ? 1 : ret;
5613
5614 p = page_address(reply_page);
5615 end = p + reply_len;
5616 ret = decode_parent_image_spec(&p, end, pii);
5617 if (ret)
5618 return ret;
5619
5620 ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
5621 "rbd", "parent_overlap_get", CEPH_OSD_FLAG_READ,
Ilya Dryomov68ada912019-06-14 18:16:51 +02005622 req_page, sizeof(u64), &reply_page, &reply_len);
Ilya Dryomove92c0ea2018-08-22 17:26:10 +02005623 if (ret)
5624 return ret;
5625
5626 p = page_address(reply_page);
5627 end = p + reply_len;
5628 ceph_decode_8_safe(&p, end, pii->has_overlap, e_inval);
5629 if (pii->has_overlap)
5630 ceph_decode_64_safe(&p, end, pii->overlap, e_inval);
5631
5632 return 0;
5633
5634e_inval:
5635 return -EINVAL;
5636}
5637
5638/*
5639 * The caller is responsible for freeing the strings in @pii.
5640 */
Ilya Dryomoveb3b2d62018-08-22 17:11:27 +02005641static int __get_parent_info_legacy(struct rbd_device *rbd_dev,
5642 struct page *req_page,
5643 struct page *reply_page,
5644 struct parent_image_info *pii)
5645{
5646 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
5647 size_t reply_len = PAGE_SIZE;
5648 void *p, *end;
5649 int ret;
5650
5651 ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
5652 "rbd", "get_parent", CEPH_OSD_FLAG_READ,
Ilya Dryomov68ada912019-06-14 18:16:51 +02005653 req_page, sizeof(u64), &reply_page, &reply_len);
Ilya Dryomoveb3b2d62018-08-22 17:11:27 +02005654 if (ret)
5655 return ret;
5656
5657 p = page_address(reply_page);
5658 end = p + reply_len;
5659 ceph_decode_64_safe(&p, end, pii->pool_id, e_inval);
5660 pii->image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
5661 if (IS_ERR(pii->image_id)) {
5662 ret = PTR_ERR(pii->image_id);
5663 pii->image_id = NULL;
5664 return ret;
5665 }
5666 ceph_decode_64_safe(&p, end, pii->snap_id, e_inval);
Ilya Dryomove92c0ea2018-08-22 17:26:10 +02005667 pii->has_overlap = true;
Ilya Dryomoveb3b2d62018-08-22 17:11:27 +02005668 ceph_decode_64_safe(&p, end, pii->overlap, e_inval);
5669
5670 return 0;
5671
5672e_inval:
5673 return -EINVAL;
5674}
5675
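/*
 * Query the parent image spec and overlap, preferring the newer
 * "parent_get" interface and falling back to "get_parent" on older
 * OSDs.  On success the caller owns the strings in @pii.
 */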
5676static int get_parent_info(struct rbd_device *rbd_dev,
5677 struct parent_image_info *pii)
5678{
5679 struct page *req_page, *reply_page;
5680 void *p;
5681 int ret;
5682
5683 req_page = alloc_page(GFP_KERNEL);
5684 if (!req_page)
5685 return -ENOMEM;
5686
5687 reply_page = alloc_page(GFP_KERNEL);
5688 if (!reply_page) {
5689 __free_page(req_page);
5690 return -ENOMEM;
5691 }
5692
5693 p = page_address(req_page);
5694 ceph_encode_64(&p, rbd_dev->spec->snap_id);
Ilya Dryomove92c0ea2018-08-22 17:26:10 +02005695 ret = __get_parent_info(rbd_dev, req_page, reply_page, pii);
5696 if (ret > 0)
5697 ret = __get_parent_info_legacy(rbd_dev, req_page, reply_page,
5698 pii);
Ilya Dryomoveb3b2d62018-08-22 17:11:27 +02005699
5700 __free_page(req_page);
5701 __free_page(reply_page);
5702 return ret;
5703}
5704
Alex Elder86b00e02012-10-25 23:34:42 -05005705static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
5706{
5707 struct rbd_spec *parent_spec;
Ilya Dryomoveb3b2d62018-08-22 17:11:27 +02005708 struct parent_image_info pii = { 0 };
Alex Elder86b00e02012-10-25 23:34:42 -05005709 int ret;
5710
5711 parent_spec = rbd_spec_alloc();
5712 if (!parent_spec)
5713 return -ENOMEM;
5714
Ilya Dryomoveb3b2d62018-08-22 17:11:27 +02005715 ret = get_parent_info(rbd_dev, &pii);
5716 if (ret)
Alex Elder86b00e02012-10-25 23:34:42 -05005717 goto out_err;
5718
Ilya Dryomove92c0ea2018-08-22 17:26:10 +02005719 dout("%s pool_id %llu pool_ns %s image_id %s snap_id %llu has_overlap %d overlap %llu\n",
5720 __func__, pii.pool_id, pii.pool_ns, pii.image_id, pii.snap_id,
5721 pii.has_overlap, pii.overlap);
Ilya Dryomoveb3b2d62018-08-22 17:11:27 +02005722
Ilya Dryomove92c0ea2018-08-22 17:26:10 +02005723 if (pii.pool_id == CEPH_NOPOOL || !pii.has_overlap) {
Alex Elder392a9da2013-05-06 17:40:33 -05005724 /*
5725 * Either the parent never existed, or we have
5726 * a record of it but the image got flattened so it no
5727 * longer has a parent. When the parent of a
5728 * layered image disappears we immediately set the
5729 * overlap to 0. The effect of this is that all new
5730 * requests will be treated as if the image had no
5731 * parent.
Ilya Dryomove92c0ea2018-08-22 17:26:10 +02005732 *
5733 * If !pii.has_overlap, the parent image spec is not
5734 * applicable. It's there to avoid duplication in each
5735 * snapshot record.
Alex Elder392a9da2013-05-06 17:40:33 -05005736 */
5737 if (rbd_dev->parent_overlap) {
5738 rbd_dev->parent_overlap = 0;
Alex Elder392a9da2013-05-06 17:40:33 -05005739 rbd_dev_parent_put(rbd_dev);
5740 pr_info("%s: clone image has been flattened\n",
5741 rbd_dev->disk->disk_name);
5742 }
5743
Alex Elder86b00e02012-10-25 23:34:42 -05005744 goto out; /* No parent? No problem. */
Alex Elder392a9da2013-05-06 17:40:33 -05005745 }
Alex Elder86b00e02012-10-25 23:34:42 -05005746
Alex Elder0903e872012-11-14 12:25:19 -06005747 /* The ceph file layout needs to fit pool id in 32 bits */
5748
5749 ret = -EIO;
Ilya Dryomoveb3b2d62018-08-22 17:11:27 +02005750 if (pii.pool_id > (u64)U32_MAX) {
Ilya Dryomov9584d502014-07-11 12:11:20 +04005751 rbd_warn(NULL, "parent pool id too large (%llu > %u)",
Ilya Dryomoveb3b2d62018-08-22 17:11:27 +02005752 (unsigned long long)pii.pool_id, U32_MAX);
Alex Elder57385b52013-04-21 12:14:45 -05005753 goto out_err;
Alex Elderc0cd10db2013-04-26 09:43:47 -05005754 }
Alex Elder0903e872012-11-14 12:25:19 -06005755
Alex Elder3b5cf2a2013-05-29 11:18:59 -05005756 /*
5757 * The parent won't change (except when the clone is
5758 * flattened, which is already handled above).  So we only need
5759 * to record the parent spec if we have not already done so.
5760 */
5761 if (!rbd_dev->parent_spec) {
Ilya Dryomoveb3b2d62018-08-22 17:11:27 +02005762 parent_spec->pool_id = pii.pool_id;
Ilya Dryomove92c0ea2018-08-22 17:26:10 +02005763 if (pii.pool_ns && *pii.pool_ns) {
5764 parent_spec->pool_ns = pii.pool_ns;
5765 pii.pool_ns = NULL;
5766 }
Ilya Dryomoveb3b2d62018-08-22 17:11:27 +02005767 parent_spec->image_id = pii.image_id;
5768 pii.image_id = NULL;
5769 parent_spec->snap_id = pii.snap_id;
Ilya Dryomovb26c0472018-07-03 15:28:43 +02005770
Alex Elder70cf49c2013-05-06 17:40:33 -05005771 rbd_dev->parent_spec = parent_spec;
5772 parent_spec = NULL; /* rbd_dev now owns this */
Alex Elder3b5cf2a2013-05-29 11:18:59 -05005773 }
5774
5775 /*
Ilya Dryomovcf32bd92015-01-19 22:57:39 +03005776 * We always update the parent overlap. If it's zero we issue
5777 * a warning, as we will proceed as if there was no parent.
Alex Elder3b5cf2a2013-05-29 11:18:59 -05005778 */
Ilya Dryomoveb3b2d62018-08-22 17:11:27 +02005779 if (!pii.overlap) {
Alex Elder3b5cf2a2013-05-29 11:18:59 -05005780 if (parent_spec) {
Ilya Dryomovcf32bd92015-01-19 22:57:39 +03005781 /* refresh, careful to warn just once */
5782 if (rbd_dev->parent_overlap)
5783 rbd_warn(rbd_dev,
5784 "clone now standalone (overlap became 0)");
Alex Elder3b5cf2a2013-05-29 11:18:59 -05005785 } else {
Ilya Dryomovcf32bd92015-01-19 22:57:39 +03005786 /* initial probe */
5787 rbd_warn(rbd_dev, "clone is standalone (overlap 0)");
Alex Elder3b5cf2a2013-05-29 11:18:59 -05005788 }
Alex Elder70cf49c2013-05-06 17:40:33 -05005789 }
Ilya Dryomoveb3b2d62018-08-22 17:11:27 +02005790 rbd_dev->parent_overlap = pii.overlap;
Ilya Dryomovcf32bd92015-01-19 22:57:39 +03005791
Alex Elder86b00e02012-10-25 23:34:42 -05005792out:
5793 ret = 0;
5794out_err:
Ilya Dryomove92c0ea2018-08-22 17:26:10 +02005795 kfree(pii.pool_ns);
Ilya Dryomoveb3b2d62018-08-22 17:11:27 +02005796 kfree(pii.image_id);
Alex Elder86b00e02012-10-25 23:34:42 -05005797 rbd_spec_put(parent_spec);
Alex Elder86b00e02012-10-25 23:34:42 -05005798 return ret;
5799}
5800
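/*
 * Fetch the stripe unit and stripe count via the
 * "get_stripe_unit_count" class method and record them in the header.
 */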
Alex Eldercc070d52013-04-21 12:14:45 -05005801static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev)
5802{
5803 struct {
5804 __le64 stripe_unit;
5805 __le64 stripe_count;
5806 } __attribute__ ((packed)) striping_info_buf = { 0 };
5807 size_t size = sizeof (striping_info_buf);
5808 void *p;
Alex Eldercc070d52013-04-21 12:14:45 -05005809 int ret;
5810
Ilya Dryomovecd4a682017-01-25 18:16:21 +01005811 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
5812 &rbd_dev->header_oloc, "get_stripe_unit_count",
5813 NULL, 0, &striping_info_buf, size);
Alex Eldercc070d52013-04-21 12:14:45 -05005814 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
5815 if (ret < 0)
5816 return ret;
5817 if (ret < size)
5818 return -ERANGE;
5819
Alex Eldercc070d52013-04-21 12:14:45 -05005820 p = &striping_info_buf;
Ilya Dryomovb1331852018-02-07 12:09:12 +01005821 rbd_dev->header.stripe_unit = ceph_decode_64(&p);
5822 rbd_dev->header.stripe_count = ceph_decode_64(&p);
Alex Eldercc070d52013-04-21 12:14:45 -05005823 return 0;
5824}
5825
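/*
 * Fetch the id of the separate data pool via the "get_data_pool" class
 * method and record it in the header.
 */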
Ilya Dryomov7e973322017-01-25 18:16:22 +01005826static int rbd_dev_v2_data_pool(struct rbd_device *rbd_dev)
5827{
5828 __le64 data_pool_id;
5829 int ret;
5830
5831 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
5832 &rbd_dev->header_oloc, "get_data_pool",
5833 NULL, 0, &data_pool_id, sizeof(data_pool_id));
5834 if (ret < 0)
5835 return ret;
5836 if (ret < sizeof(data_pool_id))
5837 return -EBADMSG;
5838
5839 rbd_dev->header.data_pool_id = le64_to_cpu(data_pool_id);
5840 WARN_ON(rbd_dev->header.data_pool_id == CEPH_NOPOOL);
5841 return 0;
5842}
5843
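/*
 * Look up the image name for rbd_dev's image id by calling
 * "dir_get_name" on the pool's rbd directory object.  Returns a
 * dynamically-allocated name, or NULL if it could not be determined.
 */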
Alex Elder9e15b772012-10-30 19:40:33 -05005844static char *rbd_dev_image_name(struct rbd_device *rbd_dev)
5845{
Ilya Dryomovecd4a682017-01-25 18:16:21 +01005846 CEPH_DEFINE_OID_ONSTACK(oid);
Alex Elder9e15b772012-10-30 19:40:33 -05005847 size_t image_id_size;
5848 char *image_id;
5849 void *p;
5850 void *end;
5851 size_t size;
5852 void *reply_buf = NULL;
5853 size_t len = 0;
5854 char *image_name = NULL;
5855 int ret;
5856
5857 rbd_assert(!rbd_dev->spec->image_name);
5858
Alex Elder69e7a022012-11-01 08:39:26 -05005859 len = strlen(rbd_dev->spec->image_id);
5860 image_id_size = sizeof (__le32) + len;
Alex Elder9e15b772012-10-30 19:40:33 -05005861 image_id = kmalloc(image_id_size, GFP_KERNEL);
5862 if (!image_id)
5863 return NULL;
5864
5865 p = image_id;
Alex Elder41579762013-04-21 12:14:45 -05005866 end = image_id + image_id_size;
Alex Elder57385b52013-04-21 12:14:45 -05005867 ceph_encode_string(&p, end, rbd_dev->spec->image_id, (u32)len);
Alex Elder9e15b772012-10-30 19:40:33 -05005868
5869 size = sizeof (__le32) + RBD_IMAGE_NAME_LEN_MAX;
5870 reply_buf = kmalloc(size, GFP_KERNEL);
5871 if (!reply_buf)
5872 goto out;
5873
Ilya Dryomovecd4a682017-01-25 18:16:21 +01005874 ceph_oid_printf(&oid, "%s", RBD_DIRECTORY);
5875 ret = rbd_obj_method_sync(rbd_dev, &oid, &rbd_dev->header_oloc,
5876 "dir_get_name", image_id, image_id_size,
5877 reply_buf, size);
Alex Elder9e15b772012-10-30 19:40:33 -05005878 if (ret < 0)
5879 goto out;
5880 p = reply_buf;
Alex Elderf40eb342013-04-25 15:09:42 -05005881 end = reply_buf + ret;
5882
Alex Elder9e15b772012-10-30 19:40:33 -05005883 image_name = ceph_extract_encoded_string(&p, end, &len, GFP_KERNEL);
5884 if (IS_ERR(image_name))
5885 image_name = NULL;
5886 else
5887 dout("%s: name is %s len is %zd\n", __func__, image_name, len);
5888out:
5889 kfree(reply_buf);
5890 kfree(image_id);
5891
5892 return image_name;
5893}
5894
Alex Elder2ad3d712013-04-30 00:44:33 -05005895static u64 rbd_v1_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
5896{
5897 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
5898 const char *snap_name;
5899 u32 which = 0;
5900
5901 /* Skip over names until we find the one we are looking for */
5902
5903 snap_name = rbd_dev->header.snap_names;
5904 while (which < snapc->num_snaps) {
5905 if (!strcmp(name, snap_name))
5906 return snapc->snaps[which];
5907 snap_name += strlen(snap_name) + 1;
5908 which++;
5909 }
5910 return CEPH_NOSNAP;
5911}
5912
5913static u64 rbd_v2_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
5914{
5915 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
5916 u32 which;
5917 bool found = false;
5918 u64 snap_id;
5919
5920 for (which = 0; !found && which < snapc->num_snaps; which++) {
5921 const char *snap_name;
5922
5923 snap_id = snapc->snaps[which];
5924 snap_name = rbd_dev_v2_snap_name(rbd_dev, snap_id);
Josh Durginefadc982013-08-29 19:16:42 -07005925 if (IS_ERR(snap_name)) {
5926 /* ignore no-longer existing snapshots */
5927 if (PTR_ERR(snap_name) == -ENOENT)
5928 continue;
5929 else
5930 break;
5931 }
Alex Elder2ad3d712013-04-30 00:44:33 -05005932 found = !strcmp(name, snap_name);
5933 kfree(snap_name);
5934 }
5935 return found ? snap_id : CEPH_NOSNAP;
5936}
5937
5938/*
5939 * Assumes name is never RBD_SNAP_HEAD_NAME; returns CEPH_NOSNAP if
5940 * no snapshot by that name is found, or if an error occurs.
5941 */
5942static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
5943{
5944 if (rbd_dev->image_format == 1)
5945 return rbd_v1_snap_id_by_name(rbd_dev, name);
5946
5947 return rbd_v2_snap_id_by_name(rbd_dev, name);
5948}
5949
Alex Elder9e15b772012-10-30 19:40:33 -05005950/*
Ilya Dryomov04077592014-07-23 17:11:20 +04005951 * An image being mapped will have everything but the snap id.
Alex Elder9e15b772012-10-30 19:40:33 -05005952 */
Ilya Dryomov04077592014-07-23 17:11:20 +04005953static int rbd_spec_fill_snap_id(struct rbd_device *rbd_dev)
5954{
5955 struct rbd_spec *spec = rbd_dev->spec;
5956
5957 rbd_assert(spec->pool_id != CEPH_NOPOOL && spec->pool_name);
5958 rbd_assert(spec->image_id && spec->image_name);
5959 rbd_assert(spec->snap_name);
5960
5961 if (strcmp(spec->snap_name, RBD_SNAP_HEAD_NAME)) {
5962 u64 snap_id;
5963
5964 snap_id = rbd_snap_id_by_name(rbd_dev, spec->snap_name);
5965 if (snap_id == CEPH_NOSNAP)
5966 return -ENOENT;
5967
5968 spec->snap_id = snap_id;
5969 } else {
5970 spec->snap_id = CEPH_NOSNAP;
5971 }
5972
5973 return 0;
5974}
5975
5976/*
5977 * A parent image will have all ids but none of the names.
5978 *
5979 * All names in an rbd spec are dynamically allocated. It's OK if we
5980 * can't figure out the name for an image id.
5981 */
5982static int rbd_spec_fill_names(struct rbd_device *rbd_dev)
Alex Elder9e15b772012-10-30 19:40:33 -05005983{
Alex Elder2e9f7f12013-04-26 09:43:48 -05005984 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
5985 struct rbd_spec *spec = rbd_dev->spec;
5986 const char *pool_name;
5987 const char *image_name;
5988 const char *snap_name;
Alex Elder9e15b772012-10-30 19:40:33 -05005989 int ret;
5990
Ilya Dryomov04077592014-07-23 17:11:20 +04005991 rbd_assert(spec->pool_id != CEPH_NOPOOL);
5992 rbd_assert(spec->image_id);
5993 rbd_assert(spec->snap_id != CEPH_NOSNAP);
Alex Elder9e15b772012-10-30 19:40:33 -05005994
Alex Elder2e9f7f12013-04-26 09:43:48 -05005995 /* Get the pool name; we have to make our own copy of this */
Alex Elder9e15b772012-10-30 19:40:33 -05005996
Alex Elder2e9f7f12013-04-26 09:43:48 -05005997 pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, spec->pool_id);
5998 if (!pool_name) {
5999 rbd_warn(rbd_dev, "no pool with id %llu", spec->pool_id);
Alex Elder935dc892012-11-01 10:17:15 -05006000 return -EIO;
6001 }
Alex Elder2e9f7f12013-04-26 09:43:48 -05006002 pool_name = kstrdup(pool_name, GFP_KERNEL);
6003 if (!pool_name)
Alex Elder9e15b772012-10-30 19:40:33 -05006004 return -ENOMEM;
6005
6006 /* Fetch the image name; tolerate failure here */
6007
Alex Elder2e9f7f12013-04-26 09:43:48 -05006008 image_name = rbd_dev_image_name(rbd_dev);
6009 if (!image_name)
Alex Elder06ecc6c2012-11-01 10:17:15 -05006010 rbd_warn(rbd_dev, "unable to get image name");
Alex Elder9e15b772012-10-30 19:40:33 -05006011
Ilya Dryomov04077592014-07-23 17:11:20 +04006012 /* Fetch the snapshot name */
Alex Elder9e15b772012-10-30 19:40:33 -05006013
Alex Elder2e9f7f12013-04-26 09:43:48 -05006014 snap_name = rbd_snap_name(rbd_dev, spec->snap_id);
Josh Durginda6a6b62013-09-04 17:57:31 -07006015 if (IS_ERR(snap_name)) {
6016 ret = PTR_ERR(snap_name);
Alex Elder9e15b772012-10-30 19:40:33 -05006017 goto out_err;
Alex Elder2e9f7f12013-04-26 09:43:48 -05006018 }
6019
6020 spec->pool_name = pool_name;
6021 spec->image_name = image_name;
6022 spec->snap_name = snap_name;
Alex Elder9e15b772012-10-30 19:40:33 -05006023
6024 return 0;
Ilya Dryomov04077592014-07-23 17:11:20 +04006025
Alex Elder9e15b772012-10-30 19:40:33 -05006026out_err:
Alex Elder2e9f7f12013-04-26 09:43:48 -05006027 kfree(image_name);
6028 kfree(pool_name);
Alex Elder9e15b772012-10-30 19:40:33 -05006029 return ret;
6030}
6031
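/*
 * Fetch the image's snapshot context (snapshot seq and ids) via the
 * "get_snapcontext" class method and install it in the header.
 */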
Alex Eldercc4a38bd2013-04-30 00:44:33 -05006032static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev)
Alex Elder35d489f2012-07-03 16:01:19 -05006033{
6034 size_t size;
6035 int ret;
6036 void *reply_buf;
6037 void *p;
6038 void *end;
6039 u64 seq;
6040 u32 snap_count;
6041 struct ceph_snap_context *snapc;
6042 u32 i;
6043
6044 /*
6045 * We'll need room for the seq value (maximum snapshot id),
6046 * snapshot count, and array of that many snapshot ids.
6047 * For now we have a fixed upper limit on the number we're
6048 * prepared to receive.
6049 */
6050 size = sizeof (__le64) + sizeof (__le32) +
6051 RBD_MAX_SNAP_COUNT * sizeof (__le64);
6052 reply_buf = kzalloc(size, GFP_KERNEL);
6053 if (!reply_buf)
6054 return -ENOMEM;
6055
Ilya Dryomovecd4a682017-01-25 18:16:21 +01006056 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
6057 &rbd_dev->header_oloc, "get_snapcontext",
6058 NULL, 0, reply_buf, size);
Alex Elder36be9a72013-01-19 00:30:28 -06006059 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
Alex Elder35d489f2012-07-03 16:01:19 -05006060 if (ret < 0)
6061 goto out;
6062
Alex Elder35d489f2012-07-03 16:01:19 -05006063 p = reply_buf;
Alex Elder57385b52013-04-21 12:14:45 -05006064 end = reply_buf + ret;
6065 ret = -ERANGE;
Alex Elder35d489f2012-07-03 16:01:19 -05006066 ceph_decode_64_safe(&p, end, seq, out);
6067 ceph_decode_32_safe(&p, end, snap_count, out);
6068
6069 /*
6070 * Make sure the reported number of snapshot ids wouldn't go
6071 * beyond the end of our buffer. But before checking that,
6072 * make sure the computed size of the snapshot context we
6073 * allocate is representable in a size_t.
6074 */
6075 if (snap_count > (SIZE_MAX - sizeof (struct ceph_snap_context))
6076 / sizeof (u64)) {
6077 ret = -EINVAL;
6078 goto out;
6079 }
6080 if (!ceph_has_room(&p, end, snap_count * sizeof (__le64)))
6081 goto out;
Alex Elder468521c2013-04-26 09:43:47 -05006082 ret = 0;
Alex Elder35d489f2012-07-03 16:01:19 -05006083
Alex Elder812164f82013-04-30 00:44:32 -05006084 snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
Alex Elder35d489f2012-07-03 16:01:19 -05006085 if (!snapc) {
6086 ret = -ENOMEM;
6087 goto out;
6088 }
Alex Elder35d489f2012-07-03 16:01:19 -05006089 snapc->seq = seq;
Alex Elder35d489f2012-07-03 16:01:19 -05006090 for (i = 0; i < snap_count; i++)
6091 snapc->snaps[i] = ceph_decode_64(&p);
6092
Alex Elder49ece552013-05-06 08:37:00 -05006093 ceph_put_snap_context(rbd_dev->header.snapc);
Alex Elder35d489f2012-07-03 16:01:19 -05006094 rbd_dev->header.snapc = snapc;
6095
6096 dout(" snap context seq = %llu, snap_count = %u\n",
Alex Elder57385b52013-04-21 12:14:45 -05006097 (unsigned long long)seq, (unsigned int)snap_count);
Alex Elder35d489f2012-07-03 16:01:19 -05006098out:
6099 kfree(reply_buf);
6100
Alex Elder57385b52013-04-21 12:14:45 -05006101 return ret;
Alex Elder35d489f2012-07-03 16:01:19 -05006102}
6103
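/*
 * Fetch the name of the snapshot with the given id via the
 * "get_snapshot_name" class method.  Returns a dynamically-allocated
 * string or an ERR_PTR().
 */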
Alex Elder54cac612013-04-30 00:44:33 -05006104static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
6105 u64 snap_id)
Alex Elderb8b1e2d2012-07-03 16:01:19 -05006106{
6107 size_t size;
6108 void *reply_buf;
Alex Elder54cac612013-04-30 00:44:33 -05006109 __le64 snapid;
Alex Elderb8b1e2d2012-07-03 16:01:19 -05006110 int ret;
6111 void *p;
6112 void *end;
Alex Elderb8b1e2d2012-07-03 16:01:19 -05006113 char *snap_name;
6114
6115 size = sizeof (__le32) + RBD_MAX_SNAP_NAME_LEN;
6116 reply_buf = kmalloc(size, GFP_KERNEL);
6117 if (!reply_buf)
6118 return ERR_PTR(-ENOMEM);
6119
Alex Elder54cac612013-04-30 00:44:33 -05006120 snapid = cpu_to_le64(snap_id);
Ilya Dryomovecd4a682017-01-25 18:16:21 +01006121 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
6122 &rbd_dev->header_oloc, "get_snapshot_name",
6123 &snapid, sizeof(snapid), reply_buf, size);
Alex Elder36be9a72013-01-19 00:30:28 -06006124 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
Alex Elderf40eb342013-04-25 15:09:42 -05006125 if (ret < 0) {
6126 snap_name = ERR_PTR(ret);
Alex Elderb8b1e2d2012-07-03 16:01:19 -05006127 goto out;
Alex Elderf40eb342013-04-25 15:09:42 -05006128 }
Alex Elderb8b1e2d2012-07-03 16:01:19 -05006129
6130 p = reply_buf;
Alex Elderf40eb342013-04-25 15:09:42 -05006131 end = reply_buf + ret;
Alex Eldere5c35532012-10-25 23:34:41 -05006132 snap_name = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
Alex Elderf40eb342013-04-25 15:09:42 -05006133 if (IS_ERR(snap_name))
Alex Elderb8b1e2d2012-07-03 16:01:19 -05006134 goto out;
Alex Elderb8b1e2d2012-07-03 16:01:19 -05006135
Alex Elderf40eb342013-04-25 15:09:42 -05006136 dout(" snap_id 0x%016llx snap_name = %s\n",
Alex Elder54cac612013-04-30 00:44:33 -05006137 (unsigned long long)snap_id, snap_name);
Alex Elderb8b1e2d2012-07-03 16:01:19 -05006138out:
6139 kfree(reply_buf);
6140
Alex Elderf40eb342013-04-25 15:09:42 -05006141 return snap_name;
Alex Elderb8b1e2d2012-07-03 16:01:19 -05006142}
6143
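/*
 * Refresh the format 2 image header: the size and snapshot context are
 * re-read on every call, while the immutable fields are filled in only
 * the first time through (see rbd_dev_v2_header_onetime()).
 */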
Alex Elder2df3fac2013-05-06 09:51:30 -05006144static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev)
Alex Elder117973f2012-08-31 17:29:55 -05006145{
Alex Elder2df3fac2013-05-06 09:51:30 -05006146 bool first_time = rbd_dev->header.object_prefix == NULL;
Alex Elder117973f2012-08-31 17:29:55 -05006147 int ret;
Alex Elder117973f2012-08-31 17:29:55 -05006148
Josh Durgin1617e402013-06-12 14:43:10 -07006149 ret = rbd_dev_v2_image_size(rbd_dev);
6150 if (ret)
Alex Eldercfbf6372013-05-31 17:40:45 -05006151 return ret;
Josh Durgin1617e402013-06-12 14:43:10 -07006152
Alex Elder2df3fac2013-05-06 09:51:30 -05006153 if (first_time) {
6154 ret = rbd_dev_v2_header_onetime(rbd_dev);
6155 if (ret)
Alex Eldercfbf6372013-05-31 17:40:45 -05006156 return ret;
Alex Elder2df3fac2013-05-06 09:51:30 -05006157 }
6158
Alex Eldercc4a38bd2013-04-30 00:44:33 -05006159 ret = rbd_dev_v2_snap_context(rbd_dev);
Ilya Dryomovd194cd12015-08-31 18:22:10 +03006160 if (ret && first_time) {
6161 kfree(rbd_dev->header.object_prefix);
6162 rbd_dev->header.object_prefix = NULL;
6163 }
Alex Elder117973f2012-08-31 17:29:55 -05006164
6165 return ret;
6166}
6167
Ilya Dryomova720ae02014-07-23 17:11:19 +04006168static int rbd_dev_header_info(struct rbd_device *rbd_dev)
6169{
6170 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
6171
6172 if (rbd_dev->image_format == 1)
6173 return rbd_dev_v1_header_info(rbd_dev);
6174
6175 return rbd_dev_v2_header_info(rbd_dev);
6176}
6177
Alex Elder1ddbe942012-01-29 13:57:44 -06006178/*
Alex Eldere28fff262012-02-02 08:13:30 -06006179 * Skips over white space at *buf, and updates *buf to point to the
6180 * first found non-space character (if any). Returns the length of
Alex Elder593a9e72012-02-07 12:03:37 -06006181 * the token (string of non-white space characters) found. Note
6182 * that *buf must be terminated with '\0'.
Alex Eldere28fff262012-02-02 08:13:30 -06006183 */
6184static inline size_t next_token(const char **buf)
6185{
6186 /*
6187 * These are the characters that produce nonzero for
6188 * isspace() in the "C" and "POSIX" locales.
6189 */
Colin Ian King435a1202021-11-27 17:21:04 +00006190 static const char spaces[] = " \f\n\r\t\v";
Alex Eldere28fff262012-02-02 08:13:30 -06006191
6192 *buf += strspn(*buf, spaces); /* Find start of token */
6193
6194 return strcspn(*buf, spaces); /* Return token length */
6195}
6196
6197/*
Alex Elderea3352f2012-07-09 21:04:23 -05006198 * Finds the next token in *buf, dynamically allocates a buffer big
6199 * enough to hold a copy of it, and copies the token into the new
6200 * buffer. The copy is guaranteed to be terminated with '\0'. Note
6201 * that a duplicate buffer is created even for a zero-length token.
6202 *
6203 * Returns a pointer to the newly-allocated duplicate, or a null
6204 * pointer if memory for the duplicate was not available. If
6205 * the lenp argument is a non-null pointer, the length of the token
6206 * (not including the '\0') is returned in *lenp.
6207 *
6208 * If successful, the *buf pointer will be updated to point beyond
6209 * the end of the found token.
6210 *
6211 * Note: uses GFP_KERNEL for allocation.
6212 */
6213static inline char *dup_token(const char **buf, size_t *lenp)
6214{
6215 char *dup;
6216 size_t len;
6217
6218 len = next_token(buf);
Alex Elder4caf35f2012-11-01 08:39:27 -05006219 dup = kmemdup(*buf, len + 1, GFP_KERNEL);
Alex Elderea3352f2012-07-09 21:04:23 -05006220 if (!dup)
6221 return NULL;
Alex Elderea3352f2012-07-09 21:04:23 -05006222 *(dup + len) = '\0';
6223 *buf += len;
6224
6225 if (lenp)
6226 *lenp = len;
6227
6228 return dup;
6229}
6230
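/*
 * Parse a single key[=value] mapping parameter.  Generic libceph
 * options are handled by ceph_parse_param(); everything else is
 * matched against rbd_parameters.
 */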
David Howells82995cc2019-03-25 16:38:32 +00006231static int rbd_parse_param(struct fs_parameter *param,
6232 struct rbd_parse_opts_ctx *pctx)
6233{
6234 struct rbd_options *opt = pctx->opts;
6235 struct fs_parse_result result;
Al Viro3fbb8d52019-12-20 23:43:32 -05006236 struct p_log log = {.prefix = "rbd"};
David Howells82995cc2019-03-25 16:38:32 +00006237 int token, ret;
6238
6239 ret = ceph_parse_param(param, pctx->copts, NULL);
6240 if (ret != -ENOPARAM)
6241 return ret;
6242
Al Virod7167b12019-09-07 07:23:15 -04006243 token = __fs_parse(&log, rbd_parameters, param, &result);
David Howells82995cc2019-03-25 16:38:32 +00006244 dout("%s fs_parse '%s' token %d\n", __func__, param->key, token);
6245 if (token < 0) {
Al Viro2c3f3dc2019-12-20 23:43:32 -05006246 if (token == -ENOPARAM)
6247 return inval_plog(&log, "Unknown parameter '%s'",
6248 param->key);
David Howells82995cc2019-03-25 16:38:32 +00006249 return token;
6250 }
6251
6252 switch (token) {
6253 case Opt_queue_depth:
6254 if (result.uint_32 < 1)
6255 goto out_of_range;
6256 opt->queue_depth = result.uint_32;
6257 break;
6258 case Opt_alloc_size:
6259 if (result.uint_32 < SECTOR_SIZE)
6260 goto out_of_range;
Al Viro2c3f3dc2019-12-20 23:43:32 -05006261 if (!is_power_of_2(result.uint_32))
6262 return inval_plog(&log, "alloc_size must be a power of 2");
David Howells82995cc2019-03-25 16:38:32 +00006263 opt->alloc_size = result.uint_32;
6264 break;
6265 case Opt_lock_timeout:
6266 /* 0 is "wait forever" (i.e. infinite timeout) */
6267 if (result.uint_32 > INT_MAX / 1000)
6268 goto out_of_range;
6269 opt->lock_timeout = msecs_to_jiffies(result.uint_32 * 1000);
6270 break;
6271 case Opt_pool_ns:
6272 kfree(pctx->spec->pool_ns);
6273 pctx->spec->pool_ns = param->string;
6274 param->string = NULL;
6275 break;
Ilya Dryomovdc1dad82020-05-29 20:51:23 +02006276 case Opt_compression_hint:
6277 switch (result.uint_32) {
6278 case Opt_compression_hint_none:
6279 opt->alloc_hint_flags &=
6280 ~(CEPH_OSD_ALLOC_HINT_FLAG_COMPRESSIBLE |
6281 CEPH_OSD_ALLOC_HINT_FLAG_INCOMPRESSIBLE);
6282 break;
6283 case Opt_compression_hint_compressible:
6284 opt->alloc_hint_flags |=
6285 CEPH_OSD_ALLOC_HINT_FLAG_COMPRESSIBLE;
6286 opt->alloc_hint_flags &=
6287 ~CEPH_OSD_ALLOC_HINT_FLAG_INCOMPRESSIBLE;
6288 break;
6289 case Opt_compression_hint_incompressible:
6290 opt->alloc_hint_flags |=
6291 CEPH_OSD_ALLOC_HINT_FLAG_INCOMPRESSIBLE;
6292 opt->alloc_hint_flags &=
6293 ~CEPH_OSD_ALLOC_HINT_FLAG_COMPRESSIBLE;
6294 break;
6295 default:
6296 BUG();
6297 }
6298 break;
David Howells82995cc2019-03-25 16:38:32 +00006299 case Opt_read_only:
6300 opt->read_only = true;
6301 break;
6302 case Opt_read_write:
6303 opt->read_only = false;
6304 break;
6305 case Opt_lock_on_read:
6306 opt->lock_on_read = true;
6307 break;
6308 case Opt_exclusive:
6309 opt->exclusive = true;
6310 break;
6311 case Opt_notrim:
6312 opt->trim = false;
6313 break;
6314 default:
6315 BUG();
6316 }
6317
6318 return 0;
6319
6320out_of_range:
Al Viro2c3f3dc2019-12-20 23:43:32 -05006321 return inval_plog(&log, "%s out of range", param->key);
David Howells82995cc2019-03-25 16:38:32 +00006322}
6323
6324/*
6325 * This duplicates most of generic_parse_monolithic(), untying it from
6326 * fs_context and skipping standard superblock and security options.
6327 */
6328static int rbd_parse_options(char *options, struct rbd_parse_opts_ctx *pctx)
6329{
6330 char *key;
6331 int ret = 0;
6332
6333 dout("%s '%s'\n", __func__, options);
6334 while ((key = strsep(&options, ",")) != NULL) {
6335 if (*key) {
6336 struct fs_parameter param = {
6337 .key = key,
Al Viro0f895892019-12-17 14:15:04 -05006338 .type = fs_value_is_flag,
David Howells82995cc2019-03-25 16:38:32 +00006339 };
6340 char *value = strchr(key, '=');
6341 size_t v_len = 0;
6342
6343 if (value) {
6344 if (value == key)
6345 continue;
6346 *value++ = 0;
6347 v_len = strlen(value);
David Howells82995cc2019-03-25 16:38:32 +00006348 param.string = kmemdup_nul(value, v_len,
6349 GFP_KERNEL);
6350 if (!param.string)
6351 return -ENOMEM;
Al Viro0f895892019-12-17 14:15:04 -05006352 param.type = fs_value_is_string;
David Howells82995cc2019-03-25 16:38:32 +00006353 }
6354 param.size = v_len;
6355
6356 ret = rbd_parse_param(&param, pctx);
6357 kfree(param.string);
6358 if (ret)
6359 break;
6360 }
6361 }
6362
6363 return ret;
6364}
6365
Alex Elderea3352f2012-07-09 21:04:23 -05006366/*
Alex Elder859c31d2012-10-25 23:34:42 -05006367 * Parse the options provided for an "rbd add" (i.e., rbd image
6368 * mapping) request. These arrive via a write to /sys/bus/rbd/add,
6369 * and the data written is passed here via a NUL-terminated buffer.
6370 * Returns 0 if successful or an error code otherwise.
Alex Elderd22f76e2012-07-12 10:46:35 -05006371 *
Alex Elder859c31d2012-10-25 23:34:42 -05006372 * The information extracted from these options is recorded in
6373 * the other parameters, each of which refers to a dynamically-allocated
6374 * structure:
6375 * ceph_opts
6376 * The address of a pointer that will refer to a ceph options
6377 * structure. Caller must release the returned pointer using
6378 * ceph_destroy_options() when it is no longer needed.
6379 * rbd_opts
6380 * Address of an rbd options pointer. Fully initialized by
6381 * this function; caller must release with kfree().
6382 * spec
6383 * Address of an rbd image specification pointer. Fully
6384 * initialized by this function based on parsed options.
6385 * Caller must release with rbd_spec_put().
6386 *
6387 * The options passed take this form:
6388 * <mon_addrs> <options> <pool_name> <image_name> [<snap_name>]
6389 * where:
6390 * <mon_addrs>
6391 * A comma-separated list of one or more monitor addresses.
6392 * A monitor address is an ip address, optionally followed
6393 * by a port number (separated by a colon).
6394 * I.e.: ip1[:port1][,ip2[:port2]...]
6395 * <options>
6396 * A comma-separated list of ceph and/or rbd options.
6397 * <pool_name>
6398 * The name of the rados pool containing the rbd image.
6399 * <image_name>
6400 * The name of the image in that pool to map.
6401 * <snap_name>
6402 * An optional snapshot name. If provided, the mapping will
6403 * present data from the image at the time that snapshot was
6404 * created. The image head is used if no snapshot name is
6405 * provided. Snapshot mappings are always read-only.
Alex Eldera725f65e2012-02-02 08:13:30 -06006406 */
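/*
 * A purely illustrative example (the monitor address, credentials, pool
 * and image name below are made up):
 *
 *   1.2.3.4:6789 name=admin,secret=<key> rbd foo -
 *
 * maps the head of image "foo" in pool "rbd"; replacing the trailing
 * "-" with a snapshot name maps that snapshot read-only.
 */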
Alex Elder859c31d2012-10-25 23:34:42 -05006407static int rbd_add_parse_args(const char *buf,
Alex Elderdc79b112012-10-25 23:34:41 -05006408 struct ceph_options **ceph_opts,
Alex Elder859c31d2012-10-25 23:34:42 -05006409 struct rbd_options **opts,
6410 struct rbd_spec **rbd_spec)
Alex Eldera725f65e2012-02-02 08:13:30 -06006411{
Alex Elderd22f76e2012-07-12 10:46:35 -05006412 size_t len;
Alex Elder859c31d2012-10-25 23:34:42 -05006413 char *options;
Alex Elder0ddebc02012-10-25 23:34:41 -05006414 const char *mon_addrs;
Alex Elderecb4dc222013-04-26 09:43:47 -05006415 char *snap_name;
Alex Elder0ddebc02012-10-25 23:34:41 -05006416 size_t mon_addrs_size;
David Howells82995cc2019-03-25 16:38:32 +00006417 struct rbd_parse_opts_ctx pctx = { 0 };
Alex Elderdc79b112012-10-25 23:34:41 -05006418 int ret;
Alex Eldere28fff262012-02-02 08:13:30 -06006419
6420 /* The first four tokens are required */
6421
Alex Elder7ef32142012-02-02 08:13:30 -06006422 len = next_token(&buf);
Alex Elder4fb5d6712012-11-01 10:17:15 -05006423 if (!len) {
6424 rbd_warn(NULL, "no monitor address(es) provided");
6425 return -EINVAL;
6426 }
Alex Elder0ddebc02012-10-25 23:34:41 -05006427 mon_addrs = buf;
David Howells82995cc2019-03-25 16:38:32 +00006428 mon_addrs_size = len;
Alex Elder7ef32142012-02-02 08:13:30 -06006429 buf += len;
Alex Eldera725f65e2012-02-02 08:13:30 -06006430
Alex Elderdc79b112012-10-25 23:34:41 -05006431 ret = -EINVAL;
Alex Elderf28e5652012-10-25 23:34:41 -05006432 options = dup_token(&buf, NULL);
6433 if (!options)
Alex Elderdc79b112012-10-25 23:34:41 -05006434 return -ENOMEM;
Alex Elder4fb5d6712012-11-01 10:17:15 -05006435 if (!*options) {
6436 rbd_warn(NULL, "no options provided");
6437 goto out_err;
6438 }
Alex Eldera725f65e2012-02-02 08:13:30 -06006439
Ilya Dryomovc3001562018-07-03 15:28:43 +02006440 pctx.spec = rbd_spec_alloc();
6441 if (!pctx.spec)
Alex Elderf28e5652012-10-25 23:34:41 -05006442 goto out_mem;
Alex Elder859c31d2012-10-25 23:34:42 -05006443
Ilya Dryomovc3001562018-07-03 15:28:43 +02006444 pctx.spec->pool_name = dup_token(&buf, NULL);
6445 if (!pctx.spec->pool_name)
Alex Elder859c31d2012-10-25 23:34:42 -05006446 goto out_mem;
Ilya Dryomovc3001562018-07-03 15:28:43 +02006447 if (!*pctx.spec->pool_name) {
Alex Elder4fb5d6712012-11-01 10:17:15 -05006448 rbd_warn(NULL, "no pool name provided");
6449 goto out_err;
6450 }
Alex Eldere28fff262012-02-02 08:13:30 -06006451
Ilya Dryomovc3001562018-07-03 15:28:43 +02006452 pctx.spec->image_name = dup_token(&buf, NULL);
6453 if (!pctx.spec->image_name)
Alex Elderf28e5652012-10-25 23:34:41 -05006454 goto out_mem;
Ilya Dryomovc3001562018-07-03 15:28:43 +02006455 if (!*pctx.spec->image_name) {
Alex Elder4fb5d6712012-11-01 10:17:15 -05006456 rbd_warn(NULL, "no image name provided");
6457 goto out_err;
6458 }
Alex Eldere28fff262012-02-02 08:13:30 -06006459
Alex Elderf28e5652012-10-25 23:34:41 -05006460 /*
6461 * Snapshot name is optional; default is to use "-"
6462 * (indicating the head/no snapshot).
6463 */
Alex Elder3feeb8942012-08-31 17:29:52 -05006464 len = next_token(&buf);
Alex Elder820a5f32012-07-09 21:04:24 -05006465 if (!len) {
Alex Elder3feeb8942012-08-31 17:29:52 -05006466 buf = RBD_SNAP_HEAD_NAME; /* No snapshot supplied */
6467 len = sizeof (RBD_SNAP_HEAD_NAME) - 1;
Alex Elderf28e5652012-10-25 23:34:41 -05006468 } else if (len > RBD_MAX_SNAP_NAME_LEN) {
Alex Elderdc79b112012-10-25 23:34:41 -05006469 ret = -ENAMETOOLONG;
Alex Elderf28e5652012-10-25 23:34:41 -05006470 goto out_err;
Alex Elder849b4262012-07-09 21:04:24 -05006471 }
Alex Elderecb4dc222013-04-26 09:43:47 -05006472 snap_name = kmemdup(buf, len + 1, GFP_KERNEL);
6473 if (!snap_name)
Alex Elderf28e5652012-10-25 23:34:41 -05006474 goto out_mem;
Alex Elderecb4dc222013-04-26 09:43:47 -05006475 *(snap_name + len) = '\0';
Ilya Dryomovc3001562018-07-03 15:28:43 +02006476 pctx.spec->snap_name = snap_name;
Alex Eldere5c35532012-10-25 23:34:41 -05006477
David Howells82995cc2019-03-25 16:38:32 +00006478 pctx.copts = ceph_alloc_options();
6479 if (!pctx.copts)
6480 goto out_mem;
6481
Alex Elder0ddebc02012-10-25 23:34:41 -05006482 /* Initialize all rbd options to the defaults */
Alex Eldere28fff262012-02-02 08:13:30 -06006483
Ilya Dryomovc3001562018-07-03 15:28:43 +02006484 pctx.opts = kzalloc(sizeof(*pctx.opts), GFP_KERNEL);
6485 if (!pctx.opts)
Alex Elder4e9afeba2012-10-25 23:34:41 -05006486 goto out_mem;
6487
Ilya Dryomovc3001562018-07-03 15:28:43 +02006488 pctx.opts->read_only = RBD_READ_ONLY_DEFAULT;
6489 pctx.opts->queue_depth = RBD_QUEUE_DEPTH_DEFAULT;
Ilya Dryomov0c93e1b2019-01-30 15:14:48 +01006490 pctx.opts->alloc_size = RBD_ALLOC_SIZE_DEFAULT;
Ilya Dryomovc3001562018-07-03 15:28:43 +02006491 pctx.opts->lock_timeout = RBD_LOCK_TIMEOUT_DEFAULT;
6492 pctx.opts->lock_on_read = RBD_LOCK_ON_READ_DEFAULT;
6493 pctx.opts->exclusive = RBD_EXCLUSIVE_DEFAULT;
6494 pctx.opts->trim = RBD_TRIM_DEFAULT;
Alex Elderd22f76e2012-07-12 10:46:35 -05006495
Venky Shankar2d7c86a2021-07-14 15:35:50 +05306496 ret = ceph_parse_mon_ips(mon_addrs, mon_addrs_size, pctx.copts, NULL,
6497 ',');
David Howells82995cc2019-03-25 16:38:32 +00006498 if (ret)
Alex Elderdc79b112012-10-25 23:34:41 -05006499 goto out_err;
Alex Elder859c31d2012-10-25 23:34:42 -05006500
David Howells82995cc2019-03-25 16:38:32 +00006501 ret = rbd_parse_options(options, &pctx);
6502 if (ret)
6503 goto out_err;
6504
6505 *ceph_opts = pctx.copts;
Ilya Dryomovc3001562018-07-03 15:28:43 +02006506 *opts = pctx.opts;
6507 *rbd_spec = pctx.spec;
David Howells82995cc2019-03-25 16:38:32 +00006508 kfree(options);
Alex Elderdc79b112012-10-25 23:34:41 -05006509 return 0;
David Howells82995cc2019-03-25 16:38:32 +00006510
Alex Elderf28e5652012-10-25 23:34:41 -05006511out_mem:
Alex Elderdc79b112012-10-25 23:34:41 -05006512 ret = -ENOMEM;
Alex Elderd22f76e2012-07-12 10:46:35 -05006513out_err:
Ilya Dryomovc3001562018-07-03 15:28:43 +02006514 kfree(pctx.opts);
David Howells82995cc2019-03-25 16:38:32 +00006515 ceph_destroy_options(pctx.copts);
Ilya Dryomovc3001562018-07-03 15:28:43 +02006516 rbd_spec_put(pctx.spec);
Alex Elderf28e5652012-10-25 23:34:41 -05006517 kfree(options);
Alex Elderdc79b112012-10-25 23:34:41 -05006518 return ret;
Alex Eldera725f65e2012-02-02 08:13:30 -06006519}
6520
Ilya Dryomove010dd02017-04-13 12:17:39 +02006521static void rbd_dev_image_unlock(struct rbd_device *rbd_dev)
6522{
6523 down_write(&rbd_dev->lock_rwsem);
6524 if (__rbd_is_lock_owner(rbd_dev))
Ilya Dryomove1fddc82019-05-30 16:07:48 +02006525 __rbd_release_lock(rbd_dev);
Ilya Dryomove010dd02017-04-13 12:17:39 +02006526 up_write(&rbd_dev->lock_rwsem);
6527}
6528
Ilya Dryomov637cd062019-06-06 17:14:49 +02006529/*
6530 * If the wait is interrupted, an error is returned even if the lock
6531 * was successfully acquired. rbd_dev_image_unlock() will release it
6532 * if needed.
6533 */
Ilya Dryomove010dd02017-04-13 12:17:39 +02006534static int rbd_add_acquire_lock(struct rbd_device *rbd_dev)
6535{
Ilya Dryomov637cd062019-06-06 17:14:49 +02006536 long ret;
Ilya Dryomov2f18d462018-04-04 10:15:38 +02006537
Ilya Dryomove010dd02017-04-13 12:17:39 +02006538 if (!(rbd_dev->header.features & RBD_FEATURE_EXCLUSIVE_LOCK)) {
Ilya Dryomov637cd062019-06-06 17:14:49 +02006539 if (!rbd_dev->opts->exclusive && !rbd_dev->opts->lock_on_read)
6540 return 0;
6541
Ilya Dryomove010dd02017-04-13 12:17:39 +02006542 rbd_warn(rbd_dev, "exclusive-lock feature is not enabled");
6543 return -EINVAL;
6544 }
6545
Ilya Dryomov3fe69922019-11-12 19:41:48 +01006546 if (rbd_is_ro(rbd_dev))
Ilya Dryomov637cd062019-06-06 17:14:49 +02006547 return 0;
6548
6549 rbd_assert(!rbd_is_lock_owner(rbd_dev));
6550 queue_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, 0);
6551 ret = wait_for_completion_killable_timeout(&rbd_dev->acquire_wait,
6552 ceph_timeout_jiffies(rbd_dev->opts->lock_timeout));
Dongsheng Yang25e6be22019-09-27 15:33:22 +00006553 if (ret > 0) {
Ilya Dryomov637cd062019-06-06 17:14:49 +02006554 ret = rbd_dev->acquire_err;
Dongsheng Yang25e6be22019-09-27 15:33:22 +00006555 } else {
6556 cancel_delayed_work_sync(&rbd_dev->lock_dwork);
6557 if (!ret)
6558 ret = -ETIMEDOUT;
6559 }
Ilya Dryomov637cd062019-06-06 17:14:49 +02006560
Ilya Dryomov2f18d462018-04-04 10:15:38 +02006561 if (ret) {
Ilya Dryomov637cd062019-06-06 17:14:49 +02006562 rbd_warn(rbd_dev, "failed to acquire exclusive lock: %ld", ret);
6563 return ret;
Ilya Dryomove010dd02017-04-13 12:17:39 +02006564 }
6565
Ilya Dryomov637cd062019-06-06 17:14:49 +02006566 /*
6567 * The lock may have been released by now, unless automatic lock
6568 * transitions are disabled.
6569 */
6570 rbd_assert(!rbd_dev->opts->exclusive || rbd_is_lock_owner(rbd_dev));
Ilya Dryomove010dd02017-04-13 12:17:39 +02006571 return 0;
6572}
6573
Ilya Dryomov30ba1f02014-05-13 11:19:27 +04006574/*
Alex Elder589d30e2012-07-10 20:30:11 -05006575 * An rbd format 2 image has a unique identifier, distinct from the
6576 * name given to it by the user. Internally, that identifier is
6577 * what's used to specify the names of objects related to the image.
6578 *
6579 * A special "rbd id" object is used to map an rbd image name to its
6580 * id. If that object doesn't exist, then there is no v2 rbd image
6581 * with the supplied name.
6582 *
6583 * This function will record the given rbd_dev's image_id field if
6584 * it can be determined, and in that case will return 0. If any
6585 * errors occur a negative errno will be returned and the rbd_dev's
6586 * image_id field will be unchanged (and should be NULL).
6587 */
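/*
 * The id object for image "foo" is named by prepending RBD_ID_PREFIX
 * to "foo" (see the ceph_oid_aprintf() call below); its "get_id" method
 * returns the image id as a single length-prefixed string.
 */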
6588static int rbd_dev_image_id(struct rbd_device *rbd_dev)
6589{
6590 int ret;
6591 size_t size;
Ilya Dryomovecd4a682017-01-25 18:16:21 +01006592 CEPH_DEFINE_OID_ONSTACK(oid);
Alex Elder589d30e2012-07-10 20:30:11 -05006593 void *response;
Alex Elderc0fba362013-04-25 23:15:08 -05006594 char *image_id;
Alex Elder2f82ee52012-10-30 19:40:33 -05006595
Alex Elder589d30e2012-07-10 20:30:11 -05006596 /*
Alex Elder2c0d0a12012-10-30 19:40:33 -05006597 * When probing a parent image, the image id is already
6598 * known (and the image name likely is not). There's no
Alex Elderc0fba362013-04-25 23:15:08 -05006599 * need to fetch the image id again in this case. We
6600 * do still need to set the image format though.
Alex Elder2c0d0a12012-10-30 19:40:33 -05006601 */
Alex Elderc0fba362013-04-25 23:15:08 -05006602 if (rbd_dev->spec->image_id) {
6603 rbd_dev->image_format = *rbd_dev->spec->image_id ? 2 : 1;
6604
Alex Elder2c0d0a12012-10-30 19:40:33 -05006605 return 0;
Alex Elderc0fba362013-04-25 23:15:08 -05006606 }
Alex Elder2c0d0a12012-10-30 19:40:33 -05006607
6608 /*
Alex Elder589d30e2012-07-10 20:30:11 -05006609 * First, see if the format 2 image id file exists, and if
6610 * so, get the image's persistent id from it.
6611 */
Ilya Dryomovecd4a682017-01-25 18:16:21 +01006612 ret = ceph_oid_aprintf(&oid, GFP_KERNEL, "%s%s", RBD_ID_PREFIX,
6613 rbd_dev->spec->image_name);
6614 if (ret)
6615 return ret;
6616
6617 dout("rbd id object name is %s\n", oid.name);
Alex Elder589d30e2012-07-10 20:30:11 -05006618
6619 /* Response will be an encoded string, which includes a length */
Alex Elder589d30e2012-07-10 20:30:11 -05006620 size = sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX;
6621 response = kzalloc(size, GFP_NOIO);
6622 if (!response) {
6623 ret = -ENOMEM;
6624 goto out;
6625 }
6626
Alex Elderc0fba362013-04-25 23:15:08 -05006627 /* If it doesn't exist we'll assume it's a format 1 image */
6628
Ilya Dryomovecd4a682017-01-25 18:16:21 +01006629 ret = rbd_obj_method_sync(rbd_dev, &oid, &rbd_dev->header_oloc,
6630 "get_id", NULL, 0,
Dongsheng Yang5435d2062019-08-09 07:05:27 +00006631 response, size);
Alex Elder36be9a72013-01-19 00:30:28 -06006632 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
Alex Elderc0fba362013-04-25 23:15:08 -05006633 if (ret == -ENOENT) {
6634 image_id = kstrdup("", GFP_KERNEL);
6635 ret = image_id ? 0 : -ENOMEM;
6636 if (!ret)
6637 rbd_dev->image_format = 1;
Ilya Dryomov7dd440c2014-09-11 18:49:18 +04006638 } else if (ret >= 0) {
Alex Elderc0fba362013-04-25 23:15:08 -05006639 void *p = response;
Alex Elder589d30e2012-07-10 20:30:11 -05006640
Alex Elderc0fba362013-04-25 23:15:08 -05006641 image_id = ceph_extract_encoded_string(&p, p + ret,
Alex Elder979ed482012-11-01 08:39:26 -05006642 NULL, GFP_NOIO);
Duan Jiong461f7582014-04-11 16:38:12 +08006643 ret = PTR_ERR_OR_ZERO(image_id);
Alex Elderc0fba362013-04-25 23:15:08 -05006644 if (!ret)
6645 rbd_dev->image_format = 2;
Alex Elderc0fba362013-04-25 23:15:08 -05006646 }
6647
6648 if (!ret) {
6649 rbd_dev->spec->image_id = image_id;
6650 dout("image_id is %s\n", image_id);
Alex Elder589d30e2012-07-10 20:30:11 -05006651 }
6652out:
6653 kfree(response);
Ilya Dryomovecd4a682017-01-25 18:16:21 +01006654 ceph_oid_destroy(&oid);
Alex Elder589d30e2012-07-10 20:30:11 -05006655 return ret;
6656}
6657
/*
 * Undo whatever state changes are made by a v1 or v2 header info
 * call.
 */
static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
{
	struct rbd_image_header *header;

	rbd_dev_parent_put(rbd_dev);
	rbd_object_map_free(rbd_dev);
	rbd_dev_mapping_clear(rbd_dev);

	/* Free dynamic fields from the header, then zero it out */

	header = &rbd_dev->header;
	ceph_put_snap_context(header->snapc);
	kfree(header->snap_sizes);
	kfree(header->snap_names);
	kfree(header->object_prefix);
	memset(header, 0, sizeof (*header));
}

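/*
 * Fetch the format 2 header fields that never change for the life of
 * the image: object prefix, features and, when the corresponding
 * features are enabled, the striping and data pool parameters.  The
 * layout is initialized from these at the end.
 */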
static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev)
{
	int ret;

	ret = rbd_dev_v2_object_prefix(rbd_dev);
	if (ret)
		goto out_err;

	/*
	 * Get and check features for the image.  Currently the
	 * features are assumed to never change.
	 */
	ret = rbd_dev_v2_features(rbd_dev);
	if (ret)
		goto out_err;

	/* If the image supports fancy striping, get its parameters */

	if (rbd_dev->header.features & RBD_FEATURE_STRIPINGV2) {
		ret = rbd_dev_v2_striping_info(rbd_dev);
		if (ret < 0)
			goto out_err;
	}

	if (rbd_dev->header.features & RBD_FEATURE_DATA_POOL) {
		ret = rbd_dev_v2_data_pool(rbd_dev);
		if (ret)
			goto out_err;
	}

	rbd_init_layout(rbd_dev);
	return 0;

out_err:
	rbd_dev->header.features = 0;
	kfree(rbd_dev->header.object_prefix);
	rbd_dev->header.object_prefix = NULL;
	return ret;
}

/*
 * @depth is rbd_dev_image_probe() -> rbd_dev_probe_parent() ->
 * rbd_dev_image_probe() recursion depth, which means it's also the
 * length of the already discovered part of the parent chain.
 */
static int rbd_dev_probe_parent(struct rbd_device *rbd_dev, int depth)
{
	struct rbd_device *parent = NULL;
	int ret;

	if (!rbd_dev->parent_spec)
		return 0;

	if (++depth > RBD_MAX_PARENT_CHAIN_LEN) {
		pr_info("parent chain is too long (%d)\n", depth);
		ret = -EINVAL;
		goto out_err;
	}

	parent = __rbd_dev_create(rbd_dev->rbd_client, rbd_dev->parent_spec);
	if (!parent) {
		ret = -ENOMEM;
		goto out_err;
	}

	/*
	 * Images related by parent/child relationships always share
	 * rbd_client and spec/parent_spec, so bump their refcounts.
	 */
	__rbd_get_client(rbd_dev->rbd_client);
	rbd_spec_get(rbd_dev->parent_spec);

	__set_bit(RBD_DEV_FLAG_READONLY, &parent->flags);

	ret = rbd_dev_image_probe(parent, depth);
	if (ret < 0)
		goto out_err;

	rbd_dev->parent = parent;
	atomic_set(&rbd_dev->parent_ref, 1);
	return 0;

out_err:
	rbd_dev_unparent(rbd_dev);
	rbd_dev_destroy(parent);
	return ret;
}

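/*
 * Undo rbd_dev_device_setup(): clear the EXISTS flag, free the disk
 * and, unless single_major is in use, give back the dynamically
 * allocated block device major.
 */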
static void rbd_dev_device_release(struct rbd_device *rbd_dev)
{
	clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
	rbd_free_disk(rbd_dev);
	if (!single_major)
		unregister_blkdev(rbd_dev->major, rbd_dev->name);
}

/*
 * rbd_dev->header_rwsem must be locked for write and will be unlocked
 * upon return.
 */
static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
{
	int ret;

	/* Record our major and minor device numbers. */

	if (!single_major) {
		ret = register_blkdev(0, rbd_dev->name);
		if (ret < 0)
			goto err_out_unlock;

		rbd_dev->major = ret;
		rbd_dev->minor = 0;
	} else {
		rbd_dev->major = rbd_major;
		rbd_dev->minor = rbd_dev_id_to_minor(rbd_dev->dev_id);
	}

	/* Set up the blkdev mapping. */

	ret = rbd_init_disk(rbd_dev);
	if (ret)
		goto err_out_blkdev;

	set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
	set_disk_ro(rbd_dev->disk, rbd_is_ro(rbd_dev));

	ret = dev_set_name(&rbd_dev->dev, "%d", rbd_dev->dev_id);
	if (ret)
		goto err_out_disk;

	set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
	up_write(&rbd_dev->header_rwsem);
	return 0;

err_out_disk:
	rbd_free_disk(rbd_dev);
err_out_blkdev:
	if (!single_major)
		unregister_blkdev(rbd_dev->major, rbd_dev->name);
err_out_unlock:
	up_write(&rbd_dev->header_rwsem);
	return ret;
}

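/*
 * Build the name of the header object: for a format 1 image it is the
 * image name followed by RBD_SUFFIX, for a format 2 image it is
 * RBD_HEADER_PREFIX followed by the image id.
 */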
static int rbd_dev_header_name(struct rbd_device *rbd_dev)
{
	struct rbd_spec *spec = rbd_dev->spec;
	int ret;

	/* Record the header object name for this rbd image. */

	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (rbd_dev->image_format == 1)
		ret = ceph_oid_aprintf(&rbd_dev->header_oid, GFP_KERNEL, "%s%s",
				       spec->image_name, RBD_SUFFIX);
	else
		ret = ceph_oid_aprintf(&rbd_dev->header_oid, GFP_KERNEL, "%s%s",
				       RBD_HEADER_PREFIX, spec->image_id);

	return ret;
}

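/* Log that the image (or, if @is_snap, the snapshot) does not exist. */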
static void rbd_print_dne(struct rbd_device *rbd_dev, bool is_snap)
{
	if (!is_snap) {
		pr_info("image %s/%s%s%s does not exist\n",
			rbd_dev->spec->pool_name,
			rbd_dev->spec->pool_ns ?: "",
			rbd_dev->spec->pool_ns ? "/" : "",
			rbd_dev->spec->image_name);
	} else {
		pr_info("snap %s/%s%s%s@%s does not exist\n",
			rbd_dev->spec->pool_name,
			rbd_dev->spec->pool_ns ?: "",
			rbd_dev->spec->pool_ns ? "/" : "",
			rbd_dev->spec->image_name,
			rbd_dev->spec->snap_name);
	}
}

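/*
 * Undo rbd_dev_image_probe(): drop the header watch (if one was
 * registered, i.e. the mapping is not read-only), then release
 * everything gathered during the probe.
 */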
static void rbd_dev_image_release(struct rbd_device *rbd_dev)
{
	if (!rbd_is_ro(rbd_dev))
		rbd_unregister_watch(rbd_dev);

	rbd_dev_unprobe(rbd_dev);
	rbd_dev->image_format = 0;
	kfree(rbd_dev->spec->image_id);
	rbd_dev->spec->image_id = NULL;
}

/*
 * Probe for the existence of the header object for the given rbd
 * device.  Unless the mapping is read-only (which includes parent
 * images), initiate a watch on its header object before using that
 * object to get detailed information about the rbd image.
 *
 * On success, returns with header_rwsem held for write if called
 * with @depth == 0.
 */
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth)
{
	bool need_watch = !rbd_is_ro(rbd_dev);
	int ret;

	/*
	 * Get the id from the image id object.  Unless there's an
	 * error, rbd_dev->spec->image_id will be filled in with
	 * a dynamically-allocated string, and rbd_dev->image_format
	 * will be set to either 1 or 2.
	 */
	ret = rbd_dev_image_id(rbd_dev);
	if (ret)
		return ret;

	ret = rbd_dev_header_name(rbd_dev);
	if (ret)
		goto err_out_format;

	if (need_watch) {
		ret = rbd_register_watch(rbd_dev);
		if (ret) {
			if (ret == -ENOENT)
				rbd_print_dne(rbd_dev, false);
			goto err_out_format;
		}
	}

	if (!depth)
		down_write(&rbd_dev->header_rwsem);

	ret = rbd_dev_header_info(rbd_dev);
	if (ret) {
		if (ret == -ENOENT && !need_watch)
			rbd_print_dne(rbd_dev, false);
		goto err_out_probe;
	}

	/*
	 * If this image is the one being mapped, we have pool name and
	 * id, image name and id, and snap name - need to fill snap id.
	 * Otherwise this is a parent image, identified by pool, image
	 * and snap ids - need to fill in names for those ids.
	 */
	if (!depth)
		ret = rbd_spec_fill_snap_id(rbd_dev);
	else
		ret = rbd_spec_fill_names(rbd_dev);
	if (ret) {
		if (ret == -ENOENT)
			rbd_print_dne(rbd_dev, true);
		goto err_out_probe;
	}

	ret = rbd_dev_mapping_set(rbd_dev);
	if (ret)
		goto err_out_probe;

	if (rbd_is_snap(rbd_dev) &&
	    (rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP)) {
		ret = rbd_object_map_load(rbd_dev);
		if (ret)
			goto err_out_probe;
	}

	if (rbd_dev->header.features & RBD_FEATURE_LAYERING) {
		ret = rbd_dev_v2_parent_info(rbd_dev);
		if (ret)
			goto err_out_probe;
	}

	ret = rbd_dev_probe_parent(rbd_dev, depth);
	if (ret)
		goto err_out_probe;

	dout("discovered format %u image, header name is %s\n",
	     rbd_dev->image_format, rbd_dev->header_oid.name);
	return 0;

err_out_probe:
	if (!depth)
		up_write(&rbd_dev->header_rwsem);
	if (need_watch)
		rbd_unregister_watch(rbd_dev);
	rbd_dev_unprobe(rbd_dev);
err_out_format:
	rbd_dev->image_format = 0;
	kfree(rbd_dev->spec->image_id);
	rbd_dev->spec->image_id = NULL;
	return ret;
}

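/*
 * Worker for the sysfs "add" and "add_single_major" attributes: parse
 * the mapping spec, connect to the cluster, probe the image, set up
 * the block device and finally announce the disk.  Returns @count on
 * success or a negative errno on failure.
 */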
static ssize_t do_rbd_add(struct bus_type *bus,
			  const char *buf,
			  size_t count)
{
	struct rbd_device *rbd_dev = NULL;
	struct ceph_options *ceph_opts = NULL;
	struct rbd_options *rbd_opts = NULL;
	struct rbd_spec *spec = NULL;
	struct rbd_client *rbdc;
	int rc;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!try_module_get(THIS_MODULE))
		return -ENODEV;

	/* parse add command */
	rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec);
	if (rc < 0)
		goto out;

	rbdc = rbd_get_client(ceph_opts);
	if (IS_ERR(rbdc)) {
		rc = PTR_ERR(rbdc);
		goto err_out_args;
	}

	/* pick the pool */
	rc = ceph_pg_poolid_by_name(rbdc->client->osdc.osdmap, spec->pool_name);
	if (rc < 0) {
		if (rc == -ENOENT)
			pr_info("pool %s does not exist\n", spec->pool_name);
		goto err_out_client;
	}
	spec->pool_id = (u64)rc;

	rbd_dev = rbd_dev_create(rbdc, spec, rbd_opts);
	if (!rbd_dev) {
		rc = -ENOMEM;
		goto err_out_client;
	}
	rbdc = NULL;		/* rbd_dev now owns this */
	spec = NULL;		/* rbd_dev now owns this */
	rbd_opts = NULL;	/* rbd_dev now owns this */

	/* if we are mapping a snapshot it will be a read-only mapping */
	if (rbd_dev->opts->read_only ||
	    strcmp(rbd_dev->spec->snap_name, RBD_SNAP_HEAD_NAME))
		__set_bit(RBD_DEV_FLAG_READONLY, &rbd_dev->flags);

	rbd_dev->config_info = kstrdup(buf, GFP_KERNEL);
	if (!rbd_dev->config_info) {
		rc = -ENOMEM;
		goto err_out_rbd_dev;
	}

	rc = rbd_dev_image_probe(rbd_dev, 0);
	if (rc < 0)
		goto err_out_rbd_dev;

	if (rbd_dev->opts->alloc_size > rbd_dev->layout.object_size) {
		rbd_warn(rbd_dev, "alloc_size adjusted to %u",
			 rbd_dev->layout.object_size);
		rbd_dev->opts->alloc_size = rbd_dev->layout.object_size;
	}

	rc = rbd_dev_device_setup(rbd_dev);
	if (rc)
		goto err_out_image_probe;

	rc = rbd_add_acquire_lock(rbd_dev);
	if (rc)
		goto err_out_image_lock;

	/* Everything's ready.  Announce the disk to the world. */

	rc = device_add(&rbd_dev->dev);
	if (rc)
		goto err_out_image_lock;

	rc = device_add_disk(&rbd_dev->dev, rbd_dev->disk, NULL);
	if (rc)
		goto err_out_cleanup_disk;

	spin_lock(&rbd_dev_list_lock);
	list_add_tail(&rbd_dev->node, &rbd_dev_list);
	spin_unlock(&rbd_dev_list_lock);

	pr_info("%s: capacity %llu features 0x%llx\n", rbd_dev->disk->disk_name,
		(unsigned long long)get_capacity(rbd_dev->disk) << SECTOR_SHIFT,
		rbd_dev->header.features);
	rc = count;
out:
	module_put(THIS_MODULE);
	return rc;

err_out_cleanup_disk:
	rbd_free_disk(rbd_dev);
err_out_image_lock:
	rbd_dev_image_unlock(rbd_dev);
	rbd_dev_device_release(rbd_dev);
err_out_image_probe:
	rbd_dev_image_release(rbd_dev);
err_out_rbd_dev:
	rbd_dev_destroy(rbd_dev);
err_out_client:
	rbd_put_client(rbdc);
err_out_args:
	rbd_spec_put(spec);
	kfree(rbd_opts);
	goto out;
}

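/* "add" is disabled when single_major is in effect; use "add_single_major". */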
static ssize_t add_store(struct bus_type *bus, const char *buf, size_t count)
{
	if (single_major)
		return -EINVAL;

	return do_rbd_add(bus, buf, count);
}

static ssize_t add_single_major_store(struct bus_type *bus, const char *buf,
				      size_t count)
{
	return do_rbd_add(bus, buf, count);
}

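/*
 * Tear down the parent chain from the bottom up: on each pass, walk to
 * the deepest parent (the one with no grandparent) and release it.
 */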
static void rbd_dev_remove_parent(struct rbd_device *rbd_dev)
{
	while (rbd_dev->parent) {
		struct rbd_device *first = rbd_dev;
		struct rbd_device *second = first->parent;
		struct rbd_device *third;

		/*
		 * Follow to the parent with no grandparent and
		 * remove it.
		 */
		while (second && (third = second->parent)) {
			first = second;
			second = third;
		}
		rbd_assert(second);
		rbd_dev_image_release(second);
		rbd_dev_destroy(second);
		first->parent = NULL;
		first->parent_overlap = 0;

		rbd_assert(first->parent_spec);
		rbd_spec_put(first->parent_spec);
		first->parent_spec = NULL;
	}
}

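/*
 * Worker for the sysfs "remove" and "remove_single_major" attributes.
 * The input is "<dev-id> [force]"; without "force" the removal fails
 * with -EBUSY if the device is still open.
 */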
static ssize_t do_rbd_remove(struct bus_type *bus,
			     const char *buf,
			     size_t count)
{
	struct rbd_device *rbd_dev = NULL;
	struct list_head *tmp;
	int dev_id;
	char opt_buf[6];
	bool force = false;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	dev_id = -1;
	opt_buf[0] = '\0';
	sscanf(buf, "%d %5s", &dev_id, opt_buf);
	if (dev_id < 0) {
		pr_err("dev_id out of range\n");
		return -EINVAL;
	}
	if (opt_buf[0] != '\0') {
		if (!strcmp(opt_buf, "force")) {
			force = true;
		} else {
			pr_err("bad remove option at '%s'\n", opt_buf);
			return -EINVAL;
		}
	}

	ret = -ENOENT;
	spin_lock(&rbd_dev_list_lock);
	list_for_each(tmp, &rbd_dev_list) {
		rbd_dev = list_entry(tmp, struct rbd_device, node);
		if (rbd_dev->dev_id == dev_id) {
			ret = 0;
			break;
		}
	}
	if (!ret) {
		spin_lock_irq(&rbd_dev->lock);
		if (rbd_dev->open_count && !force)
			ret = -EBUSY;
		else if (test_and_set_bit(RBD_DEV_FLAG_REMOVING,
					  &rbd_dev->flags))
			ret = -EINPROGRESS;
		spin_unlock_irq(&rbd_dev->lock);
	}
	spin_unlock(&rbd_dev_list_lock);
	if (ret)
		return ret;

	if (force) {
		/*
		 * Prevent new IO from being queued and wait for existing
		 * IO to complete/fail.
		 */
		blk_mq_freeze_queue(rbd_dev->disk->queue);
		blk_mark_disk_dead(rbd_dev->disk);
	}

	del_gendisk(rbd_dev->disk);
	spin_lock(&rbd_dev_list_lock);
	list_del_init(&rbd_dev->node);
	spin_unlock(&rbd_dev_list_lock);
	device_del(&rbd_dev->dev);

	rbd_dev_image_unlock(rbd_dev);
	rbd_dev_device_release(rbd_dev);
	rbd_dev_image_release(rbd_dev);
	rbd_dev_destroy(rbd_dev);
	return count;
}

static ssize_t remove_store(struct bus_type *bus, const char *buf, size_t count)
{
	if (single_major)
		return -EINVAL;

	return do_rbd_remove(bus, buf, count);
}

static ssize_t remove_single_major_store(struct bus_type *bus, const char *buf,
					 size_t count)
{
	return do_rbd_remove(bus, buf, count);
}

/*
 * create control files in sysfs
 * /sys/bus/rbd/...
 */
static int __init rbd_sysfs_init(void)
{
	int ret;

	ret = device_register(&rbd_root_dev);
	if (ret < 0)
		return ret;

	ret = bus_register(&rbd_bus_type);
	if (ret < 0)
		device_unregister(&rbd_root_dev);

	return ret;
}

static void __exit rbd_sysfs_cleanup(void)
{
	bus_unregister(&rbd_bus_type);
	device_unregister(&rbd_root_dev);
}

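/* Create the slab caches used for image and object requests. */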
static int __init rbd_slab_init(void)
{
	rbd_assert(!rbd_img_request_cache);
	rbd_img_request_cache = KMEM_CACHE(rbd_img_request, 0);
	if (!rbd_img_request_cache)
		return -ENOMEM;

	rbd_assert(!rbd_obj_request_cache);
	rbd_obj_request_cache = KMEM_CACHE(rbd_obj_request, 0);
	if (!rbd_obj_request_cache)
		goto out_err;

	return 0;

out_err:
	kmem_cache_destroy(rbd_img_request_cache);
	rbd_img_request_cache = NULL;
	return -ENOMEM;
}

static void rbd_slab_exit(void)
{
	rbd_assert(rbd_obj_request_cache);
	kmem_cache_destroy(rbd_obj_request_cache);
	rbd_obj_request_cache = NULL;

	rbd_assert(rbd_img_request_cache);
	kmem_cache_destroy(rbd_img_request_cache);
	rbd_img_request_cache = NULL;
}

static int __init rbd_init(void)
{
	int rc;

	if (!libceph_compatible(NULL)) {
		rbd_warn(NULL, "libceph incompatibility (quitting)");
		return -EINVAL;
	}

	rc = rbd_slab_init();
	if (rc)
		return rc;

	/*
	 * The number of active work items is limited by the number of
	 * rbd devices * queue depth, so leave @max_active at default.
	 */
	rbd_wq = alloc_workqueue(RBD_DRV_NAME, WQ_MEM_RECLAIM, 0);
	if (!rbd_wq) {
		rc = -ENOMEM;
		goto err_out_slab;
	}

	if (single_major) {
		rbd_major = register_blkdev(0, RBD_DRV_NAME);
		if (rbd_major < 0) {
			rc = rbd_major;
			goto err_out_wq;
		}
	}

	rc = rbd_sysfs_init();
	if (rc)
		goto err_out_blkdev;

	if (single_major)
		pr_info("loaded (major %d)\n", rbd_major);
	else
		pr_info("loaded\n");

	return 0;

err_out_blkdev:
	if (single_major)
		unregister_blkdev(rbd_major, RBD_DRV_NAME);
err_out_wq:
	destroy_workqueue(rbd_wq);
err_out_slab:
	rbd_slab_exit();
	return rc;
}

static void __exit rbd_exit(void)
{
	ida_destroy(&rbd_dev_id_ida);
	rbd_sysfs_cleanup();
	if (single_major)
		unregister_blkdev(rbd_major, RBD_DRV_NAME);
	destroy_workqueue(rbd_wq);
	rbd_slab_exit();
}

module_init(rbd_init);
module_exit(rbd_exit);

MODULE_AUTHOR("Alex Elder <elder@inktank.com>");
MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
/* following authorship retained from original osdblk.c */
MODULE_AUTHOR("Jeff Garzik <jeff@garzik.org>");

MODULE_DESCRIPTION("RADOS Block Device (RBD) driver");
MODULE_LICENSE("GPL");