// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 1991, 1992  Linus Torvalds
 * Copyright (C) 2001  Andrea Arcangeli <andrea@suse.de> SuSE
 * Copyright (C) 2016 - 2020 Christoph Hellwig
 */
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/uio.h>
#include <linux/namei.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/falloc.h>
#include <linux/suspend.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/module.h>
#include <linux/io_uring/cmd.h>
#include "blk.h"

static inline struct inode *bdev_file_inode(struct file *file)
{
	return file->f_mapping->host;
}

static blk_opf_t dio_bio_write_op(struct kiocb *iocb)
{
	blk_opf_t opf = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;

	/* avoid the need for an I/O completion work item */
	if (iocb_is_dsync(iocb))
		opf |= REQ_FUA;
	return opf;
}

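/*
 * Direct I/O to a block device must be aligned to the logical block
 * size, both in the file offset and in the length/alignment of each
 * iovec segment; atomic writes additionally have to pass
 * generic_atomic_write_valid().
 */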
static bool blkdev_dio_invalid(struct block_device *bdev, loff_t pos,
				struct iov_iter *iter, bool is_atomic)
{
	if (is_atomic && !generic_atomic_write_valid(iter, pos))
		return true;

	return pos & (bdev_logical_block_size(bdev) - 1) ||
		!bdev_iter_is_aligned(bdev, iter);
}

#define DIO_INLINE_BIO_VECS 4

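/*
 * Fast path for small synchronous direct I/O: the request fits into a
 * single bio whose bio_vecs live on the stack (up to
 * DIO_INLINE_BIO_VECS segments) or in a transient kmalloc'ed array, and
 * is submitted with submit_bio_wait(), so no completion handler or
 * shared dio state is needed.
 */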
static ssize_t __blkdev_direct_IO_simple(struct kiocb *iocb,
		struct iov_iter *iter, struct block_device *bdev,
		unsigned int nr_pages)
{
	struct bio_vec inline_vecs[DIO_INLINE_BIO_VECS], *vecs;
	loff_t pos = iocb->ki_pos;
	bool should_dirty = false;
	struct bio bio;
	ssize_t ret;

	if (nr_pages <= DIO_INLINE_BIO_VECS)
		vecs = inline_vecs;
	else {
		vecs = kmalloc_array(nr_pages, sizeof(struct bio_vec),
				     GFP_KERNEL);
		if (!vecs)
			return -ENOMEM;
	}

	if (iov_iter_rw(iter) == READ) {
		bio_init(&bio, bdev, vecs, nr_pages, REQ_OP_READ);
		if (user_backed_iter(iter))
			should_dirty = true;
	} else {
		bio_init(&bio, bdev, vecs, nr_pages, dio_bio_write_op(iocb));
	}
	bio.bi_iter.bi_sector = pos >> SECTOR_SHIFT;
	bio.bi_write_hint = file_inode(iocb->ki_filp)->i_write_hint;
	bio.bi_ioprio = iocb->ki_ioprio;
	if (iocb->ki_flags & IOCB_ATOMIC)
		bio.bi_opf |= REQ_ATOMIC;

	ret = bio_iov_iter_get_pages(&bio, iter);
	if (unlikely(ret))
		goto out;
	ret = bio.bi_iter.bi_size;

	if (iov_iter_rw(iter) == WRITE)
		task_io_account_write(ret);

	if (iocb->ki_flags & IOCB_NOWAIT)
		bio.bi_opf |= REQ_NOWAIT;

	submit_bio_wait(&bio);

	bio_release_pages(&bio, should_dirty);
	if (unlikely(bio.bi_status))
		ret = blk_status_to_errno(bio.bi_status);

out:
	if (vecs != inline_vecs)
		kfree(vecs);

	bio_uninit(&bio);

	return ret;
}

enum {
	DIO_SHOULD_DIRTY	= 1,
	DIO_IS_SYNC		= 2,
};

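/*
 * State shared by all bios of a multi-bio direct I/O request.  It is
 * embedded in the first bio (allocated from blkdev_dio_pool), so the
 * final bio_put() on that bio frees the dio as well.  dio->ref counts
 * the outstanding bios, and the iocb/waiter union reflects whether the
 * request completes asynchronously or a task waits on it synchronously.
 */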
struct blkdev_dio {
	union {
		struct kiocb		*iocb;
		struct task_struct	*waiter;
	};
	size_t			size;
	atomic_t		ref;
	unsigned int		flags;
	struct bio		bio ____cacheline_aligned_in_smp;
};

static struct bio_set blkdev_dio_pool;

static void blkdev_bio_end_io(struct bio *bio)
{
	struct blkdev_dio *dio = bio->bi_private;
	bool should_dirty = dio->flags & DIO_SHOULD_DIRTY;

	if (bio->bi_status && !dio->bio.bi_status)
		dio->bio.bi_status = bio->bi_status;

	if (atomic_dec_and_test(&dio->ref)) {
		if (!(dio->flags & DIO_IS_SYNC)) {
			struct kiocb *iocb = dio->iocb;
			ssize_t ret;

			WRITE_ONCE(iocb->private, NULL);

			if (likely(!dio->bio.bi_status)) {
				ret = dio->size;
				iocb->ki_pos += ret;
			} else {
				ret = blk_status_to_errno(dio->bio.bi_status);
			}

			dio->iocb->ki_complete(iocb, ret);
			bio_put(&dio->bio);
		} else {
			struct task_struct *waiter = dio->waiter;

			WRITE_ONCE(dio->waiter, NULL);
			blk_wake_io_task(waiter);
		}
	}

	if (should_dirty) {
		bio_check_pages_dirty(bio);
	} else {
		bio_release_pages(bio, false);
		bio_put(bio);
	}
}

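/*
 * Slow path for requests that need more than BIO_MAX_VECS segments: the
 * iterator is carved into a chain of bios submitted under a single
 * plug, with each bio holding a reference on the shared blkdev_dio.
 */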
static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
		struct block_device *bdev, unsigned int nr_pages)
{
	struct blk_plug plug;
	struct blkdev_dio *dio;
	struct bio *bio;
	bool is_read = (iov_iter_rw(iter) == READ), is_sync;
	blk_opf_t opf = is_read ? REQ_OP_READ : dio_bio_write_op(iocb);
	loff_t pos = iocb->ki_pos;
	int ret = 0;

	if (iocb->ki_flags & IOCB_ALLOC_CACHE)
		opf |= REQ_ALLOC_CACHE;
	bio = bio_alloc_bioset(bdev, nr_pages, opf, GFP_KERNEL,
			       &blkdev_dio_pool);
	dio = container_of(bio, struct blkdev_dio, bio);
	atomic_set(&dio->ref, 1);
	/*
	 * Grab an extra reference to ensure the dio structure which is embedded
	 * into the first bio stays around.
	 */
	bio_get(bio);

	is_sync = is_sync_kiocb(iocb);
	if (is_sync) {
		dio->flags = DIO_IS_SYNC;
		dio->waiter = current;
	} else {
		dio->flags = 0;
		dio->iocb = iocb;
	}

	dio->size = 0;
	if (is_read && user_backed_iter(iter))
		dio->flags |= DIO_SHOULD_DIRTY;

	blk_start_plug(&plug);

	for (;;) {
		bio->bi_iter.bi_sector = pos >> SECTOR_SHIFT;
		bio->bi_write_hint = file_inode(iocb->ki_filp)->i_write_hint;
		bio->bi_private = dio;
		bio->bi_end_io = blkdev_bio_end_io;
		bio->bi_ioprio = iocb->ki_ioprio;

		ret = bio_iov_iter_get_pages(bio, iter);
		if (unlikely(ret)) {
			bio->bi_status = BLK_STS_IOERR;
			bio_endio(bio);
			break;
		}
		if (iocb->ki_flags & IOCB_NOWAIT) {
			/*
			 * This is nonblocking IO, and we need to allocate
			 * another bio if we have data left to map.  As we
			 * cannot guarantee that one of the sub bios will not
			 * fail getting issued for NOWAIT, and as error results
			 * are coalesced across all of them, be safe and ask
			 * for a retry of this from blocking context.
			 */
			if (unlikely(iov_iter_count(iter))) {
				bio_release_pages(bio, false);
				bio_clear_flag(bio, BIO_REFFED);
				bio_put(bio);
				blk_finish_plug(&plug);
				return -EAGAIN;
			}
			bio->bi_opf |= REQ_NOWAIT;
		}

		if (is_read) {
			if (dio->flags & DIO_SHOULD_DIRTY)
				bio_set_pages_dirty(bio);
		} else {
			task_io_account_write(bio->bi_iter.bi_size);
		}
		dio->size += bio->bi_iter.bi_size;
		pos += bio->bi_iter.bi_size;

		nr_pages = bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS);
		if (!nr_pages) {
			submit_bio(bio);
			break;
		}
		atomic_inc(&dio->ref);
		submit_bio(bio);
		bio = bio_alloc(bdev, nr_pages, opf, GFP_KERNEL);
	}

	blk_finish_plug(&plug);

	if (!is_sync)
		return -EIOCBQUEUED;

	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (!READ_ONCE(dio->waiter))
			break;
		blk_io_schedule();
	}
	__set_current_state(TASK_RUNNING);

	if (!ret)
		ret = blk_status_to_errno(dio->bio.bi_status);
	if (likely(!ret))
		ret = dio->size;

	bio_put(&dio->bio);
	return ret;
}

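/*
 * Fast path for asynchronous requests that fit in a single bio: no
 * shared reference count is needed, the completion handler runs exactly
 * once.
 */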
static void blkdev_bio_end_io_async(struct bio *bio)
{
	struct blkdev_dio *dio = container_of(bio, struct blkdev_dio, bio);
	struct kiocb *iocb = dio->iocb;
	ssize_t ret;

	WRITE_ONCE(iocb->private, NULL);

	if (likely(!bio->bi_status)) {
		ret = dio->size;
		iocb->ki_pos += ret;
	} else {
		ret = blk_status_to_errno(bio->bi_status);
	}

	iocb->ki_complete(iocb, ret);

	if (dio->flags & DIO_SHOULD_DIRTY) {
		bio_check_pages_dirty(bio);
	} else {
		bio_release_pages(bio, false);
		bio_put(bio);
	}
}

static ssize_t __blkdev_direct_IO_async(struct kiocb *iocb,
					struct iov_iter *iter,
					struct block_device *bdev,
					unsigned int nr_pages)
{
	bool is_read = iov_iter_rw(iter) == READ;
	blk_opf_t opf = is_read ? REQ_OP_READ : dio_bio_write_op(iocb);
	struct blkdev_dio *dio;
	struct bio *bio;
	loff_t pos = iocb->ki_pos;
	int ret = 0;

	if (iocb->ki_flags & IOCB_ALLOC_CACHE)
		opf |= REQ_ALLOC_CACHE;
	bio = bio_alloc_bioset(bdev, nr_pages, opf, GFP_KERNEL,
			       &blkdev_dio_pool);
	dio = container_of(bio, struct blkdev_dio, bio);
	dio->flags = 0;
	dio->iocb = iocb;
	bio->bi_iter.bi_sector = pos >> SECTOR_SHIFT;
	bio->bi_write_hint = file_inode(iocb->ki_filp)->i_write_hint;
	bio->bi_end_io = blkdev_bio_end_io_async;
	bio->bi_ioprio = iocb->ki_ioprio;

	if (iov_iter_is_bvec(iter)) {
		/*
		 * Users don't rely on the iterator being in any particular
		 * state for async I/O returning -EIOCBQUEUED, hence we can
		 * avoid expensive iov_iter_advance(). Bypass
		 * bio_iov_iter_get_pages() and set the bvec directly.
		 */
		bio_iov_bvec_set(bio, iter);
	} else {
		ret = bio_iov_iter_get_pages(bio, iter);
		if (unlikely(ret)) {
			bio_put(bio);
			return ret;
		}
	}
	dio->size = bio->bi_iter.bi_size;

	if (is_read) {
		if (user_backed_iter(iter)) {
			dio->flags |= DIO_SHOULD_DIRTY;
			bio_set_pages_dirty(bio);
		}
	} else {
		task_io_account_write(bio->bi_iter.bi_size);
	}

	if (iocb->ki_flags & IOCB_ATOMIC)
		bio->bi_opf |= REQ_ATOMIC;

	if (iocb->ki_flags & IOCB_NOWAIT)
		bio->bi_opf |= REQ_NOWAIT;

	if (iocb->ki_flags & IOCB_HIPRI) {
		bio->bi_opf |= REQ_POLLED;
		submit_bio(bio);
		WRITE_ONCE(iocb->private, bio);
	} else {
		submit_bio(bio);
	}
	return -EIOCBQUEUED;
}

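/*
 * Entry point for direct I/O: pick the simple synchronous path, the
 * single-bio async path, or the multi-bio path depending on the iocb
 * and the number of segments.
 *
 * Userspace sketch (an illustration, not part of this file) of I/O that
 * reaches these paths - O_DIRECT with buffers and offsets aligned to
 * the logical block size:
 *
 *	int fd = open("/dev/sda", O_RDONLY | O_DIRECT);
 *	void *buf;
 *	posix_memalign(&buf, 4096, 65536);
 *	ssize_t n = pread(fd, buf, 65536, 0);
 */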
static ssize_t blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	struct block_device *bdev = I_BDEV(iocb->ki_filp->f_mapping->host);
	bool is_atomic = iocb->ki_flags & IOCB_ATOMIC;
	unsigned int nr_pages;

	if (!iov_iter_count(iter))
		return 0;

	if (blkdev_dio_invalid(bdev, iocb->ki_pos, iter, is_atomic))
		return -EINVAL;

	nr_pages = bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS + 1);
	if (likely(nr_pages <= BIO_MAX_VECS)) {
		if (is_sync_kiocb(iocb))
			return __blkdev_direct_IO_simple(iocb, iter, bdev,
							nr_pages);
		return __blkdev_direct_IO_async(iocb, iter, bdev, nr_pages);
	} else if (is_atomic) {
		return -EINVAL;
	}
	return __blkdev_direct_IO(iocb, iter, bdev, bio_max_segs(nr_pages));
}

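/*
 * For a block device the mapping is trivial: every in-range offset maps
 * to the same disk address (iomap->addr == iomap->offset), rounded down
 * to the logical block size and extending to the end of the device.
 */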
static int blkdev_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
		unsigned int flags, struct iomap *iomap, struct iomap *srcmap)
{
	struct block_device *bdev = I_BDEV(inode);
	loff_t isize = i_size_read(inode);

	if (offset >= isize)
		return -EIO;

	iomap->bdev = bdev;
	iomap->offset = ALIGN_DOWN(offset, bdev_logical_block_size(bdev));
	iomap->type = IOMAP_MAPPED;
	iomap->addr = iomap->offset;
	iomap->length = isize - iomap->offset;
	iomap->flags |= IOMAP_F_BUFFER_HEAD; /* noop for !CONFIG_BUFFER_HEAD */
	return 0;
}

static const struct iomap_ops blkdev_iomap_ops = {
	.iomap_begin		= blkdev_iomap_begin,
};

#ifdef CONFIG_BUFFER_HEAD
static int blkdev_get_block(struct inode *inode, sector_t iblock,
		struct buffer_head *bh, int create)
{
	bh->b_bdev = I_BDEV(inode);
	bh->b_blocknr = iblock;
	set_buffer_mapped(bh);
	return 0;
}

/*
 * We cannot call mpage_writepages() as it does not take the buffer lock.
 * We must use block_write_full_folio() directly which holds the buffer
 * lock.  The buffer lock provides the synchronisation with writeback
 * that filesystems rely on when they use the blockdev's mapping.
 */
static int blkdev_writepages(struct address_space *mapping,
		struct writeback_control *wbc)
{
	struct blk_plug plug;
	int err;

	blk_start_plug(&plug);
	err = write_cache_pages(mapping, wbc, block_write_full_folio,
			blkdev_get_block);
	blk_finish_plug(&plug);

	return err;
}

static int blkdev_read_folio(struct file *file, struct folio *folio)
{
	return block_read_full_folio(folio, blkdev_get_block);
}

static void blkdev_readahead(struct readahead_control *rac)
{
	mpage_readahead(rac, blkdev_get_block);
}

static int blkdev_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, struct folio **foliop, void **fsdata)
{
	return block_write_begin(mapping, pos, len, foliop, blkdev_get_block);
}

static int blkdev_write_end(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned copied, struct folio *folio,
		void *fsdata)
{
	int ret;
	ret = block_write_end(file, mapping, pos, len, copied, folio, fsdata);

	folio_unlock(folio);
	folio_put(folio);

	return ret;
}

const struct address_space_operations def_blk_aops = {
	.dirty_folio	= block_dirty_folio,
	.invalidate_folio	= block_invalidate_folio,
	.read_folio	= blkdev_read_folio,
	.readahead	= blkdev_readahead,
	.writepages	= blkdev_writepages,
	.write_begin	= blkdev_write_begin,
	.write_end	= blkdev_write_end,
	.migrate_folio	= buffer_migrate_folio_norefs,
	.is_dirty_writeback = buffer_check_dirty_writeback,
};
#else /* CONFIG_BUFFER_HEAD */
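/*
 * Without buffer heads the block device page cache goes through iomap:
 * reads and readahead use the trivial mapping above, and writeback maps
 * each folio via blkdev_map_blocks() below.
 */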
static int blkdev_read_folio(struct file *file, struct folio *folio)
{
	return iomap_read_folio(folio, &blkdev_iomap_ops);
}

static void blkdev_readahead(struct readahead_control *rac)
{
	iomap_readahead(rac, &blkdev_iomap_ops);
}

static int blkdev_map_blocks(struct iomap_writepage_ctx *wpc,
		struct inode *inode, loff_t offset, unsigned int len)
{
	loff_t isize = i_size_read(inode);

	if (WARN_ON_ONCE(offset >= isize))
		return -EIO;
	if (offset >= wpc->iomap.offset &&
	    offset < wpc->iomap.offset + wpc->iomap.length)
		return 0;
	return blkdev_iomap_begin(inode, offset, isize - offset,
				  IOMAP_WRITE, &wpc->iomap, NULL);
}

static const struct iomap_writeback_ops blkdev_writeback_ops = {
	.map_blocks		= blkdev_map_blocks,
};

static int blkdev_writepages(struct address_space *mapping,
		struct writeback_control *wbc)
{
	struct iomap_writepage_ctx wpc = { };

	return iomap_writepages(mapping, wbc, &wpc, &blkdev_writeback_ops);
}

const struct address_space_operations def_blk_aops = {
	.dirty_folio	= filemap_dirty_folio,
	.release_folio		= iomap_release_folio,
	.invalidate_folio	= iomap_invalidate_folio,
	.read_folio		= blkdev_read_folio,
	.readahead		= blkdev_readahead,
	.writepages		= blkdev_writepages,
	.is_partially_uptodate	= iomap_is_partially_uptodate,
	.error_remove_folio	= generic_error_remove_folio,
	.migrate_folio		= filemap_migrate_folio,
};
#endif /* CONFIG_BUFFER_HEAD */
Christoph Hellwigcd82cca2021-09-07 16:13:02 +0200533
534/*
535 * for a block special file file_inode(file)->i_size is zero
536 * so we compute the size by hand (just as in block_read/write above)
537 */
538static loff_t blkdev_llseek(struct file *file, loff_t offset, int whence)
539{
540 struct inode *bd_inode = bdev_file_inode(file);
541 loff_t retval;
542
543 inode_lock(bd_inode);
544 retval = fixed_size_llseek(file, offset, whence, i_size_read(bd_inode));
545 inode_unlock(bd_inode);
546 return retval;
547}
548
static int blkdev_fsync(struct file *filp, loff_t start, loff_t end,
		int datasync)
{
	struct block_device *bdev = I_BDEV(filp->f_mapping->host);
	int error;

	error = file_write_and_wait_range(filp, start, end);
	if (error)
		return error;

	/*
	 * There is no need to serialise calls to blkdev_issue_flush with
	 * i_mutex and doing so causes performance issues with concurrent
	 * O_SYNC writers to a block device.
	 */
	error = blkdev_issue_flush(bdev);
	if (error == -EOPNOTSUPP)
		error = 0;

	return error;
}

/**
 * file_to_blk_mode - get block open flags from file flags
 * @file: file whose open flags should be converted
 *
 * Look at file open flags and generate corresponding block open flags from
 * them. The function works both for a file that is just being opened (e.g.
 * during the ->open callback) and for a file that is already open. This is
 * actually non-trivial (see comment in the function).
 */
blk_mode_t file_to_blk_mode(struct file *file)
{
	blk_mode_t mode = 0;

	if (file->f_mode & FMODE_READ)
		mode |= BLK_OPEN_READ;
	if (file->f_mode & FMODE_WRITE)
		mode |= BLK_OPEN_WRITE;
	/*
	 * do_dentry_open() clears O_EXCL from f_flags, use file->private_data
	 * to determine whether the open was exclusive for already open files.
	 */
	if (file->private_data)
		mode |= BLK_OPEN_EXCL;
	else if (file->f_flags & O_EXCL)
		mode |= BLK_OPEN_EXCL;
	if (file->f_flags & O_NDELAY)
		mode |= BLK_OPEN_NDELAY;

	/*
	 * If all bits in O_ACCMODE are set (aka O_RDWR | O_WRONLY), the floppy
	 * driver has historically allowed ioctls as if the file was opened for
	 * writing, but does not allow any actual reads or writes.
	 */
	if ((file->f_flags & O_ACCMODE) == (O_RDWR | O_WRONLY))
		mode |= BLK_OPEN_WRITE_IOCTL;

	return mode;
}

static int blkdev_open(struct inode *inode, struct file *filp)
{
	struct block_device *bdev;
	blk_mode_t mode;
	int ret;

	mode = file_to_blk_mode(filp);
	/* Use the file as the holder. */
	if (mode & BLK_OPEN_EXCL)
		filp->private_data = filp;
	ret = bdev_permission(inode->i_rdev, mode, filp->private_data);
	if (ret)
		return ret;

	bdev = blkdev_get_no_open(inode->i_rdev);
	if (!bdev)
		return -ENXIO;

	if (bdev_can_atomic_write(bdev) && filp->f_flags & O_DIRECT)
		filp->f_mode |= FMODE_CAN_ATOMIC_WRITE;

	ret = bdev_open(bdev, mode, filp->private_data, NULL, filp);
	if (ret)
		blkdev_put_no_open(bdev);
	return ret;
}

static int blkdev_release(struct inode *inode, struct file *filp)
{
	bdev_release(filp);
	return 0;
}

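/*
 * Kick off a direct write: write back and invalidate the affected page
 * cache range first, and again once the write completes, so buffered
 * readers do not see stale data.  -EBUSY from the pre-write
 * invalidation is translated to a return of 0, which makes the caller
 * fall back to buffered I/O.
 */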
static ssize_t
blkdev_direct_write(struct kiocb *iocb, struct iov_iter *from)
{
	size_t count = iov_iter_count(from);
	ssize_t written;

	written = kiocb_invalidate_pages(iocb, count);
	if (written) {
		if (written == -EBUSY)
			return 0;
		return written;
	}

	written = blkdev_direct_IO(iocb, from);
	if (written > 0) {
		kiocb_invalidate_post_direct_write(iocb, count);
		iocb->ki_pos += written;
		count -= written;
	}
	if (written != -EIOCBQUEUED)
		iov_iter_revert(from, count - iov_iter_count(from));
	return written;
}

static ssize_t blkdev_buffered_write(struct kiocb *iocb, struct iov_iter *from)
{
	return iomap_file_buffered_write(iocb, from, &blkdev_iomap_ops, NULL);
}

/*
 * Write data to the block device.  Only intended for the block device itself
 * and the raw driver, which is basically a fake block device.
 *
 * Does not take i_mutex for the write and thus is not for general purpose
 * use.
 */
static ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *bd_inode = bdev_file_inode(file);
	struct block_device *bdev = I_BDEV(bd_inode);
	loff_t size = bdev_nr_bytes(bdev);
	size_t shorted = 0;
	ssize_t ret;

	if (bdev_read_only(bdev))
		return -EPERM;

	if (IS_SWAPFILE(bd_inode) && !is_hibernate_resume_dev(bd_inode->i_rdev))
		return -ETXTBSY;

	if (!iov_iter_count(from))
		return 0;

	if (iocb->ki_pos >= size)
		return -ENOSPC;

	if ((iocb->ki_flags & (IOCB_NOWAIT | IOCB_DIRECT)) == IOCB_NOWAIT)
		return -EOPNOTSUPP;

	size -= iocb->ki_pos;
	if (iov_iter_count(from) > size) {
		shorted = iov_iter_count(from) - size;
		iov_iter_truncate(from, size);
	}

	ret = file_update_time(file);
	if (ret)
		return ret;

	if (iocb->ki_flags & IOCB_DIRECT) {
		ret = blkdev_direct_write(iocb, from);
		if (ret >= 0 && iov_iter_count(from))
			ret = direct_write_fallback(iocb, from, ret,
					blkdev_buffered_write(iocb, from));
	} else {
		ret = blkdev_buffered_write(iocb, from);
	}

	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	iov_iter_reexpand(from, iov_iter_count(from) + shorted);
	return ret;
}

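/*
 * Reads past the device size are clipped: the iterator is truncated to
 * the remaining bytes (tracked in "shorted") and re-expanded before
 * returning, so the caller's iterator state stays consistent.
 */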
static ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct block_device *bdev = I_BDEV(iocb->ki_filp->f_mapping->host);
	loff_t size = bdev_nr_bytes(bdev);
	loff_t pos = iocb->ki_pos;
	size_t shorted = 0;
	ssize_t ret = 0;
	size_t count;

	if (unlikely(pos + iov_iter_count(to) > size)) {
		if (pos >= size)
			return 0;
		size -= pos;
		shorted = iov_iter_count(to) - size;
		iov_iter_truncate(to, size);
	}

	count = iov_iter_count(to);
	if (!count)
		goto reexpand; /* skip atime */

	if (iocb->ki_flags & IOCB_DIRECT) {
		ret = kiocb_write_and_wait(iocb, count);
		if (ret < 0)
			goto reexpand;
		file_accessed(iocb->ki_filp);

		ret = blkdev_direct_IO(iocb, to);
		if (ret >= 0) {
			iocb->ki_pos += ret;
			count -= ret;
		}
		iov_iter_revert(to, count - iov_iter_count(to));
		if (ret < 0 || !count)
			goto reexpand;
	}

	ret = filemap_read(iocb, to, ret);

reexpand:
	if (unlikely(shorted))
		iov_iter_reexpand(to, iov_iter_count(to) + shorted);
	return ret;
}

#define	BLKDEV_FALLOC_FL_SUPPORTED					\
		(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |		\
		 FALLOC_FL_ZERO_RANGE)

static long blkdev_fallocate(struct file *file, int mode, loff_t start,
			     loff_t len)
{
	struct inode *inode = bdev_file_inode(file);
	struct block_device *bdev = I_BDEV(inode);
	loff_t end = start + len - 1;
	loff_t isize;
	int error;

	/* Fail if we don't recognize the flags. */
	if (mode & ~BLKDEV_FALLOC_FL_SUPPORTED)
		return -EOPNOTSUPP;

	/* Don't go off the end of the device. */
	isize = bdev_nr_bytes(bdev);
	if (start >= isize)
		return -EINVAL;
	if (end >= isize) {
		if (mode & FALLOC_FL_KEEP_SIZE) {
			len = isize - start;
			end = start + len - 1;
		} else
			return -EINVAL;
	}

	/*
	 * Don't allow IO that isn't aligned to logical block size.
	 */
	if ((start | len) & (bdev_logical_block_size(bdev) - 1))
		return -EINVAL;

	filemap_invalidate_lock(inode->i_mapping);

	/*
	 * Invalidate the page cache, including dirty pages, for valid
	 * de-allocate mode calls to fallocate().
	 */
	switch (mode) {
	case FALLOC_FL_ZERO_RANGE:
	case FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE:
		error = truncate_bdev_range(bdev, file_to_blk_mode(file), start, end);
		if (error)
			goto fail;

		error = blkdev_issue_zeroout(bdev, start >> SECTOR_SHIFT,
					     len >> SECTOR_SHIFT, GFP_KERNEL,
					     BLKDEV_ZERO_NOUNMAP);
		break;
	case FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE:
		error = truncate_bdev_range(bdev, file_to_blk_mode(file), start, end);
		if (error)
			goto fail;

		error = blkdev_issue_zeroout(bdev, start >> SECTOR_SHIFT,
					     len >> SECTOR_SHIFT, GFP_KERNEL,
					     BLKDEV_ZERO_NOFALLBACK);
		break;
	default:
		error = -EOPNOTSUPP;
	}

 fail:
	filemap_invalidate_unlock(inode->i_mapping);
	return error;
}

static int blkdev_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *bd_inode = bdev_file_inode(file);

	if (bdev_read_only(I_BDEV(bd_inode)))
		return generic_file_readonly_mmap(file, vma);

	return generic_file_mmap(file, vma);
}

const struct file_operations def_blk_fops = {
	.open		= blkdev_open,
	.release	= blkdev_release,
	.llseek		= blkdev_llseek,
	.read_iter	= blkdev_read_iter,
	.write_iter	= blkdev_write_iter,
	.iopoll		= iocb_bio_iopoll,
	.mmap		= blkdev_mmap,
	.fsync		= blkdev_fsync,
	.unlocked_ioctl	= blkdev_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= compat_blkdev_ioctl,
#endif
	.splice_read	= filemap_splice_read,
	.splice_write	= iter_file_splice_write,
	.fallocate	= blkdev_fallocate,
	.uring_cmd	= blkdev_uring_cmd,
	.fop_flags	= FOP_BUFFER_RASYNC,
};

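/*
 * The bioset front pad is sized so that struct blkdev_dio can be carved
 * out of each allocation: bio_alloc_bioset() hands back a pointer to
 * the embedded bio member, and container_of() recovers the surrounding
 * dio.
 */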
static __init int blkdev_init(void)
{
	return bioset_init(&blkdev_dio_pool, 4,
				offsetof(struct blkdev_dio, bio),
				BIOSET_NEED_BVECS|BIOSET_PERCPU_CACHE);
}
module_init(blkdev_init);