// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 1991, 1992  Linus Torvalds
 * Copyright (C) 2001  Andrea Arcangeli <andrea@suse.de> SuSE
 * Copyright (C) 2016 - 2020 Christoph Hellwig
 */
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/uio.h>
#include <linux/namei.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/falloc.h>
#include <linux/suspend.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/module.h>
#include <linux/io_uring/cmd.h>
#include "blk.h"

static inline struct inode *bdev_file_inode(struct file *file)
{
	return file->f_mapping->host;
}

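/*
 * Build the request flags for a direct I/O write: REQ_SYNC and REQ_IDLE
 * hint that the submitter is waiting on the result, and REQ_FUA makes
 * O_DSYNC writes durable at completion time.
 */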
static blk_opf_t dio_bio_write_op(struct kiocb *iocb)
{
	blk_opf_t opf = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;

	/* avoid the need for an I/O completion work item */
	if (iocb_is_dsync(iocb))
		opf |= REQ_FUA;
	return opf;
}

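/*
 * Reject direct I/O the device cannot handle: atomic writes that fail
 * generic validation, and positions or iterators that are not aligned to
 * the logical block size.
 */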
static bool blkdev_dio_invalid(struct block_device *bdev, loff_t pos,
				struct iov_iter *iter, bool is_atomic)
{
	if (is_atomic && !generic_atomic_write_valid(iter, pos))
		return true;

	return pos & (bdev_logical_block_size(bdev) - 1) ||
		!bdev_iter_is_aligned(bdev, iter);
}

#define DIO_INLINE_BIO_VECS 4

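/*
 * Fast path for synchronous direct I/O that fits in a single bio: the bio
 * lives on the stack and is waited on inline, so no completion handler or
 * heap-allocated dio state is needed.
 */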
static ssize_t __blkdev_direct_IO_simple(struct kiocb *iocb,
		struct iov_iter *iter, struct block_device *bdev,
		unsigned int nr_pages)
{
	struct bio_vec inline_vecs[DIO_INLINE_BIO_VECS], *vecs;
	loff_t pos = iocb->ki_pos;
	bool should_dirty = false;
	struct bio bio;
	ssize_t ret;

	if (nr_pages <= DIO_INLINE_BIO_VECS)
		vecs = inline_vecs;
	else {
		vecs = kmalloc_array(nr_pages, sizeof(struct bio_vec),
				     GFP_KERNEL);
		if (!vecs)
			return -ENOMEM;
	}

	if (iov_iter_rw(iter) == READ) {
		bio_init(&bio, bdev, vecs, nr_pages, REQ_OP_READ);
		if (user_backed_iter(iter))
			should_dirty = true;
	} else {
		bio_init(&bio, bdev, vecs, nr_pages, dio_bio_write_op(iocb));
	}
	bio.bi_iter.bi_sector = pos >> SECTOR_SHIFT;
	bio.bi_write_hint = file_inode(iocb->ki_filp)->i_write_hint;
	bio.bi_ioprio = iocb->ki_ioprio;
	if (iocb->ki_flags & IOCB_ATOMIC)
		bio.bi_opf |= REQ_ATOMIC;

	ret = bio_iov_iter_get_pages(&bio, iter);
	if (unlikely(ret))
		goto out;
	ret = bio.bi_iter.bi_size;

	if (iov_iter_rw(iter) == WRITE)
		task_io_account_write(ret);

	if (iocb->ki_flags & IOCB_NOWAIT)
		bio.bi_opf |= REQ_NOWAIT;

	submit_bio_wait(&bio);

	bio_release_pages(&bio, should_dirty);
	if (unlikely(bio.bi_status))
		ret = blk_status_to_errno(bio.bi_status);

out:
	if (vecs != inline_vecs)
		kfree(vecs);

	bio_uninit(&bio);

	return ret;
}

enum {
	DIO_SHOULD_DIRTY	= 1,
	DIO_IS_SYNC		= 2,
};

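/*
 * State shared by all bios of a larger direct I/O request.  The structure
 * is embedded in the first bio, which holds a reference on it until the
 * whole request has completed.
 */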
struct blkdev_dio {
	union {
		struct kiocb		*iocb;
		struct task_struct	*waiter;
	};
	size_t			size;
	atomic_t		ref;
	unsigned int		flags;
	struct bio		bio ____cacheline_aligned_in_smp;
};

static struct bio_set blkdev_dio_pool;

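/*
 * Completion handler for the multi-bio path: record the first error seen,
 * and once the last bio finishes either complete the iocb (async) or wake
 * the submitting task (sync).
 */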
static void blkdev_bio_end_io(struct bio *bio)
{
	struct blkdev_dio *dio = bio->bi_private;
	bool should_dirty = dio->flags & DIO_SHOULD_DIRTY;

	if (bio->bi_status && !dio->bio.bi_status)
		dio->bio.bi_status = bio->bi_status;

	if (atomic_dec_and_test(&dio->ref)) {
		if (!(dio->flags & DIO_IS_SYNC)) {
			struct kiocb *iocb = dio->iocb;
			ssize_t ret;

			WRITE_ONCE(iocb->private, NULL);

			if (likely(!dio->bio.bi_status)) {
				ret = dio->size;
				iocb->ki_pos += ret;
			} else {
				ret = blk_status_to_errno(dio->bio.bi_status);
			}

			dio->iocb->ki_complete(iocb, ret);
			bio_put(&dio->bio);
		} else {
			struct task_struct *waiter = dio->waiter;

			WRITE_ONCE(dio->waiter, NULL);
			blk_wake_io_task(waiter);
		}
	}

	if (should_dirty) {
		bio_check_pages_dirty(bio);
	} else {
		bio_release_pages(bio, false);
		bio_put(bio);
	}
}

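/*
 * Slow path for requests that need more than BIO_MAX_VECS segments: split
 * the iterator across as many bios as required, submit them under a plug,
 * and wait inline for sync iocbs.
 */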
static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
		struct block_device *bdev, unsigned int nr_pages)
{
	struct blk_plug plug;
	struct blkdev_dio *dio;
	struct bio *bio;
	bool is_read = (iov_iter_rw(iter) == READ), is_sync;
	blk_opf_t opf = is_read ? REQ_OP_READ : dio_bio_write_op(iocb);
	loff_t pos = iocb->ki_pos;
	int ret = 0;

	if (iocb->ki_flags & IOCB_ALLOC_CACHE)
		opf |= REQ_ALLOC_CACHE;
	bio = bio_alloc_bioset(bdev, nr_pages, opf, GFP_KERNEL,
			       &blkdev_dio_pool);
	dio = container_of(bio, struct blkdev_dio, bio);
	atomic_set(&dio->ref, 1);
	/*
	 * Grab an extra reference to ensure the dio structure, which is
	 * embedded into the first bio, stays around.
	 */
	bio_get(bio);

	is_sync = is_sync_kiocb(iocb);
	if (is_sync) {
		dio->flags = DIO_IS_SYNC;
		dio->waiter = current;
	} else {
		dio->flags = 0;
		dio->iocb = iocb;
	}

	dio->size = 0;
	if (is_read && user_backed_iter(iter))
		dio->flags |= DIO_SHOULD_DIRTY;

	blk_start_plug(&plug);

	for (;;) {
		bio->bi_iter.bi_sector = pos >> SECTOR_SHIFT;
		bio->bi_write_hint = file_inode(iocb->ki_filp)->i_write_hint;
		bio->bi_private = dio;
		bio->bi_end_io = blkdev_bio_end_io;
		bio->bi_ioprio = iocb->ki_ioprio;

		ret = bio_iov_iter_get_pages(bio, iter);
		if (unlikely(ret)) {
			bio->bi_status = BLK_STS_IOERR;
			bio_endio(bio);
			break;
		}
		if (iocb->ki_flags & IOCB_NOWAIT) {
			/*
			 * This is nonblocking I/O, and we may need to allocate
			 * another bio if we have data left to map.  As we
			 * cannot guarantee that one of the sub bios will not
			 * fail to be issued with NOWAIT, and as error results
			 * are coalesced across all of them, be safe and ask
			 * for a retry of this from blocking context.
			 */
			if (unlikely(iov_iter_count(iter))) {
				bio_release_pages(bio, false);
				bio_clear_flag(bio, BIO_REFFED);
				bio_put(bio);
				blk_finish_plug(&plug);
				return -EAGAIN;
			}
			bio->bi_opf |= REQ_NOWAIT;
		}

		if (is_read) {
			if (dio->flags & DIO_SHOULD_DIRTY)
				bio_set_pages_dirty(bio);
		} else {
			task_io_account_write(bio->bi_iter.bi_size);
		}
		dio->size += bio->bi_iter.bi_size;
		pos += bio->bi_iter.bi_size;

		nr_pages = bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS);
		if (!nr_pages) {
			submit_bio(bio);
			break;
		}
		atomic_inc(&dio->ref);
		submit_bio(bio);
		bio = bio_alloc(bdev, nr_pages, opf, GFP_KERNEL);
	}

	blk_finish_plug(&plug);

	if (!is_sync)
		return -EIOCBQUEUED;

	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (!READ_ONCE(dio->waiter))
			break;
		blk_io_schedule();
	}
	__set_current_state(TASK_RUNNING);

	if (!ret)
		ret = blk_status_to_errno(dio->bio.bi_status);
	if (likely(!ret))
		ret = dio->size;

	bio_put(&dio->bio);
	return ret;
}

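/*
 * Completion handler for the single-bio async path: there is exactly one
 * bio per request, so the iocb can be completed directly from here.
 */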
static void blkdev_bio_end_io_async(struct bio *bio)
{
	struct blkdev_dio *dio = container_of(bio, struct blkdev_dio, bio);
	struct kiocb *iocb = dio->iocb;
	ssize_t ret;

	WRITE_ONCE(iocb->private, NULL);

	if (likely(!bio->bi_status)) {
		ret = dio->size;
		iocb->ki_pos += ret;
	} else {
		ret = blk_status_to_errno(bio->bi_status);
	}

	iocb->ki_complete(iocb, ret);

	if (dio->flags & DIO_SHOULD_DIRTY) {
		bio_check_pages_dirty(bio);
	} else {
		bio_release_pages(bio, false);
		bio_put(bio);
	}
}

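/*
 * Async fast path for requests that fit in a single bio; supports polled
 * I/O (IOCB_HIPRI) by stashing the bio in iocb->private for iopoll.
 */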
static ssize_t __blkdev_direct_IO_async(struct kiocb *iocb,
					struct iov_iter *iter,
					struct block_device *bdev,
					unsigned int nr_pages)
{
	bool is_read = iov_iter_rw(iter) == READ;
	blk_opf_t opf = is_read ? REQ_OP_READ : dio_bio_write_op(iocb);
	struct blkdev_dio *dio;
	struct bio *bio;
	loff_t pos = iocb->ki_pos;
	int ret = 0;

	if (iocb->ki_flags & IOCB_ALLOC_CACHE)
		opf |= REQ_ALLOC_CACHE;
	bio = bio_alloc_bioset(bdev, nr_pages, opf, GFP_KERNEL,
			       &blkdev_dio_pool);
	dio = container_of(bio, struct blkdev_dio, bio);
	dio->flags = 0;
	dio->iocb = iocb;
	bio->bi_iter.bi_sector = pos >> SECTOR_SHIFT;
	bio->bi_write_hint = file_inode(iocb->ki_filp)->i_write_hint;
	bio->bi_end_io = blkdev_bio_end_io_async;
	bio->bi_ioprio = iocb->ki_ioprio;

	if (iov_iter_is_bvec(iter)) {
		/*
		 * Users don't rely on the iterator being in any particular
		 * state for async I/O returning -EIOCBQUEUED, hence we can
		 * avoid expensive iov_iter_advance().  Bypass
		 * bio_iov_iter_get_pages() and set the bvec directly.
		 */
		bio_iov_bvec_set(bio, iter);
	} else {
		ret = bio_iov_iter_get_pages(bio, iter);
		if (unlikely(ret)) {
			bio_put(bio);
			return ret;
		}
	}
	dio->size = bio->bi_iter.bi_size;

	if (is_read) {
		if (user_backed_iter(iter)) {
			dio->flags |= DIO_SHOULD_DIRTY;
			bio_set_pages_dirty(bio);
		}
	} else {
		task_io_account_write(bio->bi_iter.bi_size);
	}

	if (iocb->ki_flags & IOCB_ATOMIC)
		bio->bi_opf |= REQ_ATOMIC;

	if (iocb->ki_flags & IOCB_NOWAIT)
		bio->bi_opf |= REQ_NOWAIT;

	if (iocb->ki_flags & IOCB_HIPRI) {
		bio->bi_opf |= REQ_POLLED;
		submit_bio(bio);
		WRITE_ONCE(iocb->private, bio);
	} else {
		submit_bio(bio);
	}
	return -EIOCBQUEUED;
}

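/*
 * Dispatch a direct I/O request to one of the three implementations: the
 * sync or async single-bio fast paths when the request fits in
 * BIO_MAX_VECS segments, or the multi-bio slow path otherwise.
 */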
static ssize_t blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	struct block_device *bdev = I_BDEV(iocb->ki_filp->f_mapping->host);
	bool is_atomic = iocb->ki_flags & IOCB_ATOMIC;
	unsigned int nr_pages;

	if (!iov_iter_count(iter))
		return 0;

	if (blkdev_dio_invalid(bdev, iocb->ki_pos, iter, is_atomic))
		return -EINVAL;

	nr_pages = bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS + 1);
	if (likely(nr_pages <= BIO_MAX_VECS)) {
		if (is_sync_kiocb(iocb))
			return __blkdev_direct_IO_simple(iocb, iter, bdev,
							nr_pages);
		return __blkdev_direct_IO_async(iocb, iter, bdev, nr_pages);
	} else if (is_atomic) {
		return -EINVAL;
	}
	return __blkdev_direct_IO(iocb, iter, bdev, bio_max_segs(nr_pages));
}

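/*
 * Map a file range straight onto the device: for a block device the
 * mapping is the identity, clamped to the device size and aligned down to
 * the logical block size.
 */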
static int blkdev_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
		unsigned int flags, struct iomap *iomap, struct iomap *srcmap)
{
	struct block_device *bdev = I_BDEV(inode);
	loff_t isize = i_size_read(inode);

	if (offset >= isize)
		return -EIO;

	iomap->bdev = bdev;
	iomap->offset = ALIGN_DOWN(offset, bdev_logical_block_size(bdev));
	iomap->type = IOMAP_MAPPED;
	iomap->addr = iomap->offset;
	iomap->length = isize - iomap->offset;
	iomap->flags |= IOMAP_F_BUFFER_HEAD; /* noop for !CONFIG_BUFFER_HEAD */
	return 0;
}

static const struct iomap_ops blkdev_iomap_ops = {
	.iomap_begin		= blkdev_iomap_begin,
};

#ifdef CONFIG_BUFFER_HEAD
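/*
 * Trivial get_block for the device mapping: block iblock of the mapping
 * is block iblock of the device, and every block is always mapped.
 */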
static int blkdev_get_block(struct inode *inode, sector_t iblock,
		struct buffer_head *bh, int create)
{
	bh->b_bdev = I_BDEV(inode);
	bh->b_blocknr = iblock;
	set_buffer_mapped(bh);
	return 0;
}

/*
 * We cannot call mpage_writepages() as it does not take the buffer lock.
 * We must use block_write_full_folio() directly, which holds the buffer
 * lock.  The buffer lock provides the synchronisation with writeback
 * that filesystems rely on when they use the blockdev's mapping.
 */
static int blkdev_writepages(struct address_space *mapping,
		struct writeback_control *wbc)
{
	struct blk_plug plug;
	int err;

	blk_start_plug(&plug);
	err = write_cache_pages(mapping, wbc, block_write_full_folio,
			blkdev_get_block);
	blk_finish_plug(&plug);

	return err;
}

static int blkdev_read_folio(struct file *file, struct folio *folio)
{
	return block_read_full_folio(folio, blkdev_get_block);
}

static void blkdev_readahead(struct readahead_control *rac)
{
	mpage_readahead(rac, blkdev_get_block);
}

static int blkdev_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, struct folio **foliop, void **fsdata)
{
	return block_write_begin(mapping, pos, len, foliop, blkdev_get_block);
}

static int blkdev_write_end(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned copied, struct folio *folio,
		void *fsdata)
{
	int ret;
	ret = block_write_end(file, mapping, pos, len, copied, folio, fsdata);

	folio_unlock(folio);
	folio_put(folio);

	return ret;
}

const struct address_space_operations def_blk_aops = {
	.dirty_folio	= block_dirty_folio,
	.invalidate_folio = block_invalidate_folio,
	.read_folio	= blkdev_read_folio,
	.readahead	= blkdev_readahead,
	.writepages	= blkdev_writepages,
	.write_begin	= blkdev_write_begin,
	.write_end	= blkdev_write_end,
	.migrate_folio	= buffer_migrate_folio_norefs,
	.is_dirty_writeback = buffer_check_dirty_writeback,
};
#else /* CONFIG_BUFFER_HEAD */
static int blkdev_read_folio(struct file *file, struct folio *folio)
{
	return iomap_read_folio(folio, &blkdev_iomap_ops);
}

static void blkdev_readahead(struct readahead_control *rac)
{
	iomap_readahead(rac, &blkdev_iomap_ops);
}

static int blkdev_map_blocks(struct iomap_writepage_ctx *wpc,
		struct inode *inode, loff_t offset, unsigned int len)
{
	loff_t isize = i_size_read(inode);

	if (WARN_ON_ONCE(offset >= isize))
		return -EIO;
	if (offset >= wpc->iomap.offset &&
	    offset < wpc->iomap.offset + wpc->iomap.length)
		return 0;
	return blkdev_iomap_begin(inode, offset, isize - offset,
				  IOMAP_WRITE, &wpc->iomap, NULL);
}

static const struct iomap_writeback_ops blkdev_writeback_ops = {
	.map_blocks		= blkdev_map_blocks,
};

static int blkdev_writepages(struct address_space *mapping,
		struct writeback_control *wbc)
{
	struct iomap_writepage_ctx wpc = { };

	return iomap_writepages(mapping, wbc, &wpc, &blkdev_writeback_ops);
}

const struct address_space_operations def_blk_aops = {
	.dirty_folio		= filemap_dirty_folio,
	.release_folio		= iomap_release_folio,
	.invalidate_folio	= iomap_invalidate_folio,
	.read_folio		= blkdev_read_folio,
	.readahead		= blkdev_readahead,
	.writepages		= blkdev_writepages,
	.is_partially_uptodate	= iomap_is_partially_uptodate,
	.error_remove_folio	= generic_error_remove_folio,
	.migrate_folio		= filemap_migrate_folio,
};
#endif /* CONFIG_BUFFER_HEAD */

/*
 * For a block special file, file_inode(file)->i_size is zero, so we
 * compute the size by hand (just as in block_read/write above).
 */
static loff_t blkdev_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *bd_inode = bdev_file_inode(file);
	loff_t retval;

	inode_lock(bd_inode);
	retval = fixed_size_llseek(file, offset, whence, i_size_read(bd_inode));
	inode_unlock(bd_inode);
	return retval;
}

static int blkdev_fsync(struct file *filp, loff_t start, loff_t end,
		int datasync)
{
	struct block_device *bdev = I_BDEV(filp->f_mapping->host);
	int error;

	error = file_write_and_wait_range(filp, start, end);
	if (error)
		return error;

	/*
	 * There is no need to serialise calls to blkdev_issue_flush with
	 * i_mutex, and doing so causes performance issues with concurrent
	 * O_SYNC writers to a block device.
	 */
	error = blkdev_issue_flush(bdev);
	if (error == -EOPNOTSUPP)
		error = 0;

	return error;
}

/**
 * file_to_blk_mode - get block open flags from file flags
 * @file: file whose open flags should be converted
 *
 * Look at file open flags and generate corresponding block open flags from
 * them. The function works both for a file just being opened (e.g. during
 * the ->open callback) and for a file that is already open. This is
 * actually non-trivial (see comment in the function).
 */
blk_mode_t file_to_blk_mode(struct file *file)
{
	blk_mode_t mode = 0;

	if (file->f_mode & FMODE_READ)
		mode |= BLK_OPEN_READ;
	if (file->f_mode & FMODE_WRITE)
		mode |= BLK_OPEN_WRITE;
	/*
	 * do_dentry_open() clears O_EXCL from f_flags, use file->private_data
	 * to determine whether the open was exclusive for already open files.
	 */
	if (file->private_data)
		mode |= BLK_OPEN_EXCL;
	else if (file->f_flags & O_EXCL)
		mode |= BLK_OPEN_EXCL;
	if (file->f_flags & O_NDELAY)
		mode |= BLK_OPEN_NDELAY;

	/*
	 * If all bits in O_ACCMODE are set (aka O_RDWR | O_WRONLY), the
	 * floppy driver has historically allowed ioctls as if the file was
	 * opened for writing, but does not allow any actual reads or writes.
	 */
	if ((file->f_flags & O_ACCMODE) == (O_RDWR | O_WRONLY))
		mode |= BLK_OPEN_WRITE_IOCTL;

	return mode;
}

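/*
 * Open a block device node: check permissions for the requested mode,
 * then look up and open the underlying block_device.  The file itself
 * serves as the holder for exclusive opens.
 */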
static int blkdev_open(struct inode *inode, struct file *filp)
{
	struct block_device *bdev;
	blk_mode_t mode;
	int ret;

	mode = file_to_blk_mode(filp);
	/* Use the file as the holder. */
	if (mode & BLK_OPEN_EXCL)
		filp->private_data = filp;
	ret = bdev_permission(inode->i_rdev, mode, filp->private_data);
	if (ret)
		return ret;

	bdev = blkdev_get_no_open(inode->i_rdev);
	if (!bdev)
		return -ENXIO;

	if (bdev_can_atomic_write(bdev) && filp->f_flags & O_DIRECT)
		filp->f_mode |= FMODE_CAN_ATOMIC_WRITE;

	ret = bdev_open(bdev, mode, filp->private_data, NULL, filp);
	if (ret)
		blkdev_put_no_open(bdev);
	return ret;
}

static int blkdev_release(struct inode *inode, struct file *filp)
{
	bdev_release(filp);
	return 0;
}

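/*
 * Direct write helper: invalidate the page cache for the written range,
 * issue the direct I/O, and revert the iterator over anything that was
 * not submitted so the caller can fall back to buffered I/O.
 */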
static ssize_t
blkdev_direct_write(struct kiocb *iocb, struct iov_iter *from)
{
	size_t count = iov_iter_count(from);
	ssize_t written;

	written = kiocb_invalidate_pages(iocb, count);
	if (written) {
		if (written == -EBUSY)
			return 0;
		return written;
	}

	written = blkdev_direct_IO(iocb, from);
	if (written > 0) {
		kiocb_invalidate_post_direct_write(iocb, count);
		iocb->ki_pos += written;
		count -= written;
	}
	if (written != -EIOCBQUEUED)
		iov_iter_revert(from, count - iov_iter_count(from));
	return written;
}

static ssize_t blkdev_buffered_write(struct kiocb *iocb, struct iov_iter *from)
{
	return iomap_file_buffered_write(iocb, from, &blkdev_iomap_ops, NULL);
}

/*
 * Write data to the block device.  Only intended for the block device itself
 * and the raw driver, which basically is a fake block device.
 *
 * Does not take i_mutex for the write and thus is not for general purpose
 * use.
 */
static ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *bd_inode = bdev_file_inode(file);
	struct block_device *bdev = I_BDEV(bd_inode);
	loff_t size = bdev_nr_bytes(bdev);
	size_t shorted = 0;
	ssize_t ret;

	if (bdev_read_only(bdev))
		return -EPERM;

	if (IS_SWAPFILE(bd_inode) && !is_hibernate_resume_dev(bd_inode->i_rdev))
		return -ETXTBSY;

	if (!iov_iter_count(from))
		return 0;

	if (iocb->ki_pos >= size)
		return -ENOSPC;

	if ((iocb->ki_flags & (IOCB_NOWAIT | IOCB_DIRECT)) == IOCB_NOWAIT)
		return -EOPNOTSUPP;

	size -= iocb->ki_pos;
	if (iov_iter_count(from) > size) {
		shorted = iov_iter_count(from) - size;
		iov_iter_truncate(from, size);
	}

	ret = file_update_time(file);
	if (ret)
		return ret;

	if (iocb->ki_flags & IOCB_DIRECT) {
		ret = blkdev_direct_write(iocb, from);
		if (ret >= 0 && iov_iter_count(from))
			ret = direct_write_fallback(iocb, from, ret,
					blkdev_buffered_write(iocb, from));
	} else {
		ret = blkdev_buffered_write(iocb, from);
	}

	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	iov_iter_reexpand(from, iov_iter_count(from) + shorted);
	return ret;
}

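/*
 * Read from the block device: truncate the iterator at the device size,
 * try direct I/O if requested, and finish up through the page cache for
 * anything that remains.
 */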
static ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct block_device *bdev = I_BDEV(iocb->ki_filp->f_mapping->host);
	loff_t size = bdev_nr_bytes(bdev);
	loff_t pos = iocb->ki_pos;
	size_t shorted = 0;
	ssize_t ret = 0;
	size_t count;

	if (unlikely(pos + iov_iter_count(to) > size)) {
		if (pos >= size)
			return 0;
		size -= pos;
		shorted = iov_iter_count(to) - size;
		iov_iter_truncate(to, size);
	}

	count = iov_iter_count(to);
	if (!count)
		goto reexpand; /* skip atime */

	if (iocb->ki_flags & IOCB_DIRECT) {
		ret = kiocb_write_and_wait(iocb, count);
		if (ret < 0)
			goto reexpand;
		file_accessed(iocb->ki_filp);

		ret = blkdev_direct_IO(iocb, to);
		if (ret >= 0) {
			iocb->ki_pos += ret;
			count -= ret;
		}
		iov_iter_revert(to, count - iov_iter_count(to));
		if (ret < 0 || !count)
			goto reexpand;
	}

	ret = filemap_read(iocb, to, ret);

reexpand:
	if (unlikely(shorted))
		iov_iter_reexpand(to, iov_iter_count(to) + shorted);
	return ret;
}

#define	BLKDEV_FALLOC_FL_SUPPORTED					\
		(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |		\
		 FALLOC_FL_ZERO_RANGE)

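/*
 * fallocate() on a block device: FALLOC_FL_ZERO_RANGE writes zeroes
 * without unmapping, while FALLOC_FL_PUNCH_HOLE deallocates and must not
 * fall back to writing zeroes.  The page cache covering the range is
 * invalidated first in either case.
 */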
static long blkdev_fallocate(struct file *file, int mode, loff_t start,
			     loff_t len)
{
	struct inode *inode = bdev_file_inode(file);
	struct block_device *bdev = I_BDEV(inode);
	loff_t end = start + len - 1;
	loff_t isize;
	int error;

	/* Fail if we don't recognize the flags. */
	if (mode & ~BLKDEV_FALLOC_FL_SUPPORTED)
		return -EOPNOTSUPP;

	/* Don't go off the end of the device. */
	isize = bdev_nr_bytes(bdev);
	if (start >= isize)
		return -EINVAL;
	if (end >= isize) {
		if (mode & FALLOC_FL_KEEP_SIZE) {
			len = isize - start;
			end = start + len - 1;
		} else
			return -EINVAL;
	}

	/*
	 * Don't allow I/O that isn't aligned to the logical block size.
	 */
	if ((start | len) & (bdev_logical_block_size(bdev) - 1))
		return -EINVAL;

	filemap_invalidate_lock(inode->i_mapping);

	/*
	 * Invalidate the page cache, including dirty pages, for valid
	 * de-allocate mode calls to fallocate().
	 */
	switch (mode) {
	case FALLOC_FL_ZERO_RANGE:
	case FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE:
		error = truncate_bdev_range(bdev, file_to_blk_mode(file), start, end);
		if (error)
			goto fail;

		error = blkdev_issue_zeroout(bdev, start >> SECTOR_SHIFT,
					     len >> SECTOR_SHIFT, GFP_KERNEL,
					     BLKDEV_ZERO_NOUNMAP);
		break;
	case FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE:
		error = truncate_bdev_range(bdev, file_to_blk_mode(file), start, end);
		if (error)
			goto fail;

		error = blkdev_issue_zeroout(bdev, start >> SECTOR_SHIFT,
					     len >> SECTOR_SHIFT, GFP_KERNEL,
					     BLKDEV_ZERO_NOFALLBACK);
		break;
	default:
		error = -EOPNOTSUPP;
	}

fail:
	filemap_invalidate_unlock(inode->i_mapping);
	return error;
}

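/*
 * mmap() of a block device: fall back to a read-only mapping when the
 * device itself is read-only.
 */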
static int blkdev_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *bd_inode = bdev_file_inode(file);

	if (bdev_read_only(I_BDEV(bd_inode)))
		return generic_file_readonly_mmap(file, vma);

	return generic_file_mmap(file, vma);
}

const struct file_operations def_blk_fops = {
	.open		= blkdev_open,
	.release	= blkdev_release,
	.llseek		= blkdev_llseek,
	.read_iter	= blkdev_read_iter,
	.write_iter	= blkdev_write_iter,
	.iopoll		= iocb_bio_iopoll,
	.mmap		= blkdev_mmap,
	.fsync		= blkdev_fsync,
	.unlocked_ioctl	= blkdev_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= compat_blkdev_ioctl,
#endif
	.splice_read	= filemap_splice_read,
	.splice_write	= iter_file_splice_write,
	.fallocate	= blkdev_fallocate,
	.uring_cmd	= blkdev_uring_cmd,
	.fop_flags	= FOP_BUFFER_RASYNC,
};

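/*
 * Set up the shared bio_set used by the direct I/O paths; the offsetof()
 * front pad places struct blkdev_dio in front of its embedded bio so
 * container_of() works in the completion handlers.
 */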
static __init int blkdev_init(void)
{
	return bioset_init(&blkdev_dio_pool, 4,
			offsetof(struct blkdev_dio, bio),
			BIOSET_NEED_BVECS|BIOSET_PERCPU_CACHE);
}
module_init(blkdev_init);