// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to mapping data to requests
 */
#include <linux/kernel.h>
#include <linux/sched/task_stack.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/uio.h>

#include "blk.h"

struct bio_map_data {
	bool is_our_pages : 1;
	bool is_null_mapped : 1;
	struct iov_iter iter;
	struct iovec iov[];
};

static struct bio_map_data *bio_alloc_map_data(struct iov_iter *data,
					       gfp_t gfp_mask)
{
	struct bio_map_data *bmd;

	if (data->nr_segs > UIO_MAXIOV)
		return NULL;

	bmd = kmalloc(struct_size(bmd, iov, data->nr_segs), gfp_mask);
	if (!bmd)
		return NULL;
	memcpy(bmd->iov, data->iov, sizeof(struct iovec) * data->nr_segs);
	bmd->iter = *data;
	bmd->iter.iov = bmd->iov;
	return bmd;
}

/**
 * bio_copy_from_iter - copy all pages from iov_iter to bio
 * @bio: The &struct bio which describes the I/O as destination
 * @iter: iov_iter as source
 *
 * Copy all pages from iov_iter to bio.
 * Returns 0 on success, or error on failure.
 */
static int bio_copy_from_iter(struct bio *bio, struct iov_iter *iter)
{
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		ssize_t ret;

		ret = copy_page_from_iter(bvec->bv_page,
					  bvec->bv_offset,
					  bvec->bv_len,
					  iter);

		if (!iov_iter_count(iter))
			break;

		if (ret < bvec->bv_len)
			return -EFAULT;
	}

	return 0;
}

/**
 * bio_copy_to_iter - copy all pages from bio to iov_iter
 * @bio: The &struct bio which describes the I/O as source
 * @iter: iov_iter as destination
 *
 * Copy all pages from bio to iov_iter.
 * Returns 0 on success, or error on failure.
 */
static int bio_copy_to_iter(struct bio *bio, struct iov_iter iter)
{
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		ssize_t ret;

		ret = copy_page_to_iter(bvec->bv_page,
					bvec->bv_offset,
					bvec->bv_len,
					&iter);

		if (!iov_iter_count(&iter))
			break;

		if (ret < bvec->bv_len)
			return -EFAULT;
	}

	return 0;
}

/**
 * bio_uncopy_user - finish previously mapped bio
 * @bio: bio being terminated
 *
 * Free pages allocated from bio_copy_user_iov() and write back data
 * to user space in case of a read.
 */
static int bio_uncopy_user(struct bio *bio)
{
	struct bio_map_data *bmd = bio->bi_private;
	int ret = 0;

	if (!bmd->is_null_mapped) {
		/*
		 * if we're in a workqueue, the request is orphaned, so
		 * don't copy into a random user address space, just free
		 * and return -EINTR so user space doesn't expect any data.
		 */
		if (!current->mm)
			ret = -EINTR;
		else if (bio_data_dir(bio) == READ)
			ret = bio_copy_to_iter(bio, bmd->iter);
		if (bmd->is_our_pages)
			bio_free_pages(bio);
	}
	kfree(bmd);
	return ret;
}

static int bio_copy_user_iov(struct request *rq, struct rq_map_data *map_data,
		struct iov_iter *iter, gfp_t gfp_mask)
{
	struct bio_map_data *bmd;
	struct page *page;
	struct bio *bio;
	int i = 0, ret;
	int nr_pages;
	unsigned int len = iter->count;
	unsigned int offset = map_data ? offset_in_page(map_data->offset) : 0;

	bmd = bio_alloc_map_data(iter, gfp_mask);
	if (!bmd)
		return -ENOMEM;

	/*
	 * We need to do a deep copy of the iov_iter including the iovecs.
	 * The caller provided iov might point to an on-stack or otherwise
	 * shortlived one.
	 */
	bmd->is_our_pages = !map_data;
	bmd->is_null_mapped = (map_data && map_data->null_mapped);

	nr_pages = bio_max_segs(DIV_ROUND_UP(offset + len, PAGE_SIZE));

	ret = -ENOMEM;
	bio = bio_kmalloc(nr_pages, gfp_mask);
	if (!bio)
		goto out_bmd;
	bio_init(bio, NULL, bio->bi_inline_vecs, nr_pages, req_op(rq));

	if (map_data) {
		nr_pages = 1U << map_data->page_order;
		i = map_data->offset / PAGE_SIZE;
	}
	while (len) {
		unsigned int bytes = PAGE_SIZE;

		bytes -= offset;

		if (bytes > len)
			bytes = len;

		if (map_data) {
			if (i == map_data->nr_entries * nr_pages) {
				ret = -ENOMEM;
				goto cleanup;
			}

			page = map_data->pages[i / nr_pages];
			page += (i % nr_pages);

			i++;
		} else {
			page = alloc_page(GFP_NOIO | gfp_mask);
			if (!page) {
				ret = -ENOMEM;
				goto cleanup;
			}
		}

		if (bio_add_pc_page(rq->q, bio, page, bytes, offset) < bytes) {
			if (!map_data)
				__free_page(page);
			break;
		}

		len -= bytes;
		offset = 0;
	}

	if (map_data)
		map_data->offset += bio->bi_iter.bi_size;

	/*
	 * success
	 */
	if ((iov_iter_rw(iter) == WRITE &&
	     (!map_data || !map_data->null_mapped)) ||
	    (map_data && map_data->from_user)) {
		ret = bio_copy_from_iter(bio, iter);
		if (ret)
			goto cleanup;
	} else {
		if (bmd->is_our_pages)
			zero_fill_bio(bio);
		iov_iter_advance(iter, bio->bi_iter.bi_size);
	}

	bio->bi_private = bmd;

	ret = blk_rq_append_bio(rq, bio);
	if (ret)
		goto cleanup;
	return 0;
cleanup:
	if (!map_data)
		bio_free_pages(bio);
	bio_uninit(bio);
	kfree(bio);
out_bmd:
	kfree(bmd);
	return ret;
}

static void blk_mq_map_bio_put(struct bio *bio)
{
	if (bio->bi_opf & REQ_ALLOC_CACHE) {
		bio_put(bio);
	} else {
		bio_uninit(bio);
		kfree(bio);
	}
}

static struct bio *blk_rq_map_bio_alloc(struct request *rq,
		unsigned int nr_vecs, gfp_t gfp_mask)
{
	struct bio *bio;

	if (rq->cmd_flags & REQ_POLLED) {
		blk_opf_t opf = rq->cmd_flags | REQ_ALLOC_CACHE;

		bio = bio_alloc_bioset(NULL, nr_vecs, opf, gfp_mask,
					&fs_bio_set);
		if (!bio)
			return NULL;
	} else {
		bio = bio_kmalloc(nr_vecs, gfp_mask);
		if (!bio)
			return NULL;
		bio_init(bio, NULL, bio->bi_inline_vecs, nr_vecs, req_op(rq));
	}
	return bio;
}

static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
		gfp_t gfp_mask)
{
	unsigned int max_sectors = queue_max_hw_sectors(rq->q);
	unsigned int nr_vecs = iov_iter_npages(iter, BIO_MAX_VECS);
	unsigned int gup_flags = 0;
	struct bio *bio;
	int ret;
	int j;

	if (!iov_iter_count(iter))
		return -EINVAL;

	bio = blk_rq_map_bio_alloc(rq, nr_vecs, gfp_mask);
	if (bio == NULL)
		return -ENOMEM;

	if (blk_queue_pci_p2pdma(rq->q))
		gup_flags |= FOLL_PCI_P2PDMA;

	while (iov_iter_count(iter)) {
		struct page **pages, *stack_pages[UIO_FASTIOV];
		ssize_t bytes;
		size_t offs;
		int npages;

		if (nr_vecs <= ARRAY_SIZE(stack_pages)) {
			pages = stack_pages;
			bytes = iov_iter_get_pages(iter, pages, LONG_MAX,
						   nr_vecs, &offs, gup_flags);
		} else {
			bytes = iov_iter_get_pages_alloc(iter, &pages,
						LONG_MAX, &offs, gup_flags);
		}
		if (unlikely(bytes <= 0)) {
			ret = bytes ? bytes : -EFAULT;
			goto out_unmap;
		}

		npages = DIV_ROUND_UP(offs + bytes, PAGE_SIZE);

		if (unlikely(offs & queue_dma_alignment(rq->q)))
			j = 0;
		else {
			for (j = 0; j < npages; j++) {
				struct page *page = pages[j];
				unsigned int n = PAGE_SIZE - offs;
				bool same_page = false;

				if (n > bytes)
					n = bytes;

				if (!bio_add_hw_page(rq->q, bio, page, n, offs,
						     max_sectors, &same_page)) {
					if (same_page)
						put_page(page);
					break;
				}

				bytes -= n;
				offs = 0;
			}
		}
		/*
		 * release the pages we didn't map into the bio, if any
		 */
		while (j < npages)
			put_page(pages[j++]);
		if (pages != stack_pages)
			kvfree(pages);
		/* couldn't stuff something into bio? */
		if (bytes) {
			iov_iter_revert(iter, bytes);
			break;
		}
	}

	ret = blk_rq_append_bio(rq, bio);
	if (ret)
		goto out_unmap;
	return 0;

out_unmap:
	bio_release_pages(bio, false);
	blk_mq_map_bio_put(bio);
	return ret;
}

static void bio_invalidate_vmalloc_pages(struct bio *bio)
{
#ifdef ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE
	if (bio->bi_private && !op_is_write(bio_op(bio))) {
		unsigned long i, len = 0;

		for (i = 0; i < bio->bi_vcnt; i++)
			len += bio->bi_io_vec[i].bv_len;
		invalidate_kernel_vmap_range(bio->bi_private, len);
	}
#endif
}

static void bio_map_kern_endio(struct bio *bio)
{
	bio_invalidate_vmalloc_pages(bio);
	bio_uninit(bio);
	kfree(bio);
}

/**
 * bio_map_kern - map kernel address into bio
 * @q: the struct request_queue for the bio
 * @data: pointer to buffer to map
 * @len: length in bytes
 * @gfp_mask: allocation flags for bio allocation
 *
 * Map the kernel address into a bio suitable for io to a block
 * device. Returns an error pointer in case of error.
 */
static struct bio *bio_map_kern(struct request_queue *q, void *data,
		unsigned int len, gfp_t gfp_mask)
{
	unsigned long kaddr = (unsigned long)data;
	unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned long start = kaddr >> PAGE_SHIFT;
	const int nr_pages = end - start;
	bool is_vmalloc = is_vmalloc_addr(data);
	struct page *page;
	int offset, i;
	struct bio *bio;

	bio = bio_kmalloc(nr_pages, gfp_mask);
	if (!bio)
		return ERR_PTR(-ENOMEM);
	bio_init(bio, NULL, bio->bi_inline_vecs, nr_pages, 0);

	if (is_vmalloc) {
		flush_kernel_vmap_range(data, len);
		bio->bi_private = data;
	}

	offset = offset_in_page(kaddr);
	for (i = 0; i < nr_pages; i++) {
		unsigned int bytes = PAGE_SIZE - offset;

		if (len <= 0)
			break;

		if (bytes > len)
			bytes = len;

		if (!is_vmalloc)
			page = virt_to_page(data);
		else
			page = vmalloc_to_page(data);
		if (bio_add_pc_page(q, bio, page, bytes,
				    offset) < bytes) {
			/* we don't support partial mappings */
			bio_uninit(bio);
			kfree(bio);
			return ERR_PTR(-EINVAL);
		}

		data += bytes;
		len -= bytes;
		offset = 0;
	}

	bio->bi_end_io = bio_map_kern_endio;
	return bio;
}

static void bio_copy_kern_endio(struct bio *bio)
{
	bio_free_pages(bio);
	bio_uninit(bio);
	kfree(bio);
}

static void bio_copy_kern_endio_read(struct bio *bio)
{
	char *p = bio->bi_private;
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		memcpy_from_bvec(p, bvec);
		p += bvec->bv_len;
	}

	bio_copy_kern_endio(bio);
}

/**
 * bio_copy_kern - copy kernel address into bio
 * @q: the struct request_queue for the bio
 * @data: pointer to buffer to copy
 * @len: length in bytes
 * @gfp_mask: allocation flags for bio and page allocation
 * @reading: data direction is READ
 *
 * copy the kernel address into a bio suitable for io to a block
 * device. Returns an error pointer in case of error.
 */
static struct bio *bio_copy_kern(struct request_queue *q, void *data,
		unsigned int len, gfp_t gfp_mask, int reading)
{
	unsigned long kaddr = (unsigned long)data;
	unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned long start = kaddr >> PAGE_SHIFT;
	struct bio *bio;
	void *p = data;
	int nr_pages = 0;

	/*
	 * Overflow, abort
	 */
	if (end < start)
		return ERR_PTR(-EINVAL);

	nr_pages = end - start;
	bio = bio_kmalloc(nr_pages, gfp_mask);
	if (!bio)
		return ERR_PTR(-ENOMEM);
	bio_init(bio, NULL, bio->bi_inline_vecs, nr_pages, 0);

	while (len) {
		struct page *page;
		unsigned int bytes = PAGE_SIZE;

		if (bytes > len)
			bytes = len;

		page = alloc_page(GFP_NOIO | __GFP_ZERO | gfp_mask);
		if (!page)
			goto cleanup;

		if (!reading)
			memcpy(page_address(page), p, bytes);

		if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes)
			break;

		len -= bytes;
		p += bytes;
	}

	if (reading) {
		bio->bi_end_io = bio_copy_kern_endio_read;
		bio->bi_private = data;
	} else {
		bio->bi_end_io = bio_copy_kern_endio;
	}

	return bio;

cleanup:
	bio_free_pages(bio);
	bio_uninit(bio);
	kfree(bio);
	return ERR_PTR(-ENOMEM);
}

/*
 * Append a bio to a passthrough request.  Only works if the bio can be merged
 * into the request based on the driver constraints.
 */
int blk_rq_append_bio(struct request *rq, struct bio *bio)
{
	struct bvec_iter iter;
	struct bio_vec bv;
	unsigned int nr_segs = 0;

	bio_for_each_bvec(bv, bio, iter)
		nr_segs++;

	if (!rq->bio) {
		blk_rq_bio_prep(rq, bio, nr_segs);
	} else {
		if (!ll_back_merge_fn(rq, bio, nr_segs))
			return -EINVAL;
		rq->biotail->bi_next = bio;
		rq->biotail = bio;
		rq->__data_len += (bio)->bi_iter.bi_size;
		bio_crypt_free_ctx(bio);
	}

	return 0;
}
EXPORT_SYMBOL(blk_rq_append_bio);
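
/*
 * Illustrative sketch only (not part of this file): a passthrough driver
 * that already owns a bio would typically attach it to a freshly allocated
 * request roughly as below.  The surrounding allocation and execution calls
 * and the error handling are assumptions made for the example, not
 * requirements imposed by blk_rq_append_bio() itself:
 *
 *	struct request *rq;
 *	int ret;
 *
 *	rq = blk_mq_alloc_request(q, REQ_OP_DRV_OUT, 0);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	ret = blk_rq_append_bio(rq, bio);
 *	if (ret) {
 *		blk_mq_free_request(rq);
 *		return ret;
 *	}
 *	blk_execute_rq(rq, false);
 */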

/* Prepare bio for passthrough IO given ITER_BVEC iter */
static int blk_rq_map_user_bvec(struct request *rq, const struct iov_iter *iter)
{
	struct request_queue *q = rq->q;
	size_t nr_iter = iov_iter_count(iter);
	size_t nr_segs = iter->nr_segs;
	struct bio_vec *bvecs, *bvprvp = NULL;
	const struct queue_limits *lim = &q->limits;
	unsigned int nsegs = 0, bytes = 0;
	struct bio *bio;
	size_t i;

	if (!nr_iter || (nr_iter >> SECTOR_SHIFT) > queue_max_hw_sectors(q))
		return -EINVAL;
	if (nr_segs > queue_max_segments(q))
		return -EINVAL;

	/* no iovecs to alloc, as we already have a BVEC iterator */
	bio = blk_rq_map_bio_alloc(rq, 0, GFP_KERNEL);
	if (bio == NULL)
		return -ENOMEM;

	bio_iov_bvec_set(bio, (struct iov_iter *)iter);
	blk_rq_bio_prep(rq, bio, nr_segs);

	/* loop to perform a bunch of sanity checks */
	bvecs = (struct bio_vec *)iter->bvec;
	for (i = 0; i < nr_segs; i++) {
		struct bio_vec *bv = &bvecs[i];

		/*
		 * If the queue doesn't support SG gaps and adding this
		 * offset would create a gap, fallback to copy.
		 */
		if (bvprvp && bvec_gap_to_prev(lim, bvprvp, bv->bv_offset)) {
			blk_mq_map_bio_put(bio);
			return -EREMOTEIO;
		}
		/* check full condition */
		if (nsegs >= nr_segs || bytes > UINT_MAX - bv->bv_len)
			goto put_bio;
		if (bytes + bv->bv_len > nr_iter)
			goto put_bio;
		if (bv->bv_offset + bv->bv_len > PAGE_SIZE)
			goto put_bio;

		nsegs++;
		bytes += bv->bv_len;
		bvprvp = bv;
	}
	return 0;
put_bio:
	blk_mq_map_bio_put(bio);
	return -EINVAL;
}

/**
 * blk_rq_map_user_iov - map user data to a request, for passthrough requests
 * @q:		request queue where request should be inserted
 * @rq:		request to map data to
 * @map_data:   pointer to the rq_map_data holding pages (if necessary)
 * @iter:	iovec iterator
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 */
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
			struct rq_map_data *map_data,
			const struct iov_iter *iter, gfp_t gfp_mask)
{
	bool copy = false, map_bvec = false;
	unsigned long align = q->dma_pad_mask | queue_dma_alignment(q);
	struct bio *bio = NULL;
	struct iov_iter i;
	int ret = -EINVAL;

	if (map_data)
		copy = true;
	else if (blk_queue_may_bounce(q))
		copy = true;
	else if (iov_iter_alignment(iter) & align)
		copy = true;
	else if (iov_iter_is_bvec(iter))
		map_bvec = true;
	else if (!iter_is_iovec(iter))
		copy = true;
	else if (queue_virt_boundary(q))
		copy = queue_virt_boundary(q) & iov_iter_gap_alignment(iter);

	if (map_bvec) {
		ret = blk_rq_map_user_bvec(rq, iter);
		if (!ret)
			return 0;
		if (ret != -EREMOTEIO)
			goto fail;
		/* fall back to copying the data on limits mismatches */
		copy = true;
	}

	i = *iter;
	do {
		if (copy)
			ret = bio_copy_user_iov(rq, map_data, &i, gfp_mask);
		else
			ret = bio_map_user_iov(rq, &i, gfp_mask);
		if (ret)
			goto unmap_rq;
		if (!bio)
			bio = rq->bio;
	} while (iov_iter_count(&i));

	return 0;

unmap_rq:
	blk_rq_unmap_user(bio);
fail:
	rq->bio = NULL;
	return ret;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);

int blk_rq_map_user(struct request_queue *q, struct request *rq,
		    struct rq_map_data *map_data, void __user *ubuf,
		    unsigned long len, gfp_t gfp_mask)
{
	struct iovec iov;
	struct iov_iter i;
	int ret = import_single_range(rq_data_dir(rq), ubuf, len, &iov, &i);

	if (unlikely(ret < 0))
		return ret;

	return blk_rq_map_user_iov(q, rq, map_data, &i, gfp_mask);
}
EXPORT_SYMBOL(blk_rq_map_user);
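
/*
 * Illustrative sketch only (the surrounding ioctl-handler context and error
 * labels are assumptions for the example): a passthrough handler usually
 * maps the user buffer, remembers the original bio, executes the request,
 * and unmaps from the same process context, e.g.:
 *
 *	struct bio *bio;
 *	int ret;
 *
 *	ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
 *	if (ret)
 *		goto out_free_rq;
 *	bio = rq->bio;
 *	blk_execute_rq(rq, false);
 *	ret = blk_rq_unmap_user(bio);
 */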

int blk_rq_map_user_io(struct request *req, struct rq_map_data *map_data,
		void __user *ubuf, unsigned long buf_len, gfp_t gfp_mask,
		bool vec, int iov_count, bool check_iter_count, int rw)
{
	int ret = 0;

	if (vec) {
		struct iovec fast_iov[UIO_FASTIOV];
		struct iovec *iov = fast_iov;
		struct iov_iter iter;

		ret = import_iovec(rw, ubuf, iov_count ? iov_count : buf_len,
				UIO_FASTIOV, &iov, &iter);
		if (ret < 0)
			return ret;

		if (iov_count) {
			/* SG_IO howto says that the shorter of the two wins */
			iov_iter_truncate(&iter, buf_len);
			if (check_iter_count && !iov_iter_count(&iter)) {
				kfree(iov);
				return -EINVAL;
			}
		}

		ret = blk_rq_map_user_iov(req->q, req, map_data, &iter,
				gfp_mask);
		kfree(iov);
	} else if (buf_len) {
		ret = blk_rq_map_user(req->q, req, map_data, ubuf, buf_len,
				gfp_mask);
	}
	return ret;
}
EXPORT_SYMBOL(blk_rq_map_user_io);

/**
 * blk_rq_unmap_user - unmap a request with user data
 * @bio:	       start of bio list
 *
 * Description:
 *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
 *    supply the original rq->bio from the blk_rq_map_user() return, since
 *    the I/O completion may have changed rq->bio.
 */
int blk_rq_unmap_user(struct bio *bio)
{
	struct bio *next_bio;
	int ret = 0, ret2;

	while (bio) {
		if (bio->bi_private) {
			ret2 = bio_uncopy_user(bio);
			if (ret2 && !ret)
				ret = ret2;
		} else {
			bio_release_pages(bio, bio_data_dir(bio) == READ);
		}

		next_bio = bio;
		bio = bio->bi_next;
		blk_mq_map_bio_put(next_bio);
	}

	return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);

/**
 * blk_rq_map_kern - map kernel data to a request, for passthrough requests
 * @q:		request queue where request should be inserted
 * @rq:		request to fill
 * @kbuf:	the kernel buffer
 * @len:	length of user data
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly if possible. Otherwise a bounce
 *    buffer is used. Can be called multiple times to append multiple
 *    buffers.
 */
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
		    unsigned int len, gfp_t gfp_mask)
{
	int reading = rq_data_dir(rq) == READ;
	unsigned long addr = (unsigned long) kbuf;
	struct bio *bio;
	int ret;

	if (len > (queue_max_hw_sectors(q) << 9))
		return -EINVAL;
	if (!len || !kbuf)
		return -EINVAL;

	if (!blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf) ||
	    blk_queue_may_bounce(q))
		bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
	else
		bio = bio_map_kern(q, kbuf, len, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	bio->bi_opf &= ~REQ_OP_MASK;
	bio->bi_opf |= req_op(rq);

	ret = blk_rq_append_bio(rq, bio);
	if (unlikely(ret)) {
		bio_uninit(bio);
		kfree(bio);
	}
	return ret;
}
EXPORT_SYMBOL(blk_rq_map_kern);
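
/*
 * Illustrative sketch only (the allocation and execution calls shown are
 * assumptions for the example, not mandated by this file): a driver issuing
 * an internal command backed by a kernel buffer would map it roughly like
 * this:
 *
 *	struct request *rq;
 *	int ret;
 *
 *	rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	ret = blk_rq_map_kern(q, rq, buf, buf_len, GFP_KERNEL);
 *	if (ret)
 *		goto out_free_rq;
 *	blk_execute_rq(rq, false);
 */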