// SPDX-License-Identifier: GPL-2.0
/*
 * Shared application/kernel submission and completion ring pairs, for
 * supporting fast/efficient IO.
 *
 * A note on the read/write ordering memory barriers that are matched between
 * the application and kernel side.
 *
 * After the application reads the CQ ring tail, it must use an
 * appropriate smp_rmb() to pair with the smp_wmb() the kernel uses
 * before writing the tail (using smp_load_acquire to read the tail will
 * do). It also needs a smp_mb() before updating CQ head (ordering the
 * entry load(s) with the head store), pairing with an implicit barrier
 * through a control-dependency in io_get_cqe (smp_store_release to
 * store head will do). Failure to do so could lead to reading invalid
 * CQ entries.
 *
 * Likewise, the application must use an appropriate smp_wmb() before
 * writing the SQ tail (ordering SQ entry stores with the tail store),
 * which pairs with smp_load_acquire in io_get_sqring (smp_store_release
 * to store the tail will do). And it needs a barrier ordering the SQ
 * head load before writing new SQ entries (smp_load_acquire to read
 * head will do).
 *
 * When using the SQ poll thread (IORING_SETUP_SQPOLL), the application
 * needs to check the SQ flags for IORING_SQ_NEED_WAKEUP *after*
 * updating the SQ tail; a full memory barrier smp_mb() is needed
 * between.
 *
 * Also see the examples in the liburing library:
 *
 *	git://git.kernel.dk/liburing
 *
 * io_uring also uses READ/WRITE_ONCE() for _any_ store or load that happens
 * from data shared between the kernel and application. This is done both
 * for ordering purposes and to ensure that once a value is loaded from
 * data that the application could potentially modify, it remains stable.
 *
 * Copyright (C) 2018-2019 Jens Axboe
 * Copyright (c) 2018-2019 Christoph Hellwig
 */
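/*
 * Illustrative userspace sketch of the CQ reaping rules above (names such
 * as cq_head/cq_tail/cq_mask/handle_cqe are placeholders, not the UAPI;
 * C11 atomics stand in for the smp_* barriers named above):
 *
 *	unsigned head = *cq_head;
 *	// acquire pairs with the kernel's release store of the tail
 *	unsigned tail = atomic_load_explicit(cq_tail, memory_order_acquire);
 *	while (head != tail) {
 *		struct io_uring_cqe *cqe = &cqes[head++ & *cq_mask];
 *		handle_cqe(cqe);
 *	}
 *	// release orders the entry loads before publishing the new head
 *	atomic_store_explicit(cq_head, head, memory_order_release);
 *
 * With IORING_SETUP_SQPOLL, the SQ tail store and the subsequent
 * IORING_SQ_NEED_WAKEUP check additionally need a full smp_mb() between
 * them, as noted above.
 */
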
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/syscalls.h>
#include <net/compat.h>
#include <linux/refcount.h>
#include <linux/uio.h>
#include <linux/bits.h>

#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/bvec.h>
#include <linux/net.h>
#include <net/sock.h>
#include <net/af_unix.h>
#include <net/scm.h>
#include <linux/anon_inodes.h>
#include <linux/sched/mm.h>
#include <linux/uaccess.h>
#include <linux/nospec.h>
#include <linux/highmem.h>
#include <linux/fsnotify.h>
#include <linux/fadvise.h>
#include <linux/task_work.h>
#include <linux/io_uring.h>
#include <linux/audit.h>
#include <linux/security.h>

#define CREATE_TRACE_POINTS
#include <trace/events/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io-wq.h"

#include "io_uring.h"
#include "opdef.h"
#include "refs.h"
#include "tctx.h"
#include "sqpoll.h"
#include "fdinfo.h"
#include "kbuf.h"
#include "rsrc.h"
#include "cancel.h"
#include "net.h"
#include "notif.h"

#include "timeout.h"
#include "poll.h"
#include "alloc_cache.h"

#define IORING_MAX_ENTRIES	32768
#define IORING_MAX_CQ_ENTRIES	(2 * IORING_MAX_ENTRIES)

#define IORING_MAX_RESTRICTIONS	(IORING_RESTRICTION_LAST + \
				 IORING_REGISTER_LAST + IORING_OP_LAST)

#define SQE_COMMON_FLAGS (IOSQE_FIXED_FILE | IOSQE_IO_LINK | \
			  IOSQE_IO_HARDLINK | IOSQE_ASYNC)

#define SQE_VALID_FLAGS	(SQE_COMMON_FLAGS | IOSQE_BUFFER_SELECT | \
			 IOSQE_IO_DRAIN | IOSQE_CQE_SKIP_SUCCESS)

#define IO_REQ_CLEAN_FLAGS (REQ_F_BUFFER_SELECTED | REQ_F_NEED_CLEANUP | \
			    REQ_F_POLLED | REQ_F_INFLIGHT | REQ_F_CREDS | \
			    REQ_F_ASYNC_DATA)

#define IO_REQ_CLEAN_SLOW_FLAGS (REQ_F_REFCOUNT | REQ_F_LINK | REQ_F_HARDLINK |\
				 IO_REQ_CLEAN_FLAGS)

#define IO_TCTX_REFS_CACHE_NR	(1U << 10)

#define IO_COMPL_BATCH			32
#define IO_REQ_ALLOC_BATCH		8

enum {
	IO_CHECK_CQ_OVERFLOW_BIT,
	IO_CHECK_CQ_DROPPED_BIT,
};

struct io_defer_entry {
	struct list_head	list;
	struct io_kiocb		*req;
	u32			seq;
};

/* requests with any of those set should undergo io_disarm_next() */
#define IO_DISARM_MASK (REQ_F_ARM_LTIMEOUT | REQ_F_LINK_TIMEOUT | REQ_F_FAIL)
#define IO_REQ_LINK_FLAGS (REQ_F_LINK | REQ_F_HARDLINK)

static bool io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
					 struct task_struct *task,
					 bool cancel_all);

static void io_dismantle_req(struct io_kiocb *req);
static void io_clean_op(struct io_kiocb *req);
static void io_queue_sqe(struct io_kiocb *req);

static void __io_submit_flush_completions(struct io_ring_ctx *ctx);

static struct kmem_cache *req_cachep;

struct sock *io_uring_get_socket(struct file *file)
{
#if defined(CONFIG_UNIX)
	if (io_is_uring_fops(file)) {
		struct io_ring_ctx *ctx = file->private_data;

		return ctx->ring_sock->sk;
	}
#endif
	return NULL;
}
EXPORT_SYMBOL(io_uring_get_socket);

static inline void io_submit_flush_completions(struct io_ring_ctx *ctx)
{
	if (!wq_list_empty(&ctx->submit_state.compl_reqs))
		__io_submit_flush_completions(ctx);
}

static inline unsigned int __io_cqring_events(struct io_ring_ctx *ctx)
{
	return ctx->cached_cq_tail - READ_ONCE(ctx->rings->cq.head);
}

static bool io_match_linked(struct io_kiocb *head)
{
	struct io_kiocb *req;

	io_for_each_link(req, head) {
		if (req->flags & REQ_F_INFLIGHT)
			return true;
	}
	return false;
}

/*
 * As io_match_task() but protected against racing with linked timeouts.
 * User must not hold timeout_lock.
 */
bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
			bool cancel_all)
{
	bool matched;

	if (task && head->task != task)
		return false;
	if (cancel_all)
		return true;

	if (head->flags & REQ_F_LINK_TIMEOUT) {
		struct io_ring_ctx *ctx = head->ctx;

		/* protect against races with linked timeouts */
		spin_lock_irq(&ctx->timeout_lock);
		matched = io_match_linked(head);
		spin_unlock_irq(&ctx->timeout_lock);
	} else {
		matched = io_match_linked(head);
	}
	return matched;
}

static inline void req_fail_link_node(struct io_kiocb *req, int res)
{
	req_set_fail(req);
	io_req_set_res(req, res, 0);
}

static inline void io_req_add_to_cache(struct io_kiocb *req, struct io_ring_ctx *ctx)
{
	wq_stack_add_head(&req->comp_list, &ctx->submit_state.free_list);
}

static __cold void io_ring_ctx_ref_free(struct percpu_ref *ref)
{
	struct io_ring_ctx *ctx = container_of(ref, struct io_ring_ctx, refs);

	complete(&ctx->ref_comp);
}

static __cold void io_fallback_req_func(struct work_struct *work)
{
	struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx,
						fallback_work.work);
	struct llist_node *node = llist_del_all(&ctx->fallback_llist);
	struct io_kiocb *req, *tmp;
	bool locked = false;

	percpu_ref_get(&ctx->refs);
	llist_for_each_entry_safe(req, tmp, node, io_task_work.node)
		req->io_task_work.func(req, &locked);

	if (locked) {
		io_submit_flush_completions(ctx);
		mutex_unlock(&ctx->uring_lock);
	}
	percpu_ref_put(&ctx->refs);
}

static int io_alloc_hash_table(struct io_hash_table *table, unsigned bits)
{
	unsigned hash_buckets = 1U << bits;
	size_t hash_size = hash_buckets * sizeof(table->hbs[0]);

	table->hbs = kmalloc(hash_size, GFP_KERNEL);
	if (!table->hbs)
		return -ENOMEM;

	table->hash_bits = bits;
	init_hash_table(table, hash_buckets);
	return 0;
}

static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
{
	struct io_ring_ctx *ctx;
	int hash_bits;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	xa_init(&ctx->io_bl_xa);

	/*
	 * Use 5 bits less than the max cq entries, that should give us around
	 * 32 entries per hash list if totally full and uniformly spread, but
	 * don't keep too many buckets, to avoid overconsuming memory.
	 */
	hash_bits = ilog2(p->cq_entries) - 5;
	hash_bits = clamp(hash_bits, 1, 8);
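	/* e.g. p->cq_entries == 4096: 12 - 5 == 7 bits, 128 buckets, 32/list */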
	if (io_alloc_hash_table(&ctx->cancel_table, hash_bits))
		goto err;
	if (io_alloc_hash_table(&ctx->cancel_table_locked, hash_bits))
		goto err;

	ctx->dummy_ubuf = kzalloc(sizeof(*ctx->dummy_ubuf), GFP_KERNEL);
	if (!ctx->dummy_ubuf)
		goto err;
	/* set invalid range, so io_import_fixed() fails when meeting it */
	ctx->dummy_ubuf->ubuf = -1UL;

	if (percpu_ref_init(&ctx->refs, io_ring_ctx_ref_free,
			    0, GFP_KERNEL))
		goto err;

	ctx->flags = p->flags;
	init_waitqueue_head(&ctx->sqo_sq_wait);
	INIT_LIST_HEAD(&ctx->sqd_list);
	INIT_LIST_HEAD(&ctx->cq_overflow_list);
	INIT_LIST_HEAD(&ctx->io_buffers_cache);
	io_alloc_cache_init(&ctx->apoll_cache);
	io_alloc_cache_init(&ctx->netmsg_cache);
	init_completion(&ctx->ref_comp);
	xa_init_flags(&ctx->personalities, XA_FLAGS_ALLOC1);
	mutex_init(&ctx->uring_lock);
	init_waitqueue_head(&ctx->cq_wait);
	spin_lock_init(&ctx->completion_lock);
	spin_lock_init(&ctx->timeout_lock);
	INIT_WQ_LIST(&ctx->iopoll_list);
	INIT_LIST_HEAD(&ctx->io_buffers_pages);
	INIT_LIST_HEAD(&ctx->io_buffers_comp);
	INIT_LIST_HEAD(&ctx->defer_list);
	INIT_LIST_HEAD(&ctx->timeout_list);
	INIT_LIST_HEAD(&ctx->ltimeout_list);
	spin_lock_init(&ctx->rsrc_ref_lock);
	INIT_LIST_HEAD(&ctx->rsrc_ref_list);
	INIT_DELAYED_WORK(&ctx->rsrc_put_work, io_rsrc_put_work);
	init_llist_head(&ctx->rsrc_put_llist);
	INIT_LIST_HEAD(&ctx->tctx_list);
	ctx->submit_state.free_list.next = NULL;
	INIT_WQ_LIST(&ctx->locked_free_list);
	INIT_DELAYED_WORK(&ctx->fallback_work, io_fallback_req_func);
	INIT_WQ_LIST(&ctx->submit_state.compl_reqs);
	return ctx;
err:
	kfree(ctx->dummy_ubuf);
	kfree(ctx->cancel_table.hbs);
	kfree(ctx->cancel_table_locked.hbs);
	kfree(ctx->io_bl);
	xa_destroy(&ctx->io_bl_xa);
	kfree(ctx);
	return NULL;
}

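/*
 * Account a CQE that had to be dropped: bump the user-visible overflow
 * counter and back out one unit of cq_extra so that drain sequence
 * accounting stays consistent with what was actually posted.
 */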
static void io_account_cq_overflow(struct io_ring_ctx *ctx)
{
	struct io_rings *r = ctx->rings;

	WRITE_ONCE(r->cq_overflow, READ_ONCE(r->cq_overflow) + 1);
	ctx->cq_extra--;
}

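/*
 * Drain accounting: a drained request may run only once every request
 * submitted before it has posted its completion. Keep deferring while the
 * recorded submission sequence, adjusted by cq_extra, does not yet match
 * the current CQ tail.
 */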
static bool req_need_defer(struct io_kiocb *req, u32 seq)
{
	if (unlikely(req->flags & REQ_F_IO_DRAIN)) {
		struct io_ring_ctx *ctx = req->ctx;

		return seq + READ_ONCE(ctx->cq_extra) != ctx->cached_cq_tail;
	}

	return false;
}

static inline void io_req_track_inflight(struct io_kiocb *req)
{
	if (!(req->flags & REQ_F_INFLIGHT)) {
		req->flags |= REQ_F_INFLIGHT;
		atomic_inc(&req->task->io_uring->inflight_tracked);
	}
}

static struct io_kiocb *__io_prep_linked_timeout(struct io_kiocb *req)
{
	if (WARN_ON_ONCE(!req->link))
		return NULL;

	req->flags &= ~REQ_F_ARM_LTIMEOUT;
	req->flags |= REQ_F_LINK_TIMEOUT;

	/* linked timeouts should have two refs once prep'ed */
	io_req_set_refcount(req);
	__io_req_set_refcount(req->link, 2);
	return req->link;
}

static inline struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req)
{
	if (likely(!(req->flags & REQ_F_ARM_LTIMEOUT)))
		return NULL;
	return __io_prep_linked_timeout(req);
}

static noinline void __io_arm_ltimeout(struct io_kiocb *req)
{
	io_queue_linked_timeout(__io_prep_linked_timeout(req));
}

static inline void io_arm_ltimeout(struct io_kiocb *req)
{
	if (unlikely(req->flags & REQ_F_ARM_LTIMEOUT))
		__io_arm_ltimeout(req);
}

static void io_prep_async_work(struct io_kiocb *req)
{
	const struct io_op_def *def = &io_op_defs[req->opcode];
	struct io_ring_ctx *ctx = req->ctx;

	if (!(req->flags & REQ_F_CREDS)) {
		req->flags |= REQ_F_CREDS;
		req->creds = get_current_cred();
	}

	req->work.list.next = NULL;
	req->work.flags = 0;
	req->work.cancel_seq = atomic_read(&ctx->cancel_seq);
	if (req->flags & REQ_F_FORCE_ASYNC)
		req->work.flags |= IO_WQ_WORK_CONCURRENT;

	if (req->file && !io_req_ffs_set(req))
		req->flags |= io_file_get_flags(req->file) << REQ_F_SUPPORT_NOWAIT_BIT;

	if (req->flags & REQ_F_ISREG) {
		if (def->hash_reg_file || (ctx->flags & IORING_SETUP_IOPOLL))
			io_wq_hash_work(&req->work, file_inode(req->file));
	} else if (!req->file || !S_ISBLK(file_inode(req->file)->i_mode)) {
		if (def->unbound_nonreg_file)
			req->work.flags |= IO_WQ_WORK_UNBOUND;
	}
}

static void io_prep_async_link(struct io_kiocb *req)
{
	struct io_kiocb *cur;

	if (req->flags & REQ_F_LINK_TIMEOUT) {
		struct io_ring_ctx *ctx = req->ctx;

		spin_lock_irq(&ctx->timeout_lock);
		io_for_each_link(cur, req)
			io_prep_async_work(cur);
		spin_unlock_irq(&ctx->timeout_lock);
	} else {
		io_for_each_link(cur, req)
			io_prep_async_work(cur);
	}
}

void io_queue_iowq(struct io_kiocb *req, bool *dont_use)
{
	struct io_kiocb *link = io_prep_linked_timeout(req);
	struct io_uring_task *tctx = req->task->io_uring;

	BUG_ON(!tctx);
	BUG_ON(!tctx->io_wq);

	/* init ->work of the whole link before punting */
	io_prep_async_link(req);

	/*
	 * Not expected to happen, but if we do have a bug where this _can_
	 * happen, catch it here and ensure the request is marked as
	 * canceled. That will make io-wq go through the usual work cancel
	 * procedure rather than attempt to run this request (or create a new
	 * worker for it).
	 */
	if (WARN_ON_ONCE(!same_thread_group(req->task, current)))
		req->work.flags |= IO_WQ_WORK_CANCEL;

	trace_io_uring_queue_async_work(req, io_wq_is_hashed(&req->work));
	io_wq_enqueue(tctx->io_wq, &req->work);
	if (link)
		io_queue_linked_timeout(link);
}

static __cold void io_queue_deferred(struct io_ring_ctx *ctx)
{
	while (!list_empty(&ctx->defer_list)) {
		struct io_defer_entry *de = list_first_entry(&ctx->defer_list,
						struct io_defer_entry, list);

		if (req_need_defer(de->req, de->seq))
			break;
		list_del_init(&de->list);
		io_req_task_queue(de->req);
		kfree(de);
	}
}

static void io_eventfd_signal(struct io_ring_ctx *ctx)
{
	struct io_ev_fd *ev_fd;
	bool skip;

	spin_lock(&ctx->completion_lock);
	/*
	 * Eventfd should only get triggered when at least one event has been
	 * posted. Some applications rely on the eventfd notification count
	 * only changing IFF a new CQE has been added to the CQ ring. There's
	 * no dependency on a 1:1 relationship between how many times this
	 * function is called (and hence the eventfd count) and the number of
	 * CQEs posted to the CQ ring.
	 */
	skip = ctx->cached_cq_tail == ctx->evfd_last_cq_tail;
	ctx->evfd_last_cq_tail = ctx->cached_cq_tail;
	spin_unlock(&ctx->completion_lock);
	if (skip)
		return;

	rcu_read_lock();
	/*
	 * rcu_dereference ctx->io_ev_fd once and use it for both checking
	 * and eventfd_signal
	 */
	ev_fd = rcu_dereference(ctx->io_ev_fd);

	/*
	 * Check again if ev_fd exists in case an io_eventfd_unregister call
	 * completed between the NULL check of ctx->io_ev_fd at the start of
	 * the function and rcu_read_lock.
	 */
	if (unlikely(!ev_fd))
		goto out;
	if (READ_ONCE(ctx->rings->cq_flags) & IORING_CQ_EVENTFD_DISABLED)
		goto out;

	if (!ev_fd->eventfd_async || io_wq_current_is_worker())
		eventfd_signal(ev_fd->cq_ev_fd, 1);
out:
	rcu_read_unlock();
}

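/*
 * Slow part of committing the CQ ring: flush expired timeouts and any
 * drain requests that the new completions unblocked, then signal the
 * eventfd if one is registered.
 */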
void __io_commit_cqring_flush(struct io_ring_ctx *ctx)
{
	if (ctx->off_timeout_used || ctx->drain_active) {
		spin_lock(&ctx->completion_lock);
		if (ctx->off_timeout_used)
			io_flush_timeouts(ctx);
		if (ctx->drain_active)
			io_queue_deferred(ctx);
		spin_unlock(&ctx->completion_lock);
	}
	if (ctx->has_evfd)
		io_eventfd_signal(ctx);
}

static inline void io_cqring_ev_posted(struct io_ring_ctx *ctx)
{
	io_commit_cqring_flush(ctx);
	io_cqring_wake(ctx);
}

static inline void __io_cq_unlock_post(struct io_ring_ctx *ctx)
	__releases(ctx->completion_lock)
{
	io_commit_cqring(ctx);
	spin_unlock(&ctx->completion_lock);
	io_cqring_ev_posted(ctx);
}

void io_cq_unlock_post(struct io_ring_ctx *ctx)
{
	__io_cq_unlock_post(ctx);
}

/* Returns true if there are no backlogged entries after the flush */
static bool __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
{
	bool all_flushed;
	size_t cqe_size = sizeof(struct io_uring_cqe);

	if (!force && __io_cqring_events(ctx) == ctx->cq_entries)
		return false;

	if (ctx->flags & IORING_SETUP_CQE32)
		cqe_size <<= 1;

	io_cq_lock(ctx);
	while (!list_empty(&ctx->cq_overflow_list)) {
		struct io_uring_cqe *cqe = io_get_cqe(ctx);
		struct io_overflow_cqe *ocqe;

		if (!cqe && !force)
			break;
		ocqe = list_first_entry(&ctx->cq_overflow_list,
					struct io_overflow_cqe, list);
		if (cqe)
			memcpy(cqe, &ocqe->cqe, cqe_size);
		else
			io_account_cq_overflow(ctx);

		list_del(&ocqe->list);
		kfree(ocqe);
	}

	all_flushed = list_empty(&ctx->cq_overflow_list);
	if (all_flushed) {
		clear_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq);
		atomic_andnot(IORING_SQ_CQ_OVERFLOW, &ctx->rings->sq_flags);
	}

	io_cq_unlock_post(ctx);
	return all_flushed;
}

static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx)
{
	bool ret = true;

	if (test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq)) {
		/* iopoll syncs against uring_lock, not completion_lock */
		if (ctx->flags & IORING_SETUP_IOPOLL)
			mutex_lock(&ctx->uring_lock);
		ret = __io_cqring_overflow_flush(ctx, false);
		if (ctx->flags & IORING_SETUP_IOPOLL)
			mutex_unlock(&ctx->uring_lock);
	}

	return ret;
}

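/*
 * Drop task references that completed requests were holding; if the task
 * is winding down its io_uring use, wake anyone waiting for the inflight
 * count to drain.
 */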
void __io_put_task(struct task_struct *task, int nr)
{
	struct io_uring_task *tctx = task->io_uring;

	percpu_counter_sub(&tctx->inflight, nr);
	if (unlikely(atomic_read(&tctx->in_idle)))
		wake_up(&tctx->wait);
	put_task_struct_many(task, nr);
}

void io_task_refs_refill(struct io_uring_task *tctx)
{
	unsigned int refill = -tctx->cached_refs + IO_TCTX_REFS_CACHE_NR;
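	/* e.g. cached_refs == -5: refill == 1029, restoring a full 1U << 10 cache */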

	percpu_counter_add(&tctx->inflight, refill);
	refcount_add(refill, &current->usage);
	tctx->cached_refs += refill;
}

static __cold void io_uring_drop_tctx_refs(struct task_struct *task)
{
	struct io_uring_task *tctx = task->io_uring;
	unsigned int refs = tctx->cached_refs;

	if (refs) {
		tctx->cached_refs = 0;
		percpu_counter_sub(&tctx->inflight, refs);
		put_task_struct_many(task, refs);
	}
}

static bool io_cqring_event_overflow(struct io_ring_ctx *ctx, u64 user_data,
				     s32 res, u32 cflags, u64 extra1, u64 extra2)
{
	struct io_overflow_cqe *ocqe;
	size_t ocq_size = sizeof(struct io_overflow_cqe);
	bool is_cqe32 = (ctx->flags & IORING_SETUP_CQE32);

	if (is_cqe32)
		ocq_size += sizeof(struct io_uring_cqe);

	ocqe = kmalloc(ocq_size, GFP_ATOMIC | __GFP_ACCOUNT);
	trace_io_uring_cqe_overflow(ctx, user_data, res, cflags, ocqe);
	if (!ocqe) {
		/*
		 * If we're in ring overflow flush mode, or in task cancel mode,
		 * or cannot allocate an overflow entry, then we need to drop it
		 * on the floor.
		 */
		io_account_cq_overflow(ctx);
		set_bit(IO_CHECK_CQ_DROPPED_BIT, &ctx->check_cq);
		return false;
	}
	if (list_empty(&ctx->cq_overflow_list)) {
		set_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq);
		atomic_or(IORING_SQ_CQ_OVERFLOW, &ctx->rings->sq_flags);
	}
	ocqe->cqe.user_data = user_data;
	ocqe->cqe.res = res;
	ocqe->cqe.flags = cflags;
	if (is_cqe32) {
		ocqe->cqe.big_cqe[0] = extra1;
		ocqe->cqe.big_cqe[1] = extra2;
	}
	list_add_tail(&ocqe->list, &ctx->cq_overflow_list);
	return true;
}

bool io_req_cqe_overflow(struct io_kiocb *req)
{
	if (!(req->flags & REQ_F_CQE32_INIT)) {
		req->extra1 = 0;
		req->extra2 = 0;
	}
	return io_cqring_event_overflow(req->ctx, req->cqe.user_data,
					req->cqe.res, req->cqe.flags,
					req->extra1, req->extra2);
}

/*
 * writes to the cq entry need to come after reading head; the
 * control dependency is enough as we're using WRITE_ONCE to
 * fill the cq entry
 */
struct io_uring_cqe *__io_get_cqe(struct io_ring_ctx *ctx)
{
	struct io_rings *rings = ctx->rings;
	unsigned int off = ctx->cached_cq_tail & (ctx->cq_entries - 1);
	unsigned int free, queued, len;

	/* userspace may cheat modifying the tail, be safe and do min */
	queued = min(__io_cqring_events(ctx), ctx->cq_entries);
	free = ctx->cq_entries - queued;
	/* we need a contiguous range, limit based on the current array offset */
	len = min(free, ctx->cq_entries - off);
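	/*
	 * e.g. cq_entries == 8, off == 6, queued == 4: free == 4, but only
	 * cqes[6..7] are contiguous, so len == min(4, 8 - 6) == 2.
	 */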
| 709 | if (!len) |
| 710 | return NULL; |
| 711 | |
Pavel Begunkov | b3659a6 | 2022-06-17 09:48:05 +0100 | [diff] [blame] | 712 | if (ctx->flags & IORING_SETUP_CQE32) { |
| 713 | off <<= 1; |
| 714 | len <<= 1; |
| 715 | } |
| 716 | |
Pavel Begunkov | faf88dd | 2022-06-17 09:48:01 +0100 | [diff] [blame] | 717 | ctx->cqe_cached = &rings->cqes[off]; |
| 718 | ctx->cqe_sentinel = ctx->cqe_cached + len; |
Pavel Begunkov | b3659a6 | 2022-06-17 09:48:05 +0100 | [diff] [blame] | 719 | |
| 720 | ctx->cached_cq_tail++; |
Pavel Begunkov | faf88dd | 2022-06-17 09:48:01 +0100 | [diff] [blame] | 721 | ctx->cqe_cached++; |
Pavel Begunkov | b3659a6 | 2022-06-17 09:48:05 +0100 | [diff] [blame] | 722 | if (ctx->flags & IORING_SETUP_CQE32) |
| 723 | ctx->cqe_cached++; |
| 724 | return &rings->cqes[off]; |
Pavel Begunkov | faf88dd | 2022-06-17 09:48:01 +0100 | [diff] [blame] | 725 | } |
| 726 | |
Pavel Begunkov | eb42ceb | 2022-07-12 21:52:38 +0100 | [diff] [blame] | 727 | bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags, |
| 728 | bool allow_overflow) |
Pavel Begunkov | 913a571 | 2021-11-10 15:49:31 +0000 | [diff] [blame] | 729 | { |
Pavel Begunkov | cd94903 | 2022-06-15 11:23:06 +0100 | [diff] [blame] | 730 | struct io_uring_cqe *cqe; |
| 731 | |
Pavel Begunkov | 913a571 | 2021-11-10 15:49:31 +0000 | [diff] [blame] | 732 | ctx->cq_extra++; |
Pavel Begunkov | cd94903 | 2022-06-15 11:23:06 +0100 | [diff] [blame] | 733 | |
| 734 | /* |
| 735 | * If we can't get a cq entry, userspace overflowed the |
| 736 | * submission (by quite a lot). Increment the overflow count in |
| 737 | * the ring. |
| 738 | */ |
| 739 | cqe = io_get_cqe(ctx); |
| 740 | if (likely(cqe)) { |
Dylan Yudaken | e0486f3 | 2022-06-30 02:12:31 -0700 | [diff] [blame] | 741 | trace_io_uring_complete(ctx, NULL, user_data, res, cflags, 0, 0); |
| 742 | |
Pavel Begunkov | cd94903 | 2022-06-15 11:23:06 +0100 | [diff] [blame] | 743 | WRITE_ONCE(cqe->user_data, user_data); |
| 744 | WRITE_ONCE(cqe->res, res); |
| 745 | WRITE_ONCE(cqe->flags, cflags); |
Pavel Begunkov | c559597 | 2022-06-15 11:23:07 +0100 | [diff] [blame] | 746 | |
| 747 | if (ctx->flags & IORING_SETUP_CQE32) { |
| 748 | WRITE_ONCE(cqe->big_cqe[0], 0); |
| 749 | WRITE_ONCE(cqe->big_cqe[1], 0); |
| 750 | } |
Pavel Begunkov | cd94903 | 2022-06-15 11:23:06 +0100 | [diff] [blame] | 751 | return true; |
| 752 | } |
Dylan Yudaken | 52120f0 | 2022-06-30 02:12:26 -0700 | [diff] [blame] | 753 | |
| 754 | if (allow_overflow) |
| 755 | return io_cqring_event_overflow(ctx, user_data, res, cflags, 0, 0); |
| 756 | |
| 757 | return false; |
Jens Axboe | bcda7ba | 2020-02-23 16:42:51 -0700 | [diff] [blame] | 758 | } |
| 759 | |
Pavel Begunkov | d245bca | 2022-06-17 09:48:00 +0100 | [diff] [blame] | 760 | bool io_post_aux_cqe(struct io_ring_ctx *ctx, |
Dylan Yudaken | 52120f0 | 2022-06-30 02:12:26 -0700 | [diff] [blame] | 761 | u64 user_data, s32 res, u32 cflags, |
| 762 | bool allow_overflow) |
Pavel Begunkov | d245bca | 2022-06-17 09:48:00 +0100 | [diff] [blame] | 763 | { |
| 764 | bool filled; |
| 765 | |
Pavel Begunkov | 25399321 | 2022-06-20 01:25:56 +0100 | [diff] [blame] | 766 | io_cq_lock(ctx); |
Dylan Yudaken | 52120f0 | 2022-06-30 02:12:26 -0700 | [diff] [blame] | 767 | filled = io_fill_cqe_aux(ctx, user_data, res, cflags, allow_overflow); |
Pavel Begunkov | 25399321 | 2022-06-20 01:25:56 +0100 | [diff] [blame] | 768 | io_cq_unlock_post(ctx); |
Pavel Begunkov | d245bca | 2022-06-17 09:48:00 +0100 | [diff] [blame] | 769 | return filled; |
| 770 | } |
| 771 | |
Stefan Roesch | effcf8b | 2022-04-26 11:21:27 -0700 | [diff] [blame] | 772 | static void __io_req_complete_put(struct io_kiocb *req) |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 773 | { |
Jens Axboe | c7dae4b | 2021-02-09 19:53:37 -0700 | [diff] [blame] | 774 | /* |
| 775 | * If we're the last reference to this request, add to our locked |
| 776 | * free_list cache. |
| 777 | */ |
Jens Axboe | de9b4cc | 2021-02-24 13:28:27 -0700 | [diff] [blame] | 778 | if (req_ref_put_and_test(req)) { |
Stefan Roesch | effcf8b | 2022-04-26 11:21:27 -0700 | [diff] [blame] | 779 | struct io_ring_ctx *ctx = req->ctx; |
| 780 | |
Pavel Begunkov | da1a08c | 2022-04-15 22:08:29 +0100 | [diff] [blame] | 781 | if (req->flags & IO_REQ_LINK_FLAGS) { |
Pavel Begunkov | 0756a86 | 2021-08-15 10:40:25 +0100 | [diff] [blame] | 782 | if (req->flags & IO_DISARM_MASK) |
Pavel Begunkov | 7a61235 | 2021-03-09 00:37:59 +0000 | [diff] [blame] | 783 | io_disarm_next(req); |
| 784 | if (req->link) { |
| 785 | io_req_task_queue(req->link); |
| 786 | req->link = NULL; |
| 787 | } |
| 788 | } |
Pavel Begunkov | 7ac1edc | 2022-04-18 20:51:15 +0100 | [diff] [blame] | 789 | io_req_put_rsrc(req); |
Pavel Begunkov | 8197b05 | 2022-03-25 13:00:43 +0000 | [diff] [blame] | 790 | /* |
| 791 | * Selected buffer deallocation in io_clean_op() assumes that |
| 792 | * we don't hold ->completion_lock. Clean them here to avoid |
| 793 | * deadlocks. |
| 794 | */ |
| 795 | io_put_kbuf_comp(req); |
Jens Axboe | c7dae4b | 2021-02-09 19:53:37 -0700 | [diff] [blame] | 796 | io_dismantle_req(req); |
| 797 | io_put_task(req->task, 1); |
Pavel Begunkov | c2b6c6b | 2021-09-24 21:59:47 +0100 | [diff] [blame] | 798 | wq_list_add_head(&req->comp_list, &ctx->locked_free_list); |
Pavel Begunkov | d0acdee | 2021-05-16 22:58:12 +0100 | [diff] [blame] | 799 | ctx->locked_free_nr++; |
Pavel Begunkov | 180f829 | 2021-03-14 20:57:09 +0000 | [diff] [blame] | 800 | } |
Hao Xu | a37fae8 | 2021-12-07 17:39:50 +0800 | [diff] [blame] | 801 | } |
| 802 | |
Jens Axboe | 59915143 | 2022-05-25 08:57:27 -0600 | [diff] [blame] | 803 | void __io_req_complete_post(struct io_kiocb *req) |
Stefan Roesch | effcf8b | 2022-04-26 11:21:27 -0700 | [diff] [blame] | 804 | { |
Jens Axboe | 97b388d | 2022-05-24 15:21:00 -0600 | [diff] [blame] | 805 | if (!(req->flags & REQ_F_CQE_SKIP)) |
Pavel Begunkov | 91ef75a | 2022-06-15 11:23:02 +0100 | [diff] [blame] | 806 | __io_fill_cqe_req(req->ctx, req); |
Stefan Roesch | effcf8b | 2022-04-26 11:21:27 -0700 | [diff] [blame] | 807 | __io_req_complete_put(req); |
| 808 | } |
| 809 | |
Jens Axboe | 59915143 | 2022-05-25 08:57:27 -0600 | [diff] [blame] | 810 | void io_req_complete_post(struct io_kiocb *req) |
Hao Xu | a37fae8 | 2021-12-07 17:39:50 +0800 | [diff] [blame] | 811 | { |
| 812 | struct io_ring_ctx *ctx = req->ctx; |
| 813 | |
Pavel Begunkov | 25399321 | 2022-06-20 01:25:56 +0100 | [diff] [blame] | 814 | io_cq_lock(ctx); |
Jens Axboe | 97b388d | 2022-05-24 15:21:00 -0600 | [diff] [blame] | 815 | __io_req_complete_post(req); |
Pavel Begunkov | 25399321 | 2022-06-20 01:25:56 +0100 | [diff] [blame] | 816 | io_cq_unlock_post(ctx); |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 817 | } |
| 818 | |
Jens Axboe | 99f15d8 | 2022-05-25 05:59:19 -0600 | [diff] [blame] | 819 | inline void __io_req_complete(struct io_kiocb *req, unsigned issue_flags) |
Pavel Begunkov | a38d68d | 2021-01-19 13:32:45 +0000 | [diff] [blame] | 820 | { |
Pavel Begunkov | 75d7b3a | 2022-06-16 10:21:58 +0100 | [diff] [blame] | 821 | io_req_complete_post(req); |
Jens Axboe | bcda7ba | 2020-02-23 16:42:51 -0700 | [diff] [blame] | 822 | } |
| 823 | |
Jens Axboe | 329061d | 2022-05-25 20:31:09 -0600 | [diff] [blame] | 824 | void io_req_complete_failed(struct io_kiocb *req, s32 res) |
Pavel Begunkov | f41db273 | 2021-02-28 22:35:12 +0000 | [diff] [blame] | 825 | { |
Pavel Begunkov | 93d2bcd | 2021-05-16 22:58:05 +0100 | [diff] [blame] | 826 | req_set_fail(req); |
Jens Axboe | 97b388d | 2022-05-24 15:21:00 -0600 | [diff] [blame] | 827 | io_req_set_res(req, res, io_put_kbuf(req, IO_URING_F_UNLOCKED)); |
| 828 | io_req_complete_post(req); |
Pavel Begunkov | f41db273 | 2021-02-28 22:35:12 +0000 | [diff] [blame] | 829 | } |
| 830 | |
Pavel Begunkov | 864ea92 | 2021-08-09 13:04:08 +0100 | [diff] [blame] | 831 | /* |
| 832 | * Don't initialise the fields below on every allocation, but do that in |
| 833 | * advance and keep them valid across allocations. |
| 834 | */ |
| 835 | static void io_preinit_req(struct io_kiocb *req, struct io_ring_ctx *ctx) |
| 836 | { |
| 837 | req->ctx = ctx; |
| 838 | req->link = NULL; |
| 839 | req->async_data = NULL; |
| 840 | /* not necessary, but safer to zero */ |
Pavel Begunkov | cef216f | 2022-04-12 15:09:43 +0100 | [diff] [blame] | 841 | req->cqe.res = 0; |
Pavel Begunkov | 864ea92 | 2021-08-09 13:04:08 +0100 | [diff] [blame] | 842 | } |
| 843 | |
Pavel Begunkov | dac7a09 | 2021-03-19 17:22:39 +0000 | [diff] [blame] | 844 | static void io_flush_cached_locked_reqs(struct io_ring_ctx *ctx, |
Pavel Begunkov | cd0ca2e | 2021-08-09 20:18:11 +0100 | [diff] [blame] | 845 | struct io_submit_state *state) |
Pavel Begunkov | dac7a09 | 2021-03-19 17:22:39 +0000 | [diff] [blame] | 846 | { |
Jens Axboe | 79ebeae | 2021-08-10 15:18:27 -0600 | [diff] [blame] | 847 | spin_lock(&ctx->completion_lock); |
Pavel Begunkov | c2b6c6b | 2021-09-24 21:59:47 +0100 | [diff] [blame] | 848 | wq_list_splice(&ctx->locked_free_list, &state->free_list); |
Pavel Begunkov | d0acdee | 2021-05-16 22:58:12 +0100 | [diff] [blame] | 849 | ctx->locked_free_nr = 0; |
Jens Axboe | 79ebeae | 2021-08-10 15:18:27 -0600 | [diff] [blame] | 850 | spin_unlock(&ctx->completion_lock); |
Pavel Begunkov | dac7a09 | 2021-03-19 17:22:39 +0000 | [diff] [blame] | 851 | } |
| 852 | |
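/*
 * Editor's sketch, not kernel code: io_flush_cached_locked_reqs() above is
 * one half of a two-level free cache -- completion contexts park freed
 * requests on a lock-protected list, and the submitter occasionally
 * splices the whole list into its private, lock-free cache so the hot
 * allocation path never takes the lock. A minimal userspace analogue,
 * assuming pthreads; every name below is illustrative:
 */
#include <pthread.h>
#include <stddef.h>

struct fnode { struct fnode *next; };

struct two_level_cache {
	pthread_mutex_t lock;
	struct fnode *locked_free;  /* refilled from "completion" context */
	struct fnode *private_free; /* touched only by the submitter */
};

static void flush_locked_free(struct two_level_cache *c)
{
	/* detach the whole shared list in O(1) while holding the lock */
	pthread_mutex_lock(&c->lock);
	struct fnode *batch = c->locked_free;
	c->locked_free = NULL;
	pthread_mutex_unlock(&c->lock);

	/* relink into the private cache outside the lock */
	while (batch) {
		struct fnode *next = batch->next;
		batch->next = c->private_free;
		c->private_free = batch;
		batch = next;
	}
}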
Pavel Begunkov | 5d5901a | 2021-08-11 19:28:29 +0100 | [diff] [blame] | 853 | /* |
| 854 | * A request might get retired back into the request caches even before opcode |
| 855 | * handlers and io_issue_sqe() are done with it, e.g. inline completion path. |
| 856 | * Because of that, io_alloc_req() should be called only under ->uring_lock |
| 857 | * and with extra caution not to get a request that is still being worked on.
| 858 | */ |
Pavel Begunkov | bd1a378 | 2022-07-27 10:30:40 +0100 | [diff] [blame] | 859 | __cold bool __io_alloc_req_refill(struct io_ring_ctx *ctx) |
Pavel Begunkov | 5d5901a | 2021-08-11 19:28:29 +0100 | [diff] [blame] | 860 | __must_hold(&ctx->uring_lock) |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 861 | { |
Pavel Begunkov | 864ea92 | 2021-08-09 13:04:08 +0100 | [diff] [blame] | 862 | gfp_t gfp = GFP_KERNEL | __GFP_NOWARN; |
Pavel Begunkov | 3ab665b | 2021-09-24 21:59:45 +0100 | [diff] [blame] | 863 | void *reqs[IO_REQ_ALLOC_BATCH]; |
Pavel Begunkov | 864ea92 | 2021-08-09 13:04:08 +0100 | [diff] [blame] | 864 | int ret, i; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 865 | |
Pavel Begunkov | 23a5c43 | 2022-04-12 15:09:46 +0100 | [diff] [blame] | 866 | /* |
| 867 | * If we have more than a batch's worth of requests in our IRQ side |
| 868 | * locked cache, grab the lock and move them over to our submission |
| 869 | * side cache. |
| 870 | */ |
Pavel Begunkov | a6d97a8 | 2022-04-15 22:08:33 +0100 | [diff] [blame] | 871 | if (data_race(ctx->locked_free_nr) > IO_COMPL_BATCH) { |
Pavel Begunkov | 23a5c43 | 2022-04-12 15:09:46 +0100 | [diff] [blame] | 872 | io_flush_cached_locked_reqs(ctx, &ctx->submit_state); |
Pavel Begunkov | 88ab95b | 2022-04-12 15:09:47 +0100 | [diff] [blame] | 873 | if (!io_req_cache_empty(ctx)) |
Pavel Begunkov | 23a5c43 | 2022-04-12 15:09:46 +0100 | [diff] [blame] | 874 | return true; |
| 875 | } |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 876 | |
Pavel Begunkov | 3ab665b | 2021-09-24 21:59:45 +0100 | [diff] [blame] | 877 | ret = kmem_cache_alloc_bulk(req_cachep, gfp, ARRAY_SIZE(reqs), reqs); |
Pavel Begunkov | e5d1bc0 | 2021-02-10 00:03:23 +0000 | [diff] [blame] | 878 | |
Pavel Begunkov | 864ea92 | 2021-08-09 13:04:08 +0100 | [diff] [blame] | 879 | /* |
| 880 | * Bulk alloc is all-or-nothing. If we fail to get a batch, |
| 881 | * retry single alloc to be on the safe side. |
| 882 | */ |
| 883 | if (unlikely(ret <= 0)) { |
Pavel Begunkov | 3ab665b | 2021-09-24 21:59:45 +0100 | [diff] [blame] | 884 | reqs[0] = kmem_cache_alloc(req_cachep, gfp); |
| 885 | if (!reqs[0]) |
Pavel Begunkov | a33ae9c | 2021-10-04 20:02:49 +0100 | [diff] [blame] | 886 | return false; |
Pavel Begunkov | 864ea92 | 2021-08-09 13:04:08 +0100 | [diff] [blame] | 887 | ret = 1; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 888 | } |
Pavel Begunkov | 864ea92 | 2021-08-09 13:04:08 +0100 | [diff] [blame] | 889 | |
Pavel Begunkov | 37f0e76 | 2021-10-04 20:02:53 +0100 | [diff] [blame] | 890 | percpu_ref_get_many(&ctx->refs, ret); |
Pavel Begunkov | 3ab665b | 2021-09-24 21:59:45 +0100 | [diff] [blame] | 891 | for (i = 0; i < ret; i++) { |
Pavel Begunkov | 23a5c43 | 2022-04-12 15:09:46 +0100 | [diff] [blame] | 892 | struct io_kiocb *req = reqs[i]; |
Pavel Begunkov | 3ab665b | 2021-09-24 21:59:45 +0100 | [diff] [blame] | 893 | |
| 894 | io_preinit_req(req, ctx); |
Pavel Begunkov | fa05457 | 2022-04-12 15:09:48 +0100 | [diff] [blame] | 895 | io_req_add_to_cache(req, ctx); |
Pavel Begunkov | 3ab665b | 2021-09-24 21:59:45 +0100 | [diff] [blame] | 896 | } |
Pavel Begunkov | a33ae9c | 2021-10-04 20:02:49 +0100 | [diff] [blame] | 897 | return true; |
| 898 | } |
| 899 | |
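/*
 * Editor's sketch, not kernel code: the refill above treats the bulk
 * allocator as best-effort and falls back to a single allocation when the
 * batch fails outright, so low-memory conditions degrade gracefully
 * instead of failing the request. A userspace analogue with malloc();
 * names are illustrative:
 */
#include <stdlib.h>

/* Fill objs[0..want) as far as possible; returns the number allocated. */
static int alloc_refill(void *objs[], int want, size_t size)
{
	int n = 0;

	while (n < want && (objs[n] = malloc(size)) != NULL)
		n++;
	if (n == 0) {
		/* mirror the kernel fallback: one more try for one object */
		objs[0] = malloc(size);
		if (objs[0])
			n = 1;
	}
	return n;
}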
Pavel Begunkov | 6b63952 | 2021-09-08 16:40:50 +0100 | [diff] [blame] | 900 | static inline void io_dismantle_req(struct io_kiocb *req) |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 901 | { |
Pavel Begunkov | 094bae4 | 2021-03-19 17:22:42 +0000 | [diff] [blame] | 902 | unsigned int flags = req->flags; |
Pavel Begunkov | 929a3af | 2020-02-19 00:19:09 +0300 | [diff] [blame] | 903 | |
Pavel Begunkov | 867f8fa | 2021-10-04 20:02:58 +0100 | [diff] [blame] | 904 | if (unlikely(flags & IO_REQ_CLEAN_FLAGS)) |
Pavel Begunkov | 3a0a690 | 2021-04-20 12:03:31 +0100 | [diff] [blame] | 905 | io_clean_op(req); |
Pavel Begunkov | e1d767f | 2021-03-19 17:22:43 +0000 | [diff] [blame] | 906 | if (!(flags & REQ_F_FIXED_FILE)) |
| 907 | io_put_file(req->file); |
Pavel Begunkov | e6543a8 | 2020-06-28 12:52:30 +0300 | [diff] [blame] | 908 | } |
Pavel Begunkov | 2b85edf | 2019-12-28 14:13:03 +0300 | [diff] [blame] | 909 | |
Jens Axboe | 59915143 | 2022-05-25 08:57:27 -0600 | [diff] [blame] | 910 | __cold void io_free_req(struct io_kiocb *req) |
Pavel Begunkov | e6543a8 | 2020-06-28 12:52:30 +0300 | [diff] [blame] | 911 | { |
Jens Axboe | 51a4cc1 | 2020-08-10 10:55:56 -0600 | [diff] [blame] | 912 | struct io_ring_ctx *ctx = req->ctx; |
Pavel Begunkov | ecfc517 | 2020-06-29 13:13:03 +0300 | [diff] [blame] | 913 | |
Pavel Begunkov | 7ac1edc | 2022-04-18 20:51:15 +0100 | [diff] [blame] | 914 | io_req_put_rsrc(req); |
Pavel Begunkov | 216578e | 2020-10-13 09:44:00 +0100 | [diff] [blame] | 915 | io_dismantle_req(req); |
Pavel Begunkov | 7c66073 | 2021-01-25 11:42:21 +0000 | [diff] [blame] | 916 | io_put_task(req->task, 1); |
Pavel Begunkov | e6543a8 | 2020-06-28 12:52:30 +0300 | [diff] [blame] | 917 | |
Jens Axboe | 79ebeae | 2021-08-10 15:18:27 -0600 | [diff] [blame] | 918 | spin_lock(&ctx->completion_lock); |
Pavel Begunkov | c2b6c6b | 2021-09-24 21:59:47 +0100 | [diff] [blame] | 919 | wq_list_add_head(&req->comp_list, &ctx->locked_free_list); |
Pavel Begunkov | c34b025 | 2021-08-09 20:18:08 +0100 | [diff] [blame] | 920 | ctx->locked_free_nr++; |
Jens Axboe | 79ebeae | 2021-08-10 15:18:27 -0600 | [diff] [blame] | 921 | spin_unlock(&ctx->completion_lock); |
Jens Axboe | e65ef56 | 2019-03-12 10:16:44 -0600 | [diff] [blame] | 922 | } |
| 923 | |
Pavel Begunkov | d81499b | 2021-09-08 16:40:51 +0100 | [diff] [blame] | 924 | static void __io_req_find_next_prep(struct io_kiocb *req) |
| 925 | { |
| 926 | struct io_ring_ctx *ctx = req->ctx; |
Pavel Begunkov | d81499b | 2021-09-08 16:40:51 +0100 | [diff] [blame] | 927 | |
Pavel Begunkov | 25399321 | 2022-06-20 01:25:56 +0100 | [diff] [blame] | 928 | io_cq_lock(ctx); |
Pavel Begunkov | 305bef9 | 2022-06-20 01:25:55 +0100 | [diff] [blame] | 929 | io_disarm_next(req); |
Pavel Begunkov | 25399321 | 2022-06-20 01:25:56 +0100 | [diff] [blame] | 930 | io_cq_unlock_post(ctx); |
Pavel Begunkov | d81499b | 2021-09-08 16:40:51 +0100 | [diff] [blame] | 931 | } |
| 932 | |
| 933 | static inline struct io_kiocb *io_req_find_next(struct io_kiocb *req) |
Jens Axboe | 9e645e11 | 2019-05-10 16:07:28 -0600 | [diff] [blame] | 934 | { |
Pavel Begunkov | 33cc89a | 2021-03-09 00:37:58 +0000 | [diff] [blame] | 935 | struct io_kiocb *nxt; |
Jens Axboe | 2665abf | 2019-11-05 12:40:47 -0700 | [diff] [blame] | 936 | |
Jens Axboe | 9e645e11 | 2019-05-10 16:07:28 -0600 | [diff] [blame] | 937 | /* |
| 938 | * If LINK is set, we have dependent requests in this chain. If we |
| 939 | * didn't fail this request, queue the first one up, moving any other |
| 940 | * dependencies to the next request. In case of failure, fail the rest |
| 941 | * of the chain. |
| 942 | */ |
Pavel Begunkov | d81499b | 2021-09-08 16:40:51 +0100 | [diff] [blame] | 943 | if (unlikely(req->flags & IO_DISARM_MASK)) |
| 944 | __io_req_find_next_prep(req); |
Pavel Begunkov | 33cc89a | 2021-03-09 00:37:58 +0000 | [diff] [blame] | 945 | nxt = req->link; |
| 946 | req->link = NULL; |
| 947 | return nxt; |
Jens Axboe | 4d7dd46 | 2019-11-20 13:03:52 -0700 | [diff] [blame] | 948 | } |
Jens Axboe | 2665abf | 2019-11-05 12:40:47 -0700 | [diff] [blame] | 949 | |
Pavel Begunkov | f237c30 | 2021-08-18 12:42:46 +0100 | [diff] [blame] | 950 | static void ctx_flush_and_put(struct io_ring_ctx *ctx, bool *locked) |
Pavel Begunkov | 2c32395 | 2021-02-28 22:04:53 +0000 | [diff] [blame] | 951 | { |
| 952 | if (!ctx) |
| 953 | return; |
Jens Axboe | ef060ea | 2022-04-25 19:49:04 -0600 | [diff] [blame] | 954 | if (ctx->flags & IORING_SETUP_TASKRUN_FLAG) |
| 955 | atomic_andnot(IORING_SQ_TASKRUN, &ctx->rings->sq_flags); |
Pavel Begunkov | f237c30 | 2021-08-18 12:42:46 +0100 | [diff] [blame] | 956 | if (*locked) { |
Pavel Begunkov | c450178 | 2021-09-08 16:40:52 +0100 | [diff] [blame] | 957 | io_submit_flush_completions(ctx); |
Pavel Begunkov | 2c32395 | 2021-02-28 22:04:53 +0000 | [diff] [blame] | 958 | mutex_unlock(&ctx->uring_lock); |
Pavel Begunkov | f237c30 | 2021-08-18 12:42:46 +0100 | [diff] [blame] | 959 | *locked = false; |
Pavel Begunkov | 2c32395 | 2021-02-28 22:04:53 +0000 | [diff] [blame] | 960 | } |
| 961 | percpu_ref_put(&ctx->refs); |
| 962 | } |
| 963 | |
Dylan Yudaken | c6dd763 | 2022-06-22 06:40:28 -0700 | [diff] [blame] | 964 | static unsigned int handle_tw_list(struct llist_node *node, |
| 965 | struct io_ring_ctx **ctx, bool *locked, |
| 966 | struct llist_node *last) |
Hao Xu | 9f8d032 | 2021-12-07 17:39:49 +0800 | [diff] [blame] | 967 | { |
Dylan Yudaken | c6dd763 | 2022-06-22 06:40:28 -0700 | [diff] [blame] | 968 | unsigned int count = 0; |
| 969 | |
Dylan Yudaken | 3a0c037 | 2022-06-22 06:40:25 -0700 | [diff] [blame] | 970 | while (node != last) { |
Dylan Yudaken | f88262e | 2022-06-22 06:40:23 -0700 | [diff] [blame] | 971 | struct llist_node *next = node->next; |
Hao Xu | 9f8d032 | 2021-12-07 17:39:49 +0800 | [diff] [blame] | 972 | struct io_kiocb *req = container_of(node, struct io_kiocb, |
| 973 | io_task_work.node); |
| 974 | |
Jens Axboe | 34d2bfe | 2022-03-24 10:17:44 -0600 | [diff] [blame] | 975 | prefetch(container_of(next, struct io_kiocb, io_task_work.node)); |
| 976 | |
Hao Xu | 9f8d032 | 2021-12-07 17:39:49 +0800 | [diff] [blame] | 977 | if (req->ctx != *ctx) { |
| 978 | ctx_flush_and_put(*ctx, locked); |
| 979 | *ctx = req->ctx; |
| 980 | /* if the lock is not contended, grab it to improve batching */
| 981 | *locked = mutex_trylock(&(*ctx)->uring_lock); |
| 982 | percpu_ref_get(&(*ctx)->refs); |
| 983 | } |
| 984 | req->io_task_work.func(req, locked); |
| 985 | node = next; |
Dylan Yudaken | c6dd763 | 2022-06-22 06:40:28 -0700 | [diff] [blame] | 986 | count++; |
Dylan Yudaken | 3a0c037 | 2022-06-22 06:40:25 -0700 | [diff] [blame] | 987 | } |
Dylan Yudaken | c6dd763 | 2022-06-22 06:40:28 -0700 | [diff] [blame] | 988 | |
| 989 | return count; |
Hao Xu | 9f8d032 | 2021-12-07 17:39:49 +0800 | [diff] [blame] | 990 | } |
| 991 | |
Dylan Yudaken | 923d159 | 2022-06-22 06:40:24 -0700 | [diff] [blame] | 992 | /** |
| 993 | * io_llist_xchg - swap all entries in a lock-less list |
| 994 | * @head: the head of the lock-less list to take all entries from
| 995 | * @new: new entry to install as the head of the list
| 996 | *
| 997 | * If the list is empty, return NULL; otherwise, return a pointer to the first entry.
| 998 | * Entries are returned in order from the newest to the oldest added.
| 999 | */ |
| 1000 | static inline struct llist_node *io_llist_xchg(struct llist_head *head, |
| 1001 | struct llist_node *new) |
| 1002 | { |
| 1003 | return xchg(&head->first, new); |
| 1004 | } |
| 1005 | |
| 1006 | /** |
| 1007 | * io_llist_cmpxchg - possibly swap all entries in a lock-less list |
| 1008 | * @head: the head of the lock-less list whose first entry may be swapped
| 1009 | * @old: expected old value of the first entry of the list
| 1010 | * @new: new entry to install as the head of the list
| 1011 | *
| 1012 | * Perform a cmpxchg on the first entry of the list: the swap happens only
| 1013 | * if the current first entry matches @old.
| 1014 | */
| 1015 | static inline struct llist_node *io_llist_cmpxchg(struct llist_head *head, |
| 1016 | struct llist_node *old, |
| 1017 | struct llist_node *new) |
| 1018 | { |
| 1019 | return cmpxchg(&head->first, old, new); |
| 1020 | } |
| 1021 | |
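/*
 * Editor's sketch, not kernel code: the two helpers above are thin
 * wrappers around an atomic exchange/compare-exchange on the list head
 * pointer. The same operations in portable C11, with illustrative names:
 */
#include <stdatomic.h>
#include <stddef.h>

struct lnode { struct lnode *next; };

/* Atomically take the whole list, installing new_head (often NULL). */
static struct lnode *llist_take_all(_Atomic(struct lnode *) *head,
				    struct lnode *new_head)
{
	return atomic_exchange(head, new_head);
}

/*
 * Swap in new_head only if the current first entry is old; returns the
 * head that was actually observed (old on success).
 */
static struct lnode *llist_try_swap(_Atomic(struct lnode *) *head,
				    struct lnode *old,
				    struct lnode *new_head)
{
	atomic_compare_exchange_strong(head, &old, new_head);
	return old;
}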
Jens Axboe | c9f06aa | 2022-05-25 11:01:04 -0600 | [diff] [blame] | 1022 | void tctx_task_work(struct callback_head *cb) |
Jens Axboe | 7cbf172 | 2021-02-10 00:03:20 +0000 | [diff] [blame] | 1023 | { |
Hao Xu | f28c240e | 2021-12-08 13:21:25 +0800 | [diff] [blame] | 1024 | bool uring_locked = false; |
Pavel Begunkov | ebd0df2 | 2021-06-17 18:14:07 +0100 | [diff] [blame] | 1025 | struct io_ring_ctx *ctx = NULL; |
Pavel Begunkov | 3f18407 | 2021-06-17 18:14:06 +0100 | [diff] [blame] | 1026 | struct io_uring_task *tctx = container_of(cb, struct io_uring_task, |
| 1027 | task_work); |
Dylan Yudaken | 3a0c037 | 2022-06-22 06:40:25 -0700 | [diff] [blame] | 1028 | struct llist_node fake = {}; |
| 1029 | struct llist_node *node = io_llist_xchg(&tctx->task_list, &fake); |
Dylan Yudaken | c6dd763 | 2022-06-22 06:40:28 -0700 | [diff] [blame] | 1030 | unsigned int loops = 1; |
| 1031 | unsigned int count = handle_tw_list(node, &ctx, &uring_locked, NULL); |
Jens Axboe | 7cbf172 | 2021-02-10 00:03:20 +0000 | [diff] [blame] | 1032 | |
Dylan Yudaken | 3a0c037 | 2022-06-22 06:40:25 -0700 | [diff] [blame] | 1033 | node = io_llist_cmpxchg(&tctx->task_list, &fake, NULL); |
| 1034 | while (node != &fake) { |
Dylan Yudaken | c6dd763 | 2022-06-22 06:40:28 -0700 | [diff] [blame] | 1035 | loops++; |
Dylan Yudaken | 3a0c037 | 2022-06-22 06:40:25 -0700 | [diff] [blame] | 1036 | node = io_llist_xchg(&tctx->task_list, &fake); |
Dylan Yudaken | c6dd763 | 2022-06-22 06:40:28 -0700 | [diff] [blame] | 1037 | count += handle_tw_list(node, &ctx, &uring_locked, &fake); |
Dylan Yudaken | 3a0c037 | 2022-06-22 06:40:25 -0700 | [diff] [blame] | 1038 | node = io_llist_cmpxchg(&tctx->task_list, &fake, NULL); |
Pavel Begunkov | 3f18407 | 2021-06-17 18:14:06 +0100 | [diff] [blame] | 1039 | } |
Pavel Begunkov | ebd0df2 | 2021-06-17 18:14:07 +0100 | [diff] [blame] | 1040 | |
Hao Xu | f28c240e | 2021-12-08 13:21:25 +0800 | [diff] [blame] | 1041 | ctx_flush_and_put(ctx, &uring_locked); |
Pavel Begunkov | 3cc7fdb | 2022-01-09 00:53:22 +0000 | [diff] [blame] | 1042 | |
| 1043 | /* relaxed read is enough as only the task itself sets ->in_idle */ |
| 1044 | if (unlikely(atomic_read(&tctx->in_idle))) |
| 1045 | io_uring_drop_tctx_refs(current); |
Dylan Yudaken | c6dd763 | 2022-06-22 06:40:28 -0700 | [diff] [blame] | 1046 | |
| 1047 | trace_io_uring_task_work_run(tctx, count, loops); |
Jens Axboe | 7cbf172 | 2021-02-10 00:03:20 +0000 | [diff] [blame] | 1048 | } |
| 1049 | |
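/*
 * Editor's sketch, not kernel code: tctx_task_work() above drains with an
 * on-stack sentinel ("fake") node so that work queued while the list is
 * being processed is never lost: exchange the sentinel in, run everything
 * up to it, then try to compare-exchange the sentinel out; failure means
 * new entries arrived in front of it, so take them and go again. The shape
 * of that loop, reusing struct lnode and the C11 atomics from the sketch
 * above; run() is an illustrative callback:
 */
static void drain_all(_Atomic(struct lnode *) *list,
		      void (*run)(struct lnode *))
{
	struct lnode fake = { .next = NULL };
	struct lnode *node = atomic_exchange(list, &fake);

	for (;;) {
		/* run everything up to the end or the embedded sentinel */
		while (node && node != &fake) {
			struct lnode *next = node->next;
			run(node);
			node = next;
		}
		/* if the head is still our sentinel, retire it and stop */
		struct lnode *expected = &fake;
		if (atomic_compare_exchange_strong(list, &expected, NULL))
			break;
		/* new entries landed in front of the sentinel: take them */
		node = atomic_exchange(list, &fake);
	}
}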
Dylan Yudaken | c34398a | 2022-06-22 06:40:22 -0700 | [diff] [blame] | 1050 | void io_req_task_work_add(struct io_kiocb *req) |
Jens Axboe | 7cbf172 | 2021-02-10 00:03:20 +0000 | [diff] [blame] | 1051 | { |
Dylan Yudaken | c34398a | 2022-06-22 06:40:22 -0700 | [diff] [blame] | 1052 | struct io_uring_task *tctx = req->task->io_uring; |
Jens Axboe | 9f01050 | 2022-04-25 19:49:02 -0600 | [diff] [blame] | 1053 | struct io_ring_ctx *ctx = req->ctx; |
Dylan Yudaken | f88262e | 2022-06-22 06:40:23 -0700 | [diff] [blame] | 1054 | struct llist_node *node; |
Pavel Begunkov | 6294f36 | 2021-08-10 17:53:55 +0100 | [diff] [blame] | 1055 | bool running; |
Jens Axboe | 7cbf172 | 2021-02-10 00:03:20 +0000 | [diff] [blame] | 1056 | |
Dylan Yudaken | f88262e | 2022-06-22 06:40:23 -0700 | [diff] [blame] | 1057 | running = !llist_add(&req->io_task_work.node, &tctx->task_list); |
Jens Axboe | 7cbf172 | 2021-02-10 00:03:20 +0000 | [diff] [blame] | 1058 | |
| 1059 | /* task_work already pending, we're done */ |
Pavel Begunkov | 6294f36 | 2021-08-10 17:53:55 +0100 | [diff] [blame] | 1060 | if (running) |
Pavel Begunkov | e09ee51 | 2021-07-01 13:26:05 +0100 | [diff] [blame] | 1061 | return; |
Jens Axboe | 7cbf172 | 2021-02-10 00:03:20 +0000 | [diff] [blame] | 1062 | |
Jens Axboe | ef060ea | 2022-04-25 19:49:04 -0600 | [diff] [blame] | 1063 | if (ctx->flags & IORING_SETUP_TASKRUN_FLAG) |
| 1064 | atomic_or(IORING_SQ_TASKRUN, &ctx->rings->sq_flags); |
| 1065 | |
Jens Axboe | 3fe07bc | 2022-05-21 09:17:05 -0600 | [diff] [blame] | 1066 | if (likely(!task_work_add(req->task, &tctx->task_work, ctx->notify_method))) |
Pavel Begunkov | e09ee51 | 2021-07-01 13:26:05 +0100 | [diff] [blame] | 1067 | return; |
Pavel Begunkov | 2215bed | 2021-08-09 13:04:06 +0100 | [diff] [blame] | 1068 | |
Dylan Yudaken | f88262e | 2022-06-22 06:40:23 -0700 | [diff] [blame] | 1069 | node = llist_del_all(&tctx->task_list); |
Jens Axboe | 7cbf172 | 2021-02-10 00:03:20 +0000 | [diff] [blame] | 1070 | |
Pavel Begunkov | e09ee51 | 2021-07-01 13:26:05 +0100 | [diff] [blame] | 1071 | while (node) { |
| 1072 | req = container_of(node, struct io_kiocb, io_task_work.node); |
| 1073 | node = node->next; |
Pavel Begunkov | 3218e5d | 2022-06-25 11:52:59 +0100 | [diff] [blame] | 1074 | if (llist_add(&req->io_task_work.node, |
Pavel Begunkov | e09ee51 | 2021-07-01 13:26:05 +0100 | [diff] [blame] | 1075 | &req->ctx->fallback_llist)) |
| 1076 | schedule_delayed_work(&req->ctx->fallback_work, 1); |
| 1077 | } |
Pavel Begunkov | eab30c4 | 2021-01-19 13:32:42 +0000 | [diff] [blame] | 1078 | } |
| 1079 | |
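/*
 * Editor's sketch, not kernel code: io_req_task_work_add() relies on the
 * push primitive reporting whether the list was previously empty, so only
 * the first producer pays to schedule the consumer; everyone else just
 * enqueues. A C11 analogue, reusing struct lnode from the sketches above;
 * wake_consumer() is an illustrative callback:
 */
static int lnode_push(_Atomic(struct lnode *) *head, struct lnode *n)
{
	struct lnode *first = atomic_load(head);

	do {
		n->next = first;
	} while (!atomic_compare_exchange_weak(head, &first, n));
	return first == NULL; /* true: transitioned empty -> non-empty */
}

static void queue_work_once(_Atomic(struct lnode *) *head, struct lnode *n,
			    void (*wake_consumer)(void))
{
	if (lnode_push(head, n))
		wake_consumer(); /* only the first producer schedules */
}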
Pavel Begunkov | 4e118cd | 2022-04-15 22:08:23 +0100 | [diff] [blame] | 1080 | static void io_req_tw_post(struct io_kiocb *req, bool *locked) |
| 1081 | { |
Jens Axboe | 97b388d | 2022-05-24 15:21:00 -0600 | [diff] [blame] | 1082 | io_req_complete_post(req); |
Pavel Begunkov | 4e118cd | 2022-04-15 22:08:23 +0100 | [diff] [blame] | 1083 | } |
| 1084 | |
Jens Axboe | 59915143 | 2022-05-25 08:57:27 -0600 | [diff] [blame] | 1085 | void io_req_tw_post_queue(struct io_kiocb *req, s32 res, u32 cflags) |
Pavel Begunkov | 4e118cd | 2022-04-15 22:08:23 +0100 | [diff] [blame] | 1086 | { |
Jens Axboe | 97b388d | 2022-05-24 15:21:00 -0600 | [diff] [blame] | 1087 | io_req_set_res(req, res, cflags); |
Pavel Begunkov | 4e118cd | 2022-04-15 22:08:23 +0100 | [diff] [blame] | 1088 | req->io_task_work.func = io_req_tw_post; |
Jens Axboe | 3fe07bc | 2022-05-21 09:17:05 -0600 | [diff] [blame] | 1089 | io_req_task_work_add(req); |
Pavel Begunkov | 4e118cd | 2022-04-15 22:08:23 +0100 | [diff] [blame] | 1090 | } |
| 1091 | |
Pavel Begunkov | f237c30 | 2021-08-18 12:42:46 +0100 | [diff] [blame] | 1092 | static void io_req_task_cancel(struct io_kiocb *req, bool *locked) |
Jens Axboe | c40f637 | 2020-06-25 15:39:59 -0600 | [diff] [blame] | 1093 | { |
Pavel Begunkov | b18a1a4 | 2021-08-25 20:51:39 +0100 | [diff] [blame] | 1094 | /* not needed for normal modes, but SQPOLL depends on it */ |
Pavel Begunkov | 971cf9c | 2022-04-15 22:08:22 +0100 | [diff] [blame] | 1095 | io_tw_lock(req->ctx, locked); |
Pavel Begunkov | cef216f | 2022-04-12 15:09:43 +0100 | [diff] [blame] | 1096 | io_req_complete_failed(req, req->cqe.res); |
Jens Axboe | c40f637 | 2020-06-25 15:39:59 -0600 | [diff] [blame] | 1097 | } |
| 1098 | |
Jens Axboe | 329061d | 2022-05-25 20:31:09 -0600 | [diff] [blame] | 1099 | void io_req_task_submit(struct io_kiocb *req, bool *locked) |
Jens Axboe | c40f637 | 2020-06-25 15:39:59 -0600 | [diff] [blame] | 1100 | { |
Pavel Begunkov | 971cf9c | 2022-04-15 22:08:22 +0100 | [diff] [blame] | 1101 | io_tw_lock(req->ctx, locked); |
Jens Axboe | 316319e | 2021-08-19 09:41:42 -0600 | [diff] [blame] | 1102 | /* req->task == current here, checking PF_EXITING is safe */ |
Pavel Begunkov | af066f3 | 2021-08-09 13:04:19 +0100 | [diff] [blame] | 1103 | if (likely(!(req->task->flags & PF_EXITING))) |
Pavel Begunkov | cbc2e20 | 2022-04-15 22:08:26 +0100 | [diff] [blame] | 1104 | io_queue_sqe(req); |
Pavel Begunkov | 81b6d05 | 2021-01-04 20:36:35 +0000 | [diff] [blame] | 1105 | else |
Pavel Begunkov | 2593553 | 2021-03-19 17:22:40 +0000 | [diff] [blame] | 1106 | io_req_complete_failed(req, -EFAULT); |
Jens Axboe | 9e645e11 | 2019-05-10 16:07:28 -0600 | [diff] [blame] | 1107 | } |
| 1108 | |
Jens Axboe | 59915143 | 2022-05-25 08:57:27 -0600 | [diff] [blame] | 1109 | void io_req_task_queue_fail(struct io_kiocb *req, int ret) |
Pavel Begunkov | a3df7698 | 2021-02-18 22:32:52 +0000 | [diff] [blame] | 1110 | { |
Jens Axboe | 97b388d | 2022-05-24 15:21:00 -0600 | [diff] [blame] | 1111 | io_req_set_res(req, ret, 0); |
Pavel Begunkov | 5b0a6ac | 2021-06-30 21:54:04 +0100 | [diff] [blame] | 1112 | req->io_task_work.func = io_req_task_cancel; |
Jens Axboe | 3fe07bc | 2022-05-21 09:17:05 -0600 | [diff] [blame] | 1113 | io_req_task_work_add(req); |
Pavel Begunkov | a3df7698 | 2021-02-18 22:32:52 +0000 | [diff] [blame] | 1114 | } |
| 1115 | |
Jens Axboe | f3b44f9 | 2022-06-13 07:27:03 -0600 | [diff] [blame] | 1116 | void io_req_task_queue(struct io_kiocb *req) |
Pavel Begunkov | 2c4b8eb | 2021-02-28 22:35:10 +0000 | [diff] [blame] | 1117 | { |
Pavel Begunkov | 5b0a6ac | 2021-06-30 21:54:04 +0100 | [diff] [blame] | 1118 | req->io_task_work.func = io_req_task_submit; |
Jens Axboe | 3fe07bc | 2022-05-21 09:17:05 -0600 | [diff] [blame] | 1119 | io_req_task_work_add(req); |
Pavel Begunkov | 2c4b8eb | 2021-02-28 22:35:10 +0000 | [diff] [blame] | 1120 | } |
| 1121 | |
Jens Axboe | 59915143 | 2022-05-25 08:57:27 -0600 | [diff] [blame] | 1122 | void io_queue_next(struct io_kiocb *req) |
Jackie Liu | c69f8db | 2019-11-09 11:00:08 +0800 | [diff] [blame] | 1123 | { |
Pavel Begunkov | 9b5f7bd9 | 2020-06-29 13:13:00 +0300 | [diff] [blame] | 1124 | struct io_kiocb *nxt = io_req_find_next(req); |
Pavel Begunkov | 944e58b | 2019-11-21 23:21:01 +0300 | [diff] [blame] | 1125 | |
Pavel Begunkov | 906a8c3 | 2020-06-27 14:04:55 +0300 | [diff] [blame] | 1126 | if (nxt) |
| 1127 | io_req_task_queue(nxt); |
Jackie Liu | c69f8db | 2019-11-09 11:00:08 +0800 | [diff] [blame] | 1128 | } |
| 1129 | |
Jens Axboe | f3b44f9 | 2022-06-13 07:27:03 -0600 | [diff] [blame] | 1130 | void io_free_batch_list(struct io_ring_ctx *ctx, struct io_wq_work_node *node) |
Jens Axboe | a141dd8 | 2021-08-12 12:48:34 -0600 | [diff] [blame] | 1131 | __must_hold(&ctx->uring_lock) |
Pavel Begunkov | 905c172 | 2021-02-10 00:03:14 +0000 | [diff] [blame] | 1132 | { |
Pavel Begunkov | d4b7a5e | 2021-09-24 21:59:53 +0100 | [diff] [blame] | 1133 | struct task_struct *task = NULL; |
Pavel Begunkov | 37f0e76 | 2021-10-04 20:02:53 +0100 | [diff] [blame] | 1134 | int task_refs = 0; |
Pavel Begunkov | 3aa83bf | 2021-09-24 21:59:50 +0100 | [diff] [blame] | 1135 | |
Pavel Begunkov | 3aa83bf | 2021-09-24 21:59:50 +0100 | [diff] [blame] | 1136 | do { |
| 1137 | struct io_kiocb *req = container_of(node, struct io_kiocb, |
| 1138 | comp_list); |
| 1139 | |
Pavel Begunkov | a538be5 | 2022-03-21 22:02:22 +0000 | [diff] [blame] | 1140 | if (unlikely(req->flags & IO_REQ_CLEAN_SLOW_FLAGS)) { |
| 1141 | if (req->flags & REQ_F_REFCOUNT) { |
| 1142 | node = req->comp_list.next; |
| 1143 | if (!req_ref_put_and_test(req)) |
| 1144 | continue; |
| 1145 | } |
Pavel Begunkov | b605a7fa | 2022-03-21 22:02:23 +0000 | [diff] [blame] | 1146 | if ((req->flags & REQ_F_POLLED) && req->apoll) { |
| 1147 | struct async_poll *apoll = req->apoll; |
| 1148 | |
| 1149 | if (apoll->double_poll) |
| 1150 | kfree(apoll->double_poll); |
Jens Axboe | 9731bc98 | 2022-07-07 14:20:54 -0600 | [diff] [blame] | 1151 | if (!io_alloc_cache_put(&ctx->apoll_cache, &apoll->cache)) |
| 1152 | kfree(apoll); |
Pavel Begunkov | b605a7fa | 2022-03-21 22:02:23 +0000 | [diff] [blame] | 1153 | req->flags &= ~REQ_F_POLLED; |
| 1154 | } |
Pavel Begunkov | da1a08c | 2022-04-15 22:08:29 +0100 | [diff] [blame] | 1155 | if (req->flags & IO_REQ_LINK_FLAGS) |
Pavel Begunkov | 57859f4 | 2022-03-21 22:02:24 +0000 | [diff] [blame] | 1156 | io_queue_next(req); |
Pavel Begunkov | a538be5 | 2022-03-21 22:02:22 +0000 | [diff] [blame] | 1157 | if (unlikely(req->flags & IO_REQ_CLEAN_FLAGS)) |
| 1158 | io_clean_op(req); |
Pavel Begunkov | c1e53a6 | 2021-10-04 20:02:55 +0100 | [diff] [blame] | 1159 | } |
Pavel Begunkov | a538be5 | 2022-03-21 22:02:22 +0000 | [diff] [blame] | 1160 | if (!(req->flags & REQ_F_FIXED_FILE)) |
| 1161 | io_put_file(req->file); |
Pavel Begunkov | d4b7a5e | 2021-09-24 21:59:53 +0100 | [diff] [blame] | 1162 | |
Pavel Begunkov | ab40940 | 2021-10-09 23:14:41 +0100 | [diff] [blame] | 1163 | io_req_put_rsrc_locked(req, ctx); |
Pavel Begunkov | d4b7a5e | 2021-09-24 21:59:53 +0100 | [diff] [blame] | 1164 | |
| 1165 | if (req->task != task) { |
| 1166 | if (task) |
| 1167 | io_put_task(task, task_refs); |
| 1168 | task = req->task; |
| 1169 | task_refs = 0; |
| 1170 | } |
| 1171 | task_refs++; |
Pavel Begunkov | c1e53a6 | 2021-10-04 20:02:55 +0100 | [diff] [blame] | 1172 | node = req->comp_list.next; |
Pavel Begunkov | fa05457 | 2022-04-12 15:09:48 +0100 | [diff] [blame] | 1173 | io_req_add_to_cache(req, ctx); |
Pavel Begunkov | 3aa83bf | 2021-09-24 21:59:50 +0100 | [diff] [blame] | 1174 | } while (node); |
Pavel Begunkov | d4b7a5e | 2021-09-24 21:59:53 +0100 | [diff] [blame] | 1175 | |
Pavel Begunkov | d4b7a5e | 2021-09-24 21:59:53 +0100 | [diff] [blame] | 1176 | if (task) |
| 1177 | io_put_task(task, task_refs); |
Pavel Begunkov | 3aa83bf | 2021-09-24 21:59:50 +0100 | [diff] [blame] | 1178 | } |
| 1179 | |
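/*
 * Editor's sketch, not kernel code: the free loop above coalesces
 * reference drops -- rather than one atomic put per request, it counts
 * runs of consecutive requests owned by the same task and releases each
 * run with a single call. The bare pattern, with illustrative types:
 */
struct owner { int refs; }; /* stands in for task_struct */

static void put_owner_refs(struct owner *o, int nr)
{
	o->refs -= nr; /* one (conceptually atomic) operation per run */
}

static void put_batched(struct owner *owners[], int n)
{
	struct owner *cur = NULL;
	int refs = 0;

	for (int i = 0; i < n; i++) {
		if (owners[i] != cur) {
			if (cur)
				put_owner_refs(cur, refs);
			cur = owners[i];
			refs = 0;
		}
		refs++;
	}
	if (cur)
		put_owner_refs(cur, refs);
}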
Pavel Begunkov | c450178 | 2021-09-08 16:40:52 +0100 | [diff] [blame] | 1180 | static void __io_submit_flush_completions(struct io_ring_ctx *ctx) |
Pavel Begunkov | 905c172 | 2021-02-10 00:03:14 +0000 | [diff] [blame] | 1181 | __must_hold(&ctx->uring_lock) |
| 1182 | { |
Pavel Begunkov | 6f33b0b | 2021-09-24 21:59:44 +0100 | [diff] [blame] | 1183 | struct io_wq_work_node *node, *prev; |
Pavel Begunkov | cd0ca2e | 2021-08-09 20:18:11 +0100 | [diff] [blame] | 1184 | struct io_submit_state *state = &ctx->submit_state; |
Pavel Begunkov | 905c172 | 2021-02-10 00:03:14 +0000 | [diff] [blame] | 1185 | |
Pavel Begunkov | d9dee43 | 2022-06-19 12:26:08 +0100 | [diff] [blame] | 1186 | spin_lock(&ctx->completion_lock); |
| 1187 | wq_list_for_each(node, prev, &state->compl_reqs) { |
| 1188 | struct io_kiocb *req = container_of(node, struct io_kiocb, |
| 1189 | comp_list); |
Pavel Begunkov | 5182ed2 | 2021-06-26 21:40:48 +0100 | [diff] [blame] | 1190 | |
Pavel Begunkov | d9dee43 | 2022-06-19 12:26:08 +0100 | [diff] [blame] | 1191 | if (!(req->flags & REQ_F_CQE_SKIP)) |
| 1192 | __io_fill_cqe_req(ctx, req); |
Pavel Begunkov | 905c172 | 2021-02-10 00:03:14 +0000 | [diff] [blame] | 1193 | } |
Pavel Begunkov | 25399321 | 2022-06-20 01:25:56 +0100 | [diff] [blame] | 1194 | __io_cq_unlock_post(ctx); |
Pavel Begunkov | d9dee43 | 2022-06-19 12:26:08 +0100 | [diff] [blame] | 1195 | |
Pavel Begunkov | 1cce17a | 2021-09-24 21:59:54 +0100 | [diff] [blame] | 1196 | io_free_batch_list(ctx, state->compl_reqs.first); |
Pavel Begunkov | 6f33b0b | 2021-09-24 21:59:44 +0100 | [diff] [blame] | 1197 | INIT_WQ_LIST(&state->compl_reqs); |
Pavel Begunkov | 7a743e2 | 2020-03-03 21:33:13 +0300 | [diff] [blame] | 1198 | } |
| 1199 | |
Jens Axboe | ba816ad | 2019-09-28 11:36:45 -0600 | [diff] [blame] | 1200 | /* |
| 1201 | * Drop a reference to the request; if this was the last reference, return
| 1202 | * the next request in the chain (if there is one).
| 1203 | */ |
Pavel Begunkov | 0d85035 | 2021-03-19 17:22:37 +0000 | [diff] [blame] | 1204 | static inline struct io_kiocb *io_put_req_find_next(struct io_kiocb *req) |
Jens Axboe | e65ef56 | 2019-03-12 10:16:44 -0600 | [diff] [blame] | 1205 | { |
Pavel Begunkov | 9b5f7bd9 | 2020-06-29 13:13:00 +0300 | [diff] [blame] | 1206 | struct io_kiocb *nxt = NULL; |
| 1207 | |
Jens Axboe | de9b4cc | 2021-02-24 13:28:27 -0700 | [diff] [blame] | 1208 | if (req_ref_put_and_test(req)) { |
Pavel Begunkov | da1a08c | 2022-04-15 22:08:29 +0100 | [diff] [blame] | 1209 | if (unlikely(req->flags & IO_REQ_LINK_FLAGS)) |
Pavel Begunkov | 7819a1f | 2022-03-21 22:02:21 +0000 | [diff] [blame] | 1210 | nxt = io_req_find_next(req); |
Pavel Begunkov | f5c6cf2 | 2022-04-15 22:08:24 +0100 | [diff] [blame] | 1211 | io_free_req(req); |
Jens Axboe | 2a44f46 | 2020-02-25 13:25:41 -0700 | [diff] [blame] | 1212 | } |
Pavel Begunkov | 9b5f7bd9 | 2020-06-29 13:13:00 +0300 | [diff] [blame] | 1213 | return nxt; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 1214 | } |
| 1215 | |
Pavel Begunkov | 6c50315 | 2021-01-04 20:36:36 +0000 | [diff] [blame] | 1216 | static unsigned io_cqring_events(struct io_ring_ctx *ctx) |
Jens Axboe | a3a0e43 | 2019-08-20 11:03:11 -0600 | [diff] [blame] | 1217 | { |
| 1218 | /* See comment at the top of this file */ |
| 1219 | smp_rmb(); |
Pavel Begunkov | e23de15 | 2020-12-17 00:24:37 +0000 | [diff] [blame] | 1220 | return __io_cqring_events(ctx); |
Jens Axboe | a3a0e43 | 2019-08-20 11:03:11 -0600 | [diff] [blame] | 1221 | } |
| 1222 | |
Jens Axboe | def596e | 2019-01-09 08:59:42 -0700 | [diff] [blame] | 1223 | /* |
Jens Axboe | def596e | 2019-01-09 08:59:42 -0700 | [diff] [blame] | 1224 | * We can't just wait for polled events to come to us; we have to actively
| 1225 | * find and complete them. |
| 1226 | */ |
Pavel Begunkov | c072481 | 2021-10-04 20:02:54 +0100 | [diff] [blame] | 1227 | static __cold void io_iopoll_try_reap_events(struct io_ring_ctx *ctx) |
Jens Axboe | def596e | 2019-01-09 08:59:42 -0700 | [diff] [blame] | 1228 | { |
| 1229 | if (!(ctx->flags & IORING_SETUP_IOPOLL)) |
| 1230 | return; |
| 1231 | |
| 1232 | mutex_lock(&ctx->uring_lock); |
Pavel Begunkov | 5eef4e8 | 2021-09-24 21:59:49 +0100 | [diff] [blame] | 1233 | while (!wq_list_empty(&ctx->iopoll_list)) { |
Pavel Begunkov | b2edc0a | 2020-07-07 16:36:22 +0300 | [diff] [blame] | 1234 | /* let it sleep and repeat later if can't complete a request */ |
Pavel Begunkov | 5ba3c87 | 2021-09-24 21:59:43 +0100 | [diff] [blame] | 1235 | if (io_do_iopoll(ctx, true) == 0) |
Pavel Begunkov | b2edc0a | 2020-07-07 16:36:22 +0300 | [diff] [blame] | 1236 | break; |
Jens Axboe | 08f5439 | 2019-08-21 22:19:11 -0600 | [diff] [blame] | 1237 | /* |
| 1238 | * Ensure we allow local-to-the-cpu processing to take place;
| 1239 | * in this case we need to ensure that we reap all events.
Pavel Begunkov | 3fcee5a | 2020-07-06 17:59:31 +0300 | [diff] [blame] | 1240 | * Also let task_work, etc., make progress by releasing the mutex.
Jens Axboe | 08f5439 | 2019-08-21 22:19:11 -0600 | [diff] [blame] | 1241 | */ |
Pavel Begunkov | 3fcee5a | 2020-07-06 17:59:31 +0300 | [diff] [blame] | 1242 | if (need_resched()) { |
| 1243 | mutex_unlock(&ctx->uring_lock); |
| 1244 | cond_resched(); |
| 1245 | mutex_lock(&ctx->uring_lock); |
| 1246 | } |
Jens Axboe | def596e | 2019-01-09 08:59:42 -0700 | [diff] [blame] | 1247 | } |
| 1248 | mutex_unlock(&ctx->uring_lock); |
| 1249 | } |
| 1250 | |
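/*
 * Editor's sketch, not kernel code: the reap loop above periodically
 * drops and re-takes the ring lock when a reschedule is due, so that
 * task_work and other lock waiters can make progress while we keep
 * polling. A userspace analogue with pthreads; sched_yield() stands in
 * for cond_resched(), and the callbacks are illustrative:
 */
#include <pthread.h>
#include <sched.h>
#include <stdbool.h>

static void reap_until_empty(pthread_mutex_t *lock,
			     bool (*one_round)(void),  /* true: work remains */
			     bool (*resched_needed)(void))
{
	pthread_mutex_lock(lock);
	while (one_round()) {
		if (resched_needed()) {
			pthread_mutex_unlock(lock);
			sched_yield();
			pthread_mutex_lock(lock);
		}
	}
	pthread_mutex_unlock(lock);
}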
Pavel Begunkov | 7668b92 | 2020-07-07 16:36:21 +0300 | [diff] [blame] | 1251 | static int io_iopoll_check(struct io_ring_ctx *ctx, long min) |
Jens Axboe | def596e | 2019-01-09 08:59:42 -0700 | [diff] [blame] | 1252 | { |
Pavel Begunkov | 7668b92 | 2020-07-07 16:36:21 +0300 | [diff] [blame] | 1253 | unsigned int nr_events = 0; |
Pavel Begunkov | e9979b3 | 2021-04-13 02:58:45 +0100 | [diff] [blame] | 1254 | int ret = 0; |
Dylan Yudaken | 155bc95 | 2022-04-21 02:13:44 -0700 | [diff] [blame] | 1255 | unsigned long check_cq; |
Jens Axboe | def596e | 2019-01-09 08:59:42 -0700 | [diff] [blame] | 1256 | |
Pavel Begunkov | 3a08576 | 2022-06-15 17:33:55 +0100 | [diff] [blame] | 1257 | check_cq = READ_ONCE(ctx->check_cq); |
| 1258 | if (unlikely(check_cq)) { |
| 1259 | if (check_cq & BIT(IO_CHECK_CQ_OVERFLOW_BIT)) |
| 1260 | __io_cqring_overflow_flush(ctx, false); |
| 1261 | /* |
| 1262 | * Similarly, do not spin if we have not informed the user of any
| 1263 | * dropped CQE. |
| 1264 | */ |
| 1265 | if (check_cq & BIT(IO_CHECK_CQ_DROPPED_BIT)) |
| 1266 | return -EBADR; |
| 1267 | } |
Xiaoguang Wang | c7849be | 2020-02-22 14:46:05 +0800 | [diff] [blame] | 1268 | /* |
Pavel Begunkov | f39c8a5 | 2021-04-13 02:58:46 +0100 | [diff] [blame] | 1269 | * Don't enter poll loop if we already have events pending. |
| 1270 | * If we do, we can potentially be spinning for commands that |
| 1271 | * already triggered a CQE (eg in error). |
| 1272 | */ |
Pavel Begunkov | f39c8a5 | 2021-04-13 02:58:46 +0100 | [diff] [blame] | 1273 | if (io_cqring_events(ctx)) |
Pavel Begunkov | d487b43 | 2022-03-22 14:07:58 +0000 | [diff] [blame] | 1274 | return 0; |
Dylan Yudaken | 155bc95 | 2022-04-21 02:13:44 -0700 | [diff] [blame] | 1275 | |
Jens Axboe | def596e | 2019-01-09 08:59:42 -0700 | [diff] [blame] | 1276 | do { |
Jens Axboe | 500f9fb | 2019-08-19 12:15:59 -0600 | [diff] [blame] | 1277 | /* |
| 1278 | * If a submit got punted to a workqueue, we can have the |
| 1279 | * application entering polling for a command before it gets |
| 1280 | * issued. That app will hold the uring_lock for the duration |
| 1281 | * of the poll right here, so we need to take a breather every |
| 1282 | * now and then to ensure that the issue has a chance to add |
| 1283 | * the poll to the issued list. Otherwise we can spin here |
| 1284 | * forever, while the workqueue is stuck trying to acquire the |
| 1285 | * very same mutex. |
| 1286 | */ |
Pavel Begunkov | 5eef4e8 | 2021-09-24 21:59:49 +0100 | [diff] [blame] | 1287 | if (wq_list_empty(&ctx->iopoll_list)) { |
Pavel Begunkov | 8f487ef | 2021-07-08 13:37:06 +0100 | [diff] [blame] | 1288 | u32 tail = ctx->cached_cq_tail; |
| 1289 | |
Jens Axboe | 500f9fb | 2019-08-19 12:15:59 -0600 | [diff] [blame] | 1290 | mutex_unlock(&ctx->uring_lock); |
Jens Axboe | 4c6e277 | 2020-07-01 11:29:10 -0600 | [diff] [blame] | 1291 | io_run_task_work(); |
Jens Axboe | 500f9fb | 2019-08-19 12:15:59 -0600 | [diff] [blame] | 1292 | mutex_lock(&ctx->uring_lock); |
Pavel Begunkov | e9979b3 | 2021-04-13 02:58:45 +0100 | [diff] [blame] | 1293 | |
Pavel Begunkov | 8f487ef | 2021-07-08 13:37:06 +0100 | [diff] [blame] | 1294 | /* some requests don't go through iopoll_list */ |
| 1295 | if (tail != ctx->cached_cq_tail || |
Pavel Begunkov | 5eef4e8 | 2021-09-24 21:59:49 +0100 | [diff] [blame] | 1296 | wq_list_empty(&ctx->iopoll_list)) |
Pavel Begunkov | e9979b3 | 2021-04-13 02:58:45 +0100 | [diff] [blame] | 1297 | break; |
Jens Axboe | 500f9fb | 2019-08-19 12:15:59 -0600 | [diff] [blame] | 1298 | } |
Pavel Begunkov | 5ba3c87 | 2021-09-24 21:59:43 +0100 | [diff] [blame] | 1299 | ret = io_do_iopoll(ctx, !min); |
| 1300 | if (ret < 0) |
| 1301 | break; |
| 1302 | nr_events += ret; |
| 1303 | ret = 0; |
| 1304 | } while (nr_events < min && !need_resched()); |
Pavel Begunkov | d487b43 | 2022-03-22 14:07:58 +0000 | [diff] [blame] | 1305 | |
Jens Axboe | def596e | 2019-01-09 08:59:42 -0700 | [diff] [blame] | 1306 | return ret; |
| 1307 | } |
Pavel Begunkov | 7012c81 | 2022-06-16 10:21:59 +0100 | [diff] [blame] | 1308 | |
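/*
 * Editor's sketch, not kernel code: when the poll list is empty, the loop
 * above snapshots the CQ tail before releasing the lock; if the tail has
 * moved by the time the lock is re-taken, completions arrived through a
 * path that bypasses the poll list and the wait can end instead of
 * spinning. The bare pattern, with illustrative names:
 */
#include <pthread.h>
#include <stdbool.h>

static bool others_made_progress(pthread_mutex_t *lock,
				 volatile unsigned int *cq_tail,
				 void (*run_pending_work)(void))
{
	unsigned int snapshot = *cq_tail; /* taken while still locked */

	pthread_mutex_unlock(lock);
	run_pending_work();
	pthread_mutex_lock(lock);
	return *cq_tail != snapshot; /* true: someone else completed work */
}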
| 1309 | void io_req_task_complete(struct io_kiocb *req, bool *locked) |
Jens Axboe | 8ef12ef | 2021-08-10 15:15:25 -0600 | [diff] [blame] | 1310 | { |
Pavel Begunkov | 7012c81 | 2022-06-16 10:21:59 +0100 | [diff] [blame] | 1311 | if (req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)) { |
| 1312 | unsigned issue_flags = *locked ? 0 : IO_URING_F_UNLOCKED; |
| 1313 | |
| 1314 | req->cqe.flags |= io_put_kbuf(req, issue_flags); |
Pavel Begunkov | 126180b | 2021-08-18 12:42:47 +0100 | [diff] [blame] | 1315 | } |
Pavel Begunkov | 7012c81 | 2022-06-16 10:21:59 +0100 | [diff] [blame] | 1316 | |
| 1317 | if (*locked) |
Pavel Begunkov | 9da070b | 2022-06-20 01:26:00 +0100 | [diff] [blame] | 1318 | io_req_complete_defer(req); |
Pavel Begunkov | 7012c81 | 2022-06-16 10:21:59 +0100 | [diff] [blame] | 1319 | else |
| 1320 | io_req_complete_post(req); |
Jens Axboe | 8ef12ef | 2021-08-10 15:15:25 -0600 | [diff] [blame] | 1321 | } |
| 1322 | |
Jens Axboe | def596e | 2019-01-09 08:59:42 -0700 | [diff] [blame] | 1323 | /* |
| 1324 | * After the iocb has been issued, it's safe to be found on the poll list. |
| 1325 | * Adding the kiocb to the list AFTER submission ensures that we don't |
Pavel Begunkov | f39c8a5 | 2021-04-13 02:58:46 +0100 | [diff] [blame] | 1326 | * find it from a io_do_iopoll() thread before the issuer is done |
Jens Axboe | def596e | 2019-01-09 08:59:42 -0700 | [diff] [blame] | 1327 | * accessing the kiocb cookie. |
| 1328 | */ |
Pavel Begunkov | 9882131 | 2021-10-15 17:09:12 +0100 | [diff] [blame] | 1329 | static void io_iopoll_req_issued(struct io_kiocb *req, unsigned int issue_flags) |
Jens Axboe | def596e | 2019-01-09 08:59:42 -0700 | [diff] [blame] | 1330 | { |
| 1331 | struct io_ring_ctx *ctx = req->ctx; |
Hao Xu | 3b44b37 | 2021-10-18 21:34:31 +0800 | [diff] [blame] | 1332 | const bool needs_lock = issue_flags & IO_URING_F_UNLOCKED; |
Pavel Begunkov | cb3d897 | 2021-06-14 02:36:14 +0100 | [diff] [blame] | 1333 | |
| 1334 | /* workqueue context doesn't hold uring_lock, grab it now */ |
Hao Xu | 3b44b37 | 2021-10-18 21:34:31 +0800 | [diff] [blame] | 1335 | if (unlikely(needs_lock)) |
Pavel Begunkov | cb3d897 | 2021-06-14 02:36:14 +0100 | [diff] [blame] | 1336 | mutex_lock(&ctx->uring_lock); |
Jens Axboe | def596e | 2019-01-09 08:59:42 -0700 | [diff] [blame] | 1337 | |
| 1338 | /* |
| 1339 | * Track whether we have multiple files in our lists. This will impact
| 1340 | * how we do polling later on: we avoid spinning if the requests are on
| 1341 | * potentially different devices.
| 1342 | */ |
Pavel Begunkov | 5eef4e8 | 2021-09-24 21:59:49 +0100 | [diff] [blame] | 1343 | if (wq_list_empty(&ctx->iopoll_list)) { |
Hao Xu | 915b3dd | 2021-06-28 05:37:30 +0800 | [diff] [blame] | 1344 | ctx->poll_multi_queue = false; |
| 1345 | } else if (!ctx->poll_multi_queue) { |
Jens Axboe | def596e | 2019-01-09 08:59:42 -0700 | [diff] [blame] | 1346 | struct io_kiocb *list_req; |
| 1347 | |
Pavel Begunkov | 5eef4e8 | 2021-09-24 21:59:49 +0100 | [diff] [blame] | 1348 | list_req = container_of(ctx->iopoll_list.first, struct io_kiocb, |
| 1349 | comp_list); |
Christoph Hellwig | 30da1b4 | 2021-10-12 13:12:14 +0200 | [diff] [blame] | 1350 | if (list_req->file != req->file) |
Hao Xu | 915b3dd | 2021-06-28 05:37:30 +0800 | [diff] [blame] | 1351 | ctx->poll_multi_queue = true; |
Jens Axboe | def596e | 2019-01-09 08:59:42 -0700 | [diff] [blame] | 1352 | } |
| 1353 | |
| 1354 | /* |
| 1355 | * For fast devices, IO may have already completed. If it has, add |
| 1356 | * it to the front so we find it first. |
| 1357 | */ |
Xiaoguang Wang | 65a6543 | 2020-06-11 23:39:36 +0800 | [diff] [blame] | 1358 | if (READ_ONCE(req->iopoll_completed)) |
Pavel Begunkov | 5eef4e8 | 2021-09-24 21:59:49 +0100 | [diff] [blame] | 1359 | wq_list_add_head(&req->comp_list, &ctx->iopoll_list); |
Jens Axboe | def596e | 2019-01-09 08:59:42 -0700 | [diff] [blame] | 1360 | else |
Pavel Begunkov | 5eef4e8 | 2021-09-24 21:59:49 +0100 | [diff] [blame] | 1361 | wq_list_add_tail(&req->comp_list, &ctx->iopoll_list); |
Xiaoguang Wang | bdcd3ea | 2020-02-25 22:12:08 +0800 | [diff] [blame] | 1362 | |
Hao Xu | 3b44b37 | 2021-10-18 21:34:31 +0800 | [diff] [blame] | 1363 | if (unlikely(needs_lock)) { |
Pavel Begunkov | cb3d897 | 2021-06-14 02:36:14 +0100 | [diff] [blame] | 1364 | /* |
| 1365 | * If IORING_SETUP_SQPOLL is enabled, sqes are either handled
| 1366 | * in sq thread task context or in io worker task context. If |
| 1367 | * current task context is sq thread, we don't need to check |
| 1368 | * whether should wake up sq thread. |
| 1369 | */ |
| 1370 | if ((ctx->flags & IORING_SETUP_SQPOLL) && |
| 1371 | wq_has_sleeper(&ctx->sq_data->wait)) |
| 1372 | wake_up(&ctx->sq_data->wait); |
| 1373 | |
| 1374 | mutex_unlock(&ctx->uring_lock); |
| 1375 | } |
Jens Axboe | def596e | 2019-01-09 08:59:42 -0700 | [diff] [blame] | 1376 | } |
| 1377 | |
Jens Axboe | 4503b76 | 2020-06-01 10:00:27 -0600 | [diff] [blame] | 1378 | static bool io_bdev_nowait(struct block_device *bdev) |
| 1379 | { |
Jeffle Xu | 9ba0d0c | 2020-10-19 16:59:42 +0800 | [diff] [blame] | 1380 | return !bdev || blk_queue_nowait(bdev_get_queue(bdev)); |
Jens Axboe | 4503b76 | 2020-06-01 10:00:27 -0600 | [diff] [blame] | 1381 | } |
| 1382 | |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 1383 | /* |
| 1384 | * If we tracked the file through the SCM inflight mechanism, we could support |
| 1385 | * any file. For now, just ensure that anything potentially problematic is done |
| 1386 | * inline. |
| 1387 | */ |
Pavel Begunkov | 88459b5 | 2021-10-17 00:07:10 +0100 | [diff] [blame] | 1388 | static bool __io_file_supports_nowait(struct file *file, umode_t mode) |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 1389 | { |
Jens Axboe | 4503b76 | 2020-06-01 10:00:27 -0600 | [diff] [blame] | 1390 | if (S_ISBLK(mode)) { |
Christoph Hellwig | 4e7b567 | 2020-11-23 13:38:40 +0100 | [diff] [blame] | 1391 | if (IS_ENABLED(CONFIG_BLOCK) && |
| 1392 | io_bdev_nowait(I_BDEV(file->f_mapping->host))) |
Jens Axboe | 4503b76 | 2020-06-01 10:00:27 -0600 | [diff] [blame] | 1393 | return true; |
| 1394 | return false; |
| 1395 | } |
Pavel Begunkov | 976517f | 2021-06-09 12:07:25 +0100 | [diff] [blame] | 1396 | if (S_ISSOCK(mode)) |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 1397 | return true; |
Jens Axboe | 4503b76 | 2020-06-01 10:00:27 -0600 | [diff] [blame] | 1398 | if (S_ISREG(mode)) { |
Christoph Hellwig | 4e7b567 | 2020-11-23 13:38:40 +0100 | [diff] [blame] | 1399 | if (IS_ENABLED(CONFIG_BLOCK) && |
| 1400 | io_bdev_nowait(file->f_inode->i_sb->s_bdev) && |
Jens Axboe | e5550a1 | 2022-05-25 10:28:04 -0600 | [diff] [blame] | 1401 | !io_is_uring_fops(file)) |
Jens Axboe | 4503b76 | 2020-06-01 10:00:27 -0600 | [diff] [blame] | 1402 | return true; |
| 1403 | return false; |
| 1404 | } |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 1405 | |
Jens Axboe | c5b8562 | 2020-06-09 19:23:05 -0600 | [diff] [blame] | 1406 | /* any ->read/write should understand O_NONBLOCK */ |
| 1407 | if (file->f_flags & O_NONBLOCK) |
| 1408 | return true; |
Pavel Begunkov | 35645ac | 2021-10-17 00:07:09 +0100 | [diff] [blame] | 1409 | return file->f_mode & FMODE_NOWAIT; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 1410 | } |
| 1411 | |
Jens Axboe | a4ad4f7 | 2022-05-25 10:40:19 -0600 | [diff] [blame] | 1417 | unsigned int io_file_get_flags(struct file *file) |
Jens Axboe | 7b29f92 | 2021-03-12 08:30:14 -0700 | [diff] [blame] | 1418 | { |
Pavel Begunkov | 88459b5 | 2021-10-17 00:07:10 +0100 | [diff] [blame] | 1419 | umode_t mode = file_inode(file)->i_mode; |
| 1420 | unsigned int res = 0; |
Jens Axboe | 7b29f92 | 2021-03-12 08:30:14 -0700 | [diff] [blame] | 1421 | |
Pavel Begunkov | 88459b5 | 2021-10-17 00:07:10 +0100 | [diff] [blame] | 1422 | if (S_ISREG(mode)) |
| 1423 | res |= FFS_ISREG; |
| 1424 | if (__io_file_supports_nowait(file, mode)) |
| 1425 | res |= FFS_NOWAIT; |
Jens Axboe | 5e45690 | 2022-04-20 16:15:27 -0600 | [diff] [blame] | 1426 | if (io_file_need_scm(file)) |
| 1427 | res |= FFS_SCM; |
Pavel Begunkov | 88459b5 | 2021-10-17 00:07:10 +0100 | [diff] [blame] | 1428 | return res; |
Jens Axboe | 7b29f92 | 2021-03-12 08:30:14 -0700 | [diff] [blame] | 1429 | } |
| 1430 | |
Jens Axboe | 99f15d8 | 2022-05-25 05:59:19 -0600 | [diff] [blame] | 1431 | bool io_alloc_async_data(struct io_kiocb *req) |
Xiaoguang Wang | 3d9932a | 2020-03-27 15:36:52 +0800 | [diff] [blame] | 1432 | { |
Jens Axboe | e8c2bc1 | 2020-08-15 18:44:09 -0700 | [diff] [blame] | 1433 | WARN_ON_ONCE(!io_op_defs[req->opcode].async_size); |
| 1434 | req->async_data = kmalloc(io_op_defs[req->opcode].async_size, GFP_KERNEL); |
Pavel Begunkov | d886e18 | 2021-10-04 20:02:56 +0100 | [diff] [blame] | 1435 | if (req->async_data) { |
| 1436 | req->flags |= REQ_F_ASYNC_DATA; |
| 1437 | return false; |
| 1438 | } |
| 1439 | return true; |
Xiaoguang Wang | 3d9932a | 2020-03-27 15:36:52 +0800 | [diff] [blame] | 1440 | } |
| 1441 | |
Jens Axboe | f3b44f9 | 2022-06-13 07:27:03 -0600 | [diff] [blame] | 1442 | int io_req_prep_async(struct io_kiocb *req) |
Jens Axboe | f67676d | 2019-12-02 11:03:47 -0700 | [diff] [blame] | 1443 | { |
Jens Axboe | a196c78b | 2022-05-01 21:19:50 -0600 | [diff] [blame] | 1444 | const struct io_op_def *def = &io_op_defs[req->opcode]; |
| 1445 | |
| 1446 | /* assign early for deferred execution for non-fixed file */ |
| 1447 | if (def->needs_file && !(req->flags & REQ_F_FIXED_FILE)) |
Linus Torvalds | 3a166bd | 2022-05-23 12:22:49 -0700 | [diff] [blame] | 1448 | req->file = io_file_get_normal(req, req->cqe.fd); |
Jens Axboe | dc919ca | 2022-05-23 17:30:37 -0600 | [diff] [blame] | 1449 | if (!def->prep_async) |
Pavel Begunkov | b7e298d | 2021-02-28 22:35:19 +0000 | [diff] [blame] | 1450 | return 0; |
Pavel Begunkov | d886e18 | 2021-10-04 20:02:56 +0100 | [diff] [blame] | 1451 | if (WARN_ON_ONCE(req_has_async_data(req))) |
Pavel Begunkov | b7e298d | 2021-02-28 22:35:19 +0000 | [diff] [blame] | 1452 | return -EFAULT; |
| 1453 | if (io_alloc_async_data(req)) |
| 1454 | return -EAGAIN; |
| 1455 | |
Jens Axboe | dc919ca | 2022-05-23 17:30:37 -0600 | [diff] [blame] | 1456 | return def->prep_async(req); |
Jens Axboe | f67676d | 2019-12-02 11:03:47 -0700 | [diff] [blame] | 1457 | } |
| 1458 | |
Pavel Begunkov | 9cf7c10 | 2020-07-13 23:37:15 +0300 | [diff] [blame] | 1459 | static u32 io_get_sequence(struct io_kiocb *req) |
| 1460 | { |
Pavel Begunkov | a3dbdf5 | 2021-06-17 18:14:05 +0100 | [diff] [blame] | 1461 | u32 seq = req->ctx->cached_sq_head; |
Pavel Begunkov | 963c6ab | 2022-03-25 11:52:16 +0000 | [diff] [blame] | 1462 | struct io_kiocb *cur; |
Pavel Begunkov | 9cf7c10 | 2020-07-13 23:37:15 +0300 | [diff] [blame] | 1463 | |
Pavel Begunkov | a3dbdf5 | 2021-06-17 18:14:05 +0100 | [diff] [blame] | 1464 | /* need original cached_sq_head, but it was increased for each req */ |
Pavel Begunkov | 963c6ab | 2022-03-25 11:52:16 +0000 | [diff] [blame] | 1465 | io_for_each_link(cur, req) |
Pavel Begunkov | a3dbdf5 | 2021-06-17 18:14:05 +0100 | [diff] [blame] | 1466 | seq--; |
| 1467 | return seq; |
Pavel Begunkov | 9cf7c10 | 2020-07-13 23:37:15 +0300 | [diff] [blame] | 1468 | } |
| 1469 | |
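/*
 * Editor's sketch, not kernel code: cached_sq_head was incremented once
 * per SQE, including every request linked behind this one, so walking the
 * link chain and decrementing recovers the head value at the moment this
 * chain was submitted. Illustrative types:
 */
struct lreq { struct lreq *link; };

static unsigned int get_sequence(unsigned int cached_head,
				 struct lreq *req)
{
	/* one decrement per request in the chain, including req itself */
	for (struct lreq *cur = req; cur; cur = cur->link)
		cached_head--;
	return cached_head;
}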
Pavel Begunkov | c072481 | 2021-10-04 20:02:54 +0100 | [diff] [blame] | 1470 | static __cold void io_drain_req(struct io_kiocb *req) |
Jens Axboe | de0617e | 2019-04-06 21:51:27 -0600 | [diff] [blame] | 1471 | { |
Jackie Liu | a197f66 | 2019-11-08 08:09:12 -0700 | [diff] [blame] | 1472 | struct io_ring_ctx *ctx = req->ctx; |
Pavel Begunkov | 27dc833 | 2020-07-13 23:37:14 +0300 | [diff] [blame] | 1473 | struct io_defer_entry *de; |
Jens Axboe | f67676d | 2019-12-02 11:03:47 -0700 | [diff] [blame] | 1474 | int ret; |
Pavel Begunkov | e0eb71d | 2021-10-01 18:07:01 +0100 | [diff] [blame] | 1475 | u32 seq = io_get_sequence(req); |
Pavel Begunkov | 3c19966 | 2021-06-15 16:47:57 +0100 | [diff] [blame] | 1476 | |
Bob Liu | 9d858b2 | 2019-11-13 18:06:25 +0800 | [diff] [blame] | 1477 | /* Still need to defer if there are pending requests in the defer list. */
Hao Xu | e302f10 | 2021-11-25 17:21:02 +0800 | [diff] [blame] | 1478 | spin_lock(&ctx->completion_lock); |
Pavel Begunkov | 5e37126 | 2021-09-24 22:00:04 +0100 | [diff] [blame] | 1479 | if (!req_need_defer(req, seq) && list_empty_careful(&ctx->defer_list)) { |
Hao Xu | e302f10 | 2021-11-25 17:21:02 +0800 | [diff] [blame] | 1480 | spin_unlock(&ctx->completion_lock); |
Pavel Begunkov | e0eb71d | 2021-10-01 18:07:01 +0100 | [diff] [blame] | 1481 | queue: |
Pavel Begunkov | 10c6690 | 2021-06-15 16:47:56 +0100 | [diff] [blame] | 1482 | ctx->drain_active = false; |
Pavel Begunkov | e0eb71d | 2021-10-01 18:07:01 +0100 | [diff] [blame] | 1483 | io_req_task_queue(req); |
| 1484 | return; |
Pavel Begunkov | 10c6690 | 2021-06-15 16:47:56 +0100 | [diff] [blame] | 1485 | } |
Hao Xu | e302f10 | 2021-11-25 17:21:02 +0800 | [diff] [blame] | 1486 | spin_unlock(&ctx->completion_lock); |
Pavel Begunkov | 9cf7c10 | 2020-07-13 23:37:15 +0300 | [diff] [blame] | 1487 | |
Pavel Begunkov | b7e298d | 2021-02-28 22:35:19 +0000 | [diff] [blame] | 1488 | ret = io_req_prep_async(req); |
Pavel Begunkov | e0eb71d | 2021-10-01 18:07:01 +0100 | [diff] [blame] | 1489 | if (ret) { |
| 1490 | fail: |
| 1491 | io_req_complete_failed(req, ret); |
| 1492 | return; |
| 1493 | } |
Pavel Begunkov | cbdcb43 | 2020-06-29 19:18:43 +0300 | [diff] [blame] | 1494 | io_prep_async_link(req); |
Pavel Begunkov | 27dc833 | 2020-07-13 23:37:14 +0300 | [diff] [blame] | 1495 | de = kmalloc(sizeof(*de), GFP_KERNEL); |
Pavel Begunkov | 76cc33d | 2021-06-14 23:37:30 +0100 | [diff] [blame] | 1496 | if (!de) { |
Pavel Begunkov | 1b48773 | 2021-07-11 22:41:13 +0100 | [diff] [blame] | 1497 | ret = -ENOMEM; |
Pavel Begunkov | e0eb71d | 2021-10-01 18:07:01 +0100 | [diff] [blame] | 1498 | goto fail; |
Pavel Begunkov | 76cc33d | 2021-06-14 23:37:30 +0100 | [diff] [blame] | 1499 | } |
Jens Axboe | 2d28390 | 2019-12-04 11:08:05 -0700 | [diff] [blame] | 1500 | |
Jens Axboe | 79ebeae | 2021-08-10 15:18:27 -0600 | [diff] [blame] | 1501 | spin_lock(&ctx->completion_lock); |
Pavel Begunkov | 9cf7c10 | 2020-07-13 23:37:15 +0300 | [diff] [blame] | 1502 | if (!req_need_defer(req, seq) && list_empty(&ctx->defer_list)) { |
Jens Axboe | 79ebeae | 2021-08-10 15:18:27 -0600 | [diff] [blame] | 1503 | spin_unlock(&ctx->completion_lock); |
Pavel Begunkov | 27dc833 | 2020-07-13 23:37:14 +0300 | [diff] [blame] | 1504 | kfree(de); |
Pavel Begunkov | e0eb71d | 2021-10-01 18:07:01 +0100 | [diff] [blame] | 1505 | goto queue; |
Jens Axboe | de0617e | 2019-04-06 21:51:27 -0600 | [diff] [blame] | 1506 | } |
| 1507 | |
Pavel Begunkov | 48863ff | 2022-06-16 13:57:20 +0100 | [diff] [blame] | 1508 | trace_io_uring_defer(req); |
Pavel Begunkov | 27dc833 | 2020-07-13 23:37:14 +0300 | [diff] [blame] | 1509 | de->req = req; |
Pavel Begunkov | 9cf7c10 | 2020-07-13 23:37:15 +0300 | [diff] [blame] | 1510 | de->seq = seq; |
Pavel Begunkov | 27dc833 | 2020-07-13 23:37:14 +0300 | [diff] [blame] | 1511 | list_add_tail(&de->list, &ctx->defer_list); |
Jens Axboe | 79ebeae | 2021-08-10 15:18:27 -0600 | [diff] [blame] | 1512 | spin_unlock(&ctx->completion_lock); |
Jens Axboe | de0617e | 2019-04-06 21:51:27 -0600 | [diff] [blame] | 1513 | } |
| 1514 | |
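/*
 * Editor's sketch, not kernel code: io_drain_req() above is a classic
 * check / allocate-outside-the-lock / re-check-under-the-lock sequence:
 * the defer entry is allocated without holding completion_lock, and if
 * the drain condition cleared in the meantime the entry is discarded and
 * the request runs immediately. The shape with pthreads; the names and
 * the needs_defer() predicate are illustrative:
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>

struct pending_req { struct pending_req *next; void *payload; };

struct drain_ctx {
	pthread_mutex_t lock;
	struct pending_req *defer_list;
	bool (*needs_defer)(void *payload);
};

/* Returns true if queued for later, false if the caller should run now. */
static bool maybe_defer(struct drain_ctx *ctx, void *payload)
{
	pthread_mutex_lock(&ctx->lock);
	bool need = ctx->needs_defer(payload) || ctx->defer_list;
	pthread_mutex_unlock(&ctx->lock);
	if (!need)
		return false;

	struct pending_req *de = malloc(sizeof(*de)); /* outside the lock */
	if (!de)
		return false; /* the kernel fails the request here instead */

	pthread_mutex_lock(&ctx->lock);
	if (!ctx->needs_defer(payload) && !ctx->defer_list) {
		pthread_mutex_unlock(&ctx->lock);
		free(de); /* condition cleared while we allocated */
		return false;
	}
	de->payload = payload;
	de->next = ctx->defer_list;
	ctx->defer_list = de;
	pthread_mutex_unlock(&ctx->lock);
	return true;
}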
Pavel Begunkov | 68fb897 | 2021-03-19 17:22:41 +0000 | [diff] [blame] | 1515 | static void io_clean_op(struct io_kiocb *req) |
Pavel Begunkov | 99bc4c3 | 2020-02-07 22:04:45 +0300 | [diff] [blame] | 1516 | { |
Pavel Begunkov | 8197b05 | 2022-03-25 13:00:43 +0000 | [diff] [blame] | 1517 | if (req->flags & REQ_F_BUFFER_SELECTED) { |
| 1518 | spin_lock(&req->ctx->completion_lock); |
Jens Axboe | cc3cec8 | 2022-03-08 17:46:52 -0700 | [diff] [blame] | 1519 | io_put_kbuf_comp(req); |
Pavel Begunkov | 8197b05 | 2022-03-25 13:00:43 +0000 | [diff] [blame] | 1520 | spin_unlock(&req->ctx->completion_lock); |
| 1521 | } |
Pavel Begunkov | 99bc4c3 | 2020-02-07 22:04:45 +0300 | [diff] [blame] | 1522 | |
Pavel Begunkov | 0e1b6fe3 | 2020-07-16 23:28:02 +0300 | [diff] [blame] | 1523 | if (req->flags & REQ_F_NEED_CLEANUP) { |
Jens Axboe | 4d4c9cf | 2022-05-24 10:26:28 -0600 | [diff] [blame] | 1524 | const struct io_op_def *def = &io_op_defs[req->opcode]; |
Pavel Begunkov | 1dacb4d | 2021-06-17 18:14:03 +0100 | [diff] [blame] | 1525 | |
Jens Axboe | 4d4c9cf | 2022-05-24 10:26:28 -0600 | [diff] [blame] | 1526 | if (def->cleanup) |
| 1527 | def->cleanup(req); |
Pavel Begunkov | 0e1b6fe3 | 2020-07-16 23:28:02 +0300 | [diff] [blame] | 1528 | } |
Jens Axboe | 75652a30 | 2021-04-15 09:52:40 -0600 | [diff] [blame] | 1529 | if ((req->flags & REQ_F_POLLED) && req->apoll) { |
| 1530 | kfree(req->apoll->double_poll); |
| 1531 | kfree(req->apoll); |
| 1532 | req->apoll = NULL; |
| 1533 | } |
Jens Axboe | 9cae36a | 2022-06-01 23:57:02 -0600 | [diff] [blame] | 1534 | if (req->flags & REQ_F_INFLIGHT) { |
| 1535 | struct io_uring_task *tctx = req->task->io_uring; |
| 1536 | |
| 1537 | atomic_dec(&tctx->inflight_tracked); |
| 1538 | } |
Pavel Begunkov | c854357 | 2021-06-17 18:14:04 +0100 | [diff] [blame] | 1539 | if (req->flags & REQ_F_CREDS) |
Pavel Begunkov | b8e64b5 | 2021-06-17 18:14:02 +0100 | [diff] [blame] | 1540 | put_cred(req->creds); |
Pavel Begunkov | d886e18 | 2021-10-04 20:02:56 +0100 | [diff] [blame] | 1541 | if (req->flags & REQ_F_ASYNC_DATA) { |
| 1542 | kfree(req->async_data); |
| 1543 | req->async_data = NULL; |
| 1544 | } |
Pavel Begunkov | c854357 | 2021-06-17 18:14:04 +0100 | [diff] [blame] | 1545 | req->flags &= ~IO_REQ_CLEAN_FLAGS; |
Pavel Begunkov | 99bc4c3 | 2020-02-07 22:04:45 +0300 | [diff] [blame] | 1546 | } |
| 1547 | |
Jens Axboe | 6bf9c47 | 2022-03-29 10:10:08 -0600 | [diff] [blame] | 1548 | static bool io_assign_file(struct io_kiocb *req, unsigned int issue_flags) |
| 1549 | { |
| 1550 | if (req->file || !io_op_defs[req->opcode].needs_file) |
| 1551 | return true; |
| 1552 | |
| 1553 | if (req->flags & REQ_F_FIXED_FILE) |
Pavel Begunkov | cef216f | 2022-04-12 15:09:43 +0100 | [diff] [blame] | 1554 | req->file = io_file_get_fixed(req, req->cqe.fd, issue_flags); |
Jens Axboe | 6bf9c47 | 2022-03-29 10:10:08 -0600 | [diff] [blame] | 1555 | else |
Pavel Begunkov | cef216f | 2022-04-12 15:09:43 +0100 | [diff] [blame] | 1556 | req->file = io_file_get_normal(req, req->cqe.fd); |
Jens Axboe | 6bf9c47 | 2022-03-29 10:10:08 -0600 | [diff] [blame] | 1557 | |
Pavel Begunkov | 772f5e0 | 2022-04-18 20:51:12 +0100 | [diff] [blame] | 1558 | return !!req->file; |
Jens Axboe | 6bf9c47 | 2022-03-29 10:10:08 -0600 | [diff] [blame] | 1559 | } |
| 1560 | |
Pavel Begunkov | 889fca7 | 2021-02-10 00:03:09 +0000 | [diff] [blame] | 1561 | static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags) |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 1562 | { |
Jens Axboe | fcde59f | 2022-05-23 16:53:15 -0600 | [diff] [blame] | 1563 | const struct io_op_def *def = &io_op_defs[req->opcode]; |
Jens Axboe | 5730b27 | 2021-02-27 15:57:30 -0700 | [diff] [blame] | 1564 | const struct cred *creds = NULL; |
Jens Axboe | d625c6e | 2019-12-17 19:53:05 -0700 | [diff] [blame] | 1565 | int ret; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 1566 | |
Jens Axboe | 7015214 | 2022-04-14 20:23:40 -0600 | [diff] [blame] | 1567 | if (unlikely(!io_assign_file(req, issue_flags))) |
| 1568 | return -EBADF; |
| 1569 | |
Pavel Begunkov | 6878b40 | 2021-09-24 21:59:41 +0100 | [diff] [blame] | 1570 | if (unlikely((req->flags & REQ_F_CREDS) && req->creds != current_cred())) |
Pavel Begunkov | c10d1f9 | 2021-06-17 18:14:01 +0100 | [diff] [blame] | 1571 | creds = override_creds(req->creds); |
Jens Axboe | 5730b27 | 2021-02-27 15:57:30 -0700 | [diff] [blame] | 1572 | |
Jens Axboe | fcde59f | 2022-05-23 16:53:15 -0600 | [diff] [blame] | 1573 | if (!def->audit_skip) |
Paul Moore | 5bd2182 | 2021-02-16 19:46:48 -0500 | [diff] [blame] | 1574 | audit_uring_entry(req->opcode); |
| 1575 | |
Jens Axboe | 0702e53 | 2022-05-23 16:56:21 -0600 | [diff] [blame] | 1576 | ret = def->issue(req, issue_flags); |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 1577 | |
Jens Axboe | fcde59f | 2022-05-23 16:53:15 -0600 | [diff] [blame] | 1578 | if (!def->audit_skip) |
Paul Moore | 5bd2182 | 2021-02-16 19:46:48 -0500 | [diff] [blame] | 1579 | audit_uring_exit(!ret, ret); |
| 1580 | |
Jens Axboe | 5730b27 | 2021-02-27 15:57:30 -0700 | [diff] [blame] | 1581 | if (creds) |
| 1582 | revert_creds(creds); |
Jens Axboe | 97b388d | 2022-05-24 15:21:00 -0600 | [diff] [blame] | 1583 | |
Pavel Begunkov | 75d7b3a | 2022-06-16 10:21:58 +0100 | [diff] [blame] | 1584 | if (ret == IOU_OK) { |
| 1585 | if (issue_flags & IO_URING_F_COMPLETE_DEFER) |
Pavel Begunkov | 9da070b | 2022-06-20 01:26:00 +0100 | [diff] [blame] | 1586 | io_req_complete_defer(req); |
Pavel Begunkov | 75d7b3a | 2022-06-16 10:21:58 +0100 | [diff] [blame] | 1587 | else |
| 1588 | io_req_complete_post(req); |
| 1589 | } else if (ret != IOU_ISSUE_SKIP_COMPLETE) |
Jens Axboe | def596e | 2019-01-09 08:59:42 -0700 | [diff] [blame] | 1590 | return ret; |
Jens Axboe | 97b388d | 2022-05-24 15:21:00 -0600 | [diff] [blame] | 1591 | |
Jens Axboe | b532576 | 2020-05-19 21:20:27 -0600 | [diff] [blame] | 1592 | /* If the op doesn't have a file, we're not polling for it */ |
Pavel Begunkov | 9983028 | 2021-10-15 17:09:11 +0100 | [diff] [blame] | 1593 | if ((req->ctx->flags & IORING_SETUP_IOPOLL) && req->file) |
Pavel Begunkov | 9882131 | 2021-10-15 17:09:12 +0100 | [diff] [blame] | 1594 | io_iopoll_req_issued(req, issue_flags); |
Jens Axboe | def596e | 2019-01-09 08:59:42 -0700 | [diff] [blame] | 1595 | |
| 1596 | return 0; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 1597 | } |
| 1598 | |
Jens Axboe | 329061d | 2022-05-25 20:31:09 -0600 | [diff] [blame] | 1599 | int io_poll_issue(struct io_kiocb *req, bool *locked) |
| 1600 | { |
| 1601 | io_tw_lock(req->ctx, locked); |
| 1602 | if (unlikely(req->task->flags & PF_EXITING)) |
| 1603 | return -EFAULT; |
Pavel Begunkov | aeaa72c | 2022-06-15 17:33:54 +0100 | [diff] [blame] | 1604 | return io_issue_sqe(req, IO_URING_F_NONBLOCK); |
Jens Axboe | 329061d | 2022-05-25 20:31:09 -0600 | [diff] [blame] | 1605 | } |
| 1606 | |
Jens Axboe | c9f06aa | 2022-05-25 11:01:04 -0600 | [diff] [blame] | 1607 | struct io_wq_work *io_wq_free_work(struct io_wq_work *work) |
Pavel Begunkov | ebc11b6 | 2021-08-09 13:04:05 +0100 | [diff] [blame] | 1608 | { |
| 1609 | struct io_kiocb *req = container_of(work, struct io_kiocb, work); |
| 1610 | |
| 1611 | req = io_put_req_find_next(req); |
| 1612 | return req ? &req->work : NULL; |
| 1613 | } |
| 1614 | |
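| | /*
| |  * Entry point for requests punted to the io-wq worker pool. Unlike the
| |  * inline submission path, blocking is allowed here, so -EAGAIN from the
| |  * issue handler is retried in a loop, or handed off to poll if the file
| |  * supports it.
| | */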
Jens Axboe | c9f06aa | 2022-05-25 11:01:04 -0600 | [diff] [blame] | 1615 | void io_wq_submit_work(struct io_wq_work *work) |
Pavel Begunkov | d4c81f3 | 2020-06-08 21:08:19 +0300 | [diff] [blame] | 1616 | { |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 1617 | struct io_kiocb *req = container_of(work, struct io_kiocb, work); |
Jens Axboe | 6bf9c47 | 2022-03-29 10:10:08 -0600 | [diff] [blame] | 1618 | const struct io_op_def *def = &io_op_defs[req->opcode]; |
Pavel Begunkov | d01905d | 2021-10-23 12:13:57 +0100 | [diff] [blame] | 1619 | unsigned int issue_flags = IO_URING_F_UNLOCKED; |
| 1620 | bool needs_poll = false; |
Jens Axboe | 6bf9c47 | 2022-03-29 10:10:08 -0600 | [diff] [blame] | 1621 | int ret = 0, err = -ECANCELED; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 1622 | |
Pavel Begunkov | 48dcd38 | 2021-08-15 10:40:18 +0100 | [diff] [blame] | 1623 | /* one will be dropped by ->io_free_work() after returning to io-wq */ |
| 1624 | if (!(req->flags & REQ_F_REFCOUNT)) |
| 1625 | __io_req_set_refcount(req, 2); |
| 1626 | else |
| 1627 | req_ref_get(req); |
Pavel Begunkov | 5d5901a | 2021-08-11 19:28:29 +0100 | [diff] [blame] | 1628 | |
Pavel Begunkov | cb2d344 | 2022-04-15 22:08:25 +0100 | [diff] [blame] | 1629 | io_arm_ltimeout(req); |
Jens Axboe | 6bf9c47 | 2022-03-29 10:10:08 -0600 | [diff] [blame] | 1630 | |
Pavel Begunkov | dadebc3 | 2021-08-23 13:30:44 +0100 | [diff] [blame] | 1631 | /* either cancelled or io-wq is dying, so don't touch tctx->iowq */ |
Pavel Begunkov | d01905d | 2021-10-23 12:13:57 +0100 | [diff] [blame] | 1632 | if (work->flags & IO_WQ_WORK_CANCEL) { |
Pavel Begunkov | 0f8da75 | 2022-04-12 15:24:43 +0100 | [diff] [blame] | 1633 | fail: |
Jens Axboe | 6bf9c47 | 2022-03-29 10:10:08 -0600 | [diff] [blame] | 1634 | io_req_task_queue_fail(req, err); |
Pavel Begunkov | d01905d | 2021-10-23 12:13:57 +0100 | [diff] [blame] | 1635 | return; |
Jens Axboe | 561fb04 | 2019-10-24 07:25:42 -0600 | [diff] [blame] | 1636 | } |
Pavel Begunkov | 0f8da75 | 2022-04-12 15:24:43 +0100 | [diff] [blame] | 1637 | if (!io_assign_file(req, issue_flags)) { |
| 1638 | err = -EBADF; |
| 1639 | work->flags |= IO_WQ_WORK_CANCEL; |
| 1640 | goto fail; |
| 1641 | } |
Jens Axboe | 31b5151 | 2019-01-18 22:56:34 -0700 | [diff] [blame] | 1642 | |
Pavel Begunkov | d01905d | 2021-10-23 12:13:57 +0100 | [diff] [blame] | 1643 | if (req->flags & REQ_F_FORCE_ASYNC) { |
Pavel Begunkov | afb7f56f | 2021-10-23 12:13:59 +0100 | [diff] [blame] | 1644 | bool opcode_poll = def->pollin || def->pollout; |
| 1645 | |
| 1646 | if (opcode_poll && file_can_poll(req->file)) { |
| 1647 | needs_poll = true; |
Pavel Begunkov | d01905d | 2021-10-23 12:13:57 +0100 | [diff] [blame] | 1648 | issue_flags |= IO_URING_F_NONBLOCK; |
Pavel Begunkov | afb7f56f | 2021-10-23 12:13:59 +0100 | [diff] [blame] | 1649 | } |
Pavel Begunkov | d01905d | 2021-10-23 12:13:57 +0100 | [diff] [blame] | 1650 | } |
Hao Xu | 90fa0288 | 2021-10-18 21:34:45 +0800 | [diff] [blame] | 1651 | |
Pavel Begunkov | d01905d | 2021-10-23 12:13:57 +0100 | [diff] [blame] | 1652 | do { |
| 1653 | ret = io_issue_sqe(req, issue_flags); |
| 1654 | if (ret != -EAGAIN) |
| 1655 | break; |
| 1656 | /* |
| 1657 | * We can get EAGAIN for iopolled IO even though we're |
| 1658 | * forcing a sync submission from here, since we can't |
| 1659 | * wait for request slots on the block side. |
| 1660 | */ |
| 1661 | if (!needs_poll) { |
Pavel Begunkov | e0deb6a | 2022-05-13 11:24:56 +0100 | [diff] [blame] | 1662 | if (!(req->ctx->flags & IORING_SETUP_IOPOLL)) |
| 1663 | break; |
Pavel Begunkov | d01905d | 2021-10-23 12:13:57 +0100 | [diff] [blame] | 1664 | cond_resched(); |
| 1665 | continue; |
Hao Xu | 90fa0288 | 2021-10-18 21:34:45 +0800 | [diff] [blame] | 1666 | } |
| 1667 | |
Jens Axboe | 4d9237e | 2022-03-15 10:54:08 -0600 | [diff] [blame] | 1668 | if (io_arm_poll_handler(req, issue_flags) == IO_APOLL_OK) |
Pavel Begunkov | d01905d | 2021-10-23 12:13:57 +0100 | [diff] [blame] | 1669 | return; |
| 1670 | /* aborted or ready, in either case retry blocking */ |
| 1671 | needs_poll = false; |
| 1672 | issue_flags &= ~IO_URING_F_NONBLOCK; |
| 1673 | } while (1); |
Jens Axboe | 31b5151 | 2019-01-18 22:56:34 -0700 | [diff] [blame] | 1674 | |
Pavel Begunkov | a3df7698 | 2021-02-18 22:32:52 +0000 | [diff] [blame] | 1675 | /* avoid locking problems by failing it from a clean context */ |
Jens Axboe | 97b388d | 2022-05-24 15:21:00 -0600 | [diff] [blame] | 1676 | if (ret < 0) |
Pavel Begunkov | a3df7698 | 2021-02-18 22:32:52 +0000 | [diff] [blame] | 1677 | io_req_task_queue_fail(req, ret); |
Jens Axboe | 31b5151 | 2019-01-18 22:56:34 -0700 | [diff] [blame] | 1678 | } |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 1679 | |
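| | /*
| |  * Look up a file registered via IORING_REGISTER_FILES. The low bits of
| |  * the stored pointer double as per-file FFS_* flag bits, which are
| |  * shifted into req->flags below; the lookup runs under the submit lock
| |  * so the table cannot change underneath us.
| | */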
Jens Axboe | 531113b | 2022-05-24 21:19:47 -0600 | [diff] [blame] | 1680 | inline struct file *io_file_get_fixed(struct io_kiocb *req, int fd, |
| 1681 | unsigned int issue_flags) |
Pavel Begunkov | 8da11c1 | 2020-02-24 11:32:44 +0300 | [diff] [blame] | 1682 | { |
Jens Axboe | 5106dd6 | 2022-04-04 17:18:43 -0600 | [diff] [blame] | 1683 | struct io_ring_ctx *ctx = req->ctx; |
| 1684 | struct file *file = NULL; |
Pavel Begunkov | ac17705 | 2021-08-09 13:04:02 +0100 | [diff] [blame] | 1685 | unsigned long file_ptr; |
Pavel Begunkov | 8da11c1 | 2020-02-24 11:32:44 +0300 | [diff] [blame] | 1686 | |
Pavel Begunkov | 93f052c | 2022-04-18 20:51:11 +0100 | [diff] [blame] | 1687 | io_ring_submit_lock(ctx, issue_flags); |
Jens Axboe | 5106dd6 | 2022-04-04 17:18:43 -0600 | [diff] [blame] | 1688 | |
Pavel Begunkov | ac17705 | 2021-08-09 13:04:02 +0100 | [diff] [blame] | 1689 | if (unlikely((unsigned int)fd >= ctx->nr_user_files)) |
Jens Axboe | 5106dd6 | 2022-04-04 17:18:43 -0600 | [diff] [blame] | 1690 | goto out; |
Pavel Begunkov | ac17705 | 2021-08-09 13:04:02 +0100 | [diff] [blame] | 1691 | fd = array_index_nospec(fd, ctx->nr_user_files); |
| 1692 | file_ptr = io_fixed_file_slot(&ctx->file_table, fd)->file_ptr; |
| 1693 | file = (struct file *) (file_ptr & FFS_MASK); |
| 1694 | file_ptr &= ~FFS_MASK; |
| 1695 | /* mask in overlapping REQ_F and FFS bits */ |
Pavel Begunkov | 35645ac | 2021-10-17 00:07:09 +0100 | [diff] [blame] | 1696 | req->flags |= (file_ptr << REQ_F_SUPPORT_NOWAIT_BIT); |
Jens Axboe | 5106dd6 | 2022-04-04 17:18:43 -0600 | [diff] [blame] | 1697 | io_req_set_rsrc_node(req, ctx, 0); |
Jens Axboe | d78bd8a | 2022-05-07 09:56:13 -0600 | [diff] [blame] | 1698 | WARN_ON_ONCE(file && !test_bit(fd, ctx->file_table.bitmap)); |
Jens Axboe | 5106dd6 | 2022-04-04 17:18:43 -0600 | [diff] [blame] | 1699 | out: |
Pavel Begunkov | 93f052c | 2022-04-18 20:51:11 +0100 | [diff] [blame] | 1700 | io_ring_submit_unlock(ctx, issue_flags); |
Pavel Begunkov | 8371adf | 2020-10-10 18:34:08 +0100 | [diff] [blame] | 1701 | return file; |
Pavel Begunkov | 8da11c1 | 2020-02-24 11:32:44 +0300 | [diff] [blame] | 1702 | } |
| 1703 | |
Jens Axboe | 531113b | 2022-05-24 21:19:47 -0600 | [diff] [blame] | 1704 | struct file *io_file_get_normal(struct io_kiocb *req, int fd) |
Pavel Begunkov | ac17705 | 2021-08-09 13:04:02 +0100 | [diff] [blame] | 1705 | { |
Pavel Begunkov | 62906e8 | 2021-08-10 14:52:47 +0100 | [diff] [blame] | 1706 | struct file *file = fget(fd); |
Pavel Begunkov | ac17705 | 2021-08-09 13:04:02 +0100 | [diff] [blame] | 1707 | |
Pavel Begunkov | 48863ff | 2022-06-16 13:57:20 +0100 | [diff] [blame] | 1708 | trace_io_uring_file_get(req, fd); |
Pavel Begunkov | ac17705 | 2021-08-09 13:04:02 +0100 | [diff] [blame] | 1709 | |
| 1710 | /* io_uring files can't be fixed files, so track any we see as inflight */
Jens Axboe | e5550a1 | 2022-05-25 10:28:04 -0600 | [diff] [blame] | 1711 | if (file && io_is_uring_fops(file)) |
Jens Axboe | 9cae36a | 2022-06-01 23:57:02 -0600 | [diff] [blame] | 1712 | io_req_track_inflight(req); |
Pavel Begunkov | ac17705 | 2021-08-09 13:04:02 +0100 | [diff] [blame] | 1713 | return file; |
| 1714 | } |
| 1715 | |
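| | /*
| |  * Slow path for inline issue: called when io_issue_sqe() did not return
| |  * success. Hard errors fail the request; -EAGAIN first tries to arm
| |  * poll, and only falls back to io-wq when the file can't report
| |  * readiness.
| | */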
Pavel Begunkov | 7bfa9ba | 2022-04-15 22:08:28 +0100 | [diff] [blame] | 1716 | static void io_queue_async(struct io_kiocb *req, int ret) |
Pavel Begunkov | d475a9a | 2021-09-24 21:59:59 +0100 | [diff] [blame] | 1717 | __must_hold(&req->ctx->uring_lock) |
| 1718 | { |
Pavel Begunkov | 7bfa9ba | 2022-04-15 22:08:28 +0100 | [diff] [blame] | 1719 | struct io_kiocb *linked_timeout; |
| 1720 | |
| 1721 | if (ret != -EAGAIN || (req->flags & REQ_F_NOWAIT)) { |
| 1722 | io_req_complete_failed(req, ret); |
| 1723 | return; |
| 1724 | } |
| 1725 | |
| 1726 | linked_timeout = io_prep_linked_timeout(req); |
Pavel Begunkov | d475a9a | 2021-09-24 21:59:59 +0100 | [diff] [blame] | 1727 | |
Jens Axboe | 4d9237e | 2022-03-15 10:54:08 -0600 | [diff] [blame] | 1728 | switch (io_arm_poll_handler(req, 0)) { |
Pavel Begunkov | d475a9a | 2021-09-24 21:59:59 +0100 | [diff] [blame] | 1729 | case IO_APOLL_READY: |
Pavel Begunkov | d475a9a | 2021-09-24 21:59:59 +0100 | [diff] [blame] | 1730 | io_req_task_queue(req); |
| 1731 | break; |
| 1732 | case IO_APOLL_ABORTED: |
| 1733 | /* |
| 1734 | * Poll arming was aborted, so punt the request to io-wq
| 1735 | * for blocking execution instead.
| 1736 | */ |
Jens Axboe | 6436c77 | 2022-06-17 06:24:26 -0600 | [diff] [blame] | 1737 | io_kbuf_recycle(req, 0); |
Pavel Begunkov | 77955ef | 2022-04-15 22:08:27 +0100 | [diff] [blame] | 1738 | io_queue_iowq(req, NULL); |
Pavel Begunkov | d475a9a | 2021-09-24 21:59:59 +0100 | [diff] [blame] | 1739 | break; |
Jens Axboe | b1c6264 | 2022-03-09 11:27:52 -0700 | [diff] [blame] | 1740 | case IO_APOLL_OK: |
Jens Axboe | b1c6264 | 2022-03-09 11:27:52 -0700 | [diff] [blame] | 1741 | break; |
Pavel Begunkov | d475a9a | 2021-09-24 21:59:59 +0100 | [diff] [blame] | 1742 | } |
| 1743 | |
| 1744 | if (linked_timeout) |
| 1745 | io_queue_linked_timeout(linked_timeout); |
| 1746 | } |
| 1747 | |
Pavel Begunkov | cbc2e20 | 2022-04-15 22:08:26 +0100 | [diff] [blame] | 1748 | static inline void io_queue_sqe(struct io_kiocb *req) |
Pavel Begunkov | 282cdc8 | 2021-08-09 13:04:10 +0100 | [diff] [blame] | 1749 | __must_hold(&req->ctx->uring_lock) |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 1750 | { |
Jens Axboe | e0c5c57 | 2019-03-12 10:18:47 -0600 | [diff] [blame] | 1751 | int ret; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 1752 | |
Pavel Begunkov | c5eef2b94 | 2021-02-10 00:03:22 +0000 | [diff] [blame] | 1753 | ret = io_issue_sqe(req, IO_URING_F_NONBLOCK|IO_URING_F_COMPLETE_DEFER); |
Jens Axboe | 491381ce | 2019-10-17 09:20:46 -0600 | [diff] [blame] | 1754 | |
| 1755 | /* |
| 1756 | * On success, just arm any linked timeout. Otherwise io_queue_async()
| 1757 | * fails the request or, on -EAGAIN without NOWAIT, punts it to poll/io-wq.
| 1758 | */ |
Pavel Begunkov | 7bfa9ba | 2022-04-15 22:08:28 +0100 | [diff] [blame] | 1759 | if (likely(!ret)) |
Pavel Begunkov | cb2d344 | 2022-04-15 22:08:25 +0100 | [diff] [blame] | 1760 | io_arm_ltimeout(req); |
Pavel Begunkov | 7bfa9ba | 2022-04-15 22:08:28 +0100 | [diff] [blame] | 1761 | else |
| 1762 | io_queue_async(req, ret); |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 1763 | } |
| 1764 | |
Pavel Begunkov | 4652fe3 | 2021-09-24 21:59:58 +0100 | [diff] [blame] | 1765 | static void io_queue_sqe_fallback(struct io_kiocb *req) |
Pavel Begunkov | 282cdc8 | 2021-08-09 13:04:10 +0100 | [diff] [blame] | 1766 | __must_hold(&req->ctx->uring_lock) |
Jackie Liu | 4fe2c96 | 2019-09-09 20:50:40 +0800 | [diff] [blame] | 1767 | { |
Pavel Begunkov | 17b147f | 2022-04-15 22:08:32 +0100 | [diff] [blame] | 1768 | if (unlikely(req->flags & REQ_F_FAIL)) { |
| 1769 | /* |
| 1770 | * We don't submit; fail them all. To do that, replace hardlinks
| 1771 | * with normal links; an extra REQ_F_LINK is tolerated.
| 1772 | */ |
| 1773 | req->flags &= ~REQ_F_HARDLINK; |
| 1774 | req->flags |= REQ_F_LINK; |
| 1775 | io_req_complete_failed(req, req->cqe.res); |
Pavel Begunkov | e0eb71d | 2021-10-01 18:07:01 +0100 | [diff] [blame] | 1776 | } else if (unlikely(req->ctx->drain_active)) { |
| 1777 | io_drain_req(req); |
Pavel Begunkov | 76cc33d | 2021-06-14 23:37:30 +0100 | [diff] [blame] | 1778 | } else { |
| 1779 | int ret = io_req_prep_async(req); |
| 1780 | |
| 1781 | if (unlikely(ret)) |
| 1782 | io_req_complete_failed(req, ret); |
| 1783 | else |
Pavel Begunkov | 77955ef | 2022-04-15 22:08:27 +0100 | [diff] [blame] | 1784 | io_queue_iowq(req, NULL); |
Jens Axboe | ce35a47 | 2019-12-17 08:04:44 -0700 | [diff] [blame] | 1785 | } |
Jackie Liu | 4fe2c96 | 2019-09-09 20:50:40 +0800 | [diff] [blame] | 1786 | } |
| 1787 | |
Stefano Garzarella | 21b55db | 2020-08-27 16:58:30 +0200 | [diff] [blame] | 1788 | /* |
| 1789 | * Check SQE restrictions (opcode and flags). |
| 1790 | * |
| 1791 | * Returns 'true' if SQE is allowed, 'false' otherwise. |
| 1792 | */ |
| 1793 | static inline bool io_check_restriction(struct io_ring_ctx *ctx, |
| 1794 | struct io_kiocb *req, |
| 1795 | unsigned int sqe_flags) |
| 1796 | { |
Stefano Garzarella | 21b55db | 2020-08-27 16:58:30 +0200 | [diff] [blame] | 1797 | if (!test_bit(req->opcode, ctx->restrictions.sqe_op)) |
| 1798 | return false; |
| 1799 | |
| 1800 | if ((sqe_flags & ctx->restrictions.sqe_flags_required) != |
| 1801 | ctx->restrictions.sqe_flags_required) |
| 1802 | return false; |
| 1803 | |
| 1804 | if (sqe_flags & ~(ctx->restrictions.sqe_flags_allowed | |
| 1805 | ctx->restrictions.sqe_flags_required)) |
| 1806 | return false; |
| 1807 | |
| 1808 | return true; |
| 1809 | } |
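| | 
| | /*
| |  * Illustrative userspace sketch (not kernel code): restrictions are
| |  * registered once, while the ring is still disabled
| |  * (IORING_SETUP_R_DISABLED), e.g. with liburing:
| |  *
| |  *	struct io_uring_restriction res = {
| |  *		.opcode = IORING_RESTRICTION_SQE_OP,
| |  *		.sqe_op = IORING_OP_READ,
| |  *	};
| |  *	io_uring_register_restrictions(&ring, &res, 1);
| |  *
| |  * After enabling, io_check_restriction() rejects any SQE whose opcode or
| |  * flags fall outside the registered sets, and io_init_req() fails the
| |  * request with -EACCES.
| | */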
| 1810 | |
Pavel Begunkov | 22b2ca3 | 2021-10-01 18:07:00 +0100 | [diff] [blame] | 1811 | static void io_init_req_drain(struct io_kiocb *req) |
| 1812 | { |
| 1813 | struct io_ring_ctx *ctx = req->ctx; |
| 1814 | struct io_kiocb *head = ctx->submit_state.link.head; |
| 1815 | |
| 1816 | ctx->drain_active = true; |
| 1817 | if (head) { |
| 1818 | /* |
| 1819 | * If we need to drain a request in the middle of a link, drain |
| 1820 | * the head request and the next request/link after the current |
| 1821 | * link. Considering sequential execution of links, |
Hao Xu | b6c7db3 | 2021-11-25 17:21:03 +0800 | [diff] [blame] | 1822 | * REQ_F_IO_DRAIN will be maintained for every request of our |
Pavel Begunkov | 22b2ca3 | 2021-10-01 18:07:00 +0100 | [diff] [blame] | 1823 | * link. |
| 1824 | */ |
Hao Xu | b6c7db3 | 2021-11-25 17:21:03 +0800 | [diff] [blame] | 1825 | head->flags |= REQ_F_IO_DRAIN | REQ_F_FORCE_ASYNC; |
Pavel Begunkov | 22b2ca3 | 2021-10-01 18:07:00 +0100 | [diff] [blame] | 1826 | ctx->drain_next = true; |
| 1827 | } |
| 1828 | } |
| 1829 | |
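| | /*
| |  * First stage of turning an SQE into an io_kiocb: copy out the fields we
| |  * need with READ_ONCE() (userspace can rewrite the SQE at any time),
| |  * validate opcode and flags, resolve drain/link state and credentials,
| |  * then hand off to the opcode's ->prep() handler.
| | */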
Pavel Begunkov | ef4ff58 | 2020-04-12 02:05:05 +0300 | [diff] [blame] | 1830 | static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req, |
Pavel Begunkov | 258b29a | 2021-02-10 00:03:10 +0000 | [diff] [blame] | 1831 | const struct io_uring_sqe *sqe) |
Pavel Begunkov | 282cdc8 | 2021-08-09 13:04:10 +0100 | [diff] [blame] | 1832 | __must_hold(&ctx->uring_lock) |
Pavel Begunkov | 0553b8b | 2020-04-08 08:58:45 +0300 | [diff] [blame] | 1833 | { |
Jens Axboe | fcde59f | 2022-05-23 16:53:15 -0600 | [diff] [blame] | 1834 | const struct io_op_def *def; |
Pavel Begunkov | ef4ff58 | 2020-04-12 02:05:05 +0300 | [diff] [blame] | 1835 | unsigned int sqe_flags; |
Pavel Begunkov | fc0ae02 | 2021-10-01 18:07:02 +0100 | [diff] [blame] | 1836 | int personality; |
Pavel Begunkov | 4a04d1d | 2021-10-06 16:06:49 +0100 | [diff] [blame] | 1837 | u8 opcode; |
Pavel Begunkov | ef4ff58 | 2020-04-12 02:05:05 +0300 | [diff] [blame] | 1838 | |
Pavel Begunkov | 864ea92 | 2021-08-09 13:04:08 +0100 | [diff] [blame] | 1839 | /* req is partially pre-initialised, see io_preinit_req() */ |
Pavel Begunkov | 4a04d1d | 2021-10-06 16:06:49 +0100 | [diff] [blame] | 1840 | req->opcode = opcode = READ_ONCE(sqe->opcode); |
Pavel Begunkov | 5be9ad1 | 2021-02-12 18:41:17 +0000 | [diff] [blame] | 1841 | /* same numerical values as the corresponding REQ_F_* flags, safe to copy */
| 1842 | req->flags = sqe_flags = READ_ONCE(sqe->flags); |
Pavel Begunkov | cef216f | 2022-04-12 15:09:43 +0100 | [diff] [blame] | 1843 | req->cqe.user_data = READ_ONCE(sqe->user_data); |
Pavel Begunkov | 0553b8b | 2020-04-08 08:58:45 +0300 | [diff] [blame] | 1844 | req->file = NULL; |
Pavel Begunkov | c1bdf8e | 2022-04-18 20:51:13 +0100 | [diff] [blame] | 1845 | req->rsrc_node = NULL; |
Pavel Begunkov | 4dd2824 | 2020-06-15 10:33:13 +0300 | [diff] [blame] | 1846 | req->task = current; |
Pavel Begunkov | ef4ff58 | 2020-04-12 02:05:05 +0300 | [diff] [blame] | 1847 | |
Pavel Begunkov | 4a04d1d | 2021-10-06 16:06:49 +0100 | [diff] [blame] | 1848 | if (unlikely(opcode >= IORING_OP_LAST)) { |
| 1849 | req->opcode = 0; |
Pavel Begunkov | 5be9ad1 | 2021-02-12 18:41:17 +0000 | [diff] [blame] | 1850 | return -EINVAL; |
Pavel Begunkov | 4a04d1d | 2021-10-06 16:06:49 +0100 | [diff] [blame] | 1851 | } |
Jens Axboe | fcde59f | 2022-05-23 16:53:15 -0600 | [diff] [blame] | 1852 | def = &io_op_defs[opcode]; |
Pavel Begunkov | 68fe256 | 2021-09-15 12:03:38 +0100 | [diff] [blame] | 1853 | if (unlikely(sqe_flags & ~SQE_COMMON_FLAGS)) { |
| 1854 | /* enforce forwards compatibility on users */ |
| 1855 | if (sqe_flags & ~SQE_VALID_FLAGS) |
| 1856 | return -EINVAL; |
Jens Axboe | 4e90670 | 2022-04-28 19:09:43 -0600 | [diff] [blame] | 1857 | if (sqe_flags & IOSQE_BUFFER_SELECT) { |
Jens Axboe | fcde59f | 2022-05-23 16:53:15 -0600 | [diff] [blame] | 1858 | if (!def->buffer_select) |
Jens Axboe | 4e90670 | 2022-04-28 19:09:43 -0600 | [diff] [blame] | 1859 | return -EOPNOTSUPP; |
| 1860 | req->buf_index = READ_ONCE(sqe->buf_group); |
| 1861 | } |
Pavel Begunkov | 5562a8d | 2021-11-10 15:49:34 +0000 | [diff] [blame] | 1862 | if (sqe_flags & IOSQE_CQE_SKIP_SUCCESS) |
| 1863 | ctx->drain_disabled = true; |
| 1864 | if (sqe_flags & IOSQE_IO_DRAIN) { |
| 1865 | if (ctx->drain_disabled) |
| 1866 | return -EOPNOTSUPP; |
Pavel Begunkov | 22b2ca3 | 2021-10-01 18:07:00 +0100 | [diff] [blame] | 1867 | io_init_req_drain(req); |
Pavel Begunkov | 5562a8d | 2021-11-10 15:49:34 +0000 | [diff] [blame] | 1868 | } |
Pavel Begunkov | 68fe256 | 2021-09-15 12:03:38 +0100 | [diff] [blame] | 1869 | } |
Pavel Begunkov | 2a56a9b | 2021-09-24 21:59:57 +0100 | [diff] [blame] | 1870 | if (unlikely(ctx->restricted || ctx->drain_active || ctx->drain_next)) { |
| 1871 | if (ctx->restricted && !io_check_restriction(ctx, req, sqe_flags)) |
| 1872 | return -EACCES; |
| 1873 | /* knock it to the slow queue path, will be drained there */ |
| 1874 | if (ctx->drain_active) |
| 1875 | req->flags |= REQ_F_FORCE_ASYNC; |
| 1876 | /* if there is no link, we're at "next" request and need to drain */ |
| 1877 | if (unlikely(ctx->drain_next) && !ctx->submit_state.link.head) { |
| 1878 | ctx->drain_next = false; |
| 1879 | ctx->drain_active = true; |
Hao Xu | b6c7db3 | 2021-11-25 17:21:03 +0800 | [diff] [blame] | 1880 | req->flags |= REQ_F_IO_DRAIN | REQ_F_FORCE_ASYNC; |
Pavel Begunkov | 2a56a9b | 2021-09-24 21:59:57 +0100 | [diff] [blame] | 1881 | } |
| 1882 | } |
Stefano Garzarella | 21b55db | 2020-08-27 16:58:30 +0200 | [diff] [blame] | 1883 | |
Jens Axboe | fcde59f | 2022-05-23 16:53:15 -0600 | [diff] [blame] | 1884 | if (!def->ioprio && sqe->ioprio) |
Jens Axboe | 7391142 | 2022-04-26 11:34:56 -0600 | [diff] [blame] | 1885 | return -EINVAL; |
Jens Axboe | fcde59f | 2022-05-23 16:53:15 -0600 | [diff] [blame] | 1886 | if (!def->iopoll && (ctx->flags & IORING_SETUP_IOPOLL)) |
Jens Axboe | 7391142 | 2022-04-26 11:34:56 -0600 | [diff] [blame] | 1887 | return -EINVAL; |
| 1888 | |
Jens Axboe | fcde59f | 2022-05-23 16:53:15 -0600 | [diff] [blame] | 1889 | if (def->needs_file) { |
Pavel Begunkov | 6d63416 | 2021-10-06 16:06:46 +0100 | [diff] [blame] | 1890 | struct io_submit_state *state = &ctx->submit_state; |
| 1891 | |
Pavel Begunkov | cef216f | 2022-04-12 15:09:43 +0100 | [diff] [blame] | 1892 | req->cqe.fd = READ_ONCE(sqe->fd); |
Jens Axboe | 6bf9c47 | 2022-03-29 10:10:08 -0600 | [diff] [blame] | 1893 | |
Pavel Begunkov | 6d63416 | 2021-10-06 16:06:46 +0100 | [diff] [blame] | 1894 | /* |
| 1895 | * Plug now if we have more than two IOs left after this, and the
| 1896 | * target is potentially a read/write to block-based storage.
| 1897 | */ |
Jens Axboe | fcde59f | 2022-05-23 16:53:15 -0600 | [diff] [blame] | 1898 | if (state->need_plug && def->plug) { |
Pavel Begunkov | 6d63416 | 2021-10-06 16:06:46 +0100 | [diff] [blame] | 1899 | state->plug_started = true; |
| 1900 | state->need_plug = false; |
Jens Axboe | 5ca7a8b | 2021-10-06 11:01:42 -0600 | [diff] [blame] | 1901 | blk_start_plug_nr_ios(&state->plug, state->submit_nr); |
Pavel Begunkov | 6d63416 | 2021-10-06 16:06:46 +0100 | [diff] [blame] | 1902 | } |
Pavel Begunkov | bd5bbda | 2020-11-20 15:50:51 +0000 | [diff] [blame] | 1903 | } |
Pavel Begunkov | c11368a5 | 2020-05-17 14:13:42 +0300 | [diff] [blame] | 1904 | |
Pavel Begunkov | ef4ff58 | 2020-04-12 02:05:05 +0300 | [diff] [blame] | 1905 | personality = READ_ONCE(sqe->personality); |
Jens Axboe | 63ff822 | 2020-05-07 14:56:15 -0600 | [diff] [blame] | 1906 | if (personality) { |
Linus Torvalds | cdab10b | 2021-11-01 21:06:18 -0700 | [diff] [blame] | 1907 | int ret; |
| 1908 | |
Jens Axboe | 63ff822 | 2020-05-07 14:56:15 -0600 | [diff] [blame] | 1909 | req->creds = xa_load(&ctx->personalities, personality); |
| 1910 | if (!req->creds) |
Jens Axboe | 27926b6 | 2020-10-28 09:33:23 -0600 | [diff] [blame] | 1911 | return -EINVAL; |
Jens Axboe | 63ff822 | 2020-05-07 14:56:15 -0600 | [diff] [blame] | 1912 | get_cred(req->creds); |
Paul Moore | cdc1404 | 2021-02-01 19:56:49 -0500 | [diff] [blame] | 1913 | ret = security_uring_override_creds(req->creds); |
| 1914 | if (ret) { |
| 1915 | put_cred(req->creds); |
| 1916 | return ret; |
| 1917 | } |
Jens Axboe | 63ff822 | 2020-05-07 14:56:15 -0600 | [diff] [blame] | 1918 | req->flags |= REQ_F_CREDS; |
| 1919 | } |
Pavel Begunkov | bd5bbda | 2020-11-20 15:50:51 +0000 | [diff] [blame] | 1920 | |
Jens Axboe | 0702e53 | 2022-05-23 16:56:21 -0600 | [diff] [blame] | 1921 | return def->prep(req, sqe); |
Pavel Begunkov | 0553b8b | 2020-04-08 08:58:45 +0300 | [diff] [blame] | 1922 | } |
| 1923 | |
Pavel Begunkov | df3becd | 2022-04-15 22:08:30 +0100 | [diff] [blame] | 1924 | static __cold int io_submit_fail_init(const struct io_uring_sqe *sqe, |
| 1925 | struct io_kiocb *req, int ret) |
| 1926 | { |
| 1927 | struct io_ring_ctx *ctx = req->ctx; |
| 1928 | struct io_submit_link *link = &ctx->submit_state.link; |
| 1929 | struct io_kiocb *head = link->head; |
| 1930 | |
Pavel Begunkov | 48863ff | 2022-06-16 13:57:20 +0100 | [diff] [blame] | 1931 | trace_io_uring_req_failed(sqe, req, ret); |
Pavel Begunkov | df3becd | 2022-04-15 22:08:30 +0100 | [diff] [blame] | 1932 | |
| 1933 | /* |
| 1934 | * Avoid breaking links in the middle as it renders links with SQPOLL |
| 1935 | * unusable. Instead of failing eagerly, continue assembling the link if |
| 1936 | * applicable and mark the head with REQ_F_FAIL. The link flushing code |
| 1937 | * should find the flag and handle the rest. |
| 1938 | */ |
| 1939 | req_fail_link_node(req, ret); |
| 1940 | if (head && !(head->flags & REQ_F_FAIL)) |
| 1941 | req_fail_link_node(head, -ECANCELED); |
| 1942 | |
| 1943 | if (!(req->flags & IO_REQ_LINK_FLAGS)) { |
| 1944 | if (head) { |
| 1945 | link->last->link = req; |
| 1946 | link->head = NULL; |
| 1947 | req = head; |
| 1948 | } |
| 1949 | io_queue_sqe_fallback(req); |
| 1950 | return ret; |
| 1951 | } |
| 1952 | |
| 1953 | if (head) |
| 1954 | link->last->link = req; |
| 1955 | else |
| 1956 | link->head = req; |
| 1957 | link->last = req; |
| 1958 | return 0; |
| 1959 | } |
| 1960 | |
| 1961 | static inline int io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req, |
Pavel Begunkov | a1ab7b3 | 2021-02-18 18:29:42 +0000 | [diff] [blame] | 1962 | const struct io_uring_sqe *sqe) |
Pavel Begunkov | 282cdc8 | 2021-08-09 13:04:10 +0100 | [diff] [blame] | 1963 | __must_hold(&ctx->uring_lock) |
Jens Axboe | 6c271ce | 2019-01-10 11:22:30 -0700 | [diff] [blame] | 1964 | { |
Pavel Begunkov | a1ab7b3 | 2021-02-18 18:29:42 +0000 | [diff] [blame] | 1965 | struct io_submit_link *link = &ctx->submit_state.link; |
Jens Axboe | 6c271ce | 2019-01-10 11:22:30 -0700 | [diff] [blame] | 1966 | int ret; |
| 1967 | |
Pavel Begunkov | a6b8cadc | 2021-02-18 18:29:41 +0000 | [diff] [blame] | 1968 | ret = io_init_req(ctx, req, sqe); |
Pavel Begunkov | df3becd | 2022-04-15 22:08:30 +0100 | [diff] [blame] | 1969 | if (unlikely(ret)) |
| 1970 | return io_submit_fail_init(sqe, req, ret); |
Pavel Begunkov | 441b8a7 | 2021-06-14 23:37:31 +0100 | [diff] [blame] | 1971 | |
Pavel Begunkov | be7053b | 2021-02-18 18:29:45 +0000 | [diff] [blame] | 1972 | /* don't need @sqe from now on */ |
Pavel Begunkov | 48863ff | 2022-06-16 13:57:20 +0100 | [diff] [blame] | 1973 | trace_io_uring_submit_sqe(req, true); |
Pavel Begunkov | a6b8cadc | 2021-02-18 18:29:41 +0000 | [diff] [blame] | 1974 | |
Jens Axboe | 6c271ce | 2019-01-10 11:22:30 -0700 | [diff] [blame] | 1975 | /* |
| 1976 | * If we already have a head request, queue this one for async |
| 1977 | * submittal once the head completes. If we don't have a head but |
| 1978 | * IOSQE_IO_LINK is set in the sqe, start a new head. This one will be |
| 1979 | * submitted sync once the chain is complete. If none of those |
| 1980 | * conditions are true (normal request), then just queue it. |
| 1981 | */ |
Pavel Begunkov | 924a07e | 2022-04-15 22:08:31 +0100 | [diff] [blame] | 1982 | if (unlikely(link->head)) { |
Pavel Begunkov | df3becd | 2022-04-15 22:08:30 +0100 | [diff] [blame] | 1983 | ret = io_req_prep_async(req); |
| 1984 | if (unlikely(ret)) |
| 1985 | return io_submit_fail_init(sqe, req, ret); |
Jens Axboe | 6c271ce | 2019-01-10 11:22:30 -0700 | [diff] [blame] | 1986 | |
Pavel Begunkov | 48863ff | 2022-06-16 13:57:20 +0100 | [diff] [blame] | 1987 | trace_io_uring_link(req, link->head); |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 1988 | link->last->link = req; |
| 1989 | link->last = req; |
| 1990 | |
Pavel Begunkov | da1a08c | 2022-04-15 22:08:29 +0100 | [diff] [blame] | 1991 | if (req->flags & IO_REQ_LINK_FLAGS) |
Pavel Begunkov | f15a343 | 2021-09-24 21:59:56 +0100 | [diff] [blame] | 1992 | return 0; |
Pavel Begunkov | df3becd | 2022-04-15 22:08:30 +0100 | [diff] [blame] | 1993 | /* last request of the link, flush it */ |
| 1994 | req = link->head; |
Pavel Begunkov | f15a343 | 2021-09-24 21:59:56 +0100 | [diff] [blame] | 1995 | link->head = NULL; |
Pavel Begunkov | 924a07e | 2022-04-15 22:08:31 +0100 | [diff] [blame] | 1996 | if (req->flags & (REQ_F_FORCE_ASYNC | REQ_F_FAIL)) |
| 1997 | goto fallback; |
| 1998 | |
| 1999 | } else if (unlikely(req->flags & (IO_REQ_LINK_FLAGS | |
| 2000 | REQ_F_FORCE_ASYNC | REQ_F_FAIL))) { |
| 2001 | if (req->flags & IO_REQ_LINK_FLAGS) { |
| 2002 | link->head = req; |
| 2003 | link->last = req; |
| 2004 | } else { |
| 2005 | fallback: |
| 2006 | io_queue_sqe_fallback(req); |
| 2007 | } |
Pavel Begunkov | f15a343 | 2021-09-24 21:59:56 +0100 | [diff] [blame] | 2008 | return 0; |
Jackie Liu | 4fe2c96 | 2019-09-09 20:50:40 +0800 | [diff] [blame] | 2009 | } |
| 2010 | |
Pavel Begunkov | f15a343 | 2021-09-24 21:59:56 +0100 | [diff] [blame] | 2011 | io_queue_sqe(req); |
Jackie Liu | 4fe2c96 | 2019-09-09 20:50:40 +0800 | [diff] [blame] | 2012 | return 0; |
| 2013 | } |
| 2014 | |
| 2015 | /* |
| 2016 | * Batched submission is done, ensure local IO is flushed out. |
Pavel Begunkov | 1b4a51b | 2019-11-21 11:54:28 +0300 | [diff] [blame] | 2017 | */ |
Pavel Begunkov | 553deff | 2021-09-24 21:59:55 +0100 | [diff] [blame] | 2018 | static void io_submit_state_end(struct io_ring_ctx *ctx) |
Jackie Liu | a197f66 | 2019-11-08 08:09:12 -0700 | [diff] [blame] | 2019 | { |
Pavel Begunkov | 553deff | 2021-09-24 21:59:55 +0100 | [diff] [blame] | 2020 | struct io_submit_state *state = &ctx->submit_state; |
| 2021 | |
Pavel Begunkov | e126391 | 2022-04-12 15:09:45 +0100 | [diff] [blame] | 2022 | if (unlikely(state->link.head)) |
| 2023 | io_queue_sqe_fallback(state->link.head); |
Pavel Begunkov | 553deff | 2021-09-24 21:59:55 +0100 | [diff] [blame] | 2024 | /* flush only after queuing links as they can generate completions */ |
Pavel Begunkov | c450178 | 2021-09-08 16:40:52 +0100 | [diff] [blame] | 2025 | io_submit_flush_completions(ctx); |
Jens Axboe | 9e645e11 | 2019-05-10 16:07:28 -0600 | [diff] [blame] | 2026 | if (state->plug_started) |
Pavel Begunkov | 32fe525 | 2019-12-17 22:26:58 +0300 | [diff] [blame] | 2027 | blk_finish_plug(&state->plug); |
Jens Axboe | 9e645e11 | 2019-05-10 16:07:28 -0600 | [diff] [blame] | 2028 | } |
Pavel Begunkov | 32fe525 | 2019-12-17 22:26:58 +0300 | [diff] [blame] | 2029 | |
Jens Axboe | 9e645e11 | 2019-05-10 16:07:28 -0600 | [diff] [blame] | 2030 | /* |
Pavel Begunkov | 196be95 | 2019-11-07 01:41:06 +0300 | [diff] [blame] | 2031 | * Begin a submission batch: initialise the per-batch submit state.
Jens Axboe | 9e645e11 | 2019-05-10 16:07:28 -0600 | [diff] [blame] | 2032 | */ |
| 2033 | static void io_submit_state_start(struct io_submit_state *state, |
Jens Axboe | bcda7ba | 2020-02-23 16:42:51 -0700 | [diff] [blame] | 2034 | unsigned int max_ios) |
| 2035 | { |
| 2036 | state->plug_started = false; |
Pavel Begunkov | 4b628ae | 2021-09-08 16:40:49 +0100 | [diff] [blame] | 2037 | state->need_plug = max_ios > 2; |
Jens Axboe | 5ca7a8b | 2021-10-06 11:01:42 -0600 | [diff] [blame] | 2038 | state->submit_nr = max_ios; |
Pavel Begunkov | a1ab7b3 | 2021-02-18 18:29:42 +0000 | [diff] [blame] | 2039 | /* set only head, no need to init link_last in advance */ |
| 2040 | state->link.head = NULL; |
Jens Axboe | 193155c | 2020-02-22 23:22:19 -0700 | [diff] [blame] | 2041 | } |
Jens Axboe | 75c6a03 | 2020-01-28 10:15:23 -0700 | [diff] [blame] | 2042 | |
| 2043 | static void io_commit_sqring(struct io_ring_ctx *ctx) |
| 2044 | { |
Jens Axboe | 193155c | 2020-02-22 23:22:19 -0700 | [diff] [blame] | 2045 | struct io_rings *rings = ctx->rings; |
Jens Axboe | 75c6a03 | 2020-01-28 10:15:23 -0700 | [diff] [blame] | 2046 | |
| 2047 | /* |
Pavel Begunkov | 6b47ee6 | 2020-01-18 20:22:41 +0300 | [diff] [blame] | 2048 | * Ensure any loads from the SQEs are done at this point, |
Pavel Begunkov | 8da11c1 | 2020-02-24 11:32:44 +0300 | [diff] [blame] | 2049 | * since once we write the new head, the application could |
Jens Axboe | bcda7ba | 2020-02-23 16:42:51 -0700 | [diff] [blame] | 2050 | * write new data to them. |
| 2051 | */ |
Jens Axboe | 9e645e11 | 2019-05-10 16:07:28 -0600 | [diff] [blame] | 2052 | smp_store_release(&rings->sq.head, ctx->cached_sq_head); |
Jens Axboe | 3529d8c | 2019-12-19 18:24:38 -0700 | [diff] [blame] | 2053 | } |
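| | 
| | /*
| |  * Userspace pairs with the smp_store_release() above by reading sq.head
| |  * with an acquire load before reusing SQE slots; liburing hides this in
| |  * io_uring_get_sqe(). A minimal sketch of the raw pairing (illustrative,
| |  * not kernel code; names are placeholders):
| |  *
| |  *	unsigned head = smp_load_acquire(sq_head_ptr);
| |  *	if (local_tail - head < ring_entries)
| |  *		fill_sqe(&sqes[local_tail++ & ring_mask]);
| | */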
Jens Axboe | 9e645e11 | 2019-05-10 16:07:28 -0600 | [diff] [blame] | 2054 | |
| 2055 | /* |
Fam Zheng | dd9ae8a | 2021-06-04 17:42:56 +0100 | [diff] [blame] | 2056 | * Fetch an sqe, if one is available. Note this returns a pointer to memory |
Jens Axboe | 78e19bb | 2019-11-06 15:21:34 -0700 | [diff] [blame] | 2057 | * that is mapped by userspace. This means that care needs to be taken to |
Pavel Begunkov | 2e6e1fd | 2019-12-05 16:15:45 +0300 | [diff] [blame] | 2058 | * ensure that reads are stable, as we cannot rely on userspace always |
Jens Axboe | 9e645e11 | 2019-05-10 16:07:28 -0600 | [diff] [blame] | 2059 | * being a good citizen. If members of the sqe are validated and then later |
| 2060 | * used, it's important that those reads are done through READ_ONCE() to |
Jens Axboe | 9e645e11 | 2019-05-10 16:07:28 -0600 | [diff] [blame] | 2061 | * prevent a re-load down the line. |
| 2062 | */ |
| 2063 | static const struct io_uring_sqe *io_get_sqe(struct io_ring_ctx *ctx) |
| 2064 | { |
Pavel Begunkov | ea5ab3b | 2021-05-16 22:58:09 +0100 | [diff] [blame] | 2065 | unsigned head, mask = ctx->sq_entries - 1; |
Pavel Begunkov | 17d3aeb | 2021-06-14 23:37:23 +0100 | [diff] [blame] | 2066 | unsigned sq_idx = ctx->cached_sq_head++ & mask; |
Jens Axboe | 9e645e11 | 2019-05-10 16:07:28 -0600 | [diff] [blame] | 2067 | |
| 2068 | /* |
Pavel Begunkov | 9d76377 | 2019-12-17 02:22:07 +0300 | [diff] [blame] | 2069 | * The cached sq head (or cq tail) serves two purposes: |
Jens Axboe | 9e645e11 | 2019-05-10 16:07:28 -0600 | [diff] [blame] | 2070 | * |
Pavel Begunkov | 8cdf219 | 2020-01-25 00:40:24 +0300 | [diff] [blame] | 2071 | * 1) allows us to batch the cost of updating the user-visible
| 2072 | *    head.
| 2073 | * 2) allows the kernel side to track the head on its own, even |
| 2074 | * though the application is the one updating it. |
| 2075 | */ |
Pavel Begunkov | 17d3aeb | 2021-06-14 23:37:23 +0100 | [diff] [blame] | 2076 | head = READ_ONCE(ctx->sq_array[sq_idx]); |
Jens Axboe | ebdeb7c0 | 2022-03-31 19:27:52 -0600 | [diff] [blame] | 2077 | if (likely(head < ctx->sq_entries)) { |
| 2078 | /* double index for 128-byte SQEs, twice as long */ |
| 2079 | if (ctx->flags & IORING_SETUP_SQE128) |
| 2080 | head <<= 1; |
Pavel Begunkov | 711be03 | 2020-01-17 03:57:59 +0300 | [diff] [blame] | 2081 | return &ctx->sq_sqes[head]; |
Jens Axboe | ebdeb7c0 | 2022-03-31 19:27:52 -0600 | [diff] [blame] | 2082 | } |
Pavel Begunkov | 711be03 | 2020-01-17 03:57:59 +0300 | [diff] [blame] | 2083 | |
| 2084 | /* drop invalid entries */ |
Pavel Begunkov | 15641e4 | 2021-06-14 23:37:24 +0100 | [diff] [blame] | 2085 | ctx->cq_extra--; |
| 2086 | WRITE_ONCE(ctx->rings->sq_dropped, |
| 2087 | READ_ONCE(ctx->rings->sq_dropped) + 1); |
Jens Axboe | 9e645e11 | 2019-05-10 16:07:28 -0600 | [diff] [blame] | 2088 | return NULL; |
| 2089 | } |
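| | 
| | /*
| |  * Note on the SQE128 handling above: ctx->sq_sqes is typed in 64-byte
| |  * struct io_uring_sqe units, so with IORING_SETUP_SQE128 the big SQE at
| |  * logical index i starts at &ctx->sq_sqes[2 * i]; doubling 'head'
| |  * produces exactly that index.
| | */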
| 2090 | |
Jens Axboe | 17437f3 | 2022-05-25 09:13:39 -0600 | [diff] [blame] | 2091 | int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr) |
Pavel Begunkov | 282cdc8 | 2021-08-09 13:04:10 +0100 | [diff] [blame] | 2092 | __must_hold(&ctx->uring_lock) |
Jens Axboe | 6c271ce | 2019-01-10 11:22:30 -0700 | [diff] [blame] | 2093 | { |
Pavel Begunkov | 6962980 | 2021-09-24 22:00:01 +0100 | [diff] [blame] | 2094 | unsigned int entries = io_sqring_entries(ctx); |
Pavel Begunkov | 8e6971a | 2022-04-12 15:09:49 +0100 | [diff] [blame] | 2095 | unsigned int left; |
| 2096 | int ret; |
Jens Axboe | 6c271ce | 2019-01-10 11:22:30 -0700 | [diff] [blame] | 2097 | |
Pavel Begunkov | 51d48da | 2021-10-04 20:02:47 +0100 | [diff] [blame] | 2098 | if (unlikely(!entries)) |
Pavel Begunkov | 6962980 | 2021-09-24 22:00:01 +0100 | [diff] [blame] | 2099 | return 0; |
Pavel Begunkov | ee7d46d9 | 2019-12-30 21:24:45 +0300 | [diff] [blame] | 2100 | /* make sure SQ entry isn't read before tail */ |
Pavel Begunkov | 8e6971a | 2022-04-12 15:09:49 +0100 | [diff] [blame] | 2101 | ret = left = min3(nr, ctx->sq_entries, entries); |
| 2102 | io_get_task_refs(left); |
| 2103 | io_submit_state_start(&ctx->submit_state, left); |
Jens Axboe | 6c271ce | 2019-01-10 11:22:30 -0700 | [diff] [blame] | 2104 | |
Pavel Begunkov | 6962980 | 2021-09-24 22:00:01 +0100 | [diff] [blame] | 2105 | do { |
Jens Axboe | 3529d8c | 2019-12-19 18:24:38 -0700 | [diff] [blame] | 2106 | const struct io_uring_sqe *sqe; |
Pavel Begunkov | 196be95 | 2019-11-07 01:41:06 +0300 | [diff] [blame] | 2107 | struct io_kiocb *req; |
Pavel Begunkov | fb5ccc9 | 2019-10-25 12:31:30 +0300 | [diff] [blame] | 2108 | |
Pavel Begunkov | 8e6971a | 2022-04-12 15:09:49 +0100 | [diff] [blame] | 2109 | if (unlikely(!io_alloc_req_refill(ctx))) |
Pavel Begunkov | fb5ccc9 | 2019-10-25 12:31:30 +0300 | [diff] [blame] | 2110 | break; |
Pavel Begunkov | a33ae9c | 2021-10-04 20:02:49 +0100 | [diff] [blame] | 2111 | req = io_alloc_req(ctx); |
Pavel Begunkov | 4fccfcb | 2021-02-12 11:55:17 +0000 | [diff] [blame] | 2112 | sqe = io_get_sqe(ctx); |
| 2113 | if (unlikely(!sqe)) { |
Pavel Begunkov | fa05457 | 2022-04-12 15:09:48 +0100 | [diff] [blame] | 2114 | io_req_add_to_cache(req, ctx); |
Pavel Begunkov | 4fccfcb | 2021-02-12 11:55:17 +0000 | [diff] [blame] | 2115 | break; |
| 2116 | } |
Pavel Begunkov | 1cd1590 | 2022-04-12 15:09:50 +0100 | [diff] [blame] | 2117 | |
| 2118 | /* |
| 2119 | * Continue submitting even for sqe failure if the |
| 2120 | * ring was set up with IORING_SETUP_SUBMIT_ALL.
| 2121 | */ |
| 2122 | if (unlikely(io_submit_sqe(ctx, req, sqe)) && |
| 2123 | !(ctx->flags & IORING_SETUP_SUBMIT_ALL)) { |
| 2124 | left--; |
| 2125 | break; |
Jens Axboe | bcbb7bf | 2022-03-10 12:59:35 -0700 | [diff] [blame] | 2126 | } |
Pavel Begunkov | 1cd1590 | 2022-04-12 15:09:50 +0100 | [diff] [blame] | 2127 | } while (--left); |
Jens Axboe | 6c271ce | 2019-01-10 11:22:30 -0700 | [diff] [blame] | 2128 | |
Pavel Begunkov | 8e6971a | 2022-04-12 15:09:49 +0100 | [diff] [blame] | 2129 | if (unlikely(left)) { |
| 2130 | ret -= left; |
| 2131 | /* try again if it submitted nothing and can't allocate a req */ |
| 2132 | if (!ret && io_req_cache_empty(ctx)) |
| 2133 | ret = -EAGAIN; |
| 2134 | current->io_uring->cached_refs += left; |
Pavel Begunkov | 9466f43 | 2020-01-25 22:34:01 +0300 | [diff] [blame] | 2135 | } |
Jens Axboe | 6c271ce | 2019-01-10 11:22:30 -0700 | [diff] [blame] | 2136 | |
Pavel Begunkov | 553deff | 2021-09-24 21:59:55 +0100 | [diff] [blame] | 2137 | io_submit_state_end(ctx); |
Pavel Begunkov | ae9428c | 2019-11-06 00:22:14 +0300 | [diff] [blame] | 2138 | /* Commit SQ ring head once we've consumed and submitted all SQEs */ |
| 2139 | io_commit_sqring(ctx); |
Pavel Begunkov | 8e6971a | 2022-04-12 15:09:49 +0100 | [diff] [blame] | 2140 | return ret; |
Jens Axboe | 6c271ce | 2019-01-10 11:22:30 -0700 | [diff] [blame] | 2141 | } |
| 2142 | |
Jens Axboe | bda5216 | 2019-09-24 13:47:15 -0600 | [diff] [blame] | 2143 | struct io_wait_queue { |
| 2144 | struct wait_queue_entry wq; |
| 2145 | struct io_ring_ctx *ctx; |
Jens Axboe | 5fd4617 | 2021-08-06 14:04:31 -0600 | [diff] [blame] | 2146 | unsigned cq_tail; |
Jens Axboe | bda5216 | 2019-09-24 13:47:15 -0600 | [diff] [blame] | 2147 | unsigned nr_timeouts; |
| 2148 | }; |
| 2149 | |
Pavel Begunkov | 6c50315 | 2021-01-04 20:36:36 +0000 | [diff] [blame] | 2150 | static inline bool io_should_wake(struct io_wait_queue *iowq) |
Jens Axboe | bda5216 | 2019-09-24 13:47:15 -0600 | [diff] [blame] | 2151 | { |
| 2152 | struct io_ring_ctx *ctx = iowq->ctx; |
Jens Axboe | 5fd4617 | 2021-08-06 14:04:31 -0600 | [diff] [blame] | 2153 | int dist = ctx->cached_cq_tail - (int) iowq->cq_tail; |
Jens Axboe | bda5216 | 2019-09-24 13:47:15 -0600 | [diff] [blame] | 2154 | |
| 2155 | /* |
Brian Gianforcaro | d195a66 | 2019-12-13 03:09:50 -0800 | [diff] [blame] | 2156 | * Wake up if we have enough events, or if a timeout occurred since we |
Jens Axboe | bda5216 | 2019-09-24 13:47:15 -0600 | [diff] [blame] | 2157 | * started waiting. For timeouts, we always want to return to userspace, |
| 2158 | * regardless of event count. |
| 2159 | */ |
Jens Axboe | 5fd4617 | 2021-08-06 14:04:31 -0600 | [diff] [blame] | 2160 | return dist >= 0 || atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts; |
Jens Axboe | bda5216 | 2019-09-24 13:47:15 -0600 | [diff] [blame] | 2161 | } |
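| | 
| | /*
| |  * The signed subtraction in io_should_wake() is deliberate: the tail
| |  * counters are free-running u32s, so computing the distance as an int
| |  * and testing >= 0 stays correct across wraparound, as long as fewer
| |  * than 2^31 completions are outstanding.
| | */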
| 2162 | |
| 2163 | static int io_wake_function(struct wait_queue_entry *curr, unsigned int mode, |
| 2164 | int wake_flags, void *key) |
| 2165 | { |
| 2166 | struct io_wait_queue *iowq = container_of(curr, struct io_wait_queue, |
| 2167 | wq); |
| 2168 | |
Pavel Begunkov | 6c50315 | 2021-01-04 20:36:36 +0000 | [diff] [blame] | 2169 | /* |
| 2170 | * We cannot safely flush overflowed CQEs from here, so just wake the
| 2171 | * task; the next invocation of the wait loop will flush them.
| 2172 | */ |
Dylan Yudaken | 10988a0 | 2022-04-21 02:13:43 -0700 | [diff] [blame] | 2173 | if (io_should_wake(iowq) || |
| 2174 | test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &iowq->ctx->check_cq)) |
Pavel Begunkov | 6c50315 | 2021-01-04 20:36:36 +0000 | [diff] [blame] | 2175 | return autoremove_wake_function(curr, mode, wake_flags, key); |
| 2176 | return -1; |
Jens Axboe | bda5216 | 2019-09-24 13:47:15 -0600 | [diff] [blame] | 2177 | } |
| 2178 | |
Jens Axboe | 7357298 | 2022-06-13 07:12:45 -0600 | [diff] [blame] | 2179 | int io_run_task_work_sig(void) |
Jens Axboe | af9c1a4 | 2020-09-24 13:32:18 -0600 | [diff] [blame] | 2180 | { |
| 2181 | if (io_run_task_work()) |
| 2182 | return 1; |
Olivier Langlois | c5020bc | 2022-02-16 14:53:42 -0500 | [diff] [blame] | 2183 | if (task_sigpending(current)) |
| 2184 | return -EINTR; |
| 2185 | return 0; |
Jens Axboe | af9c1a4 | 2020-09-24 13:32:18 -0600 | [diff] [blame] | 2186 | } |
| 2187 | |
Pavel Begunkov | eeb60b9 | 2021-02-04 13:51:58 +0000 | [diff] [blame] | 2188 | /* when this returns >0, the caller should retry */
| 2189 | static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx, |
| 2190 | struct io_wait_queue *iowq, |
Jens Axboe | 2283396 | 2022-02-21 05:49:30 -0700 | [diff] [blame] | 2191 | ktime_t timeout) |
Pavel Begunkov | eeb60b9 | 2021-02-04 13:51:58 +0000 | [diff] [blame] | 2192 | { |
| 2193 | int ret; |
Dylan Yudaken | 155bc95 | 2022-04-21 02:13:44 -0700 | [diff] [blame] | 2194 | unsigned long check_cq; |
Pavel Begunkov | eeb60b9 | 2021-02-04 13:51:58 +0000 | [diff] [blame] | 2195 | |
| 2196 | /* make sure we run task_work before checking for signals */ |
| 2197 | ret = io_run_task_work_sig(); |
| 2198 | if (ret || io_should_wake(iowq)) |
| 2199 | return ret; |
Pavel Begunkov | 3a08576 | 2022-06-15 17:33:55 +0100 | [diff] [blame] | 2200 | |
Dylan Yudaken | 155bc95 | 2022-04-21 02:13:44 -0700 | [diff] [blame] | 2201 | check_cq = READ_ONCE(ctx->check_cq); |
Pavel Begunkov | 3a08576 | 2022-06-15 17:33:55 +0100 | [diff] [blame] | 2202 | if (unlikely(check_cq)) { |
| 2203 | /* let the caller flush overflows, retry */ |
| 2204 | if (check_cq & BIT(IO_CHECK_CQ_OVERFLOW_BIT)) |
| 2205 | return 1; |
| 2206 | if (check_cq & BIT(IO_CHECK_CQ_DROPPED_BIT)) |
| 2207 | return -EBADR; |
| 2208 | } |
Jens Axboe | 2283396 | 2022-02-21 05:49:30 -0700 | [diff] [blame] | 2209 | if (!schedule_hrtimeout(&timeout, HRTIMER_MODE_ABS)) |
| 2210 | return -ETIME; |
| 2211 | return 1; |
Pavel Begunkov | eeb60b9 | 2021-02-04 13:51:58 +0000 | [diff] [blame] | 2212 | } |
| 2213 | |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 2214 | /* |
| 2215 | * Wait until events become available, if we don't already have some. The |
| 2216 | * application must reap them itself, as they reside on the shared cq ring. |
| 2217 | */ |
| 2218 | static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events, |
Hao Xu | c73ebb6 | 2020-11-03 10:54:37 +0800 | [diff] [blame] | 2219 | const sigset_t __user *sig, size_t sigsz, |
| 2220 | struct __kernel_timespec __user *uts) |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 2221 | { |
Pavel Begunkov | 90291099 | 2021-08-09 09:07:32 -0600 | [diff] [blame] | 2222 | struct io_wait_queue iowq; |
Hristo Venev | 75b28af | 2019-08-26 17:23:46 +0000 | [diff] [blame] | 2223 | struct io_rings *rings = ctx->rings; |
Jens Axboe | 2283396 | 2022-02-21 05:49:30 -0700 | [diff] [blame] | 2224 | ktime_t timeout = KTIME_MAX; |
Pavel Begunkov | c1d5a22 | 2021-02-04 13:51:57 +0000 | [diff] [blame] | 2225 | int ret; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 2226 | |
Jens Axboe | b41e985 | 2020-02-17 09:52:41 -0700 | [diff] [blame] | 2227 | do { |
Pavel Begunkov | 90f6736 | 2021-08-09 20:18:12 +0100 | [diff] [blame] | 2228 | io_cqring_overflow_flush(ctx); |
Pavel Begunkov | 6c50315 | 2021-01-04 20:36:36 +0000 | [diff] [blame] | 2229 | if (io_cqring_events(ctx) >= min_events) |
Jens Axboe | b41e985 | 2020-02-17 09:52:41 -0700 | [diff] [blame] | 2230 | return 0; |
Jens Axboe | 4c6e277 | 2020-07-01 11:29:10 -0600 | [diff] [blame] | 2231 | if (!io_run_task_work()) |
Jens Axboe | b41e985 | 2020-02-17 09:52:41 -0700 | [diff] [blame] | 2232 | break; |
Jens Axboe | b41e985 | 2020-02-17 09:52:41 -0700 | [diff] [blame] | 2233 | } while (1); |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 2234 | |
| 2235 | if (sig) { |
Arnd Bergmann | 9e75ad5 | 2019-03-25 15:34:53 +0100 | [diff] [blame] | 2236 | #ifdef CONFIG_COMPAT |
| 2237 | if (in_compat_syscall()) |
| 2238 | ret = set_compat_user_sigmask((const compat_sigset_t __user *)sig, |
Oleg Nesterov | b772434 | 2019-07-16 16:29:53 -0700 | [diff] [blame] | 2239 | sigsz); |
Arnd Bergmann | 9e75ad5 | 2019-03-25 15:34:53 +0100 | [diff] [blame] | 2240 | else |
| 2241 | #endif |
Oleg Nesterov | b772434 | 2019-07-16 16:29:53 -0700 | [diff] [blame] | 2242 | ret = set_user_sigmask(sig, sigsz); |
Arnd Bergmann | 9e75ad5 | 2019-03-25 15:34:53 +0100 | [diff] [blame] | 2243 | |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 2244 | if (ret) |
| 2245 | return ret; |
| 2246 | } |
| 2247 | |
Olivier Langlois | 950e79d | 2022-03-08 17:17:21 -0500 | [diff] [blame] | 2248 | if (uts) { |
| 2249 | struct timespec64 ts; |
| 2250 | |
| 2251 | if (get_timespec64(&ts, uts)) |
| 2252 | return -EFAULT; |
| 2253 | timeout = ktime_add_ns(timespec64_to_ktime(ts), ktime_get_ns()); |
| 2254 | } |
| 2255 | |
Pavel Begunkov | 90291099 | 2021-08-09 09:07:32 -0600 | [diff] [blame] | 2256 | init_waitqueue_func_entry(&iowq.wq, io_wake_function); |
| 2257 | iowq.wq.private = current; |
| 2258 | INIT_LIST_HEAD(&iowq.wq.entry); |
| 2259 | iowq.ctx = ctx; |
Jens Axboe | bda5216 | 2019-09-24 13:47:15 -0600 | [diff] [blame] | 2260 | iowq.nr_timeouts = atomic_read(&ctx->cq_timeouts); |
Jens Axboe | 5fd4617 | 2021-08-06 14:04:31 -0600 | [diff] [blame] | 2261 | iowq.cq_tail = READ_ONCE(ctx->rings->cq.head) + min_events; |
Pavel Begunkov | 90291099 | 2021-08-09 09:07:32 -0600 | [diff] [blame] | 2262 | |
Dmitrii Dolgov | c826bd7 | 2019-10-15 19:02:01 +0200 | [diff] [blame] | 2263 | trace_io_uring_cqring_wait(ctx, min_events); |
Jens Axboe | bda5216 | 2019-09-24 13:47:15 -0600 | [diff] [blame] | 2264 | do { |
Jens Axboe | ca0a265 | 2021-03-04 17:15:48 -0700 | [diff] [blame] | 2265 | /* if we can't even flush overflow, don't wait for more */ |
Pavel Begunkov | 90f6736 | 2021-08-09 20:18:12 +0100 | [diff] [blame] | 2266 | if (!io_cqring_overflow_flush(ctx)) { |
Jens Axboe | ca0a265 | 2021-03-04 17:15:48 -0700 | [diff] [blame] | 2267 | ret = -EBUSY; |
| 2268 | break; |
| 2269 | } |
Pavel Begunkov | 311997b | 2021-06-14 23:37:28 +0100 | [diff] [blame] | 2270 | prepare_to_wait_exclusive(&ctx->cq_wait, &iowq.wq, |
Jens Axboe | bda5216 | 2019-09-24 13:47:15 -0600 | [diff] [blame] | 2271 | TASK_INTERRUPTIBLE); |
Jens Axboe | 2283396 | 2022-02-21 05:49:30 -0700 | [diff] [blame] | 2272 | ret = io_cqring_wait_schedule(ctx, &iowq, timeout); |
Jens Axboe | ca0a265 | 2021-03-04 17:15:48 -0700 | [diff] [blame] | 2273 | cond_resched(); |
Pavel Begunkov | eeb60b9 | 2021-02-04 13:51:58 +0000 | [diff] [blame] | 2274 | } while (ret > 0); |
Jens Axboe | bda5216 | 2019-09-24 13:47:15 -0600 | [diff] [blame] | 2275 | |
Jens Axboe | b4f20bb | 2022-03-25 16:39:57 -0600 | [diff] [blame] | 2276 | finish_wait(&ctx->cq_wait, &iowq.wq); |
Jens Axboe | b7db41c | 2020-07-04 08:55:50 -0600 | [diff] [blame] | 2277 | restore_saved_sigmask_unless(ret == -EINTR); |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 2278 | |
Hristo Venev | 75b28af | 2019-08-26 17:23:46 +0000 | [diff] [blame] | 2279 | return READ_ONCE(rings->cq.head) == READ_ONCE(rings->cq.tail) ? ret : 0; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 2280 | } |
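| | 
| | /*
| |  * The typical userspace entry to this wait is io_uring_enter(2) with
| |  * IORING_ENTER_GETEVENTS set; with liburing, for example (illustrative):
| |  *
| |  *	struct io_uring_cqe *cqe;
| |  *	int ret = io_uring_wait_cqe(&ring, &cqe);	- waits for one CQE
| | */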
| 2281 | |
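| | /*
| |  * Ring memory helpers. io_mem_alloc() uses __GFP_COMP so the high-order
| |  * allocation is a single compound page; that is what lets io_mem_free()
| |  * find the head page from any pointer into the region and drop the last
| |  * reference in one put. GFP_KERNEL_ACCOUNT charges the memory to the
| |  * allocating cgroup, since these rings are mapped into userspace.
| | */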
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 2282 | static void io_mem_free(void *ptr) |
| 2283 | { |
Mark Rutland | 52e04ef | 2019-04-30 17:30:21 +0100 | [diff] [blame] | 2284 | struct page *page; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 2285 | |
Mark Rutland | 52e04ef | 2019-04-30 17:30:21 +0100 | [diff] [blame] | 2286 | if (!ptr) |
| 2287 | return; |
| 2288 | |
| 2289 | page = virt_to_head_page(ptr); |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 2290 | if (put_page_testzero(page)) |
| 2291 | free_compound_page(page); |
| 2292 | } |
| 2293 | |
| 2294 | static void *io_mem_alloc(size_t size) |
| 2295 | { |
Shakeel Butt | 0a3f1e0 | 2022-01-24 21:17:36 -0800 | [diff] [blame] | 2296 | gfp_t gfp = GFP_KERNEL_ACCOUNT | __GFP_ZERO | __GFP_NOWARN | __GFP_COMP; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 2297 | |
Shakeel Butt | 0a3f1e0 | 2022-01-24 21:17:36 -0800 | [diff] [blame] | 2298 | return (void *) __get_free_pages(gfp, get_order(size)); |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 2299 | } |
| 2300 | |
Stefan Roesch | baf9cb6 | 2022-04-26 11:21:25 -0700 | [diff] [blame] | 2301 | static unsigned long rings_size(struct io_ring_ctx *ctx, unsigned int sq_entries, |
| 2302 | unsigned int cq_entries, size_t *sq_offset) |
Hristo Venev | 75b28af | 2019-08-26 17:23:46 +0000 | [diff] [blame] | 2303 | { |
| 2304 | struct io_rings *rings; |
| 2305 | size_t off, sq_array_size; |
| 2306 | |
| 2307 | off = struct_size(rings, cqes, cq_entries); |
| 2308 | if (off == SIZE_MAX) |
| 2309 | return SIZE_MAX; |
Stefan Roesch | baf9cb6 | 2022-04-26 11:21:25 -0700 | [diff] [blame] | 2310 | if (ctx->flags & IORING_SETUP_CQE32) { |
| 2311 | if (check_shl_overflow(off, 1, &off)) |
| 2312 | return SIZE_MAX; |
| 2313 | } |
Hristo Venev | 75b28af | 2019-08-26 17:23:46 +0000 | [diff] [blame] | 2314 | |
| 2315 | #ifdef CONFIG_SMP |
| 2316 | off = ALIGN(off, SMP_CACHE_BYTES); |
| 2317 | if (off == 0) |
| 2318 | return SIZE_MAX; |
| 2319 | #endif |
| 2320 | |
Dmitry Vyukov | b36200f | 2020-07-11 11:31:11 +0200 | [diff] [blame] | 2321 | if (sq_offset) |
| 2322 | *sq_offset = off; |
| 2323 | |
Hristo Venev | 75b28af | 2019-08-26 17:23:46 +0000 | [diff] [blame] | 2324 | sq_array_size = array_size(sizeof(u32), sq_entries); |
| 2325 | if (sq_array_size == SIZE_MAX) |
| 2326 | return SIZE_MAX; |
| 2327 | |
| 2328 | if (check_add_overflow(off, sq_array_size, &off)) |
| 2329 | return SIZE_MAX; |
| 2330 | |
Hristo Venev | 75b28af | 2019-08-26 17:23:46 +0000 | [diff] [blame] | 2331 | return off; |
| 2332 | } |
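| | 
| | /*
| |  * Layout of the allocation sized above, in order:
| |  *
| |  *	struct io_rings header | CQE array (doubled when IORING_SETUP_CQE32)
| |  *	| pad to SMP_CACHE_BYTES | SQ index array (one u32 per SQ entry)
| |  *
| |  * *sq_offset reports where the SQ index array starts so setup code can
| |  * point ctx->sq_array at it.
| | */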
| 2333 | |
Usama Arif | c75312d | 2022-02-04 14:51:15 +0000 | [diff] [blame] | 2334 | static int io_eventfd_register(struct io_ring_ctx *ctx, void __user *arg, |
| 2335 | unsigned int eventfd_async) |
Jens Axboe | 9b40284 | 2019-04-11 11:45:41 -0600 | [diff] [blame] | 2336 | { |
Usama Arif | 77bc59b | 2022-02-04 14:51:14 +0000 | [diff] [blame] | 2337 | struct io_ev_fd *ev_fd; |
Jens Axboe | 9b40284 | 2019-04-11 11:45:41 -0600 | [diff] [blame] | 2338 | __s32 __user *fds = arg; |
Nathan Chancellor | f0a4e62b | 2022-02-07 09:24:11 -0700 | [diff] [blame] | 2339 | int fd; |
Jens Axboe | 9b40284 | 2019-04-11 11:45:41 -0600 | [diff] [blame] | 2340 | |
Usama Arif | 77bc59b | 2022-02-04 14:51:14 +0000 | [diff] [blame] | 2341 | ev_fd = rcu_dereference_protected(ctx->io_ev_fd, |
| 2342 | lockdep_is_held(&ctx->uring_lock)); |
| 2343 | if (ev_fd) |
Jens Axboe | 9b40284 | 2019-04-11 11:45:41 -0600 | [diff] [blame] | 2344 | return -EBUSY; |
| 2345 | |
| 2346 | if (copy_from_user(&fd, fds, sizeof(*fds))) |
| 2347 | return -EFAULT; |
| 2348 | |
Usama Arif | 77bc59b | 2022-02-04 14:51:14 +0000 | [diff] [blame] | 2349 | ev_fd = kmalloc(sizeof(*ev_fd), GFP_KERNEL); |
| 2350 | if (!ev_fd) |
| 2351 | return -ENOMEM; |
Pavel Begunkov | fe7e325 | 2021-06-24 15:09:57 +0100 | [diff] [blame] | 2352 | |
Usama Arif | 77bc59b | 2022-02-04 14:51:14 +0000 | [diff] [blame] | 2353 | ev_fd->cq_ev_fd = eventfd_ctx_fdget(fd); |
| 2354 | if (IS_ERR(ev_fd->cq_ev_fd)) { |
Nathan Chancellor | f0a4e62b | 2022-02-07 09:24:11 -0700 | [diff] [blame] | 2355 | int ret = PTR_ERR(ev_fd->cq_ev_fd); |
Usama Arif | 77bc59b | 2022-02-04 14:51:14 +0000 | [diff] [blame] | 2356 | kfree(ev_fd); |
Jens Axboe | 9b40284 | 2019-04-11 11:45:41 -0600 | [diff] [blame] | 2357 | return ret; |
| 2358 | } |
Pavel Begunkov | 305bef9 | 2022-06-20 01:25:55 +0100 | [diff] [blame] | 2359 | |
| 2360 | spin_lock(&ctx->completion_lock); |
| 2361 | ctx->evfd_last_cq_tail = ctx->cached_cq_tail; |
| 2362 | spin_unlock(&ctx->completion_lock); |
| 2363 | |
Usama Arif | c75312d | 2022-02-04 14:51:15 +0000 | [diff] [blame] | 2364 | ev_fd->eventfd_async = eventfd_async; |
Pavel Begunkov | 9aa8dfd | 2022-03-17 02:03:42 +0000 | [diff] [blame] | 2365 | ctx->has_evfd = true; |
Usama Arif | 77bc59b | 2022-02-04 14:51:14 +0000 | [diff] [blame] | 2366 | rcu_assign_pointer(ctx->io_ev_fd, ev_fd); |
Nathan Chancellor | f0a4e62b | 2022-02-07 09:24:11 -0700 | [diff] [blame] | 2367 | return 0; |
Usama Arif | 77bc59b | 2022-02-04 14:51:14 +0000 | [diff] [blame] | 2368 | } |
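| | 
| | /*
| |  * Reached via io_uring_register(2) with IORING_REGISTER_EVENTFD
| |  * (eventfd_async == 0) or IORING_REGISTER_EVENTFD_ASYNC
| |  * (eventfd_async == 1); the async variant only signals the eventfd for
| |  * completions that did not complete inline with submission.
| | */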
| 2369 | |
| 2370 | static void io_eventfd_put(struct rcu_head *rcu) |
| 2371 | { |
| 2372 | struct io_ev_fd *ev_fd = container_of(rcu, struct io_ev_fd, rcu); |
| 2373 | |
| 2374 | eventfd_ctx_put(ev_fd->cq_ev_fd); |
| 2375 | kfree(ev_fd); |
Jens Axboe | 9b40284 | 2019-04-11 11:45:41 -0600 | [diff] [blame] | 2376 | } |
| 2377 | |
| 2378 | static int io_eventfd_unregister(struct io_ring_ctx *ctx) |
| 2379 | { |
Usama Arif | 77bc59b | 2022-02-04 14:51:14 +0000 | [diff] [blame] | 2380 | struct io_ev_fd *ev_fd; |
| 2381 | |
| 2382 | ev_fd = rcu_dereference_protected(ctx->io_ev_fd, |
| 2383 | lockdep_is_held(&ctx->uring_lock)); |
| 2384 | if (ev_fd) { |
Pavel Begunkov | 9aa8dfd | 2022-03-17 02:03:42 +0000 | [diff] [blame] | 2385 | ctx->has_evfd = false; |
Usama Arif | 77bc59b | 2022-02-04 14:51:14 +0000 | [diff] [blame] | 2386 | rcu_assign_pointer(ctx->io_ev_fd, NULL); |
| 2387 | call_rcu(&ev_fd->rcu, io_eventfd_put); |
Jens Axboe | 9b40284 | 2019-04-11 11:45:41 -0600 | [diff] [blame] | 2388 | return 0; |
| 2389 | } |
| 2390 | |
| 2391 | return -ENXIO; |
| 2392 | } |
| 2393 | |
Jens Axboe | 4010fec | 2021-02-27 15:04:18 -0700 | [diff] [blame] | 2394 | static void io_req_caches_free(struct io_ring_ctx *ctx) |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 2395 | { |
Pavel Begunkov | cd0ca2e | 2021-08-09 20:18:11 +0100 | [diff] [blame] | 2396 | struct io_submit_state *state = &ctx->submit_state; |
Pavel Begunkov | 37f0e76 | 2021-10-04 20:02:53 +0100 | [diff] [blame] | 2397 | int nr = 0; |
Pavel Begunkov | bf019da | 2021-02-10 00:03:17 +0000 | [diff] [blame] | 2398 | |
Jens Axboe | 9a4fdbd | 2021-02-13 09:09:44 -0700 | [diff] [blame] | 2399 | mutex_lock(&ctx->uring_lock); |
Pavel Begunkov | cd0ca2e | 2021-08-09 20:18:11 +0100 | [diff] [blame] | 2400 | io_flush_cached_locked_reqs(ctx, state); |
Pavel Begunkov | c2b6c6b | 2021-09-24 21:59:47 +0100 | [diff] [blame] | 2401 | |
Pavel Begunkov | 88ab95b | 2022-04-12 15:09:47 +0100 | [diff] [blame] | 2402 | while (!io_req_cache_empty(ctx)) { |
Pavel Begunkov | c2b6c6b | 2021-09-24 21:59:47 +0100 | [diff] [blame] | 2403 | struct io_wq_work_node *node; |
| 2404 | struct io_kiocb *req; |
| 2405 | |
| 2406 | node = wq_stack_extract(&state->free_list); |
| 2407 | req = container_of(node, struct io_kiocb, comp_list); |
| 2408 | kmem_cache_free(req_cachep, req); |
Pavel Begunkov | 37f0e76 | 2021-10-04 20:02:53 +0100 | [diff] [blame] | 2409 | nr++; |
Pavel Begunkov | c2b6c6b | 2021-09-24 21:59:47 +0100 | [diff] [blame] | 2410 | } |
Pavel Begunkov | 37f0e76 | 2021-10-04 20:02:53 +0100 | [diff] [blame] | 2411 | if (nr) |
| 2412 | percpu_ref_put_many(&ctx->refs, nr); |
Jens Axboe | 9a4fdbd | 2021-02-13 09:09:44 -0700 | [diff] [blame] | 2413 | mutex_unlock(&ctx->uring_lock); |
| 2414 | } |
| 2415 | |
Pavel Begunkov | c072481 | 2021-10-04 20:02:54 +0100 | [diff] [blame] | 2416 | static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx) |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 2417 | { |
Jens Axboe | 37d1e2e | 2021-02-17 21:03:43 -0700 | [diff] [blame] | 2418 | io_sq_thread_finish(ctx); |
Jens Axboe | 2aede0e | 2020-09-14 10:45:53 -0600 | [diff] [blame] | 2419 | |
Jens Axboe | 37d1e2e | 2021-02-17 21:03:43 -0700 | [diff] [blame] | 2420 | if (ctx->mm_account) { |
Jens Axboe | 2aede0e | 2020-09-14 10:45:53 -0600 | [diff] [blame] | 2421 | mmdrop(ctx->mm_account); |
| 2422 | ctx->mm_account = NULL; |
Bijan Mottahedeh | 3097582 | 2020-06-16 16:36:09 -0700 | [diff] [blame] | 2423 | } |
Jens Axboe | def596e | 2019-01-09 08:59:42 -0700 | [diff] [blame] | 2424 | |
Pavel Begunkov | ab40940 | 2021-10-09 23:14:41 +0100 | [diff] [blame] | 2425 | io_rsrc_refs_drop(ctx); |
Pavel Begunkov | 43597aa | 2021-08-10 02:44:23 +0100 | [diff] [blame] | 2426 | /* __io_rsrc_put_work() may need uring_lock to progress, wait w/o it */ |
| 2427 | io_wait_rsrc_data(ctx->buf_data); |
| 2428 | io_wait_rsrc_data(ctx->file_data); |
| 2429 | |
Hao Xu | 8bad28d | 2021-02-19 17:19:36 +0800 | [diff] [blame] | 2430 | mutex_lock(&ctx->uring_lock); |
Pavel Begunkov | 43597aa | 2021-08-10 02:44:23 +0100 | [diff] [blame] | 2431 | if (ctx->buf_data) |
Bijan Mottahedeh | bd54b6f | 2021-04-25 14:32:25 +0100 | [diff] [blame] | 2432 | __io_sqe_buffers_unregister(ctx); |
Pavel Begunkov | 43597aa | 2021-08-10 02:44:23 +0100 | [diff] [blame] | 2433 | if (ctx->file_data) |
Pavel Begunkov | 0848040 | 2021-04-13 02:58:38 +0100 | [diff] [blame] | 2434 | __io_sqe_files_unregister(ctx); |
Pavel Begunkov | c4ea060 | 2021-04-01 15:43:58 +0100 | [diff] [blame] | 2435 | if (ctx->rings) |
| 2436 | __io_cqring_overflow_flush(ctx, true); |
Jens Axboe | 9b40284 | 2019-04-11 11:45:41 -0600 | [diff] [blame] | 2437 | io_eventfd_unregister(ctx); |
Jens Axboe | 9b797a3 | 2022-07-07 14:16:20 -0600 | [diff] [blame] | 2438 | io_alloc_cache_free(&ctx->apoll_cache, io_apoll_cache_free); |
Jens Axboe | 43e0bbb | 2022-07-07 14:30:09 -0600 | [diff] [blame] | 2439 | io_alloc_cache_free(&ctx->netmsg_cache, io_netmsg_cache_free); |
Usama Arif | 77bc59b | 2022-02-04 14:51:14 +0000 | [diff] [blame] | 2440 | mutex_unlock(&ctx->uring_lock); |
Jens Axboe | 5a2e745 | 2020-02-23 16:23:11 -0700 | [diff] [blame] | 2441 | io_destroy_buffers(ctx); |
Pavel Begunkov | 07db298 | 2021-04-20 12:03:32 +0100 | [diff] [blame] | 2442 | if (ctx->sq_creds) |
| 2443 | put_cred(ctx->sq_creds); |
Pavel Begunkov | 97bbdc0 | 2022-06-16 10:22:08 +0100 | [diff] [blame] | 2444 | if (ctx->submitter_task) |
| 2445 | put_task_struct(ctx->submitter_task); |
Jens Axboe | def596e | 2019-01-09 08:59:42 -0700 | [diff] [blame] | 2446 | |
Pavel Begunkov | a7f0ed5 | 2021-04-01 15:43:46 +0100 | [diff] [blame] | 2447 | /* no registered resources are left, and nobody uses the rsrc nodes */ |
| 2448 | if (ctx->rsrc_node) |
| 2449 | io_rsrc_node_destroy(ctx->rsrc_node); |
Pavel Begunkov | 8dd03af | 2021-03-19 17:22:36 +0000 | [diff] [blame] | 2450 | if (ctx->rsrc_backup_node) |
Pavel Begunkov | b895c9a | 2021-04-01 15:43:40 +0100 | [diff] [blame] | 2451 | io_rsrc_node_destroy(ctx->rsrc_backup_node); |
Pavel Begunkov | a7f0ed5 | 2021-04-01 15:43:46 +0100 | [diff] [blame] | 2452 | flush_delayed_work(&ctx->rsrc_put_work); |
Pavel Begunkov | 756ab7c | 2021-10-06 16:06:47 +0100 | [diff] [blame] | 2453 | flush_delayed_work(&ctx->fallback_work); |
Pavel Begunkov | a7f0ed5 | 2021-04-01 15:43:46 +0100 | [diff] [blame] | 2454 | |
| 2455 | WARN_ON_ONCE(!list_empty(&ctx->rsrc_ref_list)); |
| 2456 | WARN_ON_ONCE(!llist_empty(&ctx->rsrc_put_llist)); |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 2457 | |
| 2458 | #if defined(CONFIG_UNIX) |
Eric Biggers | 355e8d26 | 2019-06-12 14:58:43 -0700 | [diff] [blame] | 2459 | if (ctx->ring_sock) { |
| 2460 | ctx->ring_sock->file = NULL; /* so that iput() is called */ |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 2461 | sock_release(ctx->ring_sock); |
Eric Biggers | 355e8d26 | 2019-06-12 14:58:43 -0700 | [diff] [blame] | 2462 | } |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 2463 | #endif |
Pavel Begunkov | ef9dd63 | 2021-08-28 19:54:38 -0600 | [diff] [blame] | 2464 | WARN_ON_ONCE(!list_empty(&ctx->ltimeout_list)); |
Pavel Begunkov | eb42ceb | 2022-07-12 21:52:38 +0100 | [diff] [blame] | 2465 | WARN_ON_ONCE(ctx->notif_slots || ctx->nr_notif_slots); |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 2466 | |
Hristo Venev | 75b28af | 2019-08-26 17:23:46 +0000 | [diff] [blame] | 2467 | io_mem_free(ctx->rings); |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 2468 | io_mem_free(ctx->sq_sqes); |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 2469 | |
| 2470 | percpu_ref_exit(&ctx->refs); |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 2471 | free_uid(ctx->user); |
Jens Axboe | 4010fec | 2021-02-27 15:04:18 -0700 | [diff] [blame] | 2472 | io_req_caches_free(ctx); |
Jens Axboe | e941894 | 2021-02-19 12:33:30 -0700 | [diff] [blame] | 2473 | if (ctx->hash_map) |
| 2474 | io_wq_put_hash(ctx->hash_map); |
Pavel Begunkov | e6f89be | 2022-06-16 10:22:10 +0100 | [diff] [blame] | 2475 | kfree(ctx->cancel_table.hbs); |
Pavel Begunkov | 9ca9fb2 | 2022-06-16 10:22:12 +0100 | [diff] [blame] | 2476 | kfree(ctx->cancel_table_locked.hbs); |
Pavel Begunkov | 6224843 | 2021-04-28 13:11:29 +0100 | [diff] [blame] | 2477 | kfree(ctx->dummy_ubuf); |
Jens Axboe | 9cfc7e9 | 2022-05-01 10:52:44 -0600 | [diff] [blame] | 2478 | kfree(ctx->io_bl); |
| 2479 | xa_destroy(&ctx->io_bl_xa); |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 2480 | kfree(ctx); |
| 2481 | } |
| 2482 | |
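| | /* |
| | * ->poll() for the ring fd itself: writable while the SQ ring has |
| | * room, readable while completions (or unflushed overflow) are |
| | * pending. |
| | */ |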
| 2483 | static __poll_t io_uring_poll(struct file *file, poll_table *wait) |
| 2484 | { |
| 2485 | struct io_ring_ctx *ctx = file->private_data; |
| 2486 | __poll_t mask = 0; |
| 2487 | |
Pavel Begunkov | d60aa65 | 2021-10-04 20:02:52 +0100 | [diff] [blame] | 2488 | poll_wait(file, &ctx->cq_wait, wait); |
Stefan Bühler | 4f7067c | 2019-04-24 23:54:17 +0200 | [diff] [blame] | 2489 | /* |
| 2490 | * synchronizes with barrier from wq_has_sleeper call in |
| 2491 | * io_commit_cqring |
| 2492 | */ |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 2493 | smp_rmb(); |
Jens Axboe | 9055420 | 2020-09-03 12:12:41 -0600 | [diff] [blame] | 2494 | if (!io_sqring_full(ctx)) |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 2495 | mask |= EPOLLOUT | EPOLLWRNORM; |
Hao Xu | ed670c3 | 2021-02-05 16:34:21 +0800 | [diff] [blame] | 2496 | |
| 2497 | /* |
| 2498 | * Don't flush the cqring overflow list here, just do a simple check. |
| 2499 | * Otherwise there could possibly be an ABBA deadlock: |
| 2500 | * CPU0 CPU1 |
| 2501 | * ---- ---- |
| 2502 | * lock(&ctx->uring_lock); |
| 2503 | * lock(&ep->mtx); |
| 2504 | * lock(&ctx->uring_lock); |
| 2505 | * lock(&ep->mtx); |
| 2506 | * |
| 2507 | * Users may get EPOLLIN while seeing nothing in the cqring, which |
| 2508 | * pushes them to do the flush. |
| 2509 | */ |
Dylan Yudaken | 10988a0 | 2022-04-21 02:13:43 -0700 | [diff] [blame] | 2510 | if (io_cqring_events(ctx) || |
| 2511 | test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq)) |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 2512 | mask |= EPOLLIN | EPOLLRDNORM; |
| 2513 | |
| 2514 | return mask; |
| 2515 | } |
| 2516 | |
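| | /* |
| | * Drop the credentials registered under @id; returns -EINVAL if no |
| | * such personality exists. |
| | */ |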
Yejune Deng | 0bead8c | 2020-12-24 11:02:20 +0800 | [diff] [blame] | 2517 | static int io_unregister_personality(struct io_ring_ctx *ctx, unsigned id) |
Jens Axboe | 071698e | 2020-01-28 10:04:42 -0700 | [diff] [blame] | 2518 | { |
Jens Axboe | 4379bf8 | 2021-02-15 13:40:22 -0700 | [diff] [blame] | 2519 | const struct cred *creds; |
Jens Axboe | 071698e | 2020-01-28 10:04:42 -0700 | [diff] [blame] | 2520 | |
Matthew Wilcox (Oracle) | 61cf937 | 2021-03-08 14:16:16 +0000 | [diff] [blame] | 2521 | creds = xa_erase(&ctx->personalities, id); |
Jens Axboe | 4379bf8 | 2021-02-15 13:40:22 -0700 | [diff] [blame] | 2522 | if (creds) { |
| 2523 | put_cred(creds); |
Yejune Deng | 0bead8c | 2020-12-24 11:02:20 +0800 | [diff] [blame] | 2524 | return 0; |
Jens Axboe | 1e6fa52 | 2020-10-15 08:46:24 -0600 | [diff] [blame] | 2525 | } |
Yejune Deng | 0bead8c | 2020-12-24 11:02:20 +0800 | [diff] [blame] | 2526 | |
| 2527 | return -EINVAL; |
| 2528 | } |
| 2529 | |
Pavel Begunkov | d56d938 | 2021-03-06 11:02:13 +0000 | [diff] [blame] | 2530 | struct io_tctx_exit { |
| 2531 | struct callback_head task_work; |
| 2532 | struct completion completion; |
Pavel Begunkov | baf186c | 2021-03-06 11:02:15 +0000 | [diff] [blame] | 2533 | struct io_ring_ctx *ctx; |
Pavel Begunkov | d56d938 | 2021-03-06 11:02:13 +0000 | [diff] [blame] | 2534 | }; |
| 2535 | |
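| | /* |
| | * Task-work callback run by a task attached to an exiting ring: drop |
| | * that task's tctx node (unless cancellation already owns it) and |
| | * let io_ring_exit_work() make progress. |
| | */ |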
Pavel Begunkov | c072481 | 2021-10-04 20:02:54 +0100 | [diff] [blame] | 2536 | static __cold void io_tctx_exit_cb(struct callback_head *cb) |
Pavel Begunkov | d56d938 | 2021-03-06 11:02:13 +0000 | [diff] [blame] | 2537 | { |
| 2538 | struct io_uring_task *tctx = current->io_uring; |
| 2539 | struct io_tctx_exit *work; |
| 2540 | |
| 2541 | work = container_of(cb, struct io_tctx_exit, task_work); |
| 2542 | /* |
| 2543 | * When @in_idle, we're in cancellation and it's racy to remove the |
| 2544 | * node. It'll be removed by the end of cancellation, just ignore it. |
| 2545 | */ |
| 2546 | if (!atomic_read(&tctx->in_idle)) |
Pavel Begunkov | eef51da | 2021-06-14 02:36:15 +0100 | [diff] [blame] | 2547 | io_uring_del_tctx_node((unsigned long)work->ctx); |
Pavel Begunkov | d56d938 | 2021-03-06 11:02:13 +0000 | [diff] [blame] | 2548 | complete(&work->completion); |
| 2549 | } |
| 2550 | |
Pavel Begunkov | c072481 | 2021-10-04 20:02:54 +0100 | [diff] [blame] | 2551 | static __cold bool io_cancel_ctx_cb(struct io_wq_work *work, void *data) |
Pavel Begunkov | 28090c1 | 2021-04-25 23:34:45 +0100 | [diff] [blame] | 2552 | { |
| 2553 | struct io_kiocb *req = container_of(work, struct io_kiocb, work); |
| 2554 | |
| 2555 | return req->ctx == data; |
| 2556 | } |
| 2557 | |
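| | /* |
| | * Workqueue-based ring teardown: keep cancelling outstanding requests |
| | * until all ctx references are gone, detach every task still attached |
| | * to the ring, then free the context. |
| | */ |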
Pavel Begunkov | c072481 | 2021-10-04 20:02:54 +0100 | [diff] [blame] | 2558 | static __cold void io_ring_exit_work(struct work_struct *work) |
Jens Axboe | 85faa7b | 2020-04-09 18:14:00 -0600 | [diff] [blame] | 2559 | { |
Pavel Begunkov | d56d938 | 2021-03-06 11:02:13 +0000 | [diff] [blame] | 2560 | struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx, exit_work); |
Pavel Begunkov | b5bb3a2 | 2021-03-06 11:02:16 +0000 | [diff] [blame] | 2561 | unsigned long timeout = jiffies + HZ * 60 * 5; |
Pavel Begunkov | 58d3be2 | 2021-08-09 13:04:17 +0100 | [diff] [blame] | 2562 | unsigned long interval = HZ / 20; |
Pavel Begunkov | d56d938 | 2021-03-06 11:02:13 +0000 | [diff] [blame] | 2563 | struct io_tctx_exit exit; |
| 2564 | struct io_tctx_node *node; |
| 2565 | int ret; |
Jens Axboe | 85faa7b | 2020-04-09 18:14:00 -0600 | [diff] [blame] | 2566 | |
Jens Axboe | 56952e9 | 2020-06-17 15:00:04 -0600 | [diff] [blame] | 2567 | /* |
| 2568 | * If we're doing polled IO and end up having requests being |
| 2569 | * submitted async (out-of-line), then completions can come in while |
| 2570 | * we're waiting for refs to drop. We need to reap these manually, |
| 2571 | * as nobody else will be looking for them. |
| 2572 | */ |
Pavel Begunkov | b2edc0a | 2020-07-07 16:36:22 +0300 | [diff] [blame] | 2573 | do { |
Pavel Begunkov | affa87d | 2022-06-20 01:25:52 +0100 | [diff] [blame] | 2574 | while (io_uring_try_cancel_requests(ctx, NULL, true)) |
| 2575 | cond_resched(); |
| 2576 | |
Pavel Begunkov | 28090c1 | 2021-04-25 23:34:45 +0100 | [diff] [blame] | 2577 | if (ctx->sq_data) { |
| 2578 | struct io_sq_data *sqd = ctx->sq_data; |
| 2579 | struct task_struct *tsk; |
| 2580 | |
| 2581 | io_sq_thread_park(sqd); |
| 2582 | tsk = sqd->thread; |
| 2583 | if (tsk && tsk->io_uring && tsk->io_uring->io_wq) |
| 2584 | io_wq_cancel_cb(tsk->io_uring->io_wq, |
| 2585 | io_cancel_ctx_cb, ctx, true); |
| 2586 | io_sq_thread_unpark(sqd); |
| 2587 | } |
Pavel Begunkov | b5bb3a2 | 2021-03-06 11:02:16 +0000 | [diff] [blame] | 2588 | |
Pavel Begunkov | 37f0e76 | 2021-10-04 20:02:53 +0100 | [diff] [blame] | 2589 | io_req_caches_free(ctx); |
| 2590 | |
Pavel Begunkov | 58d3be2 | 2021-08-09 13:04:17 +0100 | [diff] [blame] | 2591 | if (WARN_ON_ONCE(time_after(jiffies, timeout))) { |
| 2592 | /* there is little hope left, don't run it too often */ |
| 2593 | interval = HZ * 60; |
| 2594 | } |
| 2595 | } while (!wait_for_completion_timeout(&ctx->ref_comp, interval)); |
Pavel Begunkov | d56d938 | 2021-03-06 11:02:13 +0000 | [diff] [blame] | 2596 | |
Pavel Begunkov | 7f00651 | 2021-04-14 13:38:34 +0100 | [diff] [blame] | 2597 | init_completion(&exit.completion); |
| 2598 | init_task_work(&exit.task_work, io_tctx_exit_cb); |
| 2599 | exit.ctx = ctx; |
Pavel Begunkov | 89b5066 | 2021-04-01 15:43:50 +0100 | [diff] [blame] | 2600 | /* |
| 2601 | * Some tasks may use the context even when all refs and requests have been put, |
| 2602 | * and they are free to do so while still holding uring_lock or |
Pavel Begunkov | 5b0a6ac | 2021-06-30 21:54:04 +0100 | [diff] [blame] | 2603 | * completion_lock, see io_req_task_submit(). Apart from other work, |
Pavel Begunkov | 89b5066 | 2021-04-01 15:43:50 +0100 | [diff] [blame] | 2604 | * this lock/unlock section also waits for them to finish. |
| 2605 | */ |
Pavel Begunkov | d56d938 | 2021-03-06 11:02:13 +0000 | [diff] [blame] | 2606 | mutex_lock(&ctx->uring_lock); |
| 2607 | while (!list_empty(&ctx->tctx_list)) { |
Pavel Begunkov | b5bb3a2 | 2021-03-06 11:02:16 +0000 | [diff] [blame] | 2608 | WARN_ON_ONCE(time_after(jiffies, timeout)); |
| 2609 | |
Pavel Begunkov | d56d938 | 2021-03-06 11:02:13 +0000 | [diff] [blame] | 2610 | node = list_first_entry(&ctx->tctx_list, struct io_tctx_node, |
| 2611 | ctx_node); |
Pavel Begunkov | 7f00651 | 2021-04-14 13:38:34 +0100 | [diff] [blame] | 2612 | /* don't spin on a single task if cancellation failed */ |
| 2613 | list_rotate_left(&ctx->tctx_list); |
Pavel Begunkov | d56d938 | 2021-03-06 11:02:13 +0000 | [diff] [blame] | 2614 | ret = task_work_add(node->task, &exit.task_work, TWA_SIGNAL); |
| 2615 | if (WARN_ON_ONCE(ret)) |
| 2616 | continue; |
Pavel Begunkov | d56d938 | 2021-03-06 11:02:13 +0000 | [diff] [blame] | 2617 | |
| 2618 | mutex_unlock(&ctx->uring_lock); |
| 2619 | wait_for_completion(&exit.completion); |
Pavel Begunkov | d56d938 | 2021-03-06 11:02:13 +0000 | [diff] [blame] | 2620 | mutex_lock(&ctx->uring_lock); |
| 2621 | } |
| 2622 | mutex_unlock(&ctx->uring_lock); |
Jens Axboe | 79ebeae | 2021-08-10 15:18:27 -0600 | [diff] [blame] | 2623 | spin_lock(&ctx->completion_lock); |
| 2624 | spin_unlock(&ctx->completion_lock); |
Pavel Begunkov | d56d938 | 2021-03-06 11:02:13 +0000 | [diff] [blame] | 2625 | |
Jens Axboe | 85faa7b | 2020-04-09 18:14:00 -0600 | [diff] [blame] | 2626 | io_ring_ctx_free(ctx); |
| 2627 | } |
| 2628 | |
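| | /* |
| | * Start killing a ring: mark the ctx refs dead, flush overflowed |
| | * CQEs, drop personalities and cancel what can be cancelled inline, |
| | * then punt the final reference drain to io_ring_exit_work(). |
| | */ |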
Pavel Begunkov | c072481 | 2021-10-04 20:02:54 +0100 | [diff] [blame] | 2629 | static __cold void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx) |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 2630 | { |
Matthew Wilcox (Oracle) | 61cf937 | 2021-03-08 14:16:16 +0000 | [diff] [blame] | 2631 | unsigned long index; |
| 2632 | struct creds *creds; |
| 2633 | |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 2634 | mutex_lock(&ctx->uring_lock); |
| 2635 | percpu_ref_kill(&ctx->refs); |
Pavel Begunkov | 634578f | 2020-12-06 22:22:44 +0000 | [diff] [blame] | 2636 | if (ctx->rings) |
Pavel Begunkov | 6c2450a | 2021-02-23 12:40:22 +0000 | [diff] [blame] | 2637 | __io_cqring_overflow_flush(ctx, true); |
Matthew Wilcox (Oracle) | 61cf937 | 2021-03-08 14:16:16 +0000 | [diff] [blame] | 2638 | xa_for_each(&ctx->personalities, index, creds) |
| 2639 | io_unregister_personality(ctx, index); |
Pavel Begunkov | 9ca9fb2 | 2022-06-16 10:22:12 +0100 | [diff] [blame] | 2640 | if (ctx->rings) |
| 2641 | io_poll_remove_all(ctx, NULL, true); |
Pavel Begunkov | eb42ceb | 2022-07-12 21:52:38 +0100 | [diff] [blame] | 2642 | io_notif_unregister(ctx); |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 2643 | mutex_unlock(&ctx->uring_lock); |
| 2644 | |
Pavel Begunkov | 60053be | 2022-03-21 22:02:20 +0000 | [diff] [blame] | 2645 | /* if ring setup failed, the ctx couldn't have issued any requests */ |
| 2646 | if (ctx->rings) { |
| 2647 | io_kill_timeouts(ctx, NULL, true); |
Pavel Begunkov | 60053be | 2022-03-21 22:02:20 +0000 | [diff] [blame] | 2648 | /* if we failed setting up the ctx, we might not have any rings */ |
| 2649 | io_iopoll_try_reap_events(ctx); |
| 2650 | } |
Jens Axboe | 309fc03 | 2020-07-10 09:13:34 -0600 | [diff] [blame] | 2651 | |
Jens Axboe | 85faa7b | 2020-04-09 18:14:00 -0600 | [diff] [blame] | 2652 | INIT_WORK(&ctx->exit_work, io_ring_exit_work); |
Jens Axboe | fc66677 | 2020-08-19 11:10:51 -0600 | [diff] [blame] | 2653 | /* |
| 2654 | * Use system_unbound_wq to avoid spawning tons of event kworkers |
| 2655 | * if we're exiting a ton of rings at the same time. It just adds |
| 2656 | * noise and overhead, there's no discernible change in runtime |
| 2657 | * over using system_wq. |
| 2658 | */ |
| 2659 | queue_work(system_unbound_wq, &ctx->exit_work); |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 2660 | } |
| 2661 | |
| 2662 | static int io_uring_release(struct inode *inode, struct file *file) |
| 2663 | { |
| 2664 | struct io_ring_ctx *ctx = file->private_data; |
| 2665 | |
| 2666 | file->private_data = NULL; |
| 2667 | io_ring_ctx_wait_and_kill(ctx); |
| 2668 | return 0; |
| 2669 | } |
| 2670 | |
Pavel Begunkov | f6edbab | 2020-11-06 13:00:26 +0000 | [diff] [blame] | 2671 | struct io_task_cancel { |
| 2672 | struct task_struct *task; |
Pavel Begunkov | 3dd0c97 | 2021-05-16 22:58:04 +0100 | [diff] [blame] | 2673 | bool all; |
Pavel Begunkov | f6edbab | 2020-11-06 13:00:26 +0000 | [diff] [blame] | 2674 | }; |
Pavel Begunkov | 67c4d9e | 2020-06-15 10:24:05 +0300 | [diff] [blame] | 2675 | |
Pavel Begunkov | f6edbab | 2020-11-06 13:00:26 +0000 | [diff] [blame] | 2676 | static bool io_cancel_task_cb(struct io_wq_work *work, void *data) |
Jens Axboe | b711d4e | 2020-08-16 08:23:05 -0700 | [diff] [blame] | 2677 | { |
Pavel Begunkov | 9a472ef | 2020-11-05 22:31:37 +0000 | [diff] [blame] | 2678 | struct io_kiocb *req = container_of(work, struct io_kiocb, work); |
Pavel Begunkov | f6edbab | 2020-11-06 13:00:26 +0000 | [diff] [blame] | 2679 | struct io_task_cancel *cancel = data; |
Pavel Begunkov | 9a472ef | 2020-11-05 22:31:37 +0000 | [diff] [blame] | 2680 | |
Pavel Begunkov | 6af3f48 | 2021-11-26 14:38:15 +0000 | [diff] [blame] | 2681 | return io_match_task_safe(req, cancel->task, cancel->all); |
Jens Axboe | b711d4e | 2020-08-16 08:23:05 -0700 | [diff] [blame] | 2682 | } |
| 2683 | |
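| | /* |
| | * Cancel deferred (drained) requests that match @task, completing |
| | * them with -ECANCELED. Returns true if anything was cancelled. |
| | */ |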
Pavel Begunkov | c072481 | 2021-10-04 20:02:54 +0100 | [diff] [blame] | 2684 | static __cold bool io_cancel_defer_files(struct io_ring_ctx *ctx, |
| 2685 | struct task_struct *task, |
| 2686 | bool cancel_all) |
Pavel Begunkov | b7ddce3 | 2020-09-06 00:45:14 +0300 | [diff] [blame] | 2687 | { |
Pavel Begunkov | e1915f7 | 2021-03-11 23:29:35 +0000 | [diff] [blame] | 2688 | struct io_defer_entry *de; |
Pavel Begunkov | b7ddce3 | 2020-09-06 00:45:14 +0300 | [diff] [blame] | 2689 | LIST_HEAD(list); |
| 2690 | |
Jens Axboe | 79ebeae | 2021-08-10 15:18:27 -0600 | [diff] [blame] | 2691 | spin_lock(&ctx->completion_lock); |
Pavel Begunkov | b7ddce3 | 2020-09-06 00:45:14 +0300 | [diff] [blame] | 2692 | list_for_each_entry_reverse(de, &ctx->defer_list, list) { |
Pavel Begunkov | 6af3f48 | 2021-11-26 14:38:15 +0000 | [diff] [blame] | 2693 | if (io_match_task_safe(de->req, task, cancel_all)) { |
Pavel Begunkov | b7ddce3 | 2020-09-06 00:45:14 +0300 | [diff] [blame] | 2694 | list_cut_position(&list, &ctx->defer_list, &de->list); |
| 2695 | break; |
| 2696 | } |
| 2697 | } |
Jens Axboe | 79ebeae | 2021-08-10 15:18:27 -0600 | [diff] [blame] | 2698 | spin_unlock(&ctx->completion_lock); |
Pavel Begunkov | e1915f7 | 2021-03-11 23:29:35 +0000 | [diff] [blame] | 2699 | if (list_empty(&list)) |
| 2700 | return false; |
Pavel Begunkov | b7ddce3 | 2020-09-06 00:45:14 +0300 | [diff] [blame] | 2701 | |
| 2702 | while (!list_empty(&list)) { |
| 2703 | de = list_first_entry(&list, struct io_defer_entry, list); |
| 2704 | list_del_init(&de->list); |
Pavel Begunkov | f41db273 | 2021-02-28 22:35:12 +0000 | [diff] [blame] | 2705 | io_req_complete_failed(de->req, -ECANCELED); |
Pavel Begunkov | b7ddce3 | 2020-09-06 00:45:14 +0300 | [diff] [blame] | 2706 | kfree(de); |
| 2707 | } |
Pavel Begunkov | e1915f7 | 2021-03-11 23:29:35 +0000 | [diff] [blame] | 2708 | return true; |
Pavel Begunkov | b7ddce3 | 2020-09-06 00:45:14 +0300 | [diff] [blame] | 2709 | } |
| 2710 | |
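| | /* |
| | * Cancel io-wq work queued for this ring by any of the tasks attached |
| | * to it. Returns true if at least one request was found. |
| | */ |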
Pavel Begunkov | c072481 | 2021-10-04 20:02:54 +0100 | [diff] [blame] | 2711 | static __cold bool io_uring_try_cancel_iowq(struct io_ring_ctx *ctx) |
Pavel Begunkov | 1b00764 | 2021-03-06 11:02:17 +0000 | [diff] [blame] | 2712 | { |
| 2713 | struct io_tctx_node *node; |
| 2714 | enum io_wq_cancel cret; |
| 2715 | bool ret = false; |
| 2716 | |
| 2717 | mutex_lock(&ctx->uring_lock); |
| 2718 | list_for_each_entry(node, &ctx->tctx_list, ctx_node) { |
| 2719 | struct io_uring_task *tctx = node->task->io_uring; |
| 2720 | |
| 2721 | /* |
| 2722 | * io_wq will stay alive while we hold uring_lock, because it's |
| 2723 | * killed after ctx nodes, which requires taking the lock. |
| 2724 | */ |
| 2725 | if (!tctx || !tctx->io_wq) |
| 2726 | continue; |
| 2727 | cret = io_wq_cancel_cb(tctx->io_wq, io_cancel_ctx_cb, ctx, true); |
| 2728 | ret |= (cret != IO_WQ_CANCEL_NOTFOUND); |
| 2729 | } |
| 2730 | mutex_unlock(&ctx->uring_lock); |
| 2731 | |
| 2732 | return ret; |
| 2733 | } |
| 2734 | |
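| | /* |
| | * One cancellation pass over @ctx: cancel matching io-wq work, reap |
| | * iopoll completions, and kill deferred requests, poll requests and |
| | * timeouts. Returns true if the pass did any work, so callers can |
| | * loop until nothing is left. |
| | */ |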
Pavel Begunkov | affa87d | 2022-06-20 01:25:52 +0100 | [diff] [blame] | 2735 | static __cold bool io_uring_try_cancel_requests(struct io_ring_ctx *ctx, |
Pavel Begunkov | c072481 | 2021-10-04 20:02:54 +0100 | [diff] [blame] | 2736 | struct task_struct *task, |
| 2737 | bool cancel_all) |
Pavel Begunkov | 9936c7c | 2021-02-04 13:51:56 +0000 | [diff] [blame] | 2738 | { |
Pavel Begunkov | 3dd0c97 | 2021-05-16 22:58:04 +0100 | [diff] [blame] | 2739 | struct io_task_cancel cancel = { .task = task, .all = cancel_all, }; |
Pavel Begunkov | 1b00764 | 2021-03-06 11:02:17 +0000 | [diff] [blame] | 2740 | struct io_uring_task *tctx = task ? task->io_uring : NULL; |
Pavel Begunkov | affa87d | 2022-06-20 01:25:52 +0100 | [diff] [blame] | 2741 | enum io_wq_cancel cret; |
| 2742 | bool ret = false; |
Pavel Begunkov | 9936c7c | 2021-02-04 13:51:56 +0000 | [diff] [blame] | 2743 | |
Pavel Begunkov | 60053be | 2022-03-21 22:02:20 +0000 | [diff] [blame] | 2744 | /* if ring setup failed, the ctx couldn't have issued any requests */ |
| 2745 | if (!ctx->rings) |
Pavel Begunkov | affa87d | 2022-06-20 01:25:52 +0100 | [diff] [blame] | 2746 | return false; |
Pavel Begunkov | 60053be | 2022-03-21 22:02:20 +0000 | [diff] [blame] | 2747 | |
Pavel Begunkov | affa87d | 2022-06-20 01:25:52 +0100 | [diff] [blame] | 2748 | if (!task) { |
| 2749 | ret |= io_uring_try_cancel_iowq(ctx); |
| 2750 | } else if (tctx && tctx->io_wq) { |
| 2751 | /* |
| 2752 | * Cancels requests of all rings, not only @ctx, but |
| 2753 | * it's fine as the task is in exit/exec. |
| 2754 | */ |
| 2755 | cret = io_wq_cancel_cb(tctx->io_wq, io_cancel_task_cb, |
| 2756 | &cancel, true); |
| 2757 | ret |= (cret != IO_WQ_CANCEL_NOTFOUND); |
Pavel Begunkov | 9936c7c | 2021-02-04 13:51:56 +0000 | [diff] [blame] | 2758 | } |
Pavel Begunkov | affa87d | 2022-06-20 01:25:52 +0100 | [diff] [blame] | 2759 | |
| 2760 | /* SQPOLL thread does its own polling */ |
| 2761 | if ((!(ctx->flags & IORING_SETUP_SQPOLL) && cancel_all) || |
| 2762 | (ctx->sq_data && ctx->sq_data->thread == current)) { |
| 2763 | while (!wq_list_empty(&ctx->iopoll_list)) { |
| 2764 | io_iopoll_try_reap_events(ctx); |
| 2765 | ret = true; |
| 2766 | } |
| 2767 | } |
| 2768 | |
| 2769 | ret |= io_cancel_defer_files(ctx, task, cancel_all); |
| 2770 | mutex_lock(&ctx->uring_lock); |
| 2771 | ret |= io_poll_remove_all(ctx, task, cancel_all); |
| 2772 | mutex_unlock(&ctx->uring_lock); |
| 2773 | ret |= io_kill_timeouts(ctx, task, cancel_all); |
| 2774 | if (task) |
| 2775 | ret |= io_run_task_work(); |
| 2776 | return ret; |
Pavel Begunkov | 9936c7c | 2021-02-04 13:51:56 +0000 | [diff] [blame] | 2777 | } |
| 2778 | |
Pavel Begunkov | 3f48cf1 | 2021-04-11 01:46:27 +0100 | [diff] [blame] | 2779 | static s64 tctx_inflight(struct io_uring_task *tctx, bool tracked) |
Pavel Begunkov | 521d6a7 | 2021-03-11 23:29:38 +0000 | [diff] [blame] | 2780 | { |
Pavel Begunkov | 3f48cf1 | 2021-04-11 01:46:27 +0100 | [diff] [blame] | 2781 | if (tracked) |
Jens Axboe | 9cae36a | 2022-06-01 23:57:02 -0600 | [diff] [blame] | 2782 | return atomic_read(&tctx->inflight_tracked); |
Pavel Begunkov | 521d6a7 | 2021-03-11 23:29:38 +0000 | [diff] [blame] | 2783 | return percpu_counter_sum(&tctx->inflight); |
| 2784 | } |
| 2785 | |
Pavel Begunkov | 78cc687 | 2021-06-14 02:36:23 +0100 | [diff] [blame] | 2786 | /* |
| 2787 | * Find any io_uring ctx that this task has registered or done IO on, and cancel |
Jens Axboe | 78a7806 | 2021-12-09 08:54:29 -0700 | [diff] [blame] | 2788 | * requests. @sqd should be non-NULL IFF it's an SQPOLL thread cancellation. |
Pavel Begunkov | 78cc687 | 2021-06-14 02:36:23 +0100 | [diff] [blame] | 2789 | */ |
Jens Axboe | 17437f3 | 2022-05-25 09:13:39 -0600 | [diff] [blame] | 2790 | __cold void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd) |
Pavel Begunkov | 0e9ddb3 | 2021-02-07 22:34:26 +0000 | [diff] [blame] | 2791 | { |
Pavel Begunkov | 521d6a7 | 2021-03-11 23:29:38 +0000 | [diff] [blame] | 2792 | struct io_uring_task *tctx = current->io_uring; |
Pavel Begunkov | 734551d | 2021-04-18 14:52:09 +0100 | [diff] [blame] | 2793 | struct io_ring_ctx *ctx; |
Jens Axboe | fdaf083 | 2020-10-30 09:37:30 -0600 | [diff] [blame] | 2794 | s64 inflight; |
Pavel Begunkov | 0e9ddb3 | 2021-02-07 22:34:26 +0000 | [diff] [blame] | 2795 | DEFINE_WAIT(wait); |
Jens Axboe | fdaf083 | 2020-10-30 09:37:30 -0600 | [diff] [blame] | 2796 | |
Pavel Begunkov | 78cc687 | 2021-06-14 02:36:23 +0100 | [diff] [blame] | 2797 | WARN_ON_ONCE(sqd && sqd->thread != current); |
| 2798 | |
Palash Oswal | 6d042ff | 2021-04-27 18:21:49 +0530 | [diff] [blame] | 2799 | if (!current->io_uring) |
| 2800 | return; |
Pavel Begunkov | 17a9105 | 2021-05-23 15:48:39 +0100 | [diff] [blame] | 2801 | if (tctx->io_wq) |
| 2802 | io_wq_exit_start(tctx->io_wq); |
| 2803 | |
Jens Axboe | fdaf083 | 2020-10-30 09:37:30 -0600 | [diff] [blame] | 2804 | atomic_inc(&tctx->in_idle); |
Jens Axboe | d8a6df1 | 2020-10-15 16:24:45 -0600 | [diff] [blame] | 2805 | do { |
Pavel Begunkov | affa87d | 2022-06-20 01:25:52 +0100 | [diff] [blame] | 2806 | bool loop = false; |
| 2807 | |
Pavel Begunkov | e9dbe22 | 2021-08-09 13:04:20 +0100 | [diff] [blame] | 2808 | io_uring_drop_tctx_refs(current); |
Jens Axboe | 0f21220 | 2020-09-13 13:09:39 -0600 | [diff] [blame] | 2809 | /* read completions before cancelations */ |
Pavel Begunkov | 3dd0c97 | 2021-05-16 22:58:04 +0100 | [diff] [blame] | 2810 | inflight = tctx_inflight(tctx, !cancel_all); |
Jens Axboe | d8a6df1 | 2020-10-15 16:24:45 -0600 | [diff] [blame] | 2811 | if (!inflight) |
| 2812 | break; |
Jens Axboe | 0f21220 | 2020-09-13 13:09:39 -0600 | [diff] [blame] | 2813 | |
Pavel Begunkov | 78cc687 | 2021-06-14 02:36:23 +0100 | [diff] [blame] | 2814 | if (!sqd) { |
| 2815 | struct io_tctx_node *node; |
| 2816 | unsigned long index; |
| 2817 | |
| 2818 | xa_for_each(&tctx->xa, index, node) { |
| 2819 | /* sqpoll task will cancel all its requests */ |
| 2820 | if (node->ctx->sq_data) |
| 2821 | continue; |
Pavel Begunkov | affa87d | 2022-06-20 01:25:52 +0100 | [diff] [blame] | 2822 | loop |= io_uring_try_cancel_requests(node->ctx, |
| 2823 | current, cancel_all); |
Pavel Begunkov | 78cc687 | 2021-06-14 02:36:23 +0100 | [diff] [blame] | 2824 | } |
| 2825 | } else { |
| 2826 | list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) |
Pavel Begunkov | affa87d | 2022-06-20 01:25:52 +0100 | [diff] [blame] | 2827 | loop |= io_uring_try_cancel_requests(ctx, |
| 2828 | current, |
| 2829 | cancel_all); |
| 2830 | } |
| 2831 | |
| 2832 | if (loop) { |
| 2833 | cond_resched(); |
| 2834 | continue; |
Pavel Begunkov | 78cc687 | 2021-06-14 02:36:23 +0100 | [diff] [blame] | 2835 | } |
| 2836 | |
Jens Axboe | 78a7806 | 2021-12-09 08:54:29 -0700 | [diff] [blame] | 2837 | prepare_to_wait(&tctx->wait, &wait, TASK_INTERRUPTIBLE); |
| 2838 | io_run_task_work(); |
Pavel Begunkov | e9dbe22 | 2021-08-09 13:04:20 +0100 | [diff] [blame] | 2839 | io_uring_drop_tctx_refs(current); |
Jens Axboe | 78a7806 | 2021-12-09 08:54:29 -0700 | [diff] [blame] | 2840 | |
Jens Axboe | 0f21220 | 2020-09-13 13:09:39 -0600 | [diff] [blame] | 2841 | /* |
Pavel Begunkov | a1bb3cd | 2021-01-26 15:28:26 +0000 | [diff] [blame] | 2842 | * If we've seen completions, retry without waiting. This |
| 2843 | * avoids a race where a completion comes in before we did |
| 2844 | * prepare_to_wait(). |
Jens Axboe | 0f21220 | 2020-09-13 13:09:39 -0600 | [diff] [blame] | 2845 | */ |
Pavel Begunkov | 3dd0c97 | 2021-05-16 22:58:04 +0100 | [diff] [blame] | 2846 | if (inflight == tctx_inflight(tctx, !cancel_all)) |
Pavel Begunkov | a1bb3cd | 2021-01-26 15:28:26 +0000 | [diff] [blame] | 2847 | schedule(); |
Pavel Begunkov | f57555e | 2020-12-20 13:21:44 +0000 | [diff] [blame] | 2848 | finish_wait(&tctx->wait, &wait); |
Jens Axboe | d8a6df1 | 2020-10-15 16:24:45 -0600 | [diff] [blame] | 2849 | } while (1); |
Pavel Begunkov | de7f1d9 | 2021-01-04 20:43:29 +0000 | [diff] [blame] | 2850 | |
Pavel Begunkov | 8452d4a | 2021-02-27 11:16:46 +0000 | [diff] [blame] | 2851 | io_uring_clean_tctx(tctx); |
Pavel Begunkov | 3dd0c97 | 2021-05-16 22:58:04 +0100 | [diff] [blame] | 2852 | if (cancel_all) { |
Pavel Begunkov | 3cc7fdb | 2022-01-09 00:53:22 +0000 | [diff] [blame] | 2853 | /* |
| 2854 | * We shouldn't run task_works after cancel, so just leave |
| 2855 | * ->in_idle set for normal exit. |
| 2856 | */ |
| 2857 | atomic_dec(&tctx->in_idle); |
Pavel Begunkov | 3f48cf1 | 2021-04-11 01:46:27 +0100 | [diff] [blame] | 2858 | /* for exec all current's requests should be gone, kill tctx */ |
| 2859 | __io_uring_free(current); |
| 2860 | } |
Pavel Begunkov | 44e728b | 2020-06-15 10:24:04 +0300 | [diff] [blame] | 2861 | } |
| 2862 | |
Hao Xu | f552a27 | 2021-08-12 12:14:35 +0800 | [diff] [blame] | 2863 | void __io_uring_cancel(bool cancel_all) |
Pavel Begunkov | 78cc687 | 2021-06-14 02:36:23 +0100 | [diff] [blame] | 2864 | { |
Hao Xu | f552a27 | 2021-08-12 12:14:35 +0800 | [diff] [blame] | 2865 | io_uring_cancel_generic(cancel_all, NULL); |
Pavel Begunkov | 78cc687 | 2021-06-14 02:36:23 +0100 | [diff] [blame] | 2866 | } |
| 2867 | |
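| | /* |
| | * Translate an mmap offset (IORING_OFF_*) into the kernel address of |
| | * the rings or the SQE array, checking the requested size against the |
| | * backing allocation. |
| | */ |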
Roman Penyaev | 6c5c240 | 2019-11-28 12:53:22 +0100 | [diff] [blame] | 2868 | static void *io_uring_validate_mmap_request(struct file *file, |
| 2869 | loff_t pgoff, size_t sz) |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 2870 | { |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 2871 | struct io_ring_ctx *ctx = file->private_data; |
Roman Penyaev | 6c5c240 | 2019-11-28 12:53:22 +0100 | [diff] [blame] | 2872 | loff_t offset = pgoff << PAGE_SHIFT; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 2873 | struct page *page; |
| 2874 | void *ptr; |
| 2875 | |
| 2876 | switch (offset) { |
| 2877 | case IORING_OFF_SQ_RING: |
Hristo Venev | 75b28af | 2019-08-26 17:23:46 +0000 | [diff] [blame] | 2878 | case IORING_OFF_CQ_RING: |
| 2879 | ptr = ctx->rings; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 2880 | break; |
| 2881 | case IORING_OFF_SQES: |
| 2882 | ptr = ctx->sq_sqes; |
| 2883 | break; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 2884 | default: |
Roman Penyaev | 6c5c240 | 2019-11-28 12:53:22 +0100 | [diff] [blame] | 2885 | return ERR_PTR(-EINVAL); |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 2886 | } |
| 2887 | |
| 2888 | page = virt_to_head_page(ptr); |
Matthew Wilcox (Oracle) | a50b854 | 2019-09-23 15:34:25 -0700 | [diff] [blame] | 2889 | if (sz > page_size(page)) |
Roman Penyaev | 6c5c240 | 2019-11-28 12:53:22 +0100 | [diff] [blame] | 2890 | return ERR_PTR(-EINVAL); |
| 2891 | |
| 2892 | return ptr; |
| 2893 | } |
| 2894 | |
| 2895 | #ifdef CONFIG_MMU |
| 2896 | |
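| | /* |
| | * Map the rings or SQE array into userspace as a plain pfn remap of |
| | * the kernel allocation validated above. |
| | */ |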
Pavel Begunkov | c072481 | 2021-10-04 20:02:54 +0100 | [diff] [blame] | 2897 | static __cold int io_uring_mmap(struct file *file, struct vm_area_struct *vma) |
Roman Penyaev | 6c5c240 | 2019-11-28 12:53:22 +0100 | [diff] [blame] | 2898 | { |
| 2899 | size_t sz = vma->vm_end - vma->vm_start; |
| 2900 | unsigned long pfn; |
| 2901 | void *ptr; |
| 2902 | |
| 2903 | ptr = io_uring_validate_mmap_request(file, vma->vm_pgoff, sz); |
| 2904 | if (IS_ERR(ptr)) |
| 2905 | return PTR_ERR(ptr); |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 2906 | |
| 2907 | pfn = virt_to_phys(ptr) >> PAGE_SHIFT; |
| 2908 | return remap_pfn_range(vma, vma->vm_start, pfn, sz, vma->vm_page_prot); |
| 2909 | } |
| 2910 | |
Roman Penyaev | 6c5c240 | 2019-11-28 12:53:22 +0100 | [diff] [blame] | 2911 | #else /* !CONFIG_MMU */ |
| 2912 | |
| 2913 | static int io_uring_mmap(struct file *file, struct vm_area_struct *vma) |
| 2914 | { |
| 2915 | return vma->vm_flags & (VM_SHARED | VM_MAYSHARE) ? 0 : -EINVAL; |
| 2916 | } |
| 2917 | |
| 2918 | static unsigned int io_uring_nommu_mmap_capabilities(struct file *file) |
| 2919 | { |
| 2920 | return NOMMU_MAP_DIRECT | NOMMU_MAP_READ | NOMMU_MAP_WRITE; |
| 2921 | } |
| 2922 | |
| 2923 | static unsigned long io_uring_nommu_get_unmapped_area(struct file *file, |
| 2924 | unsigned long addr, unsigned long len, |
| 2925 | unsigned long pgoff, unsigned long flags) |
| 2926 | { |
| 2927 | void *ptr; |
| 2928 | |
| 2929 | ptr = io_uring_validate_mmap_request(file, pgoff, len); |
| 2930 | if (IS_ERR(ptr)) |
| 2931 | return PTR_ERR(ptr); |
| 2932 | |
| 2933 | return (unsigned long) ptr; |
| 2934 | } |
| 2935 | |
| 2936 | #endif /* !CONFIG_MMU */ |
| 2937 | |
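| | /* |
| | * With IORING_ENTER_EXT_ARG, only validate that the extended argument |
| | * is sized correctly and readable; the iopoll wait path doesn't |
| | * consume its contents. |
| | */ |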
Pavel Begunkov | f81440d | 2022-03-22 14:07:56 +0000 | [diff] [blame] | 2938 | static int io_validate_ext_arg(unsigned flags, const void __user *argp, size_t argsz) |
| 2939 | { |
| 2940 | if (flags & IORING_ENTER_EXT_ARG) { |
| 2941 | struct io_uring_getevents_arg arg; |
| 2942 | |
| 2943 | if (argsz != sizeof(arg)) |
| 2944 | return -EINVAL; |
| 2945 | if (copy_from_user(&arg, argp, sizeof(arg))) |
| 2946 | return -EFAULT; |
| 2947 | } |
| 2948 | return 0; |
| 2949 | } |
| 2950 | |
Hao Xu | c73ebb6 | 2020-11-03 10:54:37 +0800 | [diff] [blame] | 2951 | static int io_get_ext_arg(unsigned flags, const void __user *argp, size_t *argsz, |
| 2952 | struct __kernel_timespec __user **ts, |
| 2953 | const sigset_t __user **sig) |
| 2954 | { |
| 2955 | struct io_uring_getevents_arg arg; |
| 2956 | |
| 2957 | /* |
| 2958 | * If EXT_ARG isn't set, then we have no timespec and the argp pointer |
| 2959 | * is just a pointer to the sigset_t. |
| 2960 | */ |
| 2961 | if (!(flags & IORING_ENTER_EXT_ARG)) { |
| 2962 | *sig = (const sigset_t __user *) argp; |
| 2963 | *ts = NULL; |
| 2964 | return 0; |
| 2965 | } |
| 2966 | |
| 2967 | /* |
| 2968 | * EXT_ARG is set - ensure we agree on the size of it and copy in the |
| 2969 | * timespec and sigset_t pointers if the struct is valid. |
| 2970 | */ |
| 2971 | if (*argsz != sizeof(arg)) |
| 2972 | return -EINVAL; |
| 2973 | if (copy_from_user(&arg, argp, sizeof(arg))) |
| 2974 | return -EFAULT; |
Dylan Yudaken | d2347b9 | 2022-04-12 09:30:42 -0700 | [diff] [blame] | 2975 | if (arg.pad) |
| 2976 | return -EINVAL; |
Hao Xu | c73ebb6 | 2020-11-03 10:54:37 +0800 | [diff] [blame] | 2977 | *sig = u64_to_user_ptr(arg.sigmask); |
| 2978 | *argsz = arg.sigmask_sz; |
| 2979 | *ts = u64_to_user_ptr(arg.ts); |
| 2980 | return 0; |
| 2981 | } |
| 2982 | |
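| | /* |
| | * io_uring_enter(): submit new SQEs and/or wait for completions. With |
| | * IORING_SETUP_SQPOLL the kernel thread does the submission and this |
| | * call only wakes or waits on it; otherwise SQEs are submitted inline |
| | * under uring_lock before the optional IORING_ENTER_GETEVENTS wait. |
| | */ |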
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 2983 | SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit, |
Hao Xu | c73ebb6 | 2020-11-03 10:54:37 +0800 | [diff] [blame] | 2984 | u32, min_complete, u32, flags, const void __user *, argp, |
| 2985 | size_t, argsz) |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 2986 | { |
| 2987 | struct io_ring_ctx *ctx; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 2988 | struct fd f; |
Pavel Begunkov | 33f993d | 2021-03-19 17:22:30 +0000 | [diff] [blame] | 2989 | long ret; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 2990 | |
Jens Axboe | 4c6e277 | 2020-07-01 11:29:10 -0600 | [diff] [blame] | 2991 | io_run_task_work(); |
Jens Axboe | b41e985 | 2020-02-17 09:52:41 -0700 | [diff] [blame] | 2992 | |
Pavel Begunkov | 33f993d | 2021-03-19 17:22:30 +0000 | [diff] [blame] | 2993 | if (unlikely(flags & ~(IORING_ENTER_GETEVENTS | IORING_ENTER_SQ_WAKEUP | |
Jens Axboe | e7a6c00 | 2022-03-04 08:22:22 -0700 | [diff] [blame] | 2994 | IORING_ENTER_SQ_WAIT | IORING_ENTER_EXT_ARG | |
| 2995 | IORING_ENTER_REGISTERED_RING))) |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 2996 | return -EINVAL; |
| 2997 | |
Jens Axboe | e7a6c00 | 2022-03-04 08:22:22 -0700 | [diff] [blame] | 2998 | /* |
| 2999 | * Ring fd has been registered via IORING_REGISTER_RING_FDS, we |
| 3000 | * need only dereference our task private array to find it. |
| 3001 | */ |
| 3002 | if (flags & IORING_ENTER_REGISTERED_RING) { |
| 3003 | struct io_uring_task *tctx = current->io_uring; |
| 3004 | |
Pavel Begunkov | 3273c44 | 2022-06-25 11:53:01 +0100 | [diff] [blame] | 3005 | if (unlikely(!tctx || fd >= IO_RINGFD_REG_MAX)) |
Jens Axboe | e7a6c00 | 2022-03-04 08:22:22 -0700 | [diff] [blame] | 3006 | return -EINVAL; |
| 3007 | fd = array_index_nospec(fd, IO_RINGFD_REG_MAX); |
| 3008 | f.file = tctx->registered_rings[fd]; |
Al Viro | 4329490 | 2022-05-11 20:30:20 -0400 | [diff] [blame] | 3009 | f.flags = 0; |
Pavel Begunkov | 3273c44 | 2022-06-25 11:53:01 +0100 | [diff] [blame] | 3010 | if (unlikely(!f.file)) |
| 3011 | return -EBADF; |
Jens Axboe | e7a6c00 | 2022-03-04 08:22:22 -0700 | [diff] [blame] | 3012 | } else { |
| 3013 | f = fdget(fd); |
Pavel Begunkov | 3273c44 | 2022-06-25 11:53:01 +0100 | [diff] [blame] | 3014 | if (unlikely(!f.file)) |
| 3015 | return -EBADF; |
| 3016 | ret = -EOPNOTSUPP; |
| 3017 | if (unlikely(!io_is_uring_fops(f.file))) |
Pavel Begunkov | fbb8bb0 | 2022-06-25 11:53:02 +0100 | [diff] [blame] | 3018 | goto out; |
Jens Axboe | e7a6c00 | 2022-03-04 08:22:22 -0700 | [diff] [blame] | 3019 | } |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 3020 | |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 3021 | ctx = f.file->private_data; |
Stefano Garzarella | 7e84e1c | 2020-08-27 16:58:31 +0200 | [diff] [blame] | 3022 | ret = -EBADFD; |
Pavel Begunkov | 33f993d | 2021-03-19 17:22:30 +0000 | [diff] [blame] | 3023 | if (unlikely(ctx->flags & IORING_SETUP_R_DISABLED)) |
Stefano Garzarella | 7e84e1c | 2020-08-27 16:58:31 +0200 | [diff] [blame] | 3024 | goto out; |
| 3025 | |
Jens Axboe | 6c271ce | 2019-01-10 11:22:30 -0700 | [diff] [blame] | 3026 | /* |
| 3027 | * For SQ polling, the thread will do all submissions and completions. |
| 3028 | * Just return the requested submit count, and wake the thread if |
| 3029 | * we were asked to. |
| 3030 | */ |
Jens Axboe | b2a9ead | 2019-09-12 14:19:16 -0600 | [diff] [blame] | 3031 | ret = 0; |
Jens Axboe | 6c271ce | 2019-01-10 11:22:30 -0700 | [diff] [blame] | 3032 | if (ctx->flags & IORING_SETUP_SQPOLL) { |
Pavel Begunkov | 90f6736 | 2021-08-09 20:18:12 +0100 | [diff] [blame] | 3033 | io_cqring_overflow_flush(ctx); |
Pavel Begunkov | 89448c4 | 2020-12-17 00:24:39 +0000 | [diff] [blame] | 3034 | |
Jens Axboe | 21f9652 | 2021-08-14 09:04:40 -0600 | [diff] [blame] | 3035 | if (unlikely(ctx->sq_data->thread == NULL)) { |
| 3036 | ret = -EOWNERDEAD; |
Stefan Metzmacher | 0414748 | 2021-03-07 11:54:29 +0100 | [diff] [blame] | 3037 | goto out; |
Jens Axboe | 21f9652 | 2021-08-14 09:04:40 -0600 | [diff] [blame] | 3038 | } |
Jens Axboe | 6c271ce | 2019-01-10 11:22:30 -0700 | [diff] [blame] | 3039 | if (flags & IORING_ENTER_SQ_WAKEUP) |
Jens Axboe | 534ca6d | 2020-09-02 13:52:19 -0600 | [diff] [blame] | 3040 | wake_up(&ctx->sq_data->wait); |
Pavel Begunkov | d9d0521 | 2021-01-08 20:57:25 +0000 | [diff] [blame] | 3041 | if (flags & IORING_ENTER_SQ_WAIT) { |
| 3042 | ret = io_sqpoll_wait_sq(ctx); |
| 3043 | if (ret) |
| 3044 | goto out; |
| 3045 | } |
Dylan Yudaken | 3e813c9 | 2022-04-21 02:13:42 -0700 | [diff] [blame] | 3046 | ret = to_submit; |
Jens Axboe | b2a9ead | 2019-09-12 14:19:16 -0600 | [diff] [blame] | 3047 | } else if (to_submit) { |
Pavel Begunkov | eef51da | 2021-06-14 02:36:15 +0100 | [diff] [blame] | 3048 | ret = io_uring_add_tctx_node(ctx); |
Jens Axboe | 0f21220 | 2020-09-13 13:09:39 -0600 | [diff] [blame] | 3049 | if (unlikely(ret)) |
| 3050 | goto out; |
Pavel Begunkov | 7c504e65 | 2019-12-18 19:53:45 +0300 | [diff] [blame] | 3051 | |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 3052 | mutex_lock(&ctx->uring_lock); |
Dylan Yudaken | 3e813c9 | 2022-04-21 02:13:42 -0700 | [diff] [blame] | 3053 | ret = io_submit_sqes(ctx, to_submit); |
| 3054 | if (ret != to_submit) { |
Pavel Begunkov | d487b43 | 2022-03-22 14:07:58 +0000 | [diff] [blame] | 3055 | mutex_unlock(&ctx->uring_lock); |
Pavel Begunkov | 7c504e65 | 2019-12-18 19:53:45 +0300 | [diff] [blame] | 3056 | goto out; |
Pavel Begunkov | d487b43 | 2022-03-22 14:07:58 +0000 | [diff] [blame] | 3057 | } |
| 3058 | if ((flags & IORING_ENTER_GETEVENTS) && ctx->syscall_iopoll) |
| 3059 | goto iopoll_locked; |
| 3060 | mutex_unlock(&ctx->uring_lock); |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 3061 | } |
| 3062 | if (flags & IORING_ENTER_GETEVENTS) { |
Dylan Yudaken | 3e813c9 | 2022-04-21 02:13:42 -0700 | [diff] [blame] | 3063 | int ret2; |
Pavel Begunkov | 773697b | 2022-03-22 14:07:57 +0000 | [diff] [blame] | 3064 | if (ctx->syscall_iopoll) { |
Pavel Begunkov | d487b43 | 2022-03-22 14:07:58 +0000 | [diff] [blame] | 3065 | /* |
| 3066 | * We disallow the app entering submit/complete with |
| 3067 | * polling, but we still need to lock the ring to |
| 3068 | * prevent racing with polled issue that got punted to |
| 3069 | * a workqueue. |
| 3070 | */ |
| 3071 | mutex_lock(&ctx->uring_lock); |
| 3072 | iopoll_locked: |
Dylan Yudaken | 3e813c9 | 2022-04-21 02:13:42 -0700 | [diff] [blame] | 3073 | ret2 = io_validate_ext_arg(flags, argp, argsz); |
| 3074 | if (likely(!ret2)) { |
| 3075 | min_complete = min(min_complete, |
| 3076 | ctx->cq_entries); |
| 3077 | ret2 = io_iopoll_check(ctx, min_complete); |
Pavel Begunkov | d487b43 | 2022-03-22 14:07:58 +0000 | [diff] [blame] | 3078 | } |
| 3079 | mutex_unlock(&ctx->uring_lock); |
Jens Axboe | def596e | 2019-01-09 08:59:42 -0700 | [diff] [blame] | 3080 | } else { |
Pavel Begunkov | f81440d | 2022-03-22 14:07:56 +0000 | [diff] [blame] | 3081 | const sigset_t __user *sig; |
| 3082 | struct __kernel_timespec __user *ts; |
| 3083 | |
Dylan Yudaken | 3e813c9 | 2022-04-21 02:13:42 -0700 | [diff] [blame] | 3084 | ret2 = io_get_ext_arg(flags, argp, &argsz, &ts, &sig); |
| 3085 | if (likely(!ret2)) { |
| 3086 | min_complete = min(min_complete, |
| 3087 | ctx->cq_entries); |
| 3088 | ret2 = io_cqring_wait(ctx, min_complete, sig, |
| 3089 | argsz, ts); |
| 3090 | } |
Jens Axboe | def596e | 2019-01-09 08:59:42 -0700 | [diff] [blame] | 3091 | } |
Dylan Yudaken | 3e813c9 | 2022-04-21 02:13:42 -0700 | [diff] [blame] | 3092 | |
Dylan Yudaken | 155bc95 | 2022-04-21 02:13:44 -0700 | [diff] [blame] | 3093 | if (!ret) { |
Dylan Yudaken | 3e813c9 | 2022-04-21 02:13:42 -0700 | [diff] [blame] | 3094 | ret = ret2; |
| 3095 | |
Dylan Yudaken | 155bc95 | 2022-04-21 02:13:44 -0700 | [diff] [blame] | 3096 | /* |
| 3097 | * EBADR indicates that one or more CQE were dropped. |
| 3098 | * Once the user has been informed we can clear the bit |
| 3099 | * as they are obviously ok with those drops. |
| 3100 | */ |
| 3101 | if (unlikely(ret2 == -EBADR)) |
| 3102 | clear_bit(IO_CHECK_CQ_DROPPED_BIT, |
| 3103 | &ctx->check_cq); |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 3104 | } |
| 3105 | } |
Pavel Begunkov | 7c504e65 | 2019-12-18 19:53:45 +0300 | [diff] [blame] | 3106 | out: |
Al Viro | 4329490 | 2022-05-11 20:30:20 -0400 | [diff] [blame] | 3107 | fdput(f); |
Dylan Yudaken | 3e813c9 | 2022-04-21 02:13:42 -0700 | [diff] [blame] | 3108 | return ret; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 3109 | } |
| 3110 | |
| 3111 | static const struct file_operations io_uring_fops = { |
| 3112 | .release = io_uring_release, |
| 3113 | .mmap = io_uring_mmap, |
Roman Penyaev | 6c5c240 | 2019-11-28 12:53:22 +0100 | [diff] [blame] | 3114 | #ifndef CONFIG_MMU |
| 3115 | .get_unmapped_area = io_uring_nommu_get_unmapped_area, |
| 3116 | .mmap_capabilities = io_uring_nommu_mmap_capabilities, |
| 3117 | #endif |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 3118 | .poll = io_uring_poll, |
Tobias Klauser | bebdb65 | 2020-02-26 18:38:32 +0100 | [diff] [blame] | 3119 | #ifdef CONFIG_PROC_FS |
Jens Axboe | 87ce955 | 2020-01-30 08:25:34 -0700 | [diff] [blame] | 3120 | .show_fdinfo = io_uring_show_fdinfo, |
Tobias Klauser | bebdb65 | 2020-02-26 18:38:32 +0100 | [diff] [blame] | 3121 | #endif |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 3122 | }; |
| 3123 | |
Jens Axboe | 92ac8be | 2022-05-25 11:48:35 -0600 | [diff] [blame] | 3124 | bool io_is_uring_fops(struct file *file) |
| 3125 | { |
| 3126 | return file->f_op == &io_uring_fops; |
| 3127 | } |
| 3128 | |
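| | /* |
| | * Allocate the shared SQ/CQ rings and the SQE array from the (already |
| | * clamped) entry counts in @p, and fill in the masks and sizes that |
| | * userspace reads through the mmap'ed region. |
| | */ |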
Pavel Begunkov | c072481 | 2021-10-04 20:02:54 +0100 | [diff] [blame] | 3129 | static __cold int io_allocate_scq_urings(struct io_ring_ctx *ctx, |
| 3130 | struct io_uring_params *p) |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 3131 | { |
Hristo Venev | 75b28af | 2019-08-26 17:23:46 +0000 | [diff] [blame] | 3132 | struct io_rings *rings; |
| 3133 | size_t size, sq_array_offset; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 3134 | |
Jens Axboe | bd74048 | 2020-08-05 12:58:23 -0600 | [diff] [blame] | 3135 | /* make sure these are sane, as we already accounted them */ |
| 3136 | ctx->sq_entries = p->sq_entries; |
| 3137 | ctx->cq_entries = p->cq_entries; |
| 3138 | |
Stefan Roesch | baf9cb6 | 2022-04-26 11:21:25 -0700 | [diff] [blame] | 3139 | size = rings_size(ctx, p->sq_entries, p->cq_entries, &sq_array_offset); |
Hristo Venev | 75b28af | 2019-08-26 17:23:46 +0000 | [diff] [blame] | 3140 | if (size == SIZE_MAX) |
| 3141 | return -EOVERFLOW; |
| 3142 | |
| 3143 | rings = io_mem_alloc(size); |
| 3144 | if (!rings) |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 3145 | return -ENOMEM; |
| 3146 | |
Hristo Venev | 75b28af | 2019-08-26 17:23:46 +0000 | [diff] [blame] | 3147 | ctx->rings = rings; |
| 3148 | ctx->sq_array = (u32 *)((char *)rings + sq_array_offset); |
| 3149 | rings->sq_ring_mask = p->sq_entries - 1; |
| 3150 | rings->cq_ring_mask = p->cq_entries - 1; |
| 3151 | rings->sq_ring_entries = p->sq_entries; |
| 3152 | rings->cq_ring_entries = p->cq_entries; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 3153 | |
Jens Axboe | ebdeb7c0 | 2022-03-31 19:27:52 -0600 | [diff] [blame] | 3154 | if (p->flags & IORING_SETUP_SQE128) |
| 3155 | size = array_size(2 * sizeof(struct io_uring_sqe), p->sq_entries); |
| 3156 | else |
| 3157 | size = array_size(sizeof(struct io_uring_sqe), p->sq_entries); |
Jens Axboe | eb065d3 | 2019-11-20 09:26:29 -0700 | [diff] [blame] | 3158 | if (size == SIZE_MAX) { |
| 3159 | io_mem_free(ctx->rings); |
| 3160 | ctx->rings = NULL; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 3161 | return -EOVERFLOW; |
Jens Axboe | eb065d3 | 2019-11-20 09:26:29 -0700 | [diff] [blame] | 3162 | } |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 3163 | |
| 3164 | ctx->sq_sqes = io_mem_alloc(size); |
Jens Axboe | eb065d3 | 2019-11-20 09:26:29 -0700 | [diff] [blame] | 3165 | if (!ctx->sq_sqes) { |
| 3166 | io_mem_free(ctx->rings); |
| 3167 | ctx->rings = NULL; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 3168 | return -ENOMEM; |
Jens Axboe | eb065d3 | 2019-11-20 09:26:29 -0700 | [diff] [blame] | 3169 | } |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 3170 | |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 3171 | return 0; |
| 3172 | } |
| 3173 | |
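| | /* |
| | * Reserve an fd and attach the calling task to the ring before making |
| | * the file visible in the fd table. |
| | */ |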
Pavel Begunkov | 9faadcc | 2020-12-21 18:34:05 +0000 | [diff] [blame] | 3174 | static int io_uring_install_fd(struct io_ring_ctx *ctx, struct file *file) |
| 3175 | { |
| 3176 | int ret, fd; |
| 3177 | |
| 3178 | fd = get_unused_fd_flags(O_RDWR | O_CLOEXEC); |
| 3179 | if (fd < 0) |
| 3180 | return fd; |
| 3181 | |
Pavel Begunkov | 97bbdc0 | 2022-06-16 10:22:08 +0100 | [diff] [blame] | 3182 | ret = __io_uring_add_tctx_node(ctx, false); |
Pavel Begunkov | 9faadcc | 2020-12-21 18:34:05 +0000 | [diff] [blame] | 3183 | if (ret) { |
| 3184 | put_unused_fd(fd); |
| 3185 | return ret; |
| 3186 | } |
| 3187 | fd_install(fd, file); |
| 3188 | return fd; |
| 3189 | } |
| 3190 | |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 3191 | /* |
| 3192 | * Allocate an anonymous fd, this is what constitutes the application |
| 3193 | * visible backing of an io_uring instance. The application mmaps this |
| 3194 | * fd to gain access to the SQ/CQ ring details. If UNIX sockets are enabled, |
| 3195 | * we have to tie this fd to a socket for file garbage collection purposes. |
| 3196 | */ |
Pavel Begunkov | 9faadcc | 2020-12-21 18:34:05 +0000 | [diff] [blame] | 3197 | static struct file *io_uring_get_file(struct io_ring_ctx *ctx) |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 3198 | { |
| 3199 | struct file *file; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 3200 | #if defined(CONFIG_UNIX) |
Pavel Begunkov | 9faadcc | 2020-12-21 18:34:05 +0000 | [diff] [blame] | 3201 | int ret; |
| 3202 | |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 3203 | ret = sock_create_kern(&init_net, PF_UNIX, SOCK_RAW, IPPROTO_IP, |
| 3204 | &ctx->ring_sock); |
| 3205 | if (ret) |
Pavel Begunkov | 9faadcc | 2020-12-21 18:34:05 +0000 | [diff] [blame] | 3206 | return ERR_PTR(ret); |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 3207 | #endif |
| 3208 | |
Paul Moore | 91a9ab7 | 2021-02-01 19:33:52 -0500 | [diff] [blame] | 3209 | file = anon_inode_getfile_secure("[io_uring]", &io_uring_fops, ctx, |
| 3210 | O_RDWR | O_CLOEXEC, NULL); |
Pavel Begunkov | 9faadcc | 2020-12-21 18:34:05 +0000 | [diff] [blame] | 3211 | #if defined(CONFIG_UNIX) |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 3212 | if (IS_ERR(file)) { |
Pavel Begunkov | 9faadcc | 2020-12-21 18:34:05 +0000 | [diff] [blame] | 3213 | sock_release(ctx->ring_sock); |
| 3214 | ctx->ring_sock = NULL; |
| 3215 | } else { |
| 3216 | ctx->ring_sock->file = file; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 3217 | } |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 3218 | #endif |
Pavel Begunkov | 9faadcc | 2020-12-21 18:34:05 +0000 | [diff] [blame] | 3219 | return file; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 3220 | } |
| 3221 | |
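| | /* |
| | * Core of io_uring_setup(): round and validate the ring sizes, |
| | * allocate the rings, start SQPOLL offload if requested, and report |
| | * the ring layout and feature flags back to userspace. |
| | */ |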
Pavel Begunkov | c072481 | 2021-10-04 20:02:54 +0100 | [diff] [blame] | 3222 | static __cold int io_uring_create(unsigned entries, struct io_uring_params *p, |
| 3223 | struct io_uring_params __user *params) |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 3224 | { |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 3225 | struct io_ring_ctx *ctx; |
Pavel Begunkov | 9faadcc | 2020-12-21 18:34:05 +0000 | [diff] [blame] | 3226 | struct file *file; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 3227 | int ret; |
| 3228 | |
Jens Axboe | 8110c1a | 2019-12-28 15:39:54 -0700 | [diff] [blame] | 3229 | if (!entries) |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 3230 | return -EINVAL; |
Jens Axboe | 8110c1a | 2019-12-28 15:39:54 -0700 | [diff] [blame] | 3231 | if (entries > IORING_MAX_ENTRIES) { |
| 3232 | if (!(p->flags & IORING_SETUP_CLAMP)) |
| 3233 | return -EINVAL; |
| 3234 | entries = IORING_MAX_ENTRIES; |
| 3235 | } |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 3236 | |
| 3237 | /* |
| 3238 | * Use twice as many entries for the CQ ring. It's possible for the |
| 3239 | * application to drive a higher depth than the size of the SQ ring, |
| 3240 | * since the sqes are only used at submission time. This allows for |
Jens Axboe | 33a107f | 2019-10-04 12:10:03 -0600 | [diff] [blame] | 3241 | * some flexibility in overcommitting a bit. If the application has |
| 3242 | * set IORING_SETUP_CQSIZE, it will have passed in the desired number |
| 3243 | * of CQ ring entries manually. |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 3244 | */ |
| 3245 | p->sq_entries = roundup_pow_of_two(entries); |
Jens Axboe | 33a107f | 2019-10-04 12:10:03 -0600 | [diff] [blame] | 3246 | if (p->flags & IORING_SETUP_CQSIZE) { |
| 3247 | /* |
| 3248 | * If IORING_SETUP_CQSIZE is set, we do the same roundup |
| 3249 | * to a power-of-two, if it isn't already. We do NOT impose |
| 3250 | * any cq vs sq ring sizing. |
| 3251 | */ |
Joseph Qi | eb2667b3 | 2020-11-24 15:03:03 +0800 | [diff] [blame] | 3252 | if (!p->cq_entries) |
Jens Axboe | 33a107f | 2019-10-04 12:10:03 -0600 | [diff] [blame] | 3253 | return -EINVAL; |
Jens Axboe | 8110c1a | 2019-12-28 15:39:54 -0700 | [diff] [blame] | 3254 | if (p->cq_entries > IORING_MAX_CQ_ENTRIES) { |
| 3255 | if (!(p->flags & IORING_SETUP_CLAMP)) |
| 3256 | return -EINVAL; |
| 3257 | p->cq_entries = IORING_MAX_CQ_ENTRIES; |
| 3258 | } |
Joseph Qi | eb2667b3 | 2020-11-24 15:03:03 +0800 | [diff] [blame] | 3259 | p->cq_entries = roundup_pow_of_two(p->cq_entries); |
| 3260 | if (p->cq_entries < p->sq_entries) |
| 3261 | return -EINVAL; |
Jens Axboe | 33a107f | 2019-10-04 12:10:03 -0600 | [diff] [blame] | 3262 | } else { |
| 3263 | p->cq_entries = 2 * p->sq_entries; |
| 3264 | } |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 3265 | |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 3266 | ctx = io_ring_ctx_alloc(p); |
Jens Axboe | 62e398b | 2021-02-21 16:19:37 -0700 | [diff] [blame] | 3267 | if (!ctx) |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 3268 | return -ENOMEM; |
Pavel Begunkov | 773697b | 2022-03-22 14:07:57 +0000 | [diff] [blame] | 3269 | |
| 3270 | /* |
| 3271 | * When SETUP_IOPOLL and SETUP_SQPOLL are both enabled, userspace |
| 3272 | * applications don't need to poll for completion events themselves; |
| 3273 | * they can rely on io_sq_thread to do the polling work, which |
| 3274 | * reduces cpu usage and uring_lock contention. |
| 3275 | */ |
| 3276 | if (ctx->flags & IORING_SETUP_IOPOLL && |
| 3277 | !(ctx->flags & IORING_SETUP_SQPOLL)) |
| 3278 | ctx->syscall_iopoll = 1; |
| 3279 | |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 3280 | ctx->compat = in_compat_syscall(); |
Jens Axboe | 62e398b | 2021-02-21 16:19:37 -0700 | [diff] [blame] | 3281 | if (!capable(CAP_IPC_LOCK)) |
| 3282 | ctx->user = get_uid(current_user()); |
Jens Axboe | 2aede0e | 2020-09-14 10:45:53 -0600 | [diff] [blame] | 3283 | |
| 3284 | /* |
Jens Axboe | e1169f0 | 2022-04-25 19:49:03 -0600 | [diff] [blame] | 3285 | * For SQPOLL, we just need a wakeup, always. For !SQPOLL, if |
| 3286 | * COOP_TASKRUN is set, then IPIs are never needed by the app. |
Jens Axboe | 9f01050 | 2022-04-25 19:49:02 -0600 | [diff] [blame] | 3287 | */ |
Jens Axboe | e1169f0 | 2022-04-25 19:49:03 -0600 | [diff] [blame] | 3288 | ret = -EINVAL; |
| 3289 | if (ctx->flags & IORING_SETUP_SQPOLL) { |
| 3290 | /* IPI related flags don't make sense with SQPOLL */ |
Jens Axboe | ef060ea | 2022-04-25 19:49:04 -0600 | [diff] [blame] | 3291 | if (ctx->flags & (IORING_SETUP_COOP_TASKRUN | |
| 3292 | IORING_SETUP_TASKRUN_FLAG)) |
Jens Axboe | e1169f0 | 2022-04-25 19:49:03 -0600 | [diff] [blame] | 3293 | goto err; |
Jens Axboe | 9f01050 | 2022-04-25 19:49:02 -0600 | [diff] [blame] | 3294 | ctx->notify_method = TWA_SIGNAL_NO_IPI; |
Jens Axboe | e1169f0 | 2022-04-25 19:49:03 -0600 | [diff] [blame] | 3295 | } else if (ctx->flags & IORING_SETUP_COOP_TASKRUN) { |
| 3296 | ctx->notify_method = TWA_SIGNAL_NO_IPI; |
| 3297 | } else { |
Jens Axboe | ef060ea | 2022-04-25 19:49:04 -0600 | [diff] [blame] | 3298 | if (ctx->flags & IORING_SETUP_TASKRUN_FLAG) |
| 3299 | goto err; |
Jens Axboe | 9f01050 | 2022-04-25 19:49:02 -0600 | [diff] [blame] | 3300 | ctx->notify_method = TWA_SIGNAL; |
Jens Axboe | e1169f0 | 2022-04-25 19:49:03 -0600 | [diff] [blame] | 3301 | } |
Jens Axboe | 9f01050 | 2022-04-25 19:49:02 -0600 | [diff] [blame] | 3302 | |
| 3303 | /* |
Jens Axboe | 2aede0e | 2020-09-14 10:45:53 -0600 | [diff] [blame] | 3304 | * This is just grabbed for accounting purposes. When a process exits, |
| 3305 | * the mm is exited and dropped before the files, hence we need to hang |
| 3306 | * on to this mm purely for the purposes of being able to unaccount |
| 3307 | * memory (locked/pinned vm). It's not used for anything else. |
| 3308 | */ |
Jens Axboe | 6b7898e | 2020-08-25 07:58:00 -0600 | [diff] [blame] | 3309 | mmgrab(current->mm); |
Jens Axboe | 2aede0e | 2020-09-14 10:45:53 -0600 | [diff] [blame] | 3310 | ctx->mm_account = current->mm; |
Jens Axboe | 6b7898e | 2020-08-25 07:58:00 -0600 | [diff] [blame] | 3311 | |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 3312 | ret = io_allocate_scq_urings(ctx, p); |
| 3313 | if (ret) |
| 3314 | goto err; |
| 3315 | |
Stefano Garzarella | 7e84e1c | 2020-08-27 16:58:31 +0200 | [diff] [blame] | 3316 | ret = io_sq_offload_create(ctx, p); |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 3317 | if (ret) |
| 3318 | goto err; |
Pavel Begunkov | eae071c | 2021-04-25 14:32:24 +0100 | [diff] [blame] | 3319 | /* always set a rsrc node */ |
Pavel Begunkov | 47b228c | 2021-04-29 11:46:48 +0100 | [diff] [blame] | 3320 | ret = io_rsrc_node_switch_start(ctx); |
| 3321 | if (ret) |
| 3322 | goto err; |
Pavel Begunkov | eae071c | 2021-04-25 14:32:24 +0100 | [diff] [blame] | 3323 | io_rsrc_node_switch(ctx, NULL); |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 3324 | |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 3325 | memset(&p->sq_off, 0, sizeof(p->sq_off)); |
Hristo Venev | 75b28af | 2019-08-26 17:23:46 +0000 | [diff] [blame] | 3326 | p->sq_off.head = offsetof(struct io_rings, sq.head); |
| 3327 | p->sq_off.tail = offsetof(struct io_rings, sq.tail); |
| 3328 | p->sq_off.ring_mask = offsetof(struct io_rings, sq_ring_mask); |
| 3329 | p->sq_off.ring_entries = offsetof(struct io_rings, sq_ring_entries); |
| 3330 | p->sq_off.flags = offsetof(struct io_rings, sq_flags); |
| 3331 | p->sq_off.dropped = offsetof(struct io_rings, sq_dropped); |
| 3332 | p->sq_off.array = (char *)ctx->sq_array - (char *)ctx->rings; |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 3333 | |
| 3334 | memset(&p->cq_off, 0, sizeof(p->cq_off)); |
Hristo Venev | 75b28af | 2019-08-26 17:23:46 +0000 | [diff] [blame] | 3335 | p->cq_off.head = offsetof(struct io_rings, cq.head); |
| 3336 | p->cq_off.tail = offsetof(struct io_rings, cq.tail); |
| 3337 | p->cq_off.ring_mask = offsetof(struct io_rings, cq_ring_mask); |
| 3338 | p->cq_off.ring_entries = offsetof(struct io_rings, cq_ring_entries); |
| 3339 | p->cq_off.overflow = offsetof(struct io_rings, cq_overflow); |
| 3340 | p->cq_off.cqes = offsetof(struct io_rings, cqes); |
Stefano Garzarella | 0d9b5b3 | 2020-05-15 18:38:04 +0200 | [diff] [blame] | 3341 | p->cq_off.flags = offsetof(struct io_rings, cq_flags); |
Jens Axboe | ac90f24 | 2019-09-06 10:26:21 -0600 | [diff] [blame] | 3342 | |
Xiaoguang Wang | 7f13657 | 2020-05-05 16:28:53 +0800 | [diff] [blame] | 3343 | p->features = IORING_FEAT_SINGLE_MMAP | IORING_FEAT_NODROP | |
| 3344 | IORING_FEAT_SUBMIT_STABLE | IORING_FEAT_RW_CUR_POS | |
Jiufei Xue | 5769a35 | 2020-06-17 17:53:55 +0800 | [diff] [blame] | 3345 | IORING_FEAT_CUR_PERSONALITY | IORING_FEAT_FAST_POLL | |
Hao Xu | c73ebb6 | 2020-11-03 10:54:37 +0800 | [diff] [blame] | 3346 | IORING_FEAT_POLL_32BITS | IORING_FEAT_SQPOLL_NONFIXED | |
Pavel Begunkov | 9690557 | 2021-06-10 16:37:38 +0100 | [diff] [blame] | 3347 | IORING_FEAT_EXT_ARG | IORING_FEAT_NATIVE_WORKERS | |
Jens Axboe | c4212f3 | 2022-04-10 15:13:24 -0600 | [diff] [blame] | 3348 | IORING_FEAT_RSRC_TAGS | IORING_FEAT_CQE_SKIP | |
| 3349 | IORING_FEAT_LINKED_FILE; |
Xiaoguang Wang | 7f13657 | 2020-05-05 16:28:53 +0800 | [diff] [blame] | 3350 | |
| 3351 | if (copy_to_user(params, p, sizeof(*p))) { |
| 3352 | ret = -EFAULT; |
| 3353 | goto err; |
| 3354 | } |
Jens Axboe | d1719f7 | 2020-07-30 13:43:53 -0600 | [diff] [blame] | 3355 | |
Pavel Begunkov | 9faadcc | 2020-12-21 18:34:05 +0000 | [diff] [blame] | 3356 | file = io_uring_get_file(ctx); |
| 3357 | if (IS_ERR(file)) { |
| 3358 | ret = PTR_ERR(file); |
| 3359 | goto err; |
| 3360 | } |
| 3361 | |
Jens Axboe | d1719f7 | 2020-07-30 13:43:53 -0600 | [diff] [blame] | 3362 | /* |
Jens Axboe | 044c1ab | 2019-10-28 09:15:33 -0600 | [diff] [blame] | 3363 | * Install ring fd as the very last thing, so we don't risk someone |
| 3364 | * having closed it before we finish setup |
| 3365 | */ |
Pavel Begunkov | 9faadcc | 2020-12-21 18:34:05 +0000 | [diff] [blame] | 3366 | ret = io_uring_install_fd(ctx, file); |
| 3367 | if (ret < 0) { |
| 3368 | /* fput will clean it up */ |
| 3369 | fput(file); |
| 3370 | return ret; |
| 3371 | } |
Jens Axboe | 044c1ab | 2019-10-28 09:15:33 -0600 | [diff] [blame] | 3372 | |
Dmitrii Dolgov | c826bd7 | 2019-10-15 19:02:01 +0200 | [diff] [blame] | 3373 | trace_io_uring_create(ret, ctx, p->sq_entries, p->cq_entries, p->flags); |
Jens Axboe | 2b188cc | 2019-01-07 10:46:33 -0700 | [diff] [blame] | 3374 | return ret; |
| 3375 | err: |
| 3376 | io_ring_ctx_wait_and_kill(ctx); |
| 3377 | return ret; |
| 3378 | } |
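
/*
 * For reference, a minimal user-space sketch (not part of this file) of how
 * an application consumes the sq_off/cq_off offsets filled in above, in the
 * style of liburing's queue setup. It assumes the uapi <linux/io_uring.h>
 * definitions and a raw syscall(2) wrapper:
 *
 *	struct io_uring_params p = { };
 *	int fd = syscall(__NR_io_uring_setup, 8, &p);
 *
 *	size_t sq_sz = p.sq_off.array + p.sq_entries * sizeof(__u32);
 *	size_t cq_sz = p.cq_off.cqes + p.cq_entries * sizeof(struct io_uring_cqe);
 *	void *sq_ring = mmap(NULL, sq_sz, PROT_READ | PROT_WRITE,
 *			     MAP_SHARED | MAP_POPULATE, fd, IORING_OFF_SQ_RING);
 *	void *cq_ring = (p.features & IORING_FEAT_SINGLE_MMAP) ? sq_ring :
 *		mmap(NULL, cq_sz, PROT_READ | PROT_WRITE,
 *		     MAP_SHARED | MAP_POPULATE, fd, IORING_OFF_CQ_RING);
 *	struct io_uring_sqe *sqes = mmap(NULL,
 *			p.sq_entries * sizeof(struct io_uring_sqe),
 *			PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
 *			fd, IORING_OFF_SQES);
 *
 *	unsigned *sq_tail = (unsigned *)((char *)sq_ring + p.sq_off.tail);
 *	unsigned *cq_head = (unsigned *)((char *)cq_ring + p.cq_off.head);
 *	// ...and likewise for the remaining offsets
 */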

/*
 * Sets up an io_uring context, and returns the fd. The application asks for
 * a ring size; we return the actual sq/cq ring sizes (among other things) in
 * the params structure passed in.
 */
static long io_uring_setup(u32 entries, struct io_uring_params __user *params)
{
	struct io_uring_params p;
	int i;

	if (copy_from_user(&p, params, sizeof(p)))
		return -EFAULT;
	for (i = 0; i < ARRAY_SIZE(p.resv); i++) {
		if (p.resv[i])
			return -EINVAL;
	}

	if (p.flags & ~(IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL |
			IORING_SETUP_SQ_AFF | IORING_SETUP_CQSIZE |
			IORING_SETUP_CLAMP | IORING_SETUP_ATTACH_WQ |
			IORING_SETUP_R_DISABLED | IORING_SETUP_SUBMIT_ALL |
			IORING_SETUP_COOP_TASKRUN | IORING_SETUP_TASKRUN_FLAG |
			IORING_SETUP_SQE128 | IORING_SETUP_CQE32 |
			IORING_SETUP_SINGLE_ISSUER))
		return -EINVAL;

	return io_uring_create(entries, &p, params);
}

SYSCALL_DEFINE2(io_uring_setup, u32, entries,
		struct io_uring_params __user *, params)
{
	return io_uring_setup(entries, params);
}
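
/*
 * A usage sketch (not from this file): asking for a CQ ring larger than the
 * SQ ring via IORING_SETUP_CQSIZE, with IORING_SETUP_CLAMP so oversized
 * values are clamped rather than rejected. Both entry counts come back in
 * the params struct, rounded up to a power of two if needed:
 *
 *	struct io_uring_params p = { };
 *	p.flags = IORING_SETUP_CQSIZE | IORING_SETUP_CLAMP;
 *	p.cq_entries = 4096;
 *
 *	int fd = syscall(__NR_io_uring_setup, 64, &p);
 *	// on success: p.sq_entries == 64, p.cq_entries == 4096
 */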

static __cold int io_probe(struct io_ring_ctx *ctx, void __user *arg,
			   unsigned nr_args)
{
	struct io_uring_probe *p;
	size_t size;
	int i, ret;

	size = struct_size(p, ops, nr_args);
	if (size == SIZE_MAX)
		return -EOVERFLOW;
	p = kzalloc(size, GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	ret = -EFAULT;
	if (copy_from_user(p, arg, size))
		goto out;
	ret = -EINVAL;
	if (memchr_inv(p, 0, size))
		goto out;

	p->last_op = IORING_OP_LAST - 1;
	if (nr_args > IORING_OP_LAST)
		nr_args = IORING_OP_LAST;

	for (i = 0; i < nr_args; i++) {
		p->ops[i].op = i;
		if (!io_op_defs[i].not_supported)
			p->ops[i].flags = IO_URING_OP_SUPPORTED;
	}
	p->ops_len = i;

	ret = 0;
	if (copy_to_user(arg, p, size))
		ret = -EFAULT;
out:
	kfree(p);
	return ret;
}
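
/*
 * A sketch of the matching user-space side (not from this file, assumes the
 * uapi structs): io_probe() above fills ops[] for the first nr_args opcodes,
 * so an application can test for opcode support before relying on it:
 *
 *	struct io_uring_probe *probe;
 *
 *	probe = calloc(1, sizeof(*probe) +
 *			  256 * sizeof(struct io_uring_probe_op));
 *	syscall(__NR_io_uring_register, fd, IORING_REGISTER_PROBE, probe, 256);
 *	if (IORING_OP_SEND <= probe->last_op &&
 *	    (probe->ops[IORING_OP_SEND].flags & IO_URING_OP_SUPPORTED))
 *		; // IORING_OP_SEND is available on this kernel
 *	free(probe);
 */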

static int io_register_personality(struct io_ring_ctx *ctx)
{
	const struct cred *creds;
	u32 id;
	int ret;

	creds = get_current_cred();

	ret = xa_alloc_cyclic(&ctx->personalities, &id, (void *)creds,
			XA_LIMIT(0, USHRT_MAX), &ctx->pers_next, GFP_KERNEL);
	if (ret < 0) {
		put_cred(creds);
		return ret;
	}
	return id;
}
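
/*
 * Usage sketch (not from this file): the returned id captures the calling
 * task's current credentials. A later SQE can be issued with those
 * credentials by setting sqe->personality, and the id is released again by
 * passing it as nr_args to unregister:
 *
 *	int id = syscall(__NR_io_uring_register, fd,
 *			 IORING_REGISTER_PERSONALITY, NULL, 0);
 *	sqe->personality = id;	// this SQE runs with the registered creds
 *	syscall(__NR_io_uring_register, fd,
 *		IORING_UNREGISTER_PERSONALITY, NULL, id);
 */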

static __cold int io_register_restrictions(struct io_ring_ctx *ctx,
					   void __user *arg, unsigned int nr_args)
{
	struct io_uring_restriction *res;
	size_t size;
	int i, ret;

	/* Restrictions allowed only if rings started disabled */
	if (!(ctx->flags & IORING_SETUP_R_DISABLED))
		return -EBADFD;

	/* We allow only a single restrictions registration */
	if (ctx->restrictions.registered)
		return -EBUSY;

	if (!arg || nr_args > IORING_MAX_RESTRICTIONS)
		return -EINVAL;

	size = array_size(nr_args, sizeof(*res));
	if (size == SIZE_MAX)
		return -EOVERFLOW;

	res = memdup_user(arg, size);
	if (IS_ERR(res))
		return PTR_ERR(res);

	ret = 0;

	for (i = 0; i < nr_args; i++) {
		switch (res[i].opcode) {
		case IORING_RESTRICTION_REGISTER_OP:
			if (res[i].register_op >= IORING_REGISTER_LAST) {
				ret = -EINVAL;
				goto out;
			}

			__set_bit(res[i].register_op,
				  ctx->restrictions.register_op);
			break;
		case IORING_RESTRICTION_SQE_OP:
			if (res[i].sqe_op >= IORING_OP_LAST) {
				ret = -EINVAL;
				goto out;
			}

			__set_bit(res[i].sqe_op, ctx->restrictions.sqe_op);
			break;
		case IORING_RESTRICTION_SQE_FLAGS_ALLOWED:
			ctx->restrictions.sqe_flags_allowed = res[i].sqe_flags;
			break;
		case IORING_RESTRICTION_SQE_FLAGS_REQUIRED:
			ctx->restrictions.sqe_flags_required = res[i].sqe_flags;
			break;
		default:
			ret = -EINVAL;
			goto out;
		}
	}

out:
	/* Reset all restrictions if an error happened */
	if (ret != 0)
		memset(&ctx->restrictions, 0, sizeof(ctx->restrictions));
	else
		ctx->restrictions.registered = true;

	kfree(res);
	return ret;
}

static int io_register_enable_rings(struct io_ring_ctx *ctx)
{
	if (!(ctx->flags & IORING_SETUP_R_DISABLED))
		return -EBADFD;

	if (ctx->restrictions.registered)
		ctx->restricted = 1;

	ctx->flags &= ~IORING_SETUP_R_DISABLED;
	if (ctx->sq_data && wq_has_sleeper(&ctx->sq_data->wait))
		wake_up(&ctx->sq_data->wait);
	return 0;
}
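
/*
 * A combined usage sketch for the two helpers above (not from this file):
 * create the ring disabled, restrict it to a whitelist of SQE opcodes and
 * register opcodes, then enable it. From that point on the restrictions are
 * enforced:
 *
 *	struct io_uring_params p = { .flags = IORING_SETUP_R_DISABLED };
 *	int fd = syscall(__NR_io_uring_setup, 8, &p);
 *
 *	struct io_uring_restriction res[2] = { };
 *	res[0].opcode = IORING_RESTRICTION_SQE_OP;
 *	res[0].sqe_op = IORING_OP_READV;
 *	res[1].opcode = IORING_RESTRICTION_REGISTER_OP;
 *	res[1].register_op = IORING_REGISTER_BUFFERS;
 *
 *	syscall(__NR_io_uring_register, fd,
 *		IORING_REGISTER_RESTRICTIONS, res, 2);
 *	syscall(__NR_io_uring_register, fd,
 *		IORING_REGISTER_ENABLE_RINGS, NULL, 0);
 */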

static __cold int io_register_iowq_aff(struct io_ring_ctx *ctx,
				       void __user *arg, unsigned len)
{
	struct io_uring_task *tctx = current->io_uring;
	cpumask_var_t new_mask;
	int ret;

	if (!tctx || !tctx->io_wq)
		return -EINVAL;

	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_clear(new_mask);
	if (len > cpumask_size())
		len = cpumask_size();

	if (in_compat_syscall()) {
		ret = compat_get_bitmap(cpumask_bits(new_mask),
					(const compat_ulong_t __user *)arg,
					len * 8 /* CHAR_BIT */);
	} else {
		ret = copy_from_user(new_mask, arg, len);
	}

	if (ret) {
		free_cpumask_var(new_mask);
		return -EFAULT;
	}

	ret = io_wq_cpu_affinity(tctx->io_wq, new_mask);
	free_cpumask_var(new_mask);
	return ret;
}
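
/*
 * Usage sketch (not from this file): len is the byte size of the
 * user-supplied CPU bitmap, so a plain cpu_set_t can be passed directly:
 *
 *	cpu_set_t mask;
 *
 *	CPU_ZERO(&mask);
 *	CPU_SET(0, &mask);
 *	CPU_SET(1, &mask);	// pin io-wq workers to CPUs 0 and 1
 *	syscall(__NR_io_uring_register, fd, IORING_REGISTER_IOWQ_AFF,
 *		&mask, sizeof(mask));
 */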

static __cold int io_unregister_iowq_aff(struct io_ring_ctx *ctx)
{
	struct io_uring_task *tctx = current->io_uring;

	if (!tctx || !tctx->io_wq)
		return -EINVAL;

	return io_wq_cpu_affinity(tctx->io_wq, NULL);
}

static __cold int io_register_iowq_max_workers(struct io_ring_ctx *ctx,
					       void __user *arg)
	__must_hold(&ctx->uring_lock)
{
	struct io_tctx_node *node;
	struct io_uring_task *tctx = NULL;
	struct io_sq_data *sqd = NULL;
	__u32 new_count[2];
	int i, ret;

	if (copy_from_user(new_count, arg, sizeof(new_count)))
		return -EFAULT;
	for (i = 0; i < ARRAY_SIZE(new_count); i++)
		if (new_count[i] > INT_MAX)
			return -EINVAL;

	if (ctx->flags & IORING_SETUP_SQPOLL) {
		sqd = ctx->sq_data;
		if (sqd) {
			/*
			 * Observe the correct sqd->lock -> ctx->uring_lock
			 * ordering. Fine to drop uring_lock here, we hold
			 * a ref to the ctx.
			 */
			refcount_inc(&sqd->refs);
			mutex_unlock(&ctx->uring_lock);
			mutex_lock(&sqd->lock);
			mutex_lock(&ctx->uring_lock);
			if (sqd->thread)
				tctx = sqd->thread->io_uring;
		}
	} else {
		tctx = current->io_uring;
	}

	BUILD_BUG_ON(sizeof(new_count) != sizeof(ctx->iowq_limits));

	for (i = 0; i < ARRAY_SIZE(new_count); i++)
		if (new_count[i])
			ctx->iowq_limits[i] = new_count[i];
	ctx->iowq_limits_set = true;

	if (tctx && tctx->io_wq) {
		ret = io_wq_max_workers(tctx->io_wq, new_count);
		if (ret)
			goto err;
	} else {
		memset(new_count, 0, sizeof(new_count));
	}

	if (sqd) {
		mutex_unlock(&sqd->lock);
		io_put_sq_data(sqd);
	}

	if (copy_to_user(arg, new_count, sizeof(new_count)))
		return -EFAULT;

	/* that's it for SQPOLL, only the SQPOLL task creates requests */
	if (sqd)
		return 0;

	/* now propagate the restriction to all registered users */
	list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
		struct io_uring_task *tctx = node->task->io_uring;

		if (WARN_ON_ONCE(!tctx->io_wq))
			continue;

		for (i = 0; i < ARRAY_SIZE(new_count); i++)
			new_count[i] = ctx->iowq_limits[i];
		/* ignore errors, it always returns zero anyway */
		(void)io_wq_max_workers(tctx->io_wq, new_count);
	}
	return 0;
err:
	if (sqd) {
		mutex_unlock(&sqd->lock);
		io_put_sq_data(sqd);
	}
	return ret;
}
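
/*
 * Usage sketch (not from this file): index 0 caps bounded io-wq workers,
 * index 1 caps unbounded workers. A zero entry leaves that limit unchanged,
 * and the limits previously in effect are copied back into the array:
 *
 *	__u32 counts[2] = { 4, 8 };	// 4 bounded, 8 unbounded max
 *
 *	syscall(__NR_io_uring_register, fd,
 *		IORING_REGISTER_IOWQ_MAX_WORKERS, counts, 2);
 *	// counts[] now holds the old limits
 */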

static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
			       void __user *arg, unsigned nr_args)
	__releases(ctx->uring_lock)
	__acquires(ctx->uring_lock)
{
	int ret;

	/*
	 * We don't quiesce the refs for register anymore and so it can't be
	 * dying as we're holding a file ref here.
	 */
	if (WARN_ON_ONCE(percpu_ref_is_dying(&ctx->refs)))
		return -ENXIO;

	if (ctx->restricted) {
		if (opcode >= IORING_REGISTER_LAST)
			return -EINVAL;
		opcode = array_index_nospec(opcode, IORING_REGISTER_LAST);
		if (!test_bit(opcode, ctx->restrictions.register_op))
			return -EACCES;
	}

	switch (opcode) {
	case IORING_REGISTER_BUFFERS:
		ret = -EFAULT;
		if (!arg)
			break;
		ret = io_sqe_buffers_register(ctx, arg, nr_args, NULL);
		break;
	case IORING_UNREGISTER_BUFFERS:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_sqe_buffers_unregister(ctx);
		break;
	case IORING_REGISTER_FILES:
		ret = -EFAULT;
		if (!arg)
			break;
		ret = io_sqe_files_register(ctx, arg, nr_args, NULL);
		break;
	case IORING_UNREGISTER_FILES:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_sqe_files_unregister(ctx);
		break;
	case IORING_REGISTER_FILES_UPDATE:
		ret = io_register_files_update(ctx, arg, nr_args);
		break;
	case IORING_REGISTER_EVENTFD:
		ret = -EINVAL;
		if (nr_args != 1)
			break;
		ret = io_eventfd_register(ctx, arg, 0);
		break;
	case IORING_REGISTER_EVENTFD_ASYNC:
		ret = -EINVAL;
		if (nr_args != 1)
			break;
		ret = io_eventfd_register(ctx, arg, 1);
		break;
	case IORING_UNREGISTER_EVENTFD:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_eventfd_unregister(ctx);
		break;
	case IORING_REGISTER_PROBE:
		ret = -EINVAL;
		if (!arg || nr_args > 256)
			break;
		ret = io_probe(ctx, arg, nr_args);
		break;
	case IORING_REGISTER_PERSONALITY:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_register_personality(ctx);
		break;
	case IORING_UNREGISTER_PERSONALITY:
		ret = -EINVAL;
		if (arg)
			break;
		ret = io_unregister_personality(ctx, nr_args);
		break;
	case IORING_REGISTER_ENABLE_RINGS:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_register_enable_rings(ctx);
		break;
	case IORING_REGISTER_RESTRICTIONS:
		ret = io_register_restrictions(ctx, arg, nr_args);
		break;
	case IORING_REGISTER_FILES2:
		ret = io_register_rsrc(ctx, arg, nr_args, IORING_RSRC_FILE);
		break;
	case IORING_REGISTER_FILES_UPDATE2:
		ret = io_register_rsrc_update(ctx, arg, nr_args,
					      IORING_RSRC_FILE);
		break;
	case IORING_REGISTER_BUFFERS2:
		ret = io_register_rsrc(ctx, arg, nr_args, IORING_RSRC_BUFFER);
		break;
	case IORING_REGISTER_BUFFERS_UPDATE:
		ret = io_register_rsrc_update(ctx, arg, nr_args,
					      IORING_RSRC_BUFFER);
		break;
	case IORING_REGISTER_IOWQ_AFF:
		ret = -EINVAL;
		if (!arg || !nr_args)
			break;
		ret = io_register_iowq_aff(ctx, arg, nr_args);
		break;
	case IORING_UNREGISTER_IOWQ_AFF:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_unregister_iowq_aff(ctx);
		break;
	case IORING_REGISTER_IOWQ_MAX_WORKERS:
		ret = -EINVAL;
		if (!arg || nr_args != 2)
			break;
		ret = io_register_iowq_max_workers(ctx, arg);
		break;
	case IORING_REGISTER_RING_FDS:
		ret = io_ringfd_register(ctx, arg, nr_args);
		break;
	case IORING_UNREGISTER_RING_FDS:
		ret = io_ringfd_unregister(ctx, arg, nr_args);
		break;
	case IORING_REGISTER_PBUF_RING:
		ret = -EINVAL;
		if (!arg || nr_args != 1)
			break;
		ret = io_register_pbuf_ring(ctx, arg);
		break;
	case IORING_UNREGISTER_PBUF_RING:
		ret = -EINVAL;
		if (!arg || nr_args != 1)
			break;
		ret = io_unregister_pbuf_ring(ctx, arg);
		break;
	case IORING_REGISTER_SYNC_CANCEL:
		ret = -EINVAL;
		if (!arg || nr_args != 1)
			break;
		ret = io_sync_cancel(ctx, arg);
		break;
	case IORING_REGISTER_FILE_ALLOC_RANGE:
		ret = -EINVAL;
		if (!arg || nr_args)
			break;
		ret = io_register_file_alloc_range(ctx, arg);
		break;
	case IORING_REGISTER_NOTIFIERS:
		ret = io_notif_register(ctx, arg, nr_args);
		break;
	case IORING_UNREGISTER_NOTIFIERS:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_notif_unregister(ctx);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode,
		void __user *, arg, unsigned int, nr_args)
{
	struct io_ring_ctx *ctx;
	long ret = -EBADF;
	struct fd f;

	f = fdget(fd);
	if (!f.file)
		return -EBADF;

	ret = -EOPNOTSUPP;
	if (!io_is_uring_fops(f.file))
		goto out_fput;

	ctx = f.file->private_data;

	io_run_task_work();

	mutex_lock(&ctx->uring_lock);
	ret = __io_uring_register(ctx, opcode, arg, nr_args);
	mutex_unlock(&ctx->uring_lock);
	trace_io_uring_register(ctx, opcode, ctx->nr_user_files, ctx->nr_user_bufs, ret);
out_fput:
	fdput(f);
	return ret;
}
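
/*
 * For the IORING_REGISTER_RING_FDS case above, a usage sketch (not from this
 * file): registering the ring fd itself saves the fdget/fdput pair on every
 * io_uring_enter() call. The kernel writes the allocated offset back, and
 * that offset is then passed in place of the fd together with
 * IORING_ENTER_REGISTERED_RING:
 *
 *	struct io_uring_rsrc_update up = { .offset = -1U, .data = fd };
 *
 *	syscall(__NR_io_uring_register, fd, IORING_REGISTER_RING_FDS, &up, 1);
 *	syscall(__NR_io_uring_enter, up.offset, to_submit, 0,
 *		IORING_ENTER_REGISTERED_RING, NULL, 0);
 */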

static int __init io_uring_init(void)
{
#define __BUILD_BUG_VERIFY_OFFSET_SIZE(stype, eoffset, esize, ename) do { \
	BUILD_BUG_ON(offsetof(stype, ename) != eoffset); \
	BUILD_BUG_ON(sizeof_field(stype, ename) != esize); \
} while (0)

#define BUILD_BUG_SQE_ELEM(eoffset, etype, ename) \
	__BUILD_BUG_VERIFY_OFFSET_SIZE(struct io_uring_sqe, eoffset, sizeof(etype), ename)
#define BUILD_BUG_SQE_ELEM_SIZE(eoffset, esize, ename) \
	__BUILD_BUG_VERIFY_OFFSET_SIZE(struct io_uring_sqe, eoffset, esize, ename)
	BUILD_BUG_ON(sizeof(struct io_uring_sqe) != 64);
	BUILD_BUG_SQE_ELEM(0, __u8, opcode);
	BUILD_BUG_SQE_ELEM(1, __u8, flags);
	BUILD_BUG_SQE_ELEM(2, __u16, ioprio);
	BUILD_BUG_SQE_ELEM(4, __s32, fd);
	BUILD_BUG_SQE_ELEM(8, __u64, off);
	BUILD_BUG_SQE_ELEM(8, __u64, addr2);
	BUILD_BUG_SQE_ELEM(8, __u32, cmd_op);
	BUILD_BUG_SQE_ELEM(12, __u32, __pad1);
	BUILD_BUG_SQE_ELEM(16, __u64, addr);
	BUILD_BUG_SQE_ELEM(16, __u64, splice_off_in);
	BUILD_BUG_SQE_ELEM(24, __u32, len);
	BUILD_BUG_SQE_ELEM(28, __kernel_rwf_t, rw_flags);
	BUILD_BUG_SQE_ELEM(28, /* compat */ int, rw_flags);
	BUILD_BUG_SQE_ELEM(28, /* compat */ __u32, rw_flags);
	BUILD_BUG_SQE_ELEM(28, __u32, fsync_flags);
	BUILD_BUG_SQE_ELEM(28, /* compat */ __u16, poll_events);
	BUILD_BUG_SQE_ELEM(28, __u32, poll32_events);
	BUILD_BUG_SQE_ELEM(28, __u32, sync_range_flags);
	BUILD_BUG_SQE_ELEM(28, __u32, msg_flags);
	BUILD_BUG_SQE_ELEM(28, __u32, timeout_flags);
	BUILD_BUG_SQE_ELEM(28, __u32, accept_flags);
	BUILD_BUG_SQE_ELEM(28, __u32, cancel_flags);
	BUILD_BUG_SQE_ELEM(28, __u32, open_flags);
	BUILD_BUG_SQE_ELEM(28, __u32, statx_flags);
	BUILD_BUG_SQE_ELEM(28, __u32, fadvise_advice);
	BUILD_BUG_SQE_ELEM(28, __u32, splice_flags);
	BUILD_BUG_SQE_ELEM(28, __u32, rename_flags);
	BUILD_BUG_SQE_ELEM(28, __u32, unlink_flags);
	BUILD_BUG_SQE_ELEM(28, __u32, hardlink_flags);
	BUILD_BUG_SQE_ELEM(28, __u32, xattr_flags);
	BUILD_BUG_SQE_ELEM(28, __u32, msg_ring_flags);
	BUILD_BUG_SQE_ELEM(32, __u64, user_data);
	BUILD_BUG_SQE_ELEM(40, __u16, buf_index);
	BUILD_BUG_SQE_ELEM(40, __u16, buf_group);
	BUILD_BUG_SQE_ELEM(42, __u16, personality);
	BUILD_BUG_SQE_ELEM(44, __s32, splice_fd_in);
	BUILD_BUG_SQE_ELEM(44, __u32, file_index);
	BUILD_BUG_SQE_ELEM(44, __u16, notification_idx);
	BUILD_BUG_SQE_ELEM(46, __u16, addr_len);
	BUILD_BUG_SQE_ELEM(48, __u64, addr3);
	BUILD_BUG_SQE_ELEM_SIZE(48, 0, cmd);
	BUILD_BUG_SQE_ELEM(56, __u64, __pad2);

	BUILD_BUG_ON(sizeof(struct io_uring_files_update) !=
		     sizeof(struct io_uring_rsrc_update));
	BUILD_BUG_ON(sizeof(struct io_uring_rsrc_update) >
		     sizeof(struct io_uring_rsrc_update2));

	/* ->buf_index is u16 */
	BUILD_BUG_ON(offsetof(struct io_uring_buf_ring, bufs) != 0);
	BUILD_BUG_ON(offsetof(struct io_uring_buf, resv) !=
		     offsetof(struct io_uring_buf_ring, tail));

	/* should fit into one byte */
	BUILD_BUG_ON(SQE_VALID_FLAGS >= (1 << 8));
	BUILD_BUG_ON(SQE_COMMON_FLAGS >= (1 << 8));
	BUILD_BUG_ON((SQE_VALID_FLAGS | SQE_COMMON_FLAGS) != SQE_VALID_FLAGS);

	BUILD_BUG_ON(__REQ_F_LAST_BIT > 8 * sizeof(int));

	BUILD_BUG_ON(sizeof(atomic_t) != sizeof(u32));

	io_uring_optable_init();

	req_cachep = KMEM_CACHE(io_kiocb, SLAB_HWCACHE_ALIGN | SLAB_PANIC |
				SLAB_ACCOUNT);
	return 0;
}
__initcall(io_uring_init);