#ifndef IOU_CORE_H
#define IOU_CORE_H

#include <linux/errno.h>
#include <linux/lockdep.h>
#include <linux/resume_user_mode.h>
#include <linux/kasan.h>
#include <linux/poll.h>
#include <linux/io_uring_types.h>
#include <uapi/linux/eventpoll.h>
#include "io-wq.h"
#include "slist.h"
#include "filetable.h"

#ifndef CREATE_TRACE_POINTS
#include <trace/events/io_uring.h>
#endif

enum {
	IOU_OK			= 0,
	IOU_ISSUE_SKIP_COMPLETE	= -EIOCBQUEUED,

	/*
	 * Requeue the task_work to restart operations on this request. The
	 * actual value isn't important; it just must not be an otherwise
	 * valid error code, yet stay within the -MAX_ERRNO range so it
	 * remains valid internally.
	 */
	IOU_REQUEUE		= -3072,

	/*
	 * Intended only when IO_URING_F_MULTISHOT is passed, to indicate to
	 * the poll runner that multishot should be removed and the result
	 * set on req->cqe.res.
	 */
	IOU_STOP_MULTISHOT	= -ECANCELED,
};

struct io_wait_queue {
	struct wait_queue_entry wq;
	struct io_ring_ctx *ctx;
	unsigned cq_tail;
	unsigned cq_min_tail;
	unsigned nr_timeouts;
	int hit_timeout;
	ktime_t min_timeout;
	ktime_t timeout;
	struct hrtimer t;

#ifdef CONFIG_NET_RX_BUSY_POLL
	ktime_t napi_busy_poll_dt;
	bool napi_prefer_busy_poll;
#endif
};

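/*
 * The tail values are free-running u32 counters, so taking the difference
 * as a signed int yields the number of CQEs posted relative to the wait
 * target, correctly across wraparound.
 */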
static inline bool io_should_wake(struct io_wait_queue *iowq)
{
	struct io_ring_ctx *ctx = iowq->ctx;
	int dist = READ_ONCE(ctx->rings->cq.tail) - (int) iowq->cq_tail;

	/*
	 * Wake up if we have enough events, or if a timeout occurred since we
	 * started waiting. For timeouts, we always want to return to userspace,
	 * regardless of event count.
	 */
	return dist >= 0 || atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts;
}

bool io_cqe_cache_refill(struct io_ring_ctx *ctx, bool overflow);
int io_run_task_work_sig(struct io_ring_ctx *ctx);
void io_req_defer_failed(struct io_kiocb *req, s32 res);
bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags);
void io_add_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags);
bool io_req_post_cqe(struct io_kiocb *req, s32 res, u32 cflags);
void __io_commit_cqring_flush(struct io_ring_ctx *ctx);

struct file *io_file_get_normal(struct io_kiocb *req, int fd);
struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
			       unsigned issue_flags);

void __io_req_task_work_add(struct io_kiocb *req, unsigned flags);
void io_req_task_work_add_remote(struct io_kiocb *req, struct io_ring_ctx *ctx,
				 unsigned flags);
bool io_alloc_async_data(struct io_kiocb *req);
void io_req_task_queue(struct io_kiocb *req);
void io_req_task_complete(struct io_kiocb *req, struct io_tw_state *ts);
void io_req_task_queue_fail(struct io_kiocb *req, int ret);
void io_req_task_submit(struct io_kiocb *req, struct io_tw_state *ts);
struct llist_node *io_handle_tw_list(struct llist_node *node, unsigned int *count, unsigned int max_entries);
struct llist_node *tctx_task_work_run(struct io_uring_task *tctx, unsigned int max_entries, unsigned int *count);
void tctx_task_work(struct callback_head *cb);
__cold void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd);
int io_uring_alloc_task_context(struct task_struct *task,
				struct io_ring_ctx *ctx);

int io_ring_add_registered_file(struct io_uring_task *tctx, struct file *file,
				int start, int end);
void io_req_queue_iowq(struct io_kiocb *req);

int io_poll_issue(struct io_kiocb *req, struct io_tw_state *ts);
int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr);
int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin);
void __io_submit_flush_completions(struct io_ring_ctx *ctx);

struct io_wq_work *io_wq_free_work(struct io_wq_work *work);
void io_wq_submit_work(struct io_wq_work *work);

void io_free_req(struct io_kiocb *req);
void io_queue_next(struct io_kiocb *req);
void io_task_refs_refill(struct io_uring_task *tctx);
bool __io_alloc_req_refill(struct io_ring_ctx *ctx);

bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
			bool cancel_all);

void io_activate_pollwq(struct io_ring_ctx *ctx);

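/*
 * Assert that the CQ is locked for posting, according to how the ring was
 * set up; compiled out unless CONFIG_PROVE_LOCKING is enabled.
 */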
static inline void io_lockdep_assert_cq_locked(struct io_ring_ctx *ctx)
{
#if defined(CONFIG_PROVE_LOCKING)
	lockdep_assert(in_task());

	if (ctx->flags & IORING_SETUP_IOPOLL) {
		lockdep_assert_held(&ctx->uring_lock);
	} else if (!ctx->task_complete) {
		lockdep_assert_held(&ctx->completion_lock);
	} else if (ctx->submitter_task) {
		/*
		 * ->submitter_task may be NULL and we can still post a CQE
		 * if the ring has been set up with IORING_SETUP_R_DISABLED.
		 * Not from an SQE, as those cannot be submitted, but via
		 * updating tagged resources.
		 */
		if (ctx->submitter_task->flags & PF_EXITING)
			lockdep_assert(current_work());
		else
			lockdep_assert(current == ctx->submitter_task);
	}
#endif
}

static inline void io_req_task_work_add(struct io_kiocb *req)
{
	__io_req_task_work_add(req, 0);
}

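/*
 * Flush batched completions: either requests are queued on the deferred
 * completion list, or CQEs were posted directly and the CQ ring still
 * needs a commit and wakeup (->cq_flush).
 */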
static inline void io_submit_flush_completions(struct io_ring_ctx *ctx)
{
	if (!wq_list_empty(&ctx->submit_state.compl_reqs) ||
	    ctx->submit_state.cq_flush)
		__io_submit_flush_completions(ctx);
}

#define io_for_each_link(pos, head) \
	for (pos = (head); pos; pos = pos->link)

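/*
 * Hand out the next free CQE slot, refilling the cached range from the
 * ring when it's exhausted. Returns false if no slot could be obtained;
 * a 32-byte CQE consumes two regular slots.
 */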
static inline bool io_get_cqe_overflow(struct io_ring_ctx *ctx,
				       struct io_uring_cqe **ret,
				       bool overflow)
{
	io_lockdep_assert_cq_locked(ctx);

	if (unlikely(ctx->cqe_cached >= ctx->cqe_sentinel)) {
		if (unlikely(!io_cqe_cache_refill(ctx, overflow)))
			return false;
	}
	*ret = ctx->cqe_cached;
	ctx->cached_cq_tail++;
	ctx->cqe_cached++;
	if (ctx->flags & IORING_SETUP_CQE32)
		ctx->cqe_cached++;
	return true;
}

static inline bool io_get_cqe(struct io_ring_ctx *ctx, struct io_uring_cqe **ret)
{
	return io_get_cqe_overflow(ctx, ret, false);
}

static __always_inline bool io_fill_cqe_req(struct io_ring_ctx *ctx,
					    struct io_kiocb *req)
{
	struct io_uring_cqe *cqe;

	/*
	 * If we can't get a cq entry, userspace overflowed the
	 * submission (by quite a lot). Increment the overflow count in
	 * the ring.
	 */
	if (unlikely(!io_get_cqe(ctx, &cqe)))
		return false;

	if (trace_io_uring_complete_enabled())
		trace_io_uring_complete(req->ctx, req, req->cqe.user_data,
					req->cqe.res, req->cqe.flags,
					req->big_cqe.extra1, req->big_cqe.extra2);

	memcpy(cqe, &req->cqe, sizeof(*cqe));
	if (ctx->flags & IORING_SETUP_CQE32) {
		memcpy(cqe->big_cqe, &req->big_cqe, sizeof(*cqe));
		memset(&req->big_cqe, 0, sizeof(req->big_cqe));
	}
	return true;
}

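/*
 * Mark a request as failed. A failed request must always post a CQE, so
 * if CQE skipping was requested, transfer the skip to the rest of the
 * link chain instead.
 */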
static inline void req_set_fail(struct io_kiocb *req)
{
	req->flags |= REQ_F_FAIL;
	if (req->flags & REQ_F_CQE_SKIP) {
		req->flags &= ~REQ_F_CQE_SKIP;
		req->flags |= REQ_F_SKIP_LINK_CQES;
	}
}

static inline void io_req_set_res(struct io_kiocb *req, s32 res, u32 cflags)
{
	req->cqe.res = res;
	req->cqe.flags = cflags;
}

static inline bool req_has_async_data(struct io_kiocb *req)
{
	return req->flags & REQ_F_ASYNC_DATA;
}

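/* Drop the request's file reference, unless it is a registered (fixed) file. */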
static inline void io_put_file(struct io_kiocb *req)
{
	if (!(req->flags & REQ_F_FIXED_FILE) && req->file)
		fput(req->file);
}

static inline void io_ring_submit_unlock(struct io_ring_ctx *ctx,
					 unsigned issue_flags)
{
	lockdep_assert_held(&ctx->uring_lock);
	if (unlikely(issue_flags & IO_URING_F_UNLOCKED))
		mutex_unlock(&ctx->uring_lock);
}

static inline void io_ring_submit_lock(struct io_ring_ctx *ctx,
				       unsigned issue_flags)
{
	/*
	 * "Normal" inline submissions always hold the uring_lock, since we
	 * grab it from the system call. Same is true for the SQPOLL offload.
	 * The only exception is when we've detached the request and issue it
	 * from an async worker thread; grab the lock in that case.
	 */
	if (unlikely(issue_flags & IO_URING_F_UNLOCKED))
		mutex_lock(&ctx->uring_lock);
	lockdep_assert_held(&ctx->uring_lock);
}

static inline void io_commit_cqring(struct io_ring_ctx *ctx)
{
	/* order cqe stores with ring update */
	smp_store_release(&ctx->rings->cq.tail, ctx->cached_cq_tail);
}

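/* Wake any tasks polling on the ring's file descriptor itself. */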
static inline void io_poll_wq_wake(struct io_ring_ctx *ctx)
{
	if (wq_has_sleeper(&ctx->poll_wq))
		__wake_up(&ctx->poll_wq, TASK_NORMAL, 0,
				poll_to_key(EPOLL_URING_WAKE | EPOLLIN));
}

static inline void io_cqring_wake(struct io_ring_ctx *ctx)
{
	/*
	 * Trigger waitqueue handler on all waiters on our waitqueue. This
	 * won't necessarily wake up all the tasks; io_should_wake() will make
	 * that decision.
	 *
	 * Pass in EPOLLIN|EPOLL_URING_WAKE as the poll wakeup key. The latter
	 * is set in the mask so that if we recurse back into our own poll
	 * waitqueue handlers, we know we have a dependency between eventfd or
	 * epoll and should terminate multishot poll at that point.
	 */
	if (wq_has_sleeper(&ctx->cq_wait))
		__wake_up(&ctx->cq_wait, TASK_NORMAL, 0,
				poll_to_key(EPOLL_URING_WAKE | EPOLLIN));
}

static inline bool io_sqring_full(struct io_ring_ctx *ctx)
{
	struct io_rings *r = ctx->rings;

	return READ_ONCE(r->sq.tail) - ctx->cached_sq_head == ctx->sq_entries;
}

static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
{
	struct io_rings *rings = ctx->rings;
	unsigned int entries;

	/* make sure SQ entry isn't read before tail */
	entries = smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
	return min(entries, ctx->sq_entries);
}

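/*
 * Run any pending task_work for the current task. Returns true if any
 * work was run.
 */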
static inline int io_run_task_work(void)
{
	bool ret = false;

	/*
	 * Always check-and-clear the task_work notification signal. With how
	 * signaling works for task_work, we can find it set with nothing to
	 * run. We need to clear it for that case, like get_signal() does.
	 */
	if (test_thread_flag(TIF_NOTIFY_SIGNAL))
		clear_notify_signal();
	/*
	 * PF_IO_WORKER never returns to userspace, so check here if we have
	 * notify work that needs processing.
	 */
	if (current->flags & PF_IO_WORKER) {
		if (test_thread_flag(TIF_NOTIFY_RESUME)) {
			__set_current_state(TASK_RUNNING);
			resume_user_mode_work(NULL);
		}
		if (current->io_uring) {
			unsigned int count = 0;

			tctx_task_work_run(current->io_uring, UINT_MAX, &count);
			if (count)
				ret = true;
		}
	}
	if (task_work_pending(current)) {
		__set_current_state(TASK_RUNNING);
		task_work_run();
		ret = true;
	}

	return ret;
}

static inline bool io_task_work_pending(struct io_ring_ctx *ctx)
{
	return task_work_pending(current) || !llist_empty(&ctx->work_llist);
}

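/* task_work always runs with the ring mutex held, so just assert that. */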
static inline void io_tw_lock(struct io_ring_ctx *ctx, struct io_tw_state *ts)
{
	lockdep_assert_held(&ctx->uring_lock);
}

/*
 * Don't complete immediately but use deferred completion infrastructure.
 * Protected by ->uring_lock and can only be used either with
 * IO_URING_F_COMPLETE_DEFER or inside a tw handler holding the mutex.
 */
static inline void io_req_complete_defer(struct io_kiocb *req)
	__must_hold(&req->ctx->uring_lock)
{
	struct io_submit_state *state = &req->ctx->submit_state;

	lockdep_assert_held(&req->ctx->uring_lock);

	wq_list_add_tail(&req->comp_list, &state->compl_reqs);
}

static inline void io_commit_cqring_flush(struct io_ring_ctx *ctx)
{
	if (unlikely(ctx->off_timeout_used || ctx->drain_active ||
		     ctx->has_evfd || ctx->poll_activated))
		__io_commit_cqring_flush(ctx);
}

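/*
 * Consume @nr cached task references for the current task, topping the
 * cache up when it runs dry.
 */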
static inline void io_get_task_refs(int nr)
{
	struct io_uring_task *tctx = current->io_uring;

	tctx->cached_refs -= nr;
	if (unlikely(tctx->cached_refs < 0))
		io_task_refs_refill(tctx);
}

static inline bool io_req_cache_empty(struct io_ring_ctx *ctx)
{
	return !ctx->submit_state.free_list.next;
}

extern struct kmem_cache *req_cachep;
extern struct kmem_cache *io_buf_cachep;

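/* Pop a request off the free list; the caller must have checked it's non-empty. */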
static inline struct io_kiocb *io_extract_req(struct io_ring_ctx *ctx)
{
	struct io_kiocb *req;

	req = container_of(ctx->submit_state.free_list.next, struct io_kiocb, comp_list);
	wq_stack_extract(&ctx->submit_state.free_list);
	return req;
}

static inline bool io_alloc_req(struct io_ring_ctx *ctx, struct io_kiocb **req)
{
	if (unlikely(io_req_cache_empty(ctx))) {
		if (!__io_alloc_req_refill(ctx))
			return false;
	}
	*req = io_extract_req(ctx);
	return true;
}

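/*
 * With IORING_SETUP_DEFER_TASKRUN, only the task that set up the ring may
 * run its deferred task_work; these helpers check for that.
 */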
static inline bool io_allowed_defer_tw_run(struct io_ring_ctx *ctx)
{
	return likely(ctx->submitter_task == current);
}

static inline bool io_allowed_run_tw(struct io_ring_ctx *ctx)
{
	return likely(!(ctx->flags & IORING_SETUP_DEFER_TASKRUN) ||
		      ctx->submitter_task == current);
}

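/* Set the result and punt completion of the request to task_work. */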
static inline void io_req_queue_tw_complete(struct io_kiocb *req, s32 res)
{
	io_req_set_res(req, res, 0);
	req->io_task_work.func = io_req_task_complete;
	io_req_task_work_add(req);
}

/*
 * IORING_SETUP_SQE128 contexts allocate twice the normal SQE size for each
 * slot.
 */
static inline size_t uring_sqe_size(struct io_ring_ctx *ctx)
{
	if (ctx->flags & IORING_SETUP_SQE128)
		return 2 * sizeof(struct io_uring_sqe);
	return sizeof(struct io_uring_sqe);
}

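/*
 * Whether the request's file supports poll, caching the result of
 * file_can_poll() in the request flags so the check is only done once.
 */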
static inline bool io_file_can_poll(struct io_kiocb *req)
{
	if (req->flags & REQ_F_CAN_POLL)
		return true;
	if (req->file && file_can_poll(req->file)) {
		req->flags |= REQ_F_CAN_POLL;
		return true;
	}
	return false;
}

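/* Read the current time from whichever clock source the ring was set up with. */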
static inline ktime_t io_get_time(struct io_ring_ctx *ctx)
{
	if (ctx->clockid == CLOCK_MONOTONIC)
		return ktime_get();

	return ktime_get_with_offset(ctx->clock_offset);
}

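/* bits for ctx->check_cq */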
enum {
	IO_CHECK_CQ_OVERFLOW_BIT,
	IO_CHECK_CQ_DROPPED_BIT,
};

static inline bool io_has_work(struct io_ring_ctx *ctx)
{
	return test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq) ||
	       !llist_empty(&ctx->work_llist);
}
#endif