// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/nospec.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "rsrc.h"
#include "filetable.h"
#include "alloc_cache.h"
#include "msg_ring.h"

/* All valid masks for MSG_RING */
#define IORING_MSG_RING_MASK		(IORING_MSG_RING_CQE_SKIP | \
					 IORING_MSG_RING_FLAGS_PASS)

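/*
 * Usage sketch (illustrative, not part of this file): with liburing, a
 * sender posts a CQE into another ring roughly like this, where
 * target_ring_fd names the receiving ring and src_ring is assumed to be
 * an initialized struct io_uring:
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&src_ring);
 *
 *	io_uring_prep_msg_ring(sqe, target_ring_fd, 0x20, 0xcafe, 0);
 *	io_uring_submit(&src_ring);
 *
 * The target ring then observes a CQE with res == 0x20 and
 * user_data == 0xcafe, produced by io_msg_ring_data() below.
 */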
struct io_msg {
	struct file			*file;
	struct file			*src_file;
	struct callback_head		tw;
	u64 user_data;
	u32 len;
	u32 cmd;
	u32 src_fd;
	union {
		u32 dst_fd;
		u32 cqe_flags;
	};
	u32 flags;
};

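/* Unlock the target ring after a successful io_double_lock_ctx() */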
static void io_double_unlock_ctx(struct io_ring_ctx *octx)
{
	mutex_unlock(&octx->uring_lock);
}

static int io_double_lock_ctx(struct io_ring_ctx *octx,
			      unsigned int issue_flags)
{
	/*
	 * To ensure proper ordering between the two ctxs, we can only
	 * attempt a trylock on the target. If that fails and we already have
	 * the source ctx lock, punt to io-wq.
	 */
	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
		if (!mutex_trylock(&octx->uring_lock))
			return -EAGAIN;
		return 0;
	}
	mutex_lock(&octx->uring_lock);
	return 0;
}

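/* Drop the file reference grabbed for a SEND_FD request that never installed */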
void io_msg_ring_cleanup(struct io_kiocb *req)
{
	struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);

	if (WARN_ON_ONCE(!msg->src_file))
		return;

	fput(msg->src_file);
	msg->src_file = NULL;
}

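/*
 * The target ring completes CQEs from its submitter task only (e.g. with
 * IORING_SETUP_DEFER_TASKRUN), so posting must be punted to that task.
 */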
static inline bool io_msg_need_remote(struct io_ring_ctx *target_ctx)
{
	return target_ctx->task_complete;
}

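/*
 * Runs as task_work on the target ring: post the prepared CQE, then try to
 * recycle the carrier request into the msg_cache before dropping the ctx
 * reference taken in io_msg_remote_post().
 */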
static void io_msg_tw_complete(struct io_kiocb *req, struct io_tw_state *ts)
{
	struct io_ring_ctx *ctx = req->ctx;

	io_add_aux_cqe(ctx, req->cqe.user_data, req->cqe.res, req->cqe.flags);
	if (spin_trylock(&ctx->msg_lock)) {
		if (io_alloc_cache_put(&ctx->msg_cache, req))
			req = NULL;
		spin_unlock(&ctx->msg_lock);
	}
	if (req)
		kmem_cache_free(req_cachep, req);
	percpu_ref_put(&ctx->refs);
}

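/*
 * Fill in the CQE fields on the carrier request and queue it as task_work
 * on the target ring's submitter task. A ctx reference pins the target
 * until io_msg_tw_complete() has run.
 */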
static int io_msg_remote_post(struct io_ring_ctx *ctx, struct io_kiocb *req,
			      int res, u32 cflags, u64 user_data)
{
	req->task = READ_ONCE(ctx->submitter_task);
	if (!req->task) {
		kmem_cache_free(req_cachep, req);
		return -EOWNERDEAD;
	}
	req->cqe.user_data = user_data;
	io_req_set_res(req, res, cflags);
	percpu_ref_get(&ctx->refs);
	req->ctx = ctx;
	req->io_task_work.func = io_msg_tw_complete;
	io_req_task_work_add_remote(req, ctx, IOU_F_TWQ_LAZY_WAKE);
	return 0;
}

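/*
 * Grab a carrier request, preferably one recycled via the msg_cache. The
 * trylock keeps this callable from any context; on contention we fall
 * back to a fresh slab allocation.
 */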
static struct io_kiocb *io_msg_get_kiocb(struct io_ring_ctx *ctx)
{
	struct io_kiocb *req = NULL;

	if (spin_trylock(&ctx->msg_lock)) {
		req = io_alloc_cache_get(&ctx->msg_cache);
		spin_unlock(&ctx->msg_lock);
	}
	if (req)
		return req;
	return kmem_cache_alloc(req_cachep, GFP_KERNEL | __GFP_NOWARN);
}

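/*
 * Remote posting path for IORING_MSG_DATA: package user_data/len/flags
 * into a carrier request and hand it to the target ring's task.
 */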
static int io_msg_data_remote(struct io_kiocb *req)
{
	struct io_ring_ctx *target_ctx = req->file->private_data;
	struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
	struct io_kiocb *target;
	u32 flags = 0;

	target = io_msg_get_kiocb(req->ctx);
	if (unlikely(!target))
		return -ENOMEM;

	if (msg->flags & IORING_MSG_RING_FLAGS_PASS)
		flags = msg->cqe_flags;

	return io_msg_remote_post(target_ctx, target, msg->len, flags,
				  msg->user_data);
}

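/*
 * IORING_MSG_DATA: post a CQE with the given user_data and len to the
 * target ring, either directly or, if the target completes CQEs from its
 * own task, via the remote task_work path. IOPOLL targets need their
 * uring_lock held across the CQE posting.
 */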
static int io_msg_ring_data(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_ring_ctx *target_ctx = req->file->private_data;
	struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
	u32 flags = 0;
	int ret;

	if (msg->src_fd || msg->flags & ~IORING_MSG_RING_FLAGS_PASS)
		return -EINVAL;
	if (!(msg->flags & IORING_MSG_RING_FLAGS_PASS) && msg->dst_fd)
		return -EINVAL;
	if (target_ctx->flags & IORING_SETUP_R_DISABLED)
		return -EBADFD;

	if (io_msg_need_remote(target_ctx))
		return io_msg_data_remote(req);

	if (msg->flags & IORING_MSG_RING_FLAGS_PASS)
		flags = msg->cqe_flags;

	ret = -EOVERFLOW;
	if (target_ctx->flags & IORING_SETUP_IOPOLL) {
		if (unlikely(io_double_lock_ctx(target_ctx, issue_flags)))
			return -EAGAIN;
	}
	if (io_post_aux_cqe(target_ctx, msg->user_data, msg->len, flags))
		ret = 0;
	if (target_ctx->flags & IORING_SETUP_IOPOLL)
		io_double_unlock_ctx(target_ctx);
	return ret;
}

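/*
 * Look up the source descriptor in the sender's fixed file table and take
 * a reference on it, under the submit lock.
 */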
static struct file *io_msg_grab_file(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
	struct io_ring_ctx *ctx = req->ctx;
	struct file *file = NULL;
	int idx = msg->src_fd;

	io_ring_submit_lock(ctx, issue_flags);
	if (likely(idx < ctx->nr_user_files)) {
		idx = array_index_nospec(idx, ctx->nr_user_files);
		file = io_file_from_index(&ctx->file_table, idx);
		if (file)
			get_file(file);
	}
	io_ring_submit_unlock(ctx, issue_flags);
	return file;
}

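/*
 * Install the grabbed file into the target ring's fixed file table and,
 * unless IORING_MSG_RING_CQE_SKIP was set, notify the target with a CQE.
 */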
static int io_msg_install_complete(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_ring_ctx *target_ctx = req->file->private_data;
	struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
	struct file *src_file = msg->src_file;
	int ret;

	if (unlikely(io_double_lock_ctx(target_ctx, issue_flags)))
		return -EAGAIN;

	ret = __io_fixed_fd_install(target_ctx, src_file, msg->dst_fd);
	if (ret < 0)
		goto out_unlock;

	msg->src_file = NULL;
	req->flags &= ~REQ_F_NEED_CLEANUP;

	if (msg->flags & IORING_MSG_RING_CQE_SKIP)
		goto out_unlock;
	/*
	 * If this fails, the target still received the file descriptor but
	 * wasn't notified of the fact. This means that if this request
	 * completes with -EOVERFLOW, then the sender must ensure that a
	 * later IORING_OP_MSG_RING delivers the message.
	 */
	if (!io_post_aux_cqe(target_ctx, msg->user_data, ret, 0))
		ret = -EOVERFLOW;
out_unlock:
	io_double_unlock_ctx(target_ctx);
	return ret;
}

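/*
 * Runs in the target's task context; bail with -EOWNERDEAD if that task
 * is already exiting instead of installing into a dying ring.
 */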
static void io_msg_tw_fd_complete(struct callback_head *head)
{
	struct io_msg *msg = container_of(head, struct io_msg, tw);
	struct io_kiocb *req = cmd_to_io_kiocb(msg);
	int ret = -EOWNERDEAD;

	if (!(current->flags & PF_EXITING))
		ret = io_msg_install_complete(req, IO_URING_F_UNLOCKED);
	if (ret < 0)
		req_set_fail(req);
	io_req_queue_tw_complete(req, ret);
}

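/*
 * Punt the fd installation to the target ring's submitter task via
 * task_work; completion is signalled later by io_msg_tw_fd_complete().
 */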
static int io_msg_fd_remote(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->file->private_data;
	struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
	struct task_struct *task = READ_ONCE(ctx->submitter_task);

	if (unlikely(!task))
		return -EOWNERDEAD;

	init_task_work(&msg->tw, io_msg_tw_fd_complete);
	if (task_work_add(task, &msg->tw, TWA_SIGNAL))
		return -EOWNERDEAD;

	return IOU_ISSUE_SKIP_COMPLETE;
}

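/*
 * IORING_MSG_SEND_FD: pass a fixed file from the sender's table to the
 * target ring. The grabbed reference is stashed in msg->src_file so a
 * punted retry (-EAGAIN) doesn't have to look it up again.
 */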
static int io_msg_send_fd(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_ring_ctx *target_ctx = req->file->private_data;
	struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
	struct io_ring_ctx *ctx = req->ctx;
	struct file *src_file = msg->src_file;

	if (msg->len)
		return -EINVAL;
	if (target_ctx == ctx)
		return -EINVAL;
	if (target_ctx->flags & IORING_SETUP_R_DISABLED)
		return -EBADFD;
	if (!src_file) {
		src_file = io_msg_grab_file(req, issue_flags);
		if (!src_file)
			return -EBADF;
		msg->src_file = src_file;
		req->flags |= REQ_F_NEED_CLEANUP;
	}

	if (io_msg_need_remote(target_ctx))
		return io_msg_fd_remote(req);
	return io_msg_install_complete(req, issue_flags);
}

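/*
 * The CQE payload for the target rides in sqe->off (user_data) and
 * sqe->len, the source fixed file index in sqe->addr3, and the target
 * slot in sqe->file_index.
 */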
int io_msg_ring_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);

	if (unlikely(sqe->buf_index || sqe->personality))
		return -EINVAL;

	msg->src_file = NULL;
	msg->user_data = READ_ONCE(sqe->off);
	msg->len = READ_ONCE(sqe->len);
	msg->cmd = READ_ONCE(sqe->addr);
	msg->src_fd = READ_ONCE(sqe->addr3);
	msg->dst_fd = READ_ONCE(sqe->file_index);
	msg->flags = READ_ONCE(sqe->msg_ring_flags);
	if (msg->flags & ~IORING_MSG_RING_MASK)
		return -EINVAL;

	return 0;
}

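/* Issue entry point: verify the target is a ring file, then dispatch on cmd */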
int io_msg_ring(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
	int ret;

	ret = -EBADFD;
	if (!io_is_uring_fops(req->file))
		goto done;

	switch (msg->cmd) {
	case IORING_MSG_DATA:
		ret = io_msg_ring_data(req, issue_flags);
		break;
	case IORING_MSG_SEND_FD:
		ret = io_msg_send_fd(req, issue_flags);
		break;
	default:
		ret = -EINVAL;
		break;
	}

done:
	if (ret < 0) {
		if (ret == -EAGAIN || ret == IOU_ISSUE_SKIP_COMPLETE)
			return ret;
		req_set_fail(req);
	}
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

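/* alloc_cache teardown callback: free any carrier requests still cached */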
void io_msg_cache_free(const void *entry)
{
	struct io_kiocb *req = (struct io_kiocb *) entry;

	kmem_cache_free(req_cachep, req);
}