// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/fsnotify.h>
#include <linux/namei.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "../fs/internal.h"

#include "io_uring.h"
#include "rsrc.h"
#include "openclose.h"

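/*
 * Per-request state for open requests. 'filename' is resolved at prep
 * time and freed on cleanup; a non-zero 'file_slot' selects a fixed
 * file table slot (direct descriptor), otherwise a normal fd is
 * allocated at issue time.
 */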
struct io_open {
	struct file			*file;
	int				dfd;
	u32				file_slot;
	struct filename			*filename;
	struct open_how			how;
	unsigned long			nofile;
};

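/*
 * Per-request state for close requests: either a regular 'fd' or a
 * fixed file 'file_slot' (offset by one), never both.
 */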
struct io_close {
	struct file			*file;
	int				fd;
	u32				file_slot;
};

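/*
 * Common prep for IORING_OP_OPENAT and IORING_OP_OPENAT2. Expects
 * open->how to be filled in by the caller; copies the remaining SQE
 * fields and pins the filename so the request can be issued later
 * from a different context.
 */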
static int __io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_open *open = io_kiocb_to_cmd(req, struct io_open);
	const char __user *fname;
	int ret;

	if (unlikely(sqe->buf_index))
		return -EINVAL;
	if (unlikely(req->flags & REQ_F_FIXED_FILE))
		return -EBADF;

	/* open.how should already be initialised */
	if (!(open->how.flags & O_PATH) && force_o_largefile())
		open->how.flags |= O_LARGEFILE;

	open->dfd = READ_ONCE(sqe->fd);
	fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
	open->filename = getname(fname);
	if (IS_ERR(open->filename)) {
		ret = PTR_ERR(open->filename);
		open->filename = NULL;
		return ret;
	}

	open->file_slot = READ_ONCE(sqe->file_index);
	if (open->file_slot && (open->how.flags & O_CLOEXEC))
		return -EINVAL;

	open->nofile = rlimit(RLIMIT_NOFILE);
	req->flags |= REQ_F_NEED_CLEANUP;
	return 0;
}

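/* openat: build open_how from the SQE's open_flags and mode (in ->len) */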
int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_open *open = io_kiocb_to_cmd(req, struct io_open);
	u64 mode = READ_ONCE(sqe->len);
	u64 flags = READ_ONCE(sqe->open_flags);

	open->how = build_open_how(flags, mode);
	return __io_openat_prep(req, sqe);
}

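/*
 * openat2: copy a struct open_how of at least OPEN_HOW_SIZE_VER0 bytes
 * from userspace; copy_struct_from_user() rejects unknown trailing
 * bits for forward compatibility.
 */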
int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_open *open = io_kiocb_to_cmd(req, struct io_open);
	struct open_how __user *how;
	size_t len;
	int ret;

	how = u64_to_user_ptr(READ_ONCE(sqe->addr2));
	len = READ_ONCE(sqe->len);
	if (len < OPEN_HOW_SIZE_VER0)
		return -EINVAL;

	ret = copy_struct_from_user(&open->how, sizeof(open->how), how, len);
	if (ret)
		return ret;

	return __io_openat_prep(req, sqe);
}

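/*
 * Issue an open. On a nonblocking attempt a cached (LOOKUP_CACHED)
 * lookup is tried first; -EAGAIN punts the request for a blocking
 * retry, unless the application itself asked for RESOLVE_CACHED, in
 * which case the error is returned as-is.
 */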
int io_openat2(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_open *open = io_kiocb_to_cmd(req, struct io_open);
	struct open_flags op;
	struct file *file;
	bool resolve_nonblock, nonblock_set;
	bool fixed = !!open->file_slot;
	int ret;

	ret = build_open_flags(&open->how, &op);
	if (ret)
		goto err;
	nonblock_set = op.open_flag & O_NONBLOCK;
	resolve_nonblock = open->how.resolve & RESOLVE_CACHED;
	if (issue_flags & IO_URING_F_NONBLOCK) {
		/*
		 * Don't bother trying for O_TRUNC, O_CREAT, or O_TMPFILE open,
		 * it'll always return -EAGAIN
		 */
		if (open->how.flags & (O_TRUNC | O_CREAT | O_TMPFILE))
			return -EAGAIN;
		op.lookup_flags |= LOOKUP_CACHED;
		op.open_flag |= O_NONBLOCK;
	}

	if (!fixed) {
		ret = __get_unused_fd_flags(open->how.flags, open->nofile);
		if (ret < 0)
			goto err;
	}

	file = do_filp_open(open->dfd, open->filename, &op);
	if (IS_ERR(file)) {
		/*
		 * We could hang on to this 'fd' on retrying, but it seems
		 * like marginal gain for something that is now known to be
		 * a slower path. So just put it, and we'll get a new one
		 * when we retry.
		 */
		if (!fixed)
			put_unused_fd(ret);

		ret = PTR_ERR(file);
		/* only retry if RESOLVE_CACHED wasn't already set by application */
		if (ret == -EAGAIN &&
		    (!resolve_nonblock && (issue_flags & IO_URING_F_NONBLOCK)))
			return -EAGAIN;
		goto err;
	}

	if ((issue_flags & IO_URING_F_NONBLOCK) && !nonblock_set)
		file->f_flags &= ~O_NONBLOCK;
	fsnotify_open(file);

	if (!fixed)
		fd_install(ret, file);
	else
		ret = io_fixed_fd_install(req, issue_flags, file,
					  open->file_slot);
err:
	putname(open->filename);
	req->flags &= ~REQ_F_NEED_CLEANUP;
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

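/* IORING_OP_OPENAT shares the issue path with openat2 */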
int io_openat(struct io_kiocb *req, unsigned int issue_flags)
{
	return io_openat2(req, issue_flags);
}

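/* Called when REQ_F_NEED_CLEANUP is set: drop the pinned filename, if any */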
void io_open_cleanup(struct io_kiocb *req)
{
	struct io_open *open = io_kiocb_to_cmd(req, struct io_open);

	if (open->filename)
		putname(open->filename);
}

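/* Remove a fixed file at the given offset, under the ring's submit lock */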
int __io_close_fixed(struct io_ring_ctx *ctx, unsigned int issue_flags,
		     unsigned int offset)
{
	int ret;

	io_ring_submit_lock(ctx, issue_flags);
	ret = io_fixed_fd_remove(ctx, offset);
	io_ring_submit_unlock(ctx, issue_flags);

	return ret;
}

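/* file_slot is 1-based in the SQE; convert to a 0-based table offset */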
static inline int io_close_fixed(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_close *close = io_kiocb_to_cmd(req, struct io_close);

	return __io_close_fixed(req->ctx, issue_flags, close->file_slot - 1);
}

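/* close accepts either a normal fd or a fixed file slot, not both */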
int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_close *close = io_kiocb_to_cmd(req, struct io_close);

	if (sqe->off || sqe->addr || sqe->len || sqe->rw_flags || sqe->buf_index)
		return -EINVAL;
	if (req->flags & REQ_F_FIXED_FILE)
		return -EBADF;

	close->fd = READ_ONCE(sqe->fd);
	close->file_slot = READ_ONCE(sqe->file_index);
	if (close->file_slot && close->fd)
		return -EINVAL;

	return 0;
}

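/*
 * For a nonblocking issue, a file with a ->flush() op is punted to
 * async, since flush may block. Otherwise the fd is detached from the
 * file table under file_lock and closed here.
 */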
int io_close(struct io_kiocb *req, unsigned int issue_flags)
{
	struct files_struct *files = current->files;
	struct io_close *close = io_kiocb_to_cmd(req, struct io_close);
	struct fdtable *fdt;
	struct file *file;
	int ret = -EBADF;

	if (close->file_slot) {
		ret = io_close_fixed(req, issue_flags);
		goto err;
	}

	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	if (close->fd >= fdt->max_fds) {
		spin_unlock(&files->file_lock);
		goto err;
	}
	file = rcu_dereference_protected(fdt->fd[close->fd],
					 lockdep_is_held(&files->file_lock));
	if (!file || io_is_uring_fops(file)) {
		spin_unlock(&files->file_lock);
		goto err;
	}

	/* if the file has a flush method, be safe and punt to async */
	if (file->f_op->flush && (issue_flags & IO_URING_F_NONBLOCK)) {
		spin_unlock(&files->file_lock);
		return -EAGAIN;
	}

	file = __close_fd_get_file(close->fd);
	spin_unlock(&files->file_lock);
	if (!file)
		goto err;

	/* No ->flush() or already async, safely close from here */
	ret = filp_close(file, current->files);
err:
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}