blob: c032ca3436ca99a531059090d36997a6130678cc [file] [log] [blame]
Jens Axboe73572982022-06-13 07:12:45 -06001// SPDX-License-Identifier: GPL-2.0
2#ifndef IOU_RSRC_H
3#define IOU_RSRC_H
4
Pavel Begunkov69bbc6a2023-04-04 13:39:57 +01005#define IO_NODE_ALLOC_CACHE_MAX 32
6
Jens Axboe73572982022-06-13 07:12:45 -06007#define IO_RSRC_TAG_TABLE_SHIFT (PAGE_SHIFT - 3)
8#define IO_RSRC_TAG_TABLE_MAX (1U << IO_RSRC_TAG_TABLE_SHIFT)
9#define IO_RSRC_TAG_TABLE_MASK (IO_RSRC_TAG_TABLE_MAX - 1)
10
/* Resource types that can be registered with a ring. */
enum {
	IORING_RSRC_FILE		= 0,
	IORING_RSRC_BUFFER		= 1,
};
15
/*
 * A single resource queued for deferred put. The union member that is
 * valid depends on the owning node's type (file vs. mapped buffer).
 */
struct io_rsrc_put {
	u64 tag;	/* user-supplied tag, reported on removal */
	union {
		void *rsrc;
		struct file *file;
		struct io_mapped_ubuf *buf;
	};
};
24
25typedef void (rsrc_put_fn)(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc);
26
/*
 * Per-type table of registered resources for a ring.
 */
struct io_rsrc_data {
	struct io_ring_ctx		*ctx;

	/*
	 * Two-level table of user tags, indexed via io_get_tag_slot();
	 * each inner table holds IO_RSRC_TAG_TABLE_MAX entries.
	 */
	u64				**tags;
	unsigned int			nr;		/* number of registered entries */
	u16				rsrc_type;	/* IORING_RSRC_FILE or IORING_RSRC_BUFFER */
	/* NOTE(review): presumably set while unregister waits for node refs to drain — confirm in rsrc.c */
	bool				quiesce;
};
35
/*
 * Reference-counted node tracking resources whose teardown must wait
 * until all requests using them have completed.
 */
struct io_rsrc_node {
	struct io_ring_ctx		*ctx;
	/* plain int, not atomic: only manipulated under ctx->uring_lock */
	int				refs;
	bool				empty;
	u16				type;	/* IORING_RSRC_FILE or IORING_RSRC_BUFFER */
	struct list_head		node;
	struct io_rsrc_put		item;	/* the resource to drop at ref-zero */
};
44
Jens Axboead163a72022-06-18 19:44:33 -060045struct io_mapped_ubuf {
46 u64 ubuf;
47 u64 ubuf_end;
48 unsigned int nr_bvecs;
49 unsigned long acct_pages;
Kees Cook04d92442023-08-17 14:21:47 -070050 struct bio_vec bvec[] __counted_by(nr_bvecs);
Jens Axboead163a72022-06-18 19:44:33 -060051};
52
/* Resource node lifetime management (rsrc.c). */
void io_rsrc_node_ref_zero(struct io_rsrc_node *node);
void io_rsrc_node_destroy(struct io_ring_ctx *ctx, struct io_rsrc_node *ref_node);
struct io_rsrc_node *io_rsrc_node_alloc(struct io_ring_ctx *ctx);
int io_queue_rsrc_removal(struct io_rsrc_data *data, unsigned idx, void *rsrc);

/* Import a registered (fixed) buffer into an iov_iter for I/O. */
int io_import_fixed(int ddir, struct iov_iter *iter,
		    struct io_mapped_ubuf *imu,
		    u64 buf_addr, size_t len);

/* Registration / unregistration of fixed buffers and fixed files. */
void __io_sqe_buffers_unregister(struct io_ring_ctx *ctx);
int io_sqe_buffers_unregister(struct io_ring_ctx *ctx);
int io_sqe_buffers_register(struct io_ring_ctx *ctx, void __user *arg,
			    unsigned int nr_args, u64 __user *tags);
void __io_sqe_files_unregister(struct io_ring_ctx *ctx);
int io_sqe_files_unregister(struct io_ring_ctx *ctx);
int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
			  unsigned nr_args, u64 __user *tags);

/* io_uring_register() entry points for resource updates. */
int io_register_files_update(struct io_ring_ctx *ctx, void __user *arg,
			     unsigned nr_args);
int io_register_rsrc_update(struct io_ring_ctx *ctx, void __user *arg,
			    unsigned size, unsigned type);
int io_register_rsrc(struct io_ring_ctx *ctx, void __user *arg,
		     unsigned int size, unsigned int type);
77
Pavel Begunkov1f2c8f62023-04-04 13:39:55 +010078static inline void io_put_rsrc_node(struct io_ring_ctx *ctx, struct io_rsrc_node *node)
Jens Axboe73572982022-06-13 07:12:45 -060079{
Pavel Begunkov1f2c8f62023-04-04 13:39:55 +010080 lockdep_assert_held(&ctx->uring_lock);
81
Pavel Begunkovef8ae642023-04-04 13:39:49 +010082 if (node && !--node->refs)
83 io_rsrc_node_ref_zero(node);
Jens Axboe73572982022-06-13 07:12:45 -060084}
85
/*
 * Take one reference on @node. Non-atomic increment: caller must hold
 * ctx->uring_lock. @ctx is unused here but kept for API symmetry with
 * io_put_rsrc_node().
 */
static inline void io_charge_rsrc_node(struct io_ring_ctx *ctx,
					struct io_rsrc_node *node)
{
	node->refs++;
}
91
Jens Axboe3f302382024-01-11 13:34:33 -070092static inline void __io_req_set_rsrc_node(struct io_kiocb *req,
93 struct io_ring_ctx *ctx)
94{
95 lockdep_assert_held(&ctx->uring_lock);
96 req->rsrc_node = ctx->rsrc_node;
97 io_charge_rsrc_node(ctx, ctx->rsrc_node);
98}
99
Jens Axboe73572982022-06-13 07:12:45 -0600100static inline void io_req_set_rsrc_node(struct io_kiocb *req,
101 struct io_ring_ctx *ctx,
102 unsigned int issue_flags)
103{
104 if (!req->rsrc_node) {
Pavel Begunkov4ff0b502023-03-29 15:03:43 +0100105 io_ring_submit_lock(ctx, issue_flags);
Jens Axboe3f302382024-01-11 13:34:33 -0700106 __io_req_set_rsrc_node(req, ctx);
Pavel Begunkov4ff0b502023-03-29 15:03:43 +0100107 io_ring_submit_unlock(ctx, issue_flags);
Jens Axboe73572982022-06-13 07:12:45 -0600108 }
109}
110
111static inline u64 *io_get_tag_slot(struct io_rsrc_data *data, unsigned int idx)
112{
113 unsigned int off = idx & IO_RSRC_TAG_TABLE_MASK;
114 unsigned int table_idx = idx >> IO_RSRC_TAG_TABLE_SHIFT;
115
116 return &data->tags[table_idx][off];
117}
118
Pavel Begunkov2933ae62023-04-11 12:06:07 +0100119static inline int io_rsrc_init(struct io_ring_ctx *ctx)
120{
121 ctx->rsrc_node = io_rsrc_node_alloc(ctx);
122 return ctx->rsrc_node ? 0 : -ENOMEM;
123}
124
/* IORING_OP_FILES_UPDATE request prep and issue (rsrc.c). */
int io_files_update(struct io_kiocb *req, unsigned int issue_flags);
int io_files_update_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);

/* Charge @nr_pages against the user's locked-memory limit. */
int __io_account_mem(struct user_struct *user, unsigned long nr_pages);
129
/*
 * Drop @nr_pages from the user's locked-memory accounting; pairs with
 * __io_account_mem().
 */
static inline void __io_unaccount_mem(struct user_struct *user,
				      unsigned long nr_pages)
{
	atomic_long_sub(nr_pages, &user->locked_vm);
}
135
Jens Axboe73572982022-06-13 07:12:45 -0600136#endif