#ifndef INTERNAL_IO_WQ_H
#define INTERNAL_IO_WQ_H

#include <linux/refcount.h>
#include <linux/io_uring_types.h>

struct io_wq;

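/*
 * Work item flags, stored in io_wq_work->flags. The low bits are boolean
 * flags; for hashed work, the upper 8 bits (from IO_WQ_HASH_SHIFT) hold
 * the hash key.
 */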
enum {
	IO_WQ_WORK_CANCEL	= 1,
	IO_WQ_WORK_HASHED	= 2,
	IO_WQ_WORK_UNBOUND	= 4,
	IO_WQ_WORK_CONCURRENT	= 16,

	IO_WQ_HASH_SHIFT	= 24,	/* upper 8 bits are used for hash key */
};

enum io_wq_cancel {
	IO_WQ_CANCEL_OK,	/* cancelled before started */
	IO_WQ_CANCEL_RUNNING,	/* found, running, cancellation attempted */
	IO_WQ_CANCEL_NOTFOUND,	/* work not found */
};

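/*
 * Per-queue callbacks: do_work executes one work item; free_work is
 * called once an item completes and returns a follow-up item to run
 * next (e.g. a linked request), or NULL if there is none.
 */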
typedef struct io_wq_work *(free_work_fn)(struct io_wq_work *);
typedef void (io_wq_work_fn)(struct io_wq_work *);

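/*
 * Shared state for serialising hashed work: 'map' is a bitmap of hash
 * buckets with work currently in flight, and 'wait' is where a queue
 * sleeps until a busy bucket frees up. Refcounted so several io_wq
 * instances can share one hash.
 */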
struct io_wq_hash {
	refcount_t refs;
	unsigned long map;
	struct wait_queue_head wait;
};

static inline void io_wq_put_hash(struct io_wq_hash *hash)
{
	if (refcount_dec_and_test(&hash->refs))
		kfree(hash);
}

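/* Creation parameters handed to io_wq_create() */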
struct io_wq_data {
	struct io_wq_hash *hash;
	struct task_struct *task;
	io_wq_work_fn *do_work;
	free_work_fn *free_work;
};

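/*
 * Lifecycle: io_wq_create() sets up a queue allowing up to 'bounded'
 * concurrent workers for bounded (non-IO_WQ_WORK_UNBOUND) work. Teardown
 * is two-step: io_wq_exit_start() signals workers to exit, and
 * io_wq_put_and_exit() later waits for them and frees the queue.
 *
 * A rough caller-side sketch; my_do_work/my_free_work are hypothetical
 * callbacks, not part of this API:
 *
 *	struct io_wq_data data = {
 *		.hash		= hash,
 *		.task		= current,
 *		.do_work	= my_do_work,
 *		.free_work	= my_free_work,
 *	};
 *	struct io_wq *wq = io_wq_create(concurrency, &data);
 */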
struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data);
void io_wq_exit_start(struct io_wq *wq);
void io_wq_put_and_exit(struct io_wq *wq);

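/*
 * io_wq_enqueue() queues work for async execution. io_wq_hash_work()
 * marks work as hashed on 'val' (typically the object being operated on,
 * such as an inode), so items hashing to the same key are executed
 * serially rather than concurrently.
 */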
void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work);
void io_wq_hash_work(struct io_wq_work *work, void *val);

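/*
 * Runtime tuning: io_wq_cpu_affinity() restricts workers to 'mask',
 * io_wq_max_workers() exchanges the per-pool worker limits in
 * 'new_count' (returning the previous limits in place), and
 * io_wq_worker_stopped() tells a worker whether its queue is exiting.
 */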
int io_wq_cpu_affinity(struct io_uring_task *tctx, cpumask_var_t mask);
int io_wq_max_workers(struct io_wq *wq, int *new_count);
bool io_wq_worker_stopped(void);

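/* True if this work was marked hashed via io_wq_hash_work() */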
static inline bool io_wq_is_hashed(struct io_wq_work *work)
{
	return atomic_read(&work->flags) & IO_WQ_WORK_HASHED;
}

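/*
 * Match-based cancellation: 'cancel' is invoked with 'data' against
 * pending and running work, and matching items are cancelled. With
 * cancel_all set, every match is cancelled rather than only the first.
 *
 * Sketch of a hypothetical matcher cancelling one specific item:
 *
 *	static bool match_one(struct io_wq_work *work, void *data)
 *	{
 *		return work == data;
 *	}
 *
 *	ret = io_wq_cancel_cb(wq, match_one, target, false);
 */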
typedef bool (work_cancel_fn)(struct io_wq_work *, void *);

enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
					void *data, bool cancel_all);

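/*
 * Hooks called by the scheduler when an io-wq worker blocks or wakes,
 * allowing the pool to activate or create another worker so forward
 * progress is maintained.
 */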
#if defined(CONFIG_IO_WQ)
extern void io_wq_worker_sleeping(struct task_struct *);
extern void io_wq_worker_running(struct task_struct *);
#else
static inline void io_wq_worker_sleeping(struct task_struct *tsk)
{
}
static inline void io_wq_worker_running(struct task_struct *tsk)
{
}
#endif

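/*
 * An io-wq worker is a task context flagged PF_IO_WORKER with its
 * io_worker state hung off current->worker_private.
 */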
static inline bool io_wq_current_is_worker(void)
{
	return in_task() && (current->flags & PF_IO_WORKER) &&
		current->worker_private;
}
#endif