blob: b3b004a7b6252861ad64695b8bf663ac9ead4831 [file] [log] [blame]
Jens Axboe771b53d02019-10-22 10:25:58 -06001#ifndef INTERNAL_IO_WQ_H
2#define INTERNAL_IO_WQ_H
3
Jens Axboee9418942021-02-19 12:33:30 -07004#include <linux/refcount.h>
Pavel Begunkovab1c84d2022-06-16 13:57:19 +01005#include <linux/io_uring_types.h>
Jens Axboe98447d62020-10-14 10:48:51 -06006
Jens Axboe771b53d02019-10-22 10:25:58 -06007struct io_wq;
8
/*
 * Per-work-item flags stored in io_wq_work->flags. The low bits are
 * individual behavior flags; bits above IO_WQ_HASH_SHIFT carry the
 * hash key for hashed (serialized) work.
 */
enum {
	IO_WQ_WORK_CANCEL	= 1 << 0,	/* work is marked for cancellation */
	IO_WQ_WORK_HASHED	= 1 << 1,	/* serialize against same hash key */
	IO_WQ_WORK_UNBOUND	= 1 << 2,	/* run on the unbound worker pool */
	IO_WQ_WORK_CONCURRENT	= 1 << 4,	/* may run concurrently */

	IO_WQ_HASH_SHIFT	= 24,		/* upper 8 bits are used for hash key */
};
17
/* Result of a cancellation attempt (see io_wq_cancel_cb()). */
enum io_wq_cancel {
	IO_WQ_CANCEL_OK		= 0,	/* found and cancelled before it ran */
	IO_WQ_CANCEL_RUNNING	= 1,	/* found while running; cancel attempted */
	IO_WQ_CANCEL_NOTFOUND	= 2,	/* no matching work was found */
};
23
/*
 * free_work_fn: invoked when a work item is done; may return a linked
 * follow-up work item to process next (NULL if none).
 * io_wq_work_fn: the handler that actually executes a work item.
 */
typedef struct io_wq_work *(free_work_fn)(struct io_wq_work *);
typedef void (io_wq_work_fn)(struct io_wq_work *);
Jens Axboe7d723062019-11-12 22:31:31 -070026
/*
 * Refcounted hash state used to serialize hashed work items; shared via
 * io_wq_data and released with io_wq_put_hash().
 */
struct io_wq_hash {
	refcount_t refs;		/* lifetime; freed on last put */
	unsigned long map;		/* bitmap of hash buckets — presumably one bit
					 * per in-flight hashed key; confirm in io-wq.c */
	struct wait_queue_head wait;	/* waiters for a hash bucket to free up */
};
32
33static inline void io_wq_put_hash(struct io_wq_hash *hash)
34{
35 if (refcount_dec_and_test(&hash->refs))
36 kfree(hash);
37}
38
/* Creation parameters for io_wq_create(). */
struct io_wq_data {
	struct io_wq_hash *hash;	/* shared hash state for hashed work */
	struct task_struct *task;	/* task the workers are created on behalf of */
	io_wq_work_fn *do_work;		/* executes a work item */
	free_work_fn *free_work;	/* releases a completed work item */
};
45
/* Create an io-wq instance with @bounded max bounded workers. */
struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data);
/* Begin teardown of @wq; pair with io_wq_put_and_exit() to finish it. */
void io_wq_exit_start(struct io_wq *wq);
/* Drop the reference to @wq and wait for it to fully exit. */
void io_wq_put_and_exit(struct io_wq *wq);

/* Queue @work for asynchronous execution by a worker. */
void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work);
/* Mark @work hashed, keyed on @val, so same-key work runs serially. */
void io_wq_hash_work(struct io_wq_work *work, void *val);

/* Get/set worker CPU affinity; NULL @mask presumably queries — confirm in io-wq.c. */
int io_wq_cpu_affinity(struct io_uring_task *tctx, cpumask_var_t mask);
/* Adjust per-pool max worker counts via @new_count. */
int io_wq_max_workers(struct io_wq *wq, int *new_count);
/* Returns true if the calling worker has been told to stop. */
bool io_wq_worker_stopped(void);
Jens Axboefe764212021-06-17 10:19:54 -060056
Pavel Begunkov8766dd52020-03-14 00:31:04 +030057static inline bool io_wq_is_hashed(struct io_wq_work *work)
58{
Jens Axboe3474d1b2024-06-13 19:28:27 +000059 return atomic_read(&work->flags) & IO_WQ_WORK_HASHED;
Pavel Begunkov8766dd52020-03-14 00:31:04 +030060}
Jens Axboe771b53d02019-10-22 10:25:58 -060061
/* Match callback: return true if the given work item should be cancelled. */
typedef bool (work_cancel_fn)(struct io_wq_work *, void *);

/*
 * Cancel work matched by @cancel (with @data passed through to it). If
 * @cancel_all is false, stop after the first match; otherwise cancel
 * every match. Returns the best-case io_wq_cancel result.
 */
enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
					void *data, bool cancel_all);
Jens Axboe62755e32019-10-28 21:49:21 -060066
/*
 * Scheduler hooks called when an io-wq worker blocks or resumes. When
 * io-wq is not configured they compile away to no-ops.
 */
#if defined(CONFIG_IO_WQ)
extern void io_wq_worker_sleeping(struct task_struct *);
extern void io_wq_worker_running(struct task_struct *);
#else
static inline void io_wq_worker_sleeping(struct task_struct *tsk)
{
}
static inline void io_wq_worker_running(struct task_struct *tsk)
{
}
#endif
Jens Axboe771b53d02019-10-22 10:25:58 -060078
Jens Axboe525b3052019-12-17 14:13:37 -070079static inline bool io_wq_current_is_worker(void)
80{
Jens Axboe3bfe6102021-02-16 14:15:30 -070081 return in_task() && (current->flags & PF_IO_WORKER) &&
Eric W. Biedermane32cf5d2021-12-22 22:10:09 -060082 current->worker_private;
Jens Axboe525b3052019-12-17 14:13:37 -070083}
84#endif