// SPDX-License-Identifier: GPL-2.0
/*
 * Basic worker thread pool for io_uring
 *
 * Copyright (C) 2019 Jens Axboe
 *
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/sched/signal.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/rculist_nulls.h>
#include <linux/cpu.h>
#include <linux/task_work.h>
#include <linux/audit.h>
#include <linux/mmu_context.h>
#include <uapi/linux/io_uring.h>

#include "io-wq.h"
#include "slist.h"
#include "io_uring.h"

#define WORKER_IDLE_TIMEOUT	(5 * HZ)

enum {
	IO_WORKER_F_UP		= 1,	/* up and active */
	IO_WORKER_F_RUNNING	= 2,	/* account as running */
	IO_WORKER_F_FREE	= 4,	/* worker on free list */
	IO_WORKER_F_BOUND	= 8,	/* is doing bounded work */
};

enum {
	IO_WQ_BIT_EXIT		= 0,	/* wq exiting */
};

enum {
	IO_ACCT_STALLED_BIT	= 0,	/* stalled on hash */
};

/*
 * One for each thread in a wq pool
 */
struct io_worker {
	refcount_t ref;
	unsigned flags;
	struct hlist_nulls_node nulls_node;
	struct list_head all_list;
	struct task_struct *task;
	struct io_wq *wq;

	struct io_wq_work *cur_work;
	struct io_wq_work *next_work;
	raw_spinlock_t lock;

	struct completion ref_done;

	unsigned long create_state;
	struct callback_head create_work;
	int create_index;

	union {
		struct rcu_head rcu;
		struct work_struct work;
	};
};

#if BITS_PER_LONG == 64
#define IO_WQ_HASH_ORDER	6
#else
#define IO_WQ_HASH_ORDER	5
#endif

#define IO_WQ_NR_HASH_BUCKETS	(1u << IO_WQ_HASH_ORDER)

struct io_wq_acct {
	unsigned nr_workers;
	unsigned max_workers;
	int index;
	atomic_t nr_running;
	raw_spinlock_t lock;
	struct io_wq_work_list work_list;
	unsigned long flags;
};

enum {
	IO_WQ_ACCT_BOUND,
	IO_WQ_ACCT_UNBOUND,
	IO_WQ_ACCT_NR,
};

/*
 * Per io_wq state
 */
struct io_wq {
	unsigned long state;

	free_work_fn *free_work;
	io_wq_work_fn *do_work;

	struct io_wq_hash *hash;

	atomic_t worker_refs;
	struct completion worker_done;

	struct hlist_node cpuhp_node;

	struct task_struct *task;

	struct io_wq_acct acct[IO_WQ_ACCT_NR];

	/* lock protects access to elements below */
	raw_spinlock_t lock;

	struct hlist_nulls_head free_list;
	struct list_head all_list;

	struct wait_queue_entry wait;

	struct io_wq_work *hash_tail[IO_WQ_NR_HASH_BUCKETS];

	cpumask_var_t cpu_mask;
};

static enum cpuhp_state io_wq_online;

struct io_cb_cancel_data {
	work_cancel_fn *fn;
	void *data;
	int nr_running;
	int nr_pending;
	bool cancel_all;
};

static bool create_io_worker(struct io_wq *wq, int index);
static void io_wq_dec_running(struct io_worker *worker);
static bool io_acct_cancel_pending_work(struct io_wq *wq,
					struct io_wq_acct *acct,
					struct io_cb_cancel_data *match);
static void create_worker_cb(struct callback_head *cb);
static void io_wq_cancel_tw_create(struct io_wq *wq);

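/*
 * Worker references: the final io_worker_release() completes ->ref_done,
 * which io_worker_exit() waits on before freeing the worker.
 */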
static bool io_worker_get(struct io_worker *worker)
{
	return refcount_inc_not_zero(&worker->ref);
}

static void io_worker_release(struct io_worker *worker)
{
	if (refcount_dec_and_test(&worker->ref))
		complete(&worker->ref_done);
}

static inline struct io_wq_acct *io_get_acct(struct io_wq *wq, bool bound)
{
	return &wq->acct[bound ? IO_WQ_ACCT_BOUND : IO_WQ_ACCT_UNBOUND];
}

static inline struct io_wq_acct *io_work_get_acct(struct io_wq *wq,
						  struct io_wq_work *work)
{
	return io_get_acct(wq, !(work->flags & IO_WQ_WORK_UNBOUND));
}

static inline struct io_wq_acct *io_wq_get_acct(struct io_worker *worker)
{
	return io_get_acct(worker->wq, worker->flags & IO_WORKER_F_BOUND);
}

static void io_worker_ref_put(struct io_wq *wq)
{
	if (atomic_dec_and_test(&wq->worker_refs))
		complete(&wq->worker_done);
}

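/*
 * Undo the accounting and references taken for a queued worker creation
 * that is being canceled.
 */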
static void io_worker_cancel_cb(struct io_worker *worker)
{
	struct io_wq_acct *acct = io_wq_get_acct(worker);
	struct io_wq *wq = worker->wq;

	atomic_dec(&acct->nr_running);
	raw_spin_lock(&wq->lock);
	acct->nr_workers--;
	raw_spin_unlock(&wq->lock);
	io_worker_ref_put(wq);
	clear_bit_unlock(0, &worker->create_state);
	io_worker_release(worker);
}

static bool io_task_worker_match(struct callback_head *cb, void *data)
{
	struct io_worker *worker;

	if (cb->func != create_worker_cb)
		return false;
	worker = container_of(cb, struct io_worker, create_work);
	return worker == data;
}

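/*
 * Worker is exiting: cancel any worker creation task_work it still has
 * queued, wait for the final reference to drop, then unhook the worker
 * from the wq lists and free it.
 */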
static void io_worker_exit(struct io_worker *worker)
{
	struct io_wq *wq = worker->wq;

	while (1) {
		struct callback_head *cb = task_work_cancel_match(wq->task,
						io_task_worker_match, worker);

		if (!cb)
			break;
		io_worker_cancel_cb(worker);
	}

	io_worker_release(worker);
	wait_for_completion(&worker->ref_done);

	raw_spin_lock(&wq->lock);
	if (worker->flags & IO_WORKER_F_FREE)
		hlist_nulls_del_rcu(&worker->nulls_node);
	list_del_rcu(&worker->all_list);
	raw_spin_unlock(&wq->lock);
	io_wq_dec_running(worker);
	worker->flags = 0;
	preempt_disable();
	current->flags &= ~PF_IO_WORKER;
	preempt_enable();

	kfree_rcu(worker, rcu);
	io_worker_ref_put(wq);
	do_exit(0);
}

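/*
 * Return true if this acct has pending work and isn't currently stalled
 * on a hashed work item.
 */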
static inline bool io_acct_run_queue(struct io_wq_acct *acct)
{
	bool ret = false;

	raw_spin_lock(&acct->lock);
	if (!wq_list_empty(&acct->work_list) &&
	    !test_bit(IO_ACCT_STALLED_BIT, &acct->flags))
		ret = true;
	raw_spin_unlock(&acct->lock);

	return ret;
}

/*
 * Check head of free list for an available worker. If one isn't available,
 * caller must create one.
 */
static bool io_wq_activate_free_worker(struct io_wq *wq,
					struct io_wq_acct *acct)
	__must_hold(RCU)
{
	struct hlist_nulls_node *n;
	struct io_worker *worker;

	/*
	 * Iterate free_list and see if we can find an idle worker to
	 * activate. If a given worker is on the free_list but in the process
	 * of exiting, keep trying.
	 */
	hlist_nulls_for_each_entry_rcu(worker, n, &wq->free_list, nulls_node) {
		if (!io_worker_get(worker))
			continue;
		if (io_wq_get_acct(worker) != acct) {
			io_worker_release(worker);
			continue;
		}
		if (wake_up_process(worker->task)) {
			io_worker_release(worker);
			return true;
		}
		io_worker_release(worker);
	}

	return false;
}

/*
 * We need a worker. If we find a free one, we're good. If not, and we're
 * below the max number of workers, create one.
 */
static bool io_wq_create_worker(struct io_wq *wq, struct io_wq_acct *acct)
{
	/*
	 * Most likely an attempt to queue unbounded work on an io_wq that
	 * wasn't setup with any unbounded workers.
	 */
	if (unlikely(!acct->max_workers))
		pr_warn_once("io-wq is not configured for unbound workers");

	raw_spin_lock(&wq->lock);
	if (acct->nr_workers >= acct->max_workers) {
		raw_spin_unlock(&wq->lock);
		return true;
	}
	acct->nr_workers++;
	raw_spin_unlock(&wq->lock);
	atomic_inc(&acct->nr_running);
	atomic_inc(&wq->worker_refs);
	return create_io_worker(wq, acct->index);
}

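/* Account the worker as actively running */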
static void io_wq_inc_running(struct io_worker *worker)
{
	struct io_wq_acct *acct = io_wq_get_acct(worker);

	atomic_inc(&acct->nr_running);
}

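/*
 * task_work callback, run off the io_wq owner task: create the requested
 * worker if the accounting still allows it, otherwise drop the refs that
 * were taken when the creation was queued.
 */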
static void create_worker_cb(struct callback_head *cb)
{
	struct io_worker *worker;
	struct io_wq *wq;
	struct io_wq_acct *acct;
	bool do_create = false;

	worker = container_of(cb, struct io_worker, create_work);
	wq = worker->wq;
	acct = &wq->acct[worker->create_index];
	raw_spin_lock(&wq->lock);

	if (acct->nr_workers < acct->max_workers) {
		acct->nr_workers++;
		do_create = true;
	}
	raw_spin_unlock(&wq->lock);
	if (do_create) {
		create_io_worker(wq, worker->create_index);
	} else {
		atomic_dec(&acct->nr_running);
		io_worker_ref_put(wq);
	}
	clear_bit_unlock(0, &worker->create_state);
	io_worker_release(worker);
}

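/*
 * Queue a task_work item on the wq owner task that will create a new
 * worker. Returns false, with the nr_running accounting undone, if the
 * creation couldn't be queued.
 */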
static bool io_queue_worker_create(struct io_worker *worker,
				   struct io_wq_acct *acct,
				   task_work_func_t func)
{
	struct io_wq *wq = worker->wq;

	/* raced with exit, just ignore create call */
	if (test_bit(IO_WQ_BIT_EXIT, &wq->state))
		goto fail;
	if (!io_worker_get(worker))
		goto fail;
	/*
	 * create_state manages ownership of create_work/index. We should
	 * only need one entry per worker, as the worker going to sleep
	 * will trigger the condition, and waking will clear it once it
	 * runs the task_work.
	 */
	if (test_bit(0, &worker->create_state) ||
	    test_and_set_bit_lock(0, &worker->create_state))
		goto fail_release;

	atomic_inc(&wq->worker_refs);
	init_task_work(&worker->create_work, func);
	worker->create_index = acct->index;
	if (!task_work_add(wq->task, &worker->create_work, TWA_SIGNAL)) {
		/*
		 * EXIT may have been set after checking it above, check after
		 * adding the task_work and remove any creation item if it is
		 * now set. wq exit does that too, but we can have added this
		 * work item after we canceled in io_wq_exit_workers().
		 */
		if (test_bit(IO_WQ_BIT_EXIT, &wq->state))
			io_wq_cancel_tw_create(wq);
		io_worker_ref_put(wq);
		return true;
	}
	io_worker_ref_put(wq);
	clear_bit_unlock(0, &worker->create_state);
fail_release:
	io_worker_release(worker);
fail:
	atomic_dec(&acct->nr_running);
	io_worker_ref_put(wq);
	return false;
}

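/*
 * A worker is going to sleep. If it was the last running worker for this
 * acct and work is still pending, queue creation of a replacement worker.
 */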
static void io_wq_dec_running(struct io_worker *worker)
{
	struct io_wq_acct *acct = io_wq_get_acct(worker);
	struct io_wq *wq = worker->wq;

	if (!(worker->flags & IO_WORKER_F_UP))
		return;

	if (!atomic_dec_and_test(&acct->nr_running))
		return;
	if (!io_acct_run_queue(acct))
		return;

	atomic_inc(&acct->nr_running);
	atomic_inc(&wq->worker_refs);
	io_queue_worker_create(worker, acct, create_worker_cb);
}

/*
 * Worker will start processing some work. Move it to the busy list, if
 * it's currently on the freelist.
 */
static void __io_worker_busy(struct io_wq *wq, struct io_worker *worker)
{
	if (worker->flags & IO_WORKER_F_FREE) {
		worker->flags &= ~IO_WORKER_F_FREE;
		raw_spin_lock(&wq->lock);
		hlist_nulls_del_init_rcu(&worker->nulls_node);
		raw_spin_unlock(&wq->lock);
	}
}

/*
 * No work, worker going to sleep. Move to freelist.
 */
static void __io_worker_idle(struct io_wq *wq, struct io_worker *worker)
	__must_hold(wq->lock)
{
	if (!(worker->flags & IO_WORKER_F_FREE)) {
		worker->flags |= IO_WORKER_F_FREE;
		hlist_nulls_add_head_rcu(&worker->nulls_node, &wq->free_list);
	}
}

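/* The hash key is stashed in the upper bits of work->flags */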
static inline unsigned int io_get_work_hash(struct io_wq_work *work)
{
	return work->flags >> IO_WQ_HASH_SHIFT;
}

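/*
 * Stalled on hashed work: add the wq to the hash waitqueue. If the hash
 * bit has already been cleared by the time we get there, return true so
 * the caller can retry right away instead of sleeping.
 */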
static bool io_wait_on_hash(struct io_wq *wq, unsigned int hash)
{
	bool ret = false;

	spin_lock_irq(&wq->hash->wait.lock);
	if (list_empty(&wq->wait.entry)) {
		__add_wait_queue(&wq->hash->wait, &wq->wait);
		if (!test_bit(hash, &wq->hash->map)) {
			__set_current_state(TASK_RUNNING);
			list_del_init(&wq->wait.entry);
			ret = true;
		}
	}
	spin_unlock_irq(&wq->hash->wait.lock);
	return ret;
}

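/*
 * Grab the next runnable work item: unhashed work can run right away,
 * hashed work only if no other worker currently owns that hash bucket.
 * If all remaining work is hashed and already running, mark the acct
 * stalled and wait on the hash.
 */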
static struct io_wq_work *io_get_next_work(struct io_wq_acct *acct,
					   struct io_worker *worker)
	__must_hold(acct->lock)
{
	struct io_wq_work_node *node, *prev;
	struct io_wq_work *work, *tail;
	unsigned int stall_hash = -1U;
	struct io_wq *wq = worker->wq;

	wq_list_for_each(node, prev, &acct->work_list) {
		unsigned int hash;

		work = container_of(node, struct io_wq_work, list);

		/* not hashed, can run anytime */
		if (!io_wq_is_hashed(work)) {
			wq_list_del(&acct->work_list, node, prev);
			return work;
		}

		hash = io_get_work_hash(work);
		/* all items with this hash lie in [work, tail] */
		tail = wq->hash_tail[hash];

		/* hashed, can run if not already running */
		if (!test_and_set_bit(hash, &wq->hash->map)) {
			wq->hash_tail[hash] = NULL;
			wq_list_cut(&acct->work_list, &tail->list, prev);
			return work;
		}
		if (stall_hash == -1U)
			stall_hash = hash;
		/* fast forward to a next hash, for-each will fix up @prev */
		node = &tail->list;
	}

	if (stall_hash != -1U) {
		bool unstalled;

		/*
		 * Set this before dropping the lock to avoid racing with new
		 * work being added and clearing the stalled bit.
		 */
		set_bit(IO_ACCT_STALLED_BIT, &acct->flags);
		raw_spin_unlock(&acct->lock);
		unstalled = io_wait_on_hash(wq, stall_hash);
		raw_spin_lock(&acct->lock);
		if (unstalled) {
			clear_bit(IO_ACCT_STALLED_BIT, &acct->flags);
			if (wq_has_sleeper(&wq->hash->wait))
				wake_up(&wq->hash->wait);
		}
	}

	return NULL;
}

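/*
 * Publish the worker's current work item under worker->lock, so that
 * cancelation can safely inspect it; flush any pending task_work first.
 */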
static void io_assign_current_work(struct io_worker *worker,
				   struct io_wq_work *work)
{
	if (work) {
		io_run_task_work();
		cond_resched();
	}

	raw_spin_lock(&worker->lock);
	worker->cur_work = work;
	worker->next_work = NULL;
	raw_spin_unlock(&worker->lock);
}

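/*
 * Pull work off this acct's list and run it, including any dependent
 * link that the completed item hands back via free_work.
 */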
static void io_worker_handle_work(struct io_worker *worker)
{
	struct io_wq_acct *acct = io_wq_get_acct(worker);
	struct io_wq *wq = worker->wq;
	bool do_kill = test_bit(IO_WQ_BIT_EXIT, &wq->state);

	do {
		struct io_wq_work *work;

		/*
		 * If we got some work, mark us as busy. If we didn't, but
		 * the list isn't empty, it means we stalled on hashed work.
		 * Mark us stalled so we don't keep looking for work when we
		 * can't make progress, any work completion or insertion will
		 * clear the stalled flag.
		 */
		raw_spin_lock(&acct->lock);
		work = io_get_next_work(acct, worker);
		raw_spin_unlock(&acct->lock);
		if (work) {
			__io_worker_busy(wq, worker);

			/*
			 * Make sure cancelation can find this, even before
			 * it becomes the active work. That avoids a window
			 * where the work has been removed from our general
			 * work list, but isn't yet discoverable as the
			 * current work item for this worker.
			 */
			raw_spin_lock(&worker->lock);
			worker->next_work = work;
			raw_spin_unlock(&worker->lock);
		} else {
			break;
		}
		io_assign_current_work(worker, work);
		__set_current_state(TASK_RUNNING);

		/* handle a whole dependent link */
		do {
			struct io_wq_work *next_hashed, *linked;
			unsigned int hash = io_get_work_hash(work);

			next_hashed = wq_next_work(work);

			if (unlikely(do_kill) && (work->flags & IO_WQ_WORK_UNBOUND))
				work->flags |= IO_WQ_WORK_CANCEL;
			wq->do_work(work);
			io_assign_current_work(worker, NULL);

			linked = wq->free_work(work);
			work = next_hashed;
			if (!work && linked && !io_wq_is_hashed(linked)) {
				work = linked;
				linked = NULL;
			}
			io_assign_current_work(worker, work);
			if (linked)
				io_wq_enqueue(wq, linked);

			if (hash != -1U && !next_hashed) {
				/* serialize hash clear with wake_up() */
				spin_lock_irq(&wq->hash->wait.lock);
				clear_bit(hash, &wq->hash->map);
				clear_bit(IO_ACCT_STALLED_BIT, &acct->flags);
				spin_unlock_irq(&wq->hash->wait.lock);
				if (wq_has_sleeper(&wq->hash->wait))
					wake_up(&wq->hash->wait);
			}
		} while (work);
	} while (1);
}

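/* Main loop for each io-wq worker thread */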
static int io_wq_worker(void *data)
{
	struct io_worker *worker = data;
	struct io_wq_acct *acct = io_wq_get_acct(worker);
	struct io_wq *wq = worker->wq;
	bool exit_mask = false, last_timeout = false;
	char buf[TASK_COMM_LEN];

	worker->flags |= (IO_WORKER_F_UP | IO_WORKER_F_RUNNING);

	snprintf(buf, sizeof(buf), "iou-wrk-%d", wq->task->pid);
	set_task_comm(current, buf);

	while (!test_bit(IO_WQ_BIT_EXIT, &wq->state)) {
		long ret;

		set_current_state(TASK_INTERRUPTIBLE);
		while (io_acct_run_queue(acct))
			io_worker_handle_work(worker);

		raw_spin_lock(&wq->lock);
		/*
		 * Last sleep timed out. Exit if we're not the last worker,
		 * or if someone modified our affinity.
		 */
		if (last_timeout && (exit_mask || acct->nr_workers > 1)) {
			acct->nr_workers--;
			raw_spin_unlock(&wq->lock);
			__set_current_state(TASK_RUNNING);
			break;
		}
		last_timeout = false;
		__io_worker_idle(wq, worker);
		raw_spin_unlock(&wq->lock);
		if (io_run_task_work())
			continue;
		ret = schedule_timeout(WORKER_IDLE_TIMEOUT);
		if (signal_pending(current)) {
			struct ksignal ksig;

			if (!get_signal(&ksig))
				continue;
			break;
		}
		if (!ret) {
			last_timeout = true;
			exit_mask = !cpumask_test_cpu(raw_smp_processor_id(),
							wq->cpu_mask);
		}
	}

	if (test_bit(IO_WQ_BIT_EXIT, &wq->state))
		io_worker_handle_work(worker);

	io_worker_exit(worker);
	return 0;
}

/*
 * Called when a worker is scheduled in. Mark us as currently running.
 */
void io_wq_worker_running(struct task_struct *tsk)
{
	struct io_worker *worker = tsk->worker_private;

	if (!worker)
		return;
	if (!(worker->flags & IO_WORKER_F_UP))
		return;
	if (worker->flags & IO_WORKER_F_RUNNING)
		return;
	worker->flags |= IO_WORKER_F_RUNNING;
	io_wq_inc_running(worker);
}

/*
 * Called when worker is going to sleep. If there are no workers currently
 * running and we have work pending, wake up a free one or create a new one.
 */
void io_wq_worker_sleeping(struct task_struct *tsk)
{
	struct io_worker *worker = tsk->worker_private;

	if (!worker)
		return;
	if (!(worker->flags & IO_WORKER_F_UP))
		return;
	if (!(worker->flags & IO_WORKER_F_RUNNING))
		return;

	worker->flags &= ~IO_WORKER_F_RUNNING;
	io_wq_dec_running(worker);
}

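/*
 * Finish setting up a newly created worker task: pin it to the wq's CPU
 * mask, hook it into the free and all lists, and kick it off.
 */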
static void io_init_new_worker(struct io_wq *wq, struct io_worker *worker,
			       struct task_struct *tsk)
{
	tsk->worker_private = worker;
	worker->task = tsk;
	set_cpus_allowed_ptr(tsk, wq->cpu_mask);

	raw_spin_lock(&wq->lock);
	hlist_nulls_add_head_rcu(&worker->nulls_node, &wq->free_list);
	list_add_tail_rcu(&worker->all_list, &wq->all_list);
	worker->flags |= IO_WORKER_F_FREE;
	raw_spin_unlock(&wq->lock);
	wake_up_new_task(tsk);
}

static bool io_wq_work_match_all(struct io_wq_work *work, void *data)
{
	return true;
}

static inline bool io_should_retry_thread(long err)
{
	/*
	 * Prevent perpetual task_work retry, if the task (or its group) is
	 * exiting.
	 */
	if (fatal_signal_pending(current))
		return false;

	switch (err) {
	case -EAGAIN:
	case -ERESTARTSYS:
	case -ERESTARTNOINTR:
	case -ERESTARTNOHAND:
		return true;
	default:
		return false;
	}
}

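/*
 * task_work continuation of a failed worker creation: retry creating the
 * thread. On a hard failure, undo the accounting, and if this acct is now
 * down to zero workers, cancel all of its pending work.
 */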
static void create_worker_cont(struct callback_head *cb)
{
	struct io_worker *worker;
	struct task_struct *tsk;
	struct io_wq *wq;

	worker = container_of(cb, struct io_worker, create_work);
	clear_bit_unlock(0, &worker->create_state);
	wq = worker->wq;
	tsk = create_io_thread(io_wq_worker, worker, NUMA_NO_NODE);
	if (!IS_ERR(tsk)) {
		io_init_new_worker(wq, worker, tsk);
		io_worker_release(worker);
		return;
	} else if (!io_should_retry_thread(PTR_ERR(tsk))) {
		struct io_wq_acct *acct = io_wq_get_acct(worker);

		atomic_dec(&acct->nr_running);
		raw_spin_lock(&wq->lock);
		acct->nr_workers--;
		if (!acct->nr_workers) {
			struct io_cb_cancel_data match = {
				.fn		= io_wq_work_match_all,
				.cancel_all	= true,
			};

			raw_spin_unlock(&wq->lock);
			while (io_acct_cancel_pending_work(wq, acct, &match))
				;
		} else {
			raw_spin_unlock(&wq->lock);
		}
		io_worker_ref_put(wq);
		kfree(worker);
		return;
	}

	/* re-create attempts grab a new worker ref, drop the existing one */
	io_worker_release(worker);
	schedule_work(&worker->work);
}

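/*
 * Workqueue path for retrying a worker creation whose initial
 * create_io_thread() attempt failed with a transient error.
 */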
static void io_workqueue_create(struct work_struct *work)
{
	struct io_worker *worker = container_of(work, struct io_worker, work);
	struct io_wq_acct *acct = io_wq_get_acct(worker);

	if (!io_queue_worker_create(worker, acct, create_worker_cont))
		kfree(worker);
}

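/*
 * Allocate a new worker and spawn its task. Transient thread creation
 * failures are retried from a workqueue; hard failures undo the
 * accounting taken by io_wq_create_worker().
 */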
static bool create_io_worker(struct io_wq *wq, int index)
{
	struct io_wq_acct *acct = &wq->acct[index];
	struct io_worker *worker;
	struct task_struct *tsk;

	__set_current_state(TASK_RUNNING);

	worker = kzalloc(sizeof(*worker), GFP_KERNEL);
	if (!worker) {
fail:
		atomic_dec(&acct->nr_running);
		raw_spin_lock(&wq->lock);
		acct->nr_workers--;
		raw_spin_unlock(&wq->lock);
		io_worker_ref_put(wq);
		return false;
	}

	refcount_set(&worker->ref, 1);
	worker->wq = wq;
	raw_spin_lock_init(&worker->lock);
	init_completion(&worker->ref_done);

	if (index == IO_WQ_ACCT_BOUND)
		worker->flags |= IO_WORKER_F_BOUND;

	tsk = create_io_thread(io_wq_worker, worker, NUMA_NO_NODE);
	if (!IS_ERR(tsk)) {
		io_init_new_worker(wq, worker, tsk);
	} else if (!io_should_retry_thread(PTR_ERR(tsk))) {
		kfree(worker);
		goto fail;
	} else {
		INIT_WORK(&worker->work, io_workqueue_create);
		schedule_work(&worker->work);
	}

	return true;
}

Hillf Danton | c4068bf | 2020-09-26 21:26:55 +0800 | [diff] [blame] | 821 | /* |
| 822 | * Iterate the passed in list and call the specific function for each |
| 823 | * worker that isn't exiting |
| 824 | */ |
static bool io_wq_for_each_worker(struct io_wq *wq,
				  bool (*func)(struct io_worker *, void *),
				  void *data)
{
	struct io_worker *worker;
	bool ret = false;

	list_for_each_entry_rcu(worker, &wq->all_list, all_list) {
		if (io_worker_get(worker)) {
			/* no task if node is/was offline */
			if (worker->task)
				ret = func(worker, data);
			io_worker_release(worker);
			if (ret)
				break;
		}
	}

	return ret;
}

static bool io_wq_worker_wake(struct io_worker *worker, void *data)
{
	__set_notify_signal(worker->task);
	wake_up_process(worker->task);
	return false;
}

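/*
 * Run a work item to completion with IO_WQ_WORK_CANCEL set, so its
 * handler can short-circuit and still post a completion. free_work()
 * may hand back a linked work item, which is cancelled the same way.
 */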
static void io_run_cancel(struct io_wq_work *work, struct io_wq *wq)
{
	do {
		work->flags |= IO_WQ_WORK_CANCEL;
		wq->do_work(work);
		work = wq->free_work(work);
	} while (work);
}

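/*
 * Add work to the pending list for its accounting class. Hashed work
 * is additionally chained behind the current tail for its hash bucket,
 * which is what keeps items with the same hash executing serially.
 */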
static void io_wq_insert_work(struct io_wq *wq, struct io_wq_work *work)
{
	struct io_wq_acct *acct = io_work_get_acct(wq, work);
	unsigned int hash;
	struct io_wq_work *tail;

	if (!io_wq_is_hashed(work)) {
append:
		wq_list_add_tail(&work->list, &acct->work_list);
		return;
	}

	hash = io_get_work_hash(work);
	tail = wq->hash_tail[hash];
	wq->hash_tail[hash] = work;
	if (!tail)
		goto append;

	wq_list_add_after(&work->list, &tail->list, &acct->work_list);
}

static bool io_wq_work_match_item(struct io_wq_work *work, void *data)
{
	return work == data;
}

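/*
 * Queue work for async execution. An idle (free) worker is woken if
 * one exists; otherwise a new worker is created if the work allows
 * concurrency or nothing is currently running for this class.
 */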
void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work)
{
	struct io_wq_acct *acct = io_work_get_acct(wq, work);
	struct io_cb_cancel_data match;
	unsigned work_flags = work->flags;
	bool do_create;

	/*
	 * If io-wq is exiting for this task, or if the request has explicitly
	 * been marked as one that should not get executed, cancel it here.
	 */
	if (test_bit(IO_WQ_BIT_EXIT, &wq->state) ||
	    (work->flags & IO_WQ_WORK_CANCEL)) {
		io_run_cancel(work, wq);
		return;
	}

	raw_spin_lock(&acct->lock);
	io_wq_insert_work(wq, work);
	clear_bit(IO_ACCT_STALLED_BIT, &acct->flags);
	raw_spin_unlock(&acct->lock);

	raw_spin_lock(&wq->lock);
	rcu_read_lock();
	do_create = !io_wq_activate_free_worker(wq, acct);
	rcu_read_unlock();

	raw_spin_unlock(&wq->lock);

	if (do_create && ((work_flags & IO_WQ_WORK_CONCURRENT) ||
	    !atomic_read(&acct->nr_running))) {
		bool did_create;

		did_create = io_wq_create_worker(wq, acct);
		if (likely(did_create))
			return;

		raw_spin_lock(&wq->lock);
		if (acct->nr_workers) {
			raw_spin_unlock(&wq->lock);
			return;
		}
		raw_spin_unlock(&wq->lock);

		/* fatal condition, failed to create the first worker */
		match.fn = io_wq_work_match_item;
		match.data = work;
		match.cancel_all = false;

		io_acct_cancel_pending_work(wq, acct, &match);
	}
}

/*
 * Work items that hash to the same value will not be done in parallel.
 * Used to limit concurrent writes, generally hashed by inode.
 */
void io_wq_hash_work(struct io_wq_work *work, void *val)
{
	unsigned int bit;

	bit = hash_ptr(val, IO_WQ_HASH_ORDER);
	work->flags |= (IO_WQ_WORK_HASHED | (bit << IO_WQ_HASH_SHIFT));
}
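
/*
 * For instance, the io_uring side hashes inode-backed writes roughly
 * like this (a sketch of a caller, not code in this file):
 *
 *	io_wq_hash_work(&req->work, file_inode(req->file));
 *	io_wq_enqueue(wq, &req->work);
 *
 * so two writes targeting the same inode never run in parallel.
 */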

static bool __io_wq_worker_cancel(struct io_worker *worker,
				  struct io_cb_cancel_data *match,
				  struct io_wq_work *work)
{
	if (work && match->fn(work, match->data)) {
		work->flags |= IO_WQ_WORK_CANCEL;
		__set_notify_signal(worker->task);
		return true;
	}

	return false;
}

static bool io_wq_worker_cancel(struct io_worker *worker, void *data)
{
	struct io_cb_cancel_data *match = data;

	/*
	 * Hold the lock to avoid ->cur_work going out of scope; the
	 * caller may dereference the passed-in work item.
	 */
	raw_spin_lock(&worker->lock);
	if (__io_wq_worker_cancel(worker, match, worker->cur_work) ||
	    __io_wq_worker_cancel(worker, match, worker->next_work))
		match->nr_running++;
	raw_spin_unlock(&worker->lock);

	return match->nr_running && !match->cancel_all;
}

static inline void io_wq_remove_pending(struct io_wq *wq,
					struct io_wq_work *work,
					struct io_wq_work_node *prev)
{
	struct io_wq_acct *acct = io_work_get_acct(wq, work);
	unsigned int hash = io_get_work_hash(work);
	struct io_wq_work *prev_work = NULL;

	if (io_wq_is_hashed(work) && work == wq->hash_tail[hash]) {
		if (prev)
			prev_work = container_of(prev, struct io_wq_work, list);
		if (prev_work && io_get_work_hash(prev_work) == hash)
			wq->hash_tail[hash] = prev_work;
		else
			wq->hash_tail[hash] = NULL;
	}
	wq_list_del(&acct->work_list, &work->list, prev);
}

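/*
 * Scan one accounting class' pending list for a match. At most one item
 * is cancelled per call: the acct lock must be dropped to run the
 * cancellation, after which the list can't safely be walked further.
 */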
static bool io_acct_cancel_pending_work(struct io_wq *wq,
					struct io_wq_acct *acct,
					struct io_cb_cancel_data *match)
{
	struct io_wq_work_node *node, *prev;
	struct io_wq_work *work;

	raw_spin_lock(&acct->lock);
	wq_list_for_each(node, prev, &acct->work_list) {
		work = container_of(node, struct io_wq_work, list);
		if (!match->fn(work, match->data))
			continue;
		io_wq_remove_pending(wq, work, prev);
		raw_spin_unlock(&acct->lock);
		io_run_cancel(work, wq);
		match->nr_pending++;
		/* not safe to continue after unlock */
		return true;
	}
	raw_spin_unlock(&acct->lock);

	return false;
}

static void io_wq_cancel_pending_work(struct io_wq *wq,
				      struct io_cb_cancel_data *match)
{
	int i;
retry:
	for (i = 0; i < IO_WQ_ACCT_NR; i++) {
		struct io_wq_acct *acct = io_get_acct(wq, i == 0);

		if (io_acct_cancel_pending_work(wq, acct, match)) {
			if (match->cancel_all)
				goto retry;
			break;
		}
	}
}

static void io_wq_cancel_running_work(struct io_wq *wq,
				      struct io_cb_cancel_data *match)
{
	rcu_read_lock();
	io_wq_for_each_worker(wq, io_wq_worker_cancel, match);
	rcu_read_unlock();
}

enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
				  void *data, bool cancel_all)
{
	struct io_cb_cancel_data match = {
		.fn = cancel,
		.data = data,
		.cancel_all = cancel_all,
	};

	/*
	 * First check pending list, if we're lucky we can just remove it
	 * from there. CANCEL_OK means that the work is returned as-new,
	 * no completion will be posted for it.
	 *
	 * Then check if a free (going busy) or busy worker has the work
	 * currently running. If we find it there, we'll return CANCEL_RUNNING
	 * as an indication that we attempt to signal cancellation. The
	 * completion will run normally in this case.
	 *
	 * Do both of these while holding the wq->lock, to ensure that
	 * we'll find a work item regardless of state.
	 */
	io_wq_cancel_pending_work(wq, &match);
	if (match.nr_pending && !match.cancel_all)
		return IO_WQ_CANCEL_OK;

	raw_spin_lock(&wq->lock);
	io_wq_cancel_running_work(wq, &match);
	raw_spin_unlock(&wq->lock);
	if (match.nr_running && !match.cancel_all)
		return IO_WQ_CANCEL_RUNNING;

	if (match.nr_running)
		return IO_WQ_CANCEL_RUNNING;
	if (match.nr_pending)
		return IO_WQ_CANCEL_OK;
	return IO_WQ_CANCEL_NOTFOUND;
}
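
/*
 * A match callback just inspects a work item against the caller's data.
 * As a sketch (io_kiocb embedding is an io_uring detail, assumed here
 * for illustration), cancelling everything owned by one context could
 * look like:
 *
 *	static bool match_ctx(struct io_wq_work *work, void *data)
 *	{
 *		return container_of(work, struct io_kiocb, work)->ctx == data;
 *	}
 *
 *	io_wq_cancel_cb(wq, match_ctx, ctx, true);
 */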

static int io_wq_hash_wake(struct wait_queue_entry *wait, unsigned mode,
			   int sync, void *key)
{
	struct io_wq *wq = container_of(wait, struct io_wq, wait);
	int i;

	list_del_init(&wait->entry);

	rcu_read_lock();
	for (i = 0; i < IO_WQ_ACCT_NR; i++) {
		struct io_wq_acct *acct = &wq->acct[i];

		if (test_and_clear_bit(IO_ACCT_STALLED_BIT, &acct->flags))
			io_wq_activate_free_worker(wq, acct);
	}
	rcu_read_unlock();
	return 1;
}

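/*
 * Create an io-wq instance for the task in @data, with @bounded capping
 * how many workers may run bounded (e.g. regular file I/O) work at
 * once. Workers themselves are created lazily, on first enqueue.
 */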
struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
{
	int ret, i;
	struct io_wq *wq;

	if (WARN_ON_ONCE(!data->free_work || !data->do_work))
		return ERR_PTR(-EINVAL);
	if (WARN_ON_ONCE(!bounded))
		return ERR_PTR(-EINVAL);

	wq = kzalloc(sizeof(struct io_wq), GFP_KERNEL);
	if (!wq)
		return ERR_PTR(-ENOMEM);
	ret = cpuhp_state_add_instance_nocalls(io_wq_online, &wq->cpuhp_node);
	if (ret)
		goto err_wq;

	refcount_inc(&data->hash->refs);
	wq->hash = data->hash;
	wq->free_work = data->free_work;
	wq->do_work = data->do_work;

	ret = -ENOMEM;

	if (!alloc_cpumask_var(&wq->cpu_mask, GFP_KERNEL))
		goto err;
	cpumask_copy(wq->cpu_mask, cpu_possible_mask);
	wq->acct[IO_WQ_ACCT_BOUND].max_workers = bounded;
	wq->acct[IO_WQ_ACCT_UNBOUND].max_workers =
				task_rlimit(current, RLIMIT_NPROC);
	INIT_LIST_HEAD(&wq->wait.entry);
	wq->wait.func = io_wq_hash_wake;
	for (i = 0; i < IO_WQ_ACCT_NR; i++) {
		struct io_wq_acct *acct = &wq->acct[i];

		acct->index = i;
		atomic_set(&acct->nr_running, 0);
		INIT_WQ_LIST(&acct->work_list);
		raw_spin_lock_init(&acct->lock);
	}

	raw_spin_lock_init(&wq->lock);
	INIT_HLIST_NULLS_HEAD(&wq->free_list, 0);
	INIT_LIST_HEAD(&wq->all_list);

	wq->task = get_task_struct(data->task);
	atomic_set(&wq->worker_refs, 1);
	init_completion(&wq->worker_done);
	return wq;
err:
	io_wq_put_hash(data->hash);
	cpuhp_state_remove_instance_nocalls(io_wq_online, &wq->cpuhp_node);
	free_cpumask_var(wq->cpu_mask);
err_wq:
	kfree(wq);
	return ERR_PTR(ret);
}

static bool io_task_work_match(struct callback_head *cb, void *data)
{
	struct io_worker *worker;

	if (cb->func != create_worker_cb && cb->func != create_worker_cont)
		return false;
	worker = container_of(cb, struct io_worker, create_work);
	return worker->wq == data;
}

void io_wq_exit_start(struct io_wq *wq)
{
	set_bit(IO_WQ_BIT_EXIT, &wq->state);
}

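/*
 * Cancel any worker-creation task_work still queued on the wq owner
 * task, dropping the references those callbacks would have consumed.
 */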
static void io_wq_cancel_tw_create(struct io_wq *wq)
{
	struct callback_head *cb;

	while ((cb = task_work_cancel_match(wq->task, io_task_work_match, wq)) != NULL) {
		struct io_worker *worker;

		worker = container_of(cb, struct io_worker, create_work);
		io_worker_cancel_cb(worker);
		/*
		 * Only the worker continuation helper (create_worker_cont)
		 * still owns the worker allocation at this point, so only
		 * that case needs the worker freed here.
		 */
		if (cb->func == create_worker_cont)
			kfree(worker);
	}
}

static void io_wq_exit_workers(struct io_wq *wq)
{
	if (!wq->task)
		return;

	io_wq_cancel_tw_create(wq);

	rcu_read_lock();
	io_wq_for_each_worker(wq, io_wq_worker_wake, NULL);
	rcu_read_unlock();
	io_worker_ref_put(wq);
	wait_for_completion(&wq->worker_done);

	spin_lock_irq(&wq->hash->wait.lock);
	list_del_init(&wq->wait.entry);
	spin_unlock_irq(&wq->hash->wait.lock);

	put_task_struct(wq->task);
	wq->task = NULL;
}

static void io_wq_destroy(struct io_wq *wq)
{
	struct io_cb_cancel_data match = {
		.fn = io_wq_work_match_all,
		.cancel_all = true,
	};

	cpuhp_state_remove_instance_nocalls(io_wq_online, &wq->cpuhp_node);
	io_wq_cancel_pending_work(wq, &match);
	free_cpumask_var(wq->cpu_mask);
	io_wq_put_hash(wq->hash);
	kfree(wq);
}

void io_wq_put_and_exit(struct io_wq *wq)
{
	WARN_ON_ONCE(!test_bit(IO_WQ_BIT_EXIT, &wq->state));

	io_wq_exit_workers(wq);
	io_wq_destroy(wq);
}

struct online_data {
	unsigned int cpu;
	bool online;
};

static bool io_wq_worker_affinity(struct io_worker *worker, void *data)
{
	struct online_data *od = data;

	if (od->online)
		cpumask_set_cpu(od->cpu, worker->wq->cpu_mask);
	else
		cpumask_clear_cpu(od->cpu, worker->wq->cpu_mask);
	return false;
}

static int __io_wq_cpu_online(struct io_wq *wq, unsigned int cpu, bool online)
{
	struct online_data od = {
		.cpu = cpu,
		.online = online
	};

	rcu_read_lock();
	io_wq_for_each_worker(wq, io_wq_worker_affinity, &od);
	rcu_read_unlock();
	return 0;
}

static int io_wq_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	struct io_wq *wq = hlist_entry_safe(node, struct io_wq, cpuhp_node);

	return __io_wq_cpu_online(wq, cpu, true);
}

static int io_wq_cpu_offline(unsigned int cpu, struct hlist_node *node)
{
	struct io_wq *wq = hlist_entry_safe(node, struct io_wq, cpuhp_node);

	return __io_wq_cpu_online(wq, cpu, false);
}

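/*
 * Set the CPU mask that newly created workers are affinitized to; a
 * NULL mask restores the default of all possible CPUs. Existing workers
 * are not re-affinitized here.
 */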
int io_wq_cpu_affinity(struct io_wq *wq, cpumask_var_t mask)
{
	rcu_read_lock();
	if (mask)
		cpumask_copy(wq->cpu_mask, mask);
	else
		cpumask_copy(wq->cpu_mask, cpu_possible_mask);
	rcu_read_unlock();

	return 0;
}

/*
 * Set the max number of workers for each accounting class; the old
 * values are returned through new_count. A zero entry leaves that
 * class unchanged and simply reports its old value.
 */
int io_wq_max_workers(struct io_wq *wq, int *new_count)
{
	struct io_wq_acct *acct;
	int prev[IO_WQ_ACCT_NR];
	int i;

	BUILD_BUG_ON((int) IO_WQ_ACCT_BOUND != (int) IO_WQ_BOUND);
	BUILD_BUG_ON((int) IO_WQ_ACCT_UNBOUND != (int) IO_WQ_UNBOUND);
	BUILD_BUG_ON((int) IO_WQ_ACCT_NR != 2);

	for (i = 0; i < IO_WQ_ACCT_NR; i++) {
		if (new_count[i] > task_rlimit(current, RLIMIT_NPROC))
			new_count[i] = task_rlimit(current, RLIMIT_NPROC);
	}

	for (i = 0; i < IO_WQ_ACCT_NR; i++)
		prev[i] = 0;

	rcu_read_lock();

	raw_spin_lock(&wq->lock);
	for (i = 0; i < IO_WQ_ACCT_NR; i++) {
		acct = &wq->acct[i];
		prev[i] = max_t(int, acct->max_workers, prev[i]);
		if (new_count[i])
			acct->max_workers = new_count[i];
	}
	raw_spin_unlock(&wq->lock);
	rcu_read_unlock();

	for (i = 0; i < IO_WQ_ACCT_NR; i++)
		new_count[i] = prev[i];

	return 0;
}
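
/*
 * For example, the current limits can be read back without changing
 * them by passing zeros (a sketch, mirroring what the io_uring
 * IORING_REGISTER_IOWQ_MAX_WORKERS path does):
 *
 *	int counts[IO_WQ_ACCT_NR] = { 0, 0 };
 *
 *	io_wq_max_workers(wq, counts);
 *
 * after which counts[] holds the previous bounded/unbounded maximums.
 */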

static __init int io_wq_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "io-wq/online",
					io_wq_cpu_online, io_wq_cpu_offline);
	if (ret < 0)
		return ret;
	io_wq_online = ret;
	return 0;
}
subsys_initcall(io_wq_init);