Greg Kroah-Hartman | b244131 | 2017-11-01 15:07:57 +0100 | [diff] [blame] | 1 | /* SPDX-License-Identifier: GPL-2.0 */ |
Tejun Heo | ea13844 | 2013-01-18 14:05:55 -0800 | [diff] [blame] | 2 | /* |
| 3 | * kernel/workqueue_internal.h |
| 4 | * |
| 5 | * Workqueue internal header file. Only to be included by workqueue and |
| 6 | * core kernel subsystems. |
| 7 | */ |
| 8 | #ifndef _KERNEL_WORKQUEUE_INTERNAL_H |
| 9 | #define _KERNEL_WORKQUEUE_INTERNAL_H |
| 10 | |
Tejun Heo | 2eaebdb3 | 2013-01-18 14:05:55 -0800 | [diff] [blame] | 11 | #include <linux/workqueue.h> |
Tejun Heo | 84b233a | 2013-01-18 14:05:56 -0800 | [diff] [blame] | 12 | #include <linux/kthread.h> |
Li Bin | cef572a | 2017-10-28 11:07:28 +0800 | [diff] [blame] | 13 | #include <linux/preempt.h> |
Tejun Heo | 2eaebdb3 | 2013-01-18 14:05:55 -0800 | [diff] [blame] | 14 | |
Tejun Heo | 2eaebdb3 | 2013-01-18 14:05:55 -0800 | [diff] [blame] | 15 | struct worker_pool; |
| 16 | |
| 17 | /* |
| 18 | * The poor guys doing the actual heavy lifting. All on-duty workers are |
| 19 | * either serving the manager role, on idle list or on busy hash. For |
| 20 | * details on the locking annotation (L, I, X...), refer to workqueue.c. |
| 21 | * |
| 22 | * Only to be used in workqueue and async. |
| 23 | */ |
struct worker {
	/* on idle list while idle, on busy hash table while busy */
	union {
		struct list_head	entry;	/* L: while idle */
		struct hlist_node	hentry;	/* L: while busy */
	};

	struct work_struct	*current_work;	/* K: work being processed and its */
	work_func_t		current_func;	/* K: function */
	struct pool_workqueue	*current_pwq;	/* K: pwq */
	u64			current_at;	/* K: runtime at start or last wakeup */
	unsigned int		current_color;	/* K: color */

	int			sleeping;	/* S: is worker sleeping? */

	/* used by the scheduler to determine a worker's last known identity */
	work_func_t		last_func;	/* K: last work's fn */

	struct list_head	scheduled;	/* L: scheduled works */

	struct task_struct	*task;		/* I: worker task */
	struct worker_pool	*pool;		/* A: the associated pool */
						/* L: for rescuers */
	struct list_head	node;		/* A: anchored at pool->workers */
						/* A: runs through worker->node */

	unsigned long		last_active;	/* K: last active timestamp */
	unsigned int		flags;		/* L: flags */
	int			id;		/* I: worker id */

	/*
	 * Opaque string set with work_set_desc(). Printed out with task
	 * dump for debugging - WARN, BUG, panic or sysrq.
	 */
	char			desc[WORKER_DESC_LEN];

	/* used only by rescuers to point to the target workqueue */
	struct workqueue_struct	*rescue_wq;	/* I: the workqueue to rescue */
};
| 63 | |
Tejun Heo | 84b233a | 2013-01-18 14:05:56 -0800 | [diff] [blame] | 64 | /** |
| 65 | * current_wq_worker - return struct worker if %current is a workqueue worker |
| 66 | */ |
| 67 | static inline struct worker *current_wq_worker(void) |
| 68 | { |
Li Bin | cef572a | 2017-10-28 11:07:28 +0800 | [diff] [blame] | 69 | if (in_task() && (current->flags & PF_WQ_WORKER)) |
Tejun Heo | 84b233a | 2013-01-18 14:05:56 -0800 | [diff] [blame] | 70 | return kthread_data(current); |
| 71 | return NULL; |
| 72 | } |
| 73 | |
/*
 * Scheduler hooks for concurrency managed workqueue. Only to be used from
 * sched/ and workqueue.c.
 */
/* called when a worker task wakes up / is about to sleep */
void wq_worker_running(struct task_struct *task);
void wq_worker_sleeping(struct task_struct *task);
/* called from the scheduler tick for a worker task */
void wq_worker_tick(struct task_struct *task);
/* last work fn the worker ran (worker->last_func); used by sched/ */
work_func_t wq_worker_last_func(struct task_struct *task);
Tejun Heo | ea13844 | 2013-01-18 14:05:55 -0800 | [diff] [blame] | 82 | |
| 83 | #endif /* _KERNEL_WORKQUEUE_INTERNAL_H */ |