// SPDX-License-Identifier: GPL-2.0
#include <linux/spinlock.h>
#include <linux/task_work.h>
#include <linux/resume_user_mode.h>

static struct callback_head work_exited; /* all we need is ->next == NULL */

/**
 * task_work_add - ask the @task to execute @work->func()
 * @task: the task which should run the callback
 * @work: the callback to run
 * @notify: how to notify the targeted task
 *
 * Queue @work for task_work_run() below and notify the @task if @notify
 * is @TWA_RESUME, @TWA_SIGNAL, or @TWA_SIGNAL_NO_IPI.
 *
 * @TWA_SIGNAL works like signals, in that it will interrupt the targeted
 * task and run the task_work, regardless of whether the task is currently
 * running in the kernel or userspace.
 * @TWA_SIGNAL_NO_IPI works like @TWA_SIGNAL, except it doesn't send a
 * reschedule IPI to force the targeted task to reschedule and run task_work.
 * This can be advantageous if there's no strict requirement that the
 * task_work be run as soon as possible, just whenever the task enters the
 * kernel anyway.
 * @TWA_RESUME work is run only when the task exits the kernel and returns to
 * user mode, or before entering guest mode.
 *
 * Fails if the @task is exiting/exited and thus it can't process this @work.
 * Otherwise @work->func() will be called when the @task goes through one of
 * the aforementioned transitions, or exits.
 *
 * If the targeted task is exiting, then an error is returned and the work item
 * is not queued. It's up to the caller to arrange for an alternative mechanism
 * in that case.
 *
 * Note: there is no ordering guarantee on works queued here. The task_work
 * list is LIFO.
 *
 * RETURNS:
 * 0 on success or -ESRCH.
 */
int task_work_add(struct task_struct *task, struct callback_head *work,
                  enum task_work_notify_mode notify)
{
        struct callback_head *head;

        /* record the work call stack in order to print it in KASAN reports */
        kasan_record_aux_stack(work);

        head = READ_ONCE(task->task_works);
        do {
                if (unlikely(head == &work_exited))
                        return -ESRCH;
                work->next = head;
        } while (!try_cmpxchg(&task->task_works, &head, work));

        switch (notify) {
        case TWA_NONE:
                break;
        case TWA_RESUME:
                set_notify_resume(task);
                break;
        case TWA_SIGNAL:
                set_notify_signal(task);
                break;
        case TWA_SIGNAL_NO_IPI:
                __set_notify_signal(task);
                break;
        default:
                WARN_ON_ONCE(1);
                break;
        }

        return 0;
}
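
/*
 * Example usage (illustrative sketch only, not part of this file): a
 * hypothetical caller embeds a callback_head in its own object, queues it
 * with TWA_RESUME, and has the callback run in the target task's context
 * the next time that task returns to user mode. All my_* names below are
 * made up for illustration.
 *
 *        struct my_deferred_work {
 *                struct callback_head cb;
 *                int value;
 *        };
 *
 *        static void my_work_func(struct callback_head *cb)
 *        {
 *                struct my_deferred_work *w =
 *                        container_of(cb, struct my_deferred_work, cb);
 *
 *                pr_info("deferred value %d\n", w->value);
 *                kfree(w);
 *        }
 *
 *        static int my_queue_work(struct task_struct *task, int value)
 *        {
 *                struct my_deferred_work *w = kzalloc(sizeof(*w), GFP_KERNEL);
 *
 *                if (!w)
 *                        return -ENOMEM;
 *                w->value = value;
 *                init_task_work(&w->cb, my_work_func);
 *                if (task_work_add(task, &w->cb, TWA_RESUME)) {
 *                        kfree(w);        // task is already exiting
 *                        return -ESRCH;
 *                }
 *                return 0;
 *        }
 */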

/**
 * task_work_cancel_match - cancel a pending work added by task_work_add()
 * @task: the task which should execute the work
 * @match: match function to call
 * @data: data to be passed to the @match function
 *
 * RETURNS:
 * The found work or NULL if not found.
 */
struct callback_head *
task_work_cancel_match(struct task_struct *task,
                       bool (*match)(struct callback_head *, void *data),
                       void *data)
{
        struct callback_head **pprev = &task->task_works;
        struct callback_head *work;
        unsigned long flags;

        if (likely(!task_work_pending(task)))
                return NULL;
        /*
         * If cmpxchg() fails we continue without updating pprev.
         * Either we raced with task_work_add() which added a new entry
         * before this work, in which case we will find it again, or we
         * raced with task_work_run() and *pprev is NULL/exited.
         */
        raw_spin_lock_irqsave(&task->pi_lock, flags);
        work = READ_ONCE(*pprev);
        while (work) {
                if (!match(work, data)) {
                        pprev = &work->next;
                        work = READ_ONCE(*pprev);
                } else if (try_cmpxchg(pprev, &work, work->next))
                        break;
        }
        raw_spin_unlock_irqrestore(&task->pi_lock, flags);

        return work;
}
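
/*
 * Example (illustrative sketch only, not part of this file): a hypothetical
 * caller can use task_work_cancel_match() to cancel one specific queued
 * instance, rather than the last pending work with a given function, by
 * matching on the callback_head pointer itself. The my_* names are made up.
 *
 *        static bool my_match_exact(struct callback_head *cb, void *data)
 *        {
 *                return cb == data;
 *        }
 *
 *        // Returns true if @cb was still pending and has now been removed.
 *        static bool my_cancel_exact(struct task_struct *task,
 *                                    struct callback_head *cb)
 *        {
 *                return task_work_cancel_match(task, my_match_exact, cb) == cb;
 *        }
 */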

static bool task_work_func_match(struct callback_head *cb, void *data)
{
        return cb->func == data;
}

/**
 * task_work_cancel - cancel a pending work added by task_work_add()
 * @task: the task which should execute the work
 * @func: identifies the work to remove
 *
 * Find the last queued pending work with ->func == @func and remove
 * it from the queue.
 *
 * RETURNS:
 * The found work or NULL if not found.
 */
struct callback_head *
task_work_cancel(struct task_struct *task, task_work_func_t func)
{
        return task_work_cancel_match(task, task_work_func_match, func);
}
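
/*
 * Example (illustrative sketch only, not part of this file): continuing the
 * hypothetical my_deferred_work sketch above, a non-NULL return from
 * task_work_cancel() means the callback will never run, so the caller owns
 * the containing object again and must free it itself.
 *
 *        static void my_cancel_work(struct task_struct *task)
 *        {
 *                struct callback_head *cb;
 *
 *                cb = task_work_cancel(task, my_work_func);
 *                if (cb)
 *                        kfree(container_of(cb, struct my_deferred_work, cb));
 *        }
 */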

/**
 * task_work_run - execute the works added by task_work_add()
 *
 * Flush the pending works. Should be used by the core kernel code.
 * Called before the task returns to user mode or stops, or when
 * it exits. In the latter case task_work_add() can no longer add
 * new work after task_work_run() returns.
 */
void task_work_run(void)
{
        struct task_struct *task = current;
        struct callback_head *work, *head, *next;

        for (;;) {
                /*
                 * work->func() can do task_work_add(), do not set
                 * work_exited unless the list is empty.
                 */
                work = READ_ONCE(task->task_works);
                do {
                        head = NULL;
                        if (!work) {
                                if (task->flags & PF_EXITING)
                                        head = &work_exited;
                                else
                                        break;
                        }
                } while (!try_cmpxchg(&task->task_works, &work, head));

                if (!work)
                        break;
                /*
                 * Synchronize with task_work_cancel(): it cannot remove
                 * the first entry (== work), since the cmpxchg() on
                 * task_works must fail. But it can remove another entry
                 * from the ->next list.
                 */
                raw_spin_lock_irq(&task->pi_lock);
                raw_spin_unlock_irq(&task->pi_lock);

                do {
                        next = work->next;
                        work->func(work);
                        work = next;
                        cond_resched();
                } while (work);
        }
}
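
/*
 * Illustrative sketch (not part of this file): as the comment above notes,
 * the core kernel flushes pending work before the task returns to user mode
 * and when it exits. A simplified caller would look roughly like the
 * hypothetical helper below; the real call sites live in the entry/exit code.
 *
 *        static void my_flush_task_work(void)
 *        {
 *                if (task_work_pending(current))
 *                        task_work_run();
 *        }
 */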