blob: 4fad0e6fca6447d72388e7ea44e90f34924beb03 [file] [log] [blame]
Thomas Gleixner457c8992019-05-19 13:08:55 +01001// SPDX-License-Identifier: GPL-2.0-only
Matt Helsley8174f152008-10-18 20:27:19 -07002/*
3 * kernel/freezer.c - Function to freeze a process
4 *
5 * Originally from kernel/power/process.c
6 */
7
8#include <linux/interrupt.h>
9#include <linux/suspend.h>
Paul Gortmaker9984de12011-05-23 14:51:41 -040010#include <linux/export.h>
Matt Helsley8174f152008-10-18 20:27:19 -070011#include <linux/syscalls.h>
12#include <linux/freezer.h>
Tejun Heo8a32c442011-11-21 12:32:23 -080013#include <linux/kthread.h>
Matt Helsley8174f152008-10-18 20:27:19 -070014
/* total number of freezing conditions in effect */
DEFINE_STATIC_KEY_FALSE(freezer_active);
EXPORT_SYMBOL(freezer_active);

/*
 * indicate whether PM freezing is in effect, protected by
 * system_transition_mutex
 */
bool pm_freezing;
bool pm_nosig_freezing;

/* protects freezing and frozen transitions */
static DEFINE_SPINLOCK(freezer_lock);
Matt Helsley8174f152008-10-18 20:27:19 -070028
Tejun Heoa3201222011-11-21 12:32:25 -080029/**
30 * freezing_slow_path - slow path for testing whether a task needs to be frozen
31 * @p: task to be tested
32 *
Peter Zijlstraf5d39b02022-08-22 13:18:22 +020033 * This function is called by freezing() if freezer_active isn't zero
Tejun Heoa3201222011-11-21 12:32:25 -080034 * and tests whether @p needs to enter and stay in frozen state. Can be
35 * called under any context. The freezers are responsible for ensuring the
36 * target tasks see the updated state.
37 */
38bool freezing_slow_path(struct task_struct *p)
39{
Colin Cross2b44c4d2013-07-24 17:41:33 -070040 if (p->flags & (PF_NOFREEZE | PF_SUSPEND_TASK))
Tejun Heoa3201222011-11-21 12:32:25 -080041 return false;
42
Michal Hockoa34c80a2016-07-28 15:45:16 -070043 if (test_tsk_thread_flag(p, TIF_MEMDIE))
Cong Wang51fae6da2014-10-21 09:27:12 +020044 return false;
45
Tejun Heoa3201222011-11-21 12:32:25 -080046 if (pm_nosig_freezing || cgroup_freezing(p))
47 return true;
48
Tejun Heo34b087e2011-11-23 09:28:17 -080049 if (pm_freezing && !(p->flags & PF_KTHREAD))
Tejun Heoa3201222011-11-21 12:32:25 -080050 return true;
51
52 return false;
53}
54EXPORT_SYMBOL(freezing_slow_path);
55
Peter Zijlstraf5d39b02022-08-22 13:18:22 +020056bool frozen(struct task_struct *p)
57{
58 return READ_ONCE(p->__state) & TASK_FROZEN;
59}
60
Matt Helsley8174f152008-10-18 20:27:19 -070061/* Refrigerator is place where frozen processes are stored :-). */
Tejun Heo8a32c442011-11-21 12:32:23 -080062bool __refrigerator(bool check_kthr_stop)
Matt Helsley8174f152008-10-18 20:27:19 -070063{
Peter Zijlstraf5d39b02022-08-22 13:18:22 +020064 unsigned int state = get_current_state();
Tejun Heoa0acae02011-11-21 12:32:22 -080065 bool was_frozen = false;
Matt Helsley8174f152008-10-18 20:27:19 -070066
Matt Helsley8174f152008-10-18 20:27:19 -070067 pr_debug("%s entered refrigerator\n", current->comm);
68
Peter Zijlstraf5d39b02022-08-22 13:18:22 +020069 WARN_ON_ONCE(state && !(state & TASK_NORMAL));
70
Matt Helsley8174f152008-10-18 20:27:19 -070071 for (;;) {
Peter Zijlstraf5d39b02022-08-22 13:18:22 +020072 bool freeze;
73
74 set_current_state(TASK_FROZEN);
Tejun Heo5ece3ea2011-11-21 12:32:26 -080075
76 spin_lock_irq(&freezer_lock);
Peter Zijlstraf5d39b02022-08-22 13:18:22 +020077 freeze = freezing(current) && !(check_kthr_stop && kthread_should_stop());
Tejun Heo5ece3ea2011-11-21 12:32:26 -080078 spin_unlock_irq(&freezer_lock);
79
Peter Zijlstraf5d39b02022-08-22 13:18:22 +020080 if (!freeze)
Matt Helsley8174f152008-10-18 20:27:19 -070081 break;
Peter Zijlstraf5d39b02022-08-22 13:18:22 +020082
Tejun Heoa0acae02011-11-21 12:32:22 -080083 was_frozen = true;
Matt Helsley8174f152008-10-18 20:27:19 -070084 schedule();
85 }
Peter Zijlstraf5d39b02022-08-22 13:18:22 +020086 __set_current_state(TASK_RUNNING);
Thomas Gleixner6301cb92009-07-17 14:15:47 +020087
Matt Helsley8174f152008-10-18 20:27:19 -070088 pr_debug("%s left refrigerator\n", current->comm);
Tejun Heo50fb4f7f2011-11-21 12:32:22 -080089
Tejun Heoa0acae02011-11-21 12:32:22 -080090 return was_frozen;
Matt Helsley8174f152008-10-18 20:27:19 -070091}
Tejun Heoa0acae02011-11-21 12:32:22 -080092EXPORT_SYMBOL(__refrigerator);
Matt Helsley8174f152008-10-18 20:27:19 -070093
/* Kick @p as if a signal had arrived, without actually queueing one. */
static void fake_signal_wake_up(struct task_struct *p)
{
	unsigned long flags;

	if (!lock_task_sighand(p, &flags))
		return;

	signal_wake_up(p, 0);
	unlock_task_sighand(p, &flags);
}
103
Peter Zijlstraf5d39b02022-08-22 13:18:22 +0200104static int __set_task_frozen(struct task_struct *p, void *arg)
105{
106 unsigned int state = READ_ONCE(p->__state);
107
108 if (p->on_rq)
109 return 0;
110
111 if (p != current && task_curr(p))
112 return 0;
113
114 if (!(state & (TASK_FREEZABLE | __TASK_STOPPED | __TASK_TRACED)))
115 return 0;
116
117 /*
118 * Only TASK_NORMAL can be augmented with TASK_FREEZABLE, since they
119 * can suffer spurious wakeups.
120 */
121 if (state & TASK_FREEZABLE)
122 WARN_ON_ONCE(!(state & TASK_NORMAL));
123
124#ifdef CONFIG_LOCKDEP
125 /*
126 * It's dangerous to freeze with locks held; there be dragons there.
127 */
128 if (!(state & __TASK_FREEZABLE_UNSAFE))
129 WARN_ON_ONCE(debug_locks && p->lockdep_depth);
130#endif
131
132 WRITE_ONCE(p->__state, TASK_FROZEN);
133 return TASK_FROZEN;
134}
135
136static bool __freeze_task(struct task_struct *p)
137{
138 /* TASK_FREEZABLE|TASK_STOPPED|TASK_TRACED -> TASK_FROZEN */
139 return task_call_func(p, __set_task_frozen, NULL);
140}
141
Matt Helsley8174f152008-10-18 20:27:19 -0700142/**
Tejun Heo839e3402011-11-21 12:32:26 -0800143 * freeze_task - send a freeze request to given task
144 * @p: task to send the request to
Matt Helsley8174f152008-10-18 20:27:19 -0700145 *
Marcos Paulo de Souza37f08be2012-02-21 23:57:47 +0100146 * If @p is freezing, the freeze request is sent either by sending a fake
147 * signal (if it's not a kernel thread) or waking it up (if it's a kernel
148 * thread).
Tejun Heo839e3402011-11-21 12:32:26 -0800149 *
150 * RETURNS:
151 * %false, if @p is not freezing or already frozen; %true, otherwise
Matt Helsley8174f152008-10-18 20:27:19 -0700152 */
Tejun Heo839e3402011-11-21 12:32:26 -0800153bool freeze_task(struct task_struct *p)
Matt Helsley8174f152008-10-18 20:27:19 -0700154{
Tejun Heo0c9af092011-11-21 12:32:24 -0800155 unsigned long flags;
Matt Helsley8174f152008-10-18 20:27:19 -0700156
Tejun Heo0c9af092011-11-21 12:32:24 -0800157 spin_lock_irqsave(&freezer_lock, flags);
Peter Zijlstraf5d39b02022-08-22 13:18:22 +0200158 if (!freezing(p) || frozen(p) || __freeze_task(p)) {
Tejun Heoa3201222011-11-21 12:32:25 -0800159 spin_unlock_irqrestore(&freezer_lock, flags);
160 return false;
161 }
Matt Helsley8174f152008-10-18 20:27:19 -0700162
Jens Axboed3dc04c2021-03-25 18:22:11 -0600163 if (!(p->flags & PF_KTHREAD))
Tejun Heo8cfe4002010-11-26 23:07:27 +0100164 fake_signal_wake_up(p);
Oleg Nesterov5d8f72b2012-10-26 19:46:06 +0200165 else
Peter Zijlstraf5d39b02022-08-22 13:18:22 +0200166 wake_up_state(p, TASK_NORMAL);
Tejun Heoa3201222011-11-21 12:32:25 -0800167
Tejun Heo0c9af092011-11-21 12:32:24 -0800168 spin_unlock_irqrestore(&freezer_lock, flags);
Tejun Heoa3201222011-11-21 12:32:25 -0800169 return true;
Matt Helsley8174f152008-10-18 20:27:19 -0700170}
171
Peter Zijlstraf5d39b02022-08-22 13:18:22 +0200172/*
173 * The special task states (TASK_STOPPED, TASK_TRACED) keep their canonical
174 * state in p->jobctl. If either of them got a wakeup that was missed because
175 * TASK_FROZEN, then their canonical state reflects that and the below will
176 * refuse to restore the special state and instead issue the wakeup.
177 */
178static int __set_task_special(struct task_struct *p, void *arg)
179{
180 unsigned int state = 0;
181
182 if (p->jobctl & JOBCTL_TRACED)
183 state = TASK_TRACED;
184
185 else if (p->jobctl & JOBCTL_STOPPED)
186 state = TASK_STOPPED;
187
188 if (state)
189 WRITE_ONCE(p->__state, state);
190
191 return state;
192}
193
Tejun Heoa5be2d02011-11-21 12:32:23 -0800194void __thaw_task(struct task_struct *p)
Matt Helsleydc52ddc2008-10-18 20:27:21 -0700195{
Peter Zijlstraf5d39b02022-08-22 13:18:22 +0200196 unsigned long flags, flags2;
Tejun Heoa5be2d02011-11-21 12:32:23 -0800197
Tejun Heo0c9af092011-11-21 12:32:24 -0800198 spin_lock_irqsave(&freezer_lock, flags);
Peter Zijlstraf5d39b02022-08-22 13:18:22 +0200199 if (WARN_ON_ONCE(freezing(p)))
200 goto unlock;
201
202 if (lock_task_sighand(p, &flags2)) {
203 /* TASK_FROZEN -> TASK_{STOPPED,TRACED} */
204 bool ret = task_call_func(p, __set_task_special, NULL);
205 unlock_task_sighand(p, &flags2);
206 if (ret)
207 goto unlock;
208 }
209
210 wake_up_state(p, TASK_FROZEN);
211unlock:
Tejun Heo0c9af092011-11-21 12:32:24 -0800212 spin_unlock_irqrestore(&freezer_lock, flags);
Matt Helsleydc52ddc2008-10-18 20:27:21 -0700213}
Tejun Heo96ee6d82011-11-21 12:32:25 -0800214
215/**
Tejun Heo34b087e2011-11-23 09:28:17 -0800216 * set_freezable - make %current freezable
Tejun Heo96ee6d82011-11-21 12:32:25 -0800217 *
218 * Mark %current freezable and enter refrigerator if necessary.
219 */
Tejun Heo34b087e2011-11-23 09:28:17 -0800220bool set_freezable(void)
Tejun Heo96ee6d82011-11-21 12:32:25 -0800221{
222 might_sleep();
223
224 /*
225 * Modify flags while holding freezer_lock. This ensures the
226 * freezer notices that we aren't frozen yet or the freezing
227 * condition is visible to try_to_freeze() below.
228 */
229 spin_lock_irq(&freezer_lock);
230 current->flags &= ~PF_NOFREEZE;
Tejun Heo96ee6d82011-11-21 12:32:25 -0800231 spin_unlock_irq(&freezer_lock);
232
233 return try_to_freeze();
234}
Tejun Heo34b087e2011-11-23 09:28:17 -0800235EXPORT_SYMBOL(set_freezable);