/*
 * Detect Hung Task
 *
 * kernel/hung_task.c - kernel thread for detecting tasks stuck in D state
 *
 */

#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/lockdep.h>
#include <linux/export.h>
#include <linux/sysctl.h>
#include <linux/utsname.h>
#include <linux/sched/signal.h>
#include <linux/sched/debug.h>

#include <trace/events/sched.h>

/*
 * The number of tasks checked:
 */
int __read_mostly sysctl_hung_task_check_count = PID_MAX_LIMIT;

/*
 * Limit number of tasks checked in a batch.
 *
 * This value controls the preemptibility of khungtaskd since preemption
 * is disabled during the critical section. It also controls the size of
 * the RCU grace period. So it needs an upper bound.
 */
#define HUNG_TASK_BATCHING 1024

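/*
 * Note (illustrative cross-reference): with the default check count of
 * PID_MAX_LIMIT above, a single scan may visit a huge number of tasks,
 * so check_hung_uninterruptible_tasks() below drops and re-acquires the
 * RCU read lock via rcu_lock_break() every HUNG_TASK_BATCHING tasks.
 */
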
/*
 * Zero means infinite timeout - no checking done:
 */
unsigned long __read_mostly sysctl_hung_task_timeout_secs = CONFIG_DEFAULT_HUNG_TASK_TIMEOUT;

int __read_mostly sysctl_hung_task_warnings = 10;

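/*
 * Example (illustrative): these knobs are exported through the sysctl
 * table, so they can be tuned at runtime, e.g.:
 *
 *   echo 300 > /proc/sys/kernel/hung_task_timeout_secs  # warn after 5 min
 *   echo 0   > /proc/sys/kernel/hung_task_timeout_secs  # disable checking
 *   echo -1  > /proc/sys/kernel/hung_task_warnings      # warn without limit
 */
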
static int __read_mostly did_panic;
static bool hung_task_show_lock;
static bool hung_task_call_panic;

static struct task_struct *watchdog_task;

/*
 * Should we panic (and reboot, if panic_timeout= is set) when a
 * hung task is detected:
 */
unsigned int __read_mostly sysctl_hung_task_panic =
	CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE;

static int __init hung_task_panic_setup(char *str)
{
	int rc = kstrtouint(str, 0, &sysctl_hung_task_panic);

	if (rc)
		return rc;
	return 1;
}
__setup("hung_task_panic=", hung_task_panic_setup);

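/*
 * Example (illustrative): the same behaviour can be requested at boot
 * time by adding "hung_task_panic=1" to the kernel command line; the
 * __setup() handler above parses it into sysctl_hung_task_panic.
 */
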
static int
hung_task_panic(struct notifier_block *this, unsigned long event, void *ptr)
{
	did_panic = 1;

	return NOTIFY_DONE;
}

static struct notifier_block panic_block = {
	.notifier_call = hung_task_panic,
};

static void check_hung_task(struct task_struct *t, unsigned long timeout)
{
	unsigned long switch_count = t->nvcsw + t->nivcsw;

	/*
	 * Ensure the task is not frozen.
	 * Also, skip vfork and any other user process that freezer should skip.
	 */
	if (unlikely(t->flags & (PF_FROZEN | PF_FREEZER_SKIP)))
		return;

	/*
	 * When a freshly created task is scheduled once and changes its
	 * state to TASK_UNINTERRUPTIBLE without ever having been switched
	 * out, it mustn't be checked.
	 */
	if (unlikely(!switch_count))
		return;

	if (switch_count != t->last_switch_count) {
		t->last_switch_count = switch_count;
		return;
	}

	trace_sched_process_hang(t);

	if (!sysctl_hung_task_warnings && !sysctl_hung_task_panic)
		return;

	/*
	 * Ok, the task did not get scheduled for more than the configured
	 * timeout (2 minutes by default), complain:
	 */
	if (sysctl_hung_task_warnings) {
		if (sysctl_hung_task_warnings > 0)
			sysctl_hung_task_warnings--;
		pr_err("INFO: task %s:%d blocked for more than %ld seconds.\n",
		       t->comm, t->pid, timeout);
		pr_err("      %s %s %.*s\n",
			print_tainted(), init_utsname()->release,
			(int)strcspn(init_utsname()->version, " "),
			init_utsname()->version);
		pr_err("\"echo 0 > /proc/sys/kernel/hung_task_timeout_secs\""
			" disables this message.\n");
		sched_show_task(t);
		hung_task_show_lock = true;
	}

	touch_nmi_watchdog();

	if (sysctl_hung_task_panic) {
		hung_task_show_lock = true;
		hung_task_call_panic = true;
	}
}

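/*
 * Example (illustrative, task name/pid and kernel version made up) of
 * the report the function above emits with the default 120s timeout:
 *
 *   INFO: task nfsd:1234 blocked for more than 120 seconds.
 *         Not tainted 4.17.0 #1
 *   "echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
 *
 * sched_show_task() then appends the blocked task's stack trace.
 */
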
/*
 * To avoid extending the RCU grace period for an unbounded amount of time,
 * periodically exit the critical section and enter a new one.
 *
 * For preemptible RCU it is sufficient to call rcu_read_unlock in order
 * to exit the grace period. For classic RCU, a reschedule is required.
 */
static bool rcu_lock_break(struct task_struct *g, struct task_struct *t)
{
	bool can_cont;

	get_task_struct(g);
	get_task_struct(t);
	rcu_read_unlock();
	cond_resched();
	rcu_read_lock();
	can_cont = pid_alive(g) && pid_alive(t);
	put_task_struct(t);
	put_task_struct(g);

	return can_cont;
}

/*
 * Check whether any TASK_UNINTERRUPTIBLE task has gone unwoken for a
 * really long time (120 seconds by default). If so, print out a
 * warning.
 */
static void check_hung_uninterruptible_tasks(unsigned long timeout)
{
	int max_count = sysctl_hung_task_check_count;
	int batch_count = HUNG_TASK_BATCHING;
	struct task_struct *g, *t;

	/*
	 * If the system crashed already then all bets are off,
	 * do not report extra hung tasks:
	 */
	if (test_taint(TAINT_DIE) || did_panic)
		return;

	hung_task_show_lock = false;
	rcu_read_lock();
	for_each_process_thread(g, t) {
		if (!max_count--)
			goto unlock;
		if (!--batch_count) {
			batch_count = HUNG_TASK_BATCHING;
			if (!rcu_lock_break(g, t))
				goto unlock;
		}
		/* use "==" to skip the TASK_KILLABLE tasks waiting on NFS */
		if (t->state == TASK_UNINTERRUPTIBLE)
			check_hung_task(t, timeout);
	}
 unlock:
	rcu_read_unlock();
	if (hung_task_show_lock)
		debug_show_all_locks();
	if (hung_task_call_panic) {
		trigger_all_cpu_backtrace();
		panic("hung_task: blocked tasks");
	}
}

static long hung_timeout_jiffies(unsigned long last_checked,
				 unsigned long timeout)
{
	/* timeout of 0 will disable the watchdog */
	return timeout ? last_checked - jiffies + timeout * HZ :
		MAX_SCHEDULE_TIMEOUT;
}

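/*
 * Worked example (illustrative): with timeout = 120 and HZ = 100, if the
 * last check ran 30 seconds (3000 jiffies) ago, the function above returns
 * 120 * 100 - 3000 = 9000 jiffies, i.e. sleep for another 90 seconds.
 * Once the full period has elapsed the result is <= 0 and the caller
 * checks immediately.
 */
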
/*
 * Process an update of the timeout sysctl and kick the watchdog thread
 * so that a changed timeout takes effect immediately:
 */
int proc_dohung_task_timeout_secs(struct ctl_table *table, int write,
				  void __user *buffer,
				  size_t *lenp, loff_t *ppos)
{
	int ret;

	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);

	if (ret || !write)
		goto out;

	wake_up_process(watchdog_task);

 out:
	return ret;
}

static atomic_t reset_hung_task = ATOMIC_INIT(0);

void reset_hung_task_detector(void)
{
	atomic_set(&reset_hung_task, 1);
}
EXPORT_SYMBOL_GPL(reset_hung_task_detector);

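/*
 * Example (illustrative): code that knows tasks were legitimately stalled,
 * e.g. a watchdog or virtualization driver resuming after a long pause,
 * can call reset_hung_task_detector() so that khungtaskd skips its next
 * scan instead of reporting false positives:
 *
 *   reset_hung_task_detector();
 */
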
/*
 * kthread which checks for tasks stuck in D state
 */
static int watchdog(void *dummy)
{
	unsigned long hung_last_checked = jiffies;

	set_user_nice(current, 0);

	for ( ; ; ) {
		unsigned long timeout = sysctl_hung_task_timeout_secs;
		long t = hung_timeout_jiffies(hung_last_checked, timeout);

		if (t <= 0) {
			if (!atomic_xchg(&reset_hung_task, 0))
				check_hung_uninterruptible_tasks(timeout);
			hung_last_checked = jiffies;
			continue;
		}
		schedule_timeout_interruptible(t);
	}

	return 0;
}

static int __init hung_task_init(void)
{
	atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
	watchdog_task = kthread_run(watchdog, NULL, "khungtaskd");

	return 0;
}
subsys_initcall(hung_task_init);