// SPDX-License-Identifier: GPL-2.0-only
/*
 * kernel/sched/debug.c
 *
 * Print the CFS rbtree and other debugging details
 *
 * Copyright(C) 2007, Red Hat, Inc., Ingo Molnar
 */
#include "sched.h"

static DEFINE_SPINLOCK(sched_debug_lock);

/*
 * This allows printing both to /proc/sched_debug and
 * to the console
 */
#define SEQ_printf(m, x...)                     \
 do {                                           \
        if (m)                                  \
                seq_printf(m, x);               \
        else                                    \
                pr_cont(x);                     \
 } while (0)

/*
 * Ease the printing of nsec fields:
 */
static long long nsec_high(unsigned long long nsec)
{
        if ((long long)nsec < 0) {
                nsec = -nsec;
                do_div(nsec, 1000000);
                return -nsec;
        }
        do_div(nsec, 1000000);

        return nsec;
}

static unsigned long nsec_low(unsigned long long nsec)
{
        if ((long long)nsec < 0)
                nsec = -nsec;

        return do_div(nsec, 1000000);
}

#define SPLIT_NS(x) nsec_high(x), nsec_low(x)
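/*
 * SPLIT_NS() above expands to a value pair for a "%Ld.%06ld" format:
 * whole milliseconds and the nanosecond remainder.
 */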

#define SCHED_FEAT(name, enabled)       \
        #name ,

static const char * const sched_feat_names[] = {
#include "features.h"
};

#undef SCHED_FEAT

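/* Show every scheduler feature flag; disabled features get a "NO_" prefix. */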
static int sched_feat_show(struct seq_file *m, void *v)
{
        int i;

        for (i = 0; i < __SCHED_FEAT_NR; i++) {
                if (!(sysctl_sched_features & (1UL << i)))
                        seq_puts(m, "NO_");
                seq_printf(m, "%s ", sched_feat_names[i]);
        }
        seq_puts(m, "\n");

        return 0;
}

#ifdef CONFIG_JUMP_LABEL

#define jump_label_key__true  STATIC_KEY_INIT_TRUE
#define jump_label_key__false STATIC_KEY_INIT_FALSE

#define SCHED_FEAT(name, enabled)       \
        jump_label_key__##enabled ,

struct static_key sched_feat_keys[__SCHED_FEAT_NR] = {
#include "features.h"
};

#undef SCHED_FEAT

static void sched_feat_disable(int i)
{
        static_key_disable_cpuslocked(&sched_feat_keys[i]);
}

static void sched_feat_enable(int i)
{
        static_key_enable_cpuslocked(&sched_feat_keys[i]);
}
#else
static void sched_feat_disable(int i) { };
static void sched_feat_enable(int i) { };
#endif /* CONFIG_JUMP_LABEL */

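/*
 * Parse one feature name, honouring an optional "NO_" prefix, and flip the
 * matching bit in sysctl_sched_features along with its static key.
 */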
static int sched_feat_set(char *cmp)
{
        int i;
        int neg = 0;

        if (strncmp(cmp, "NO_", 3) == 0) {
                neg = 1;
                cmp += 3;
        }

        i = match_string(sched_feat_names, __SCHED_FEAT_NR, cmp);
        if (i < 0)
                return i;

        if (neg) {
                sysctl_sched_features &= ~(1UL << i);
                sched_feat_disable(i);
        } else {
                sysctl_sched_features |= (1UL << i);
                sched_feat_enable(i);
        }

        return 0;
}

static ssize_t
sched_feat_write(struct file *filp, const char __user *ubuf,
                 size_t cnt, loff_t *ppos)
{
        char buf[64];
        char *cmp;
        int ret;
        struct inode *inode;

        if (cnt > 63)
                cnt = 63;

        if (copy_from_user(&buf, ubuf, cnt))
                return -EFAULT;

        buf[cnt] = 0;
        cmp = strstrip(buf);

        /* Ensure the static_key remains in a consistent state */
        inode = file_inode(filp);
        cpus_read_lock();
        inode_lock(inode);
        ret = sched_feat_set(cmp);
        inode_unlock(inode);
        cpus_read_unlock();
        if (ret < 0)
                return ret;

        *ppos += cnt;

        return cnt;
}

static int sched_feat_open(struct inode *inode, struct file *filp)
{
        return single_open(filp, sched_feat_show, NULL);
}

static const struct file_operations sched_feat_fops = {
        .open           = sched_feat_open,
        .write          = sched_feat_write,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

__read_mostly bool sched_debug_enabled;

static __init int sched_init_debug(void)
{
        debugfs_create_file("sched_features", 0644, NULL, NULL,
                        &sched_feat_fops);

        debugfs_create_bool("sched_debug", 0644, NULL,
                        &sched_debug_enabled);

        return 0;
}
late_initcall(sched_init_debug);

#ifdef CONFIG_SMP

#ifdef CONFIG_SYSCTL

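/*
 * These tables back the /proc/sys/kernel/sched_domain/cpuN/domainM
 * hierarchy: static "kernel" and "sched_domain" directories, with the
 * per-CPU and per-domain entries allocated on demand below.
 */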
static struct ctl_table sd_ctl_dir[] = {
        {
                .procname       = "sched_domain",
                .mode           = 0555,
        },
        {}
};

static struct ctl_table sd_ctl_root[] = {
        {
                .procname       = "kernel",
                .mode           = 0555,
                .child          = sd_ctl_dir,
        },
        {}
};

static struct ctl_table *sd_alloc_ctl_entry(int n)
{
        struct ctl_table *entry =
                kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);

        return entry;
}

static void sd_free_ctl_entry(struct ctl_table **tablep)
{
        struct ctl_table *entry;

        /*
         * In the intermediate directories, both the child directory and
         * procname are dynamically allocated and could fail but the mode
         * will always be set. In the lowest directory the names are
         * static strings and all have proc handlers.
         */
        for (entry = *tablep; entry->mode; entry++) {
                if (entry->child)
                        sd_free_ctl_entry(&entry->child);
                if (entry->proc_handler == NULL)
                        kfree(entry->procname);
        }

        kfree(*tablep);
        *tablep = NULL;
}

static void
set_table_entry(struct ctl_table *entry,
                const char *procname, void *data, int maxlen,
                umode_t mode, proc_handler *proc_handler)
{
        entry->procname = procname;
        entry->data = data;
        entry->maxlen = maxlen;
        entry->mode = mode;
        entry->proc_handler = proc_handler;
}

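/*
 * Build the sysctl table for one sched_domain: eight tunable/attribute
 * entries plus a zeroed terminator.
 */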
static struct ctl_table *
sd_alloc_ctl_domain_table(struct sched_domain *sd)
{
        struct ctl_table *table = sd_alloc_ctl_entry(9);

        if (table == NULL)
                return NULL;

        set_table_entry(&table[0], "min_interval", &sd->min_interval, sizeof(long), 0644, proc_doulongvec_minmax);
        set_table_entry(&table[1], "max_interval", &sd->max_interval, sizeof(long), 0644, proc_doulongvec_minmax);
        set_table_entry(&table[2], "busy_factor", &sd->busy_factor, sizeof(int), 0644, proc_dointvec_minmax);
        set_table_entry(&table[3], "imbalance_pct", &sd->imbalance_pct, sizeof(int), 0644, proc_dointvec_minmax);
        set_table_entry(&table[4], "cache_nice_tries", &sd->cache_nice_tries, sizeof(int), 0644, proc_dointvec_minmax);
        set_table_entry(&table[5], "flags", &sd->flags, sizeof(int), 0444, proc_dointvec_minmax);
        set_table_entry(&table[6], "max_newidle_lb_cost", &sd->max_newidle_lb_cost, sizeof(long), 0644, proc_doulongvec_minmax);
        set_table_entry(&table[7], "name", sd->name, CORENAME_MAX_SIZE, 0444, proc_dostring);
        /* &table[8] is terminator */

        return table;
}

static struct ctl_table *sd_alloc_ctl_cpu_table(int cpu)
{
        struct ctl_table *entry, *table;
        struct sched_domain *sd;
        int domain_num = 0, i;
        char buf[32];

        for_each_domain(cpu, sd)
                domain_num++;
        entry = table = sd_alloc_ctl_entry(domain_num + 1);
        if (table == NULL)
                return NULL;

        i = 0;
        for_each_domain(cpu, sd) {
                snprintf(buf, 32, "domain%d", i);
                entry->procname = kstrdup(buf, GFP_KERNEL);
                entry->mode = 0555;
                entry->child = sd_alloc_ctl_domain_table(sd);
                entry++;
                i++;
        }
        return table;
}

static cpumask_var_t sd_sysctl_cpus;
static struct ctl_table_header *sd_sysctl_header;

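/*
 * (Re)build the sysctl entries for every CPU marked dirty in
 * sd_sysctl_cpus, then register the tree; the function allocates its
 * static state once on first use.
 */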
void register_sched_domain_sysctl(void)
{
        static struct ctl_table *cpu_entries;
        static struct ctl_table **cpu_idx;
        static bool init_done = false;
        char buf[32];
        int i;

        if (!cpu_entries) {
                cpu_entries = sd_alloc_ctl_entry(num_possible_cpus() + 1);
                if (!cpu_entries)
                        return;

                WARN_ON(sd_ctl_dir[0].child);
                sd_ctl_dir[0].child = cpu_entries;
        }

        if (!cpu_idx) {
                struct ctl_table *e = cpu_entries;

                cpu_idx = kcalloc(nr_cpu_ids, sizeof(struct ctl_table*), GFP_KERNEL);
                if (!cpu_idx)
                        return;

                /* deal with sparse possible map */
                for_each_possible_cpu(i) {
                        cpu_idx[i] = e;
                        e++;
                }
        }

        if (!cpumask_available(sd_sysctl_cpus)) {
                if (!alloc_cpumask_var(&sd_sysctl_cpus, GFP_KERNEL))
                        return;
        }

        if (!init_done) {
                init_done = true;
                /* init to possible to not have holes in @cpu_entries */
                cpumask_copy(sd_sysctl_cpus, cpu_possible_mask);
        }

        for_each_cpu(i, sd_sysctl_cpus) {
                struct ctl_table *e = cpu_idx[i];

                if (e->child)
                        sd_free_ctl_entry(&e->child);

                if (!e->procname) {
                        snprintf(buf, 32, "cpu%d", i);
                        e->procname = kstrdup(buf, GFP_KERNEL);
                }
                e->mode = 0555;
                e->child = sd_alloc_ctl_cpu_table(i);

                __cpumask_clear_cpu(i, sd_sysctl_cpus);
        }

        WARN_ON(sd_sysctl_header);
        sd_sysctl_header = register_sysctl_table(sd_ctl_root);
}

void dirty_sched_domain_sysctl(int cpu)
{
        if (cpumask_available(sd_sysctl_cpus))
                __cpumask_set_cpu(cpu, sd_sysctl_cpus);
}

/* may be called multiple times per register */
void unregister_sched_domain_sysctl(void)
{
        unregister_sysctl_table(sd_sysctl_header);
        sd_sysctl_header = NULL;
}
#endif /* CONFIG_SYSCTL */
#endif /* CONFIG_SMP */

#ifdef CONFIG_FAIR_GROUP_SCHED
static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group *tg)
{
        struct sched_entity *se = tg->se[cpu];

#define P(F)            SEQ_printf(m, "  .%-30s: %lld\n", #F, (long long)F)
#define P_SCHEDSTAT(F)  SEQ_printf(m, "  .%-30s: %lld\n", #F, (long long)schedstat_val(F))
#define PN(F)           SEQ_printf(m, "  .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)F))
#define PN_SCHEDSTAT(F) SEQ_printf(m, "  .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)schedstat_val(F)))

        if (!se)
                return;

        PN(se->exec_start);
        PN(se->vruntime);
        PN(se->sum_exec_runtime);

        if (schedstat_enabled()) {
                PN_SCHEDSTAT(se->statistics.wait_start);
                PN_SCHEDSTAT(se->statistics.sleep_start);
                PN_SCHEDSTAT(se->statistics.block_start);
                PN_SCHEDSTAT(se->statistics.sleep_max);
                PN_SCHEDSTAT(se->statistics.block_max);
                PN_SCHEDSTAT(se->statistics.exec_max);
                PN_SCHEDSTAT(se->statistics.slice_max);
                PN_SCHEDSTAT(se->statistics.wait_max);
                PN_SCHEDSTAT(se->statistics.wait_sum);
                P_SCHEDSTAT(se->statistics.wait_count);
        }

        P(se->load.weight);
#ifdef CONFIG_SMP
        P(se->avg.load_avg);
        P(se->avg.util_avg);
        P(se->avg.runnable_avg);
#endif

#undef PN_SCHEDSTAT
#undef PN
#undef P_SCHEDSTAT
#undef P
}
#endif

#ifdef CONFIG_CGROUP_SCHED
static char group_path[PATH_MAX];

static char *task_group_path(struct task_group *tg)
{
        if (autogroup_path(tg, group_path, PATH_MAX))
                return group_path;

        cgroup_path(tg->css.cgroup, group_path, PATH_MAX);

        return group_path;
}
#endif

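/*
 * Print one row of the runnable-tasks table: state, comm, PID,
 * vruntime, context-switch count, priority and the schedstat times.
 */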
static void
print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
{
        if (rq->curr == p)
                SEQ_printf(m, ">R");
        else
                SEQ_printf(m, " %c", task_state_to_char(p));

        SEQ_printf(m, " %15s %5d %9Ld.%06ld %9Ld %5d ",
                p->comm, task_pid_nr(p),
                SPLIT_NS(p->se.vruntime),
                (long long)(p->nvcsw + p->nivcsw),
                p->prio);

        SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld",
                SPLIT_NS(schedstat_val_or_zero(p->se.statistics.wait_sum)),
                SPLIT_NS(p->se.sum_exec_runtime),
                SPLIT_NS(schedstat_val_or_zero(p->se.statistics.sum_sleep_runtime)));

#ifdef CONFIG_NUMA_BALANCING
        SEQ_printf(m, " %d %d", task_node(p), task_numa_group_id(p));
#endif
#ifdef CONFIG_CGROUP_SCHED
        SEQ_printf(m, " %s", task_group_path(task_group(p)));
#endif

        SEQ_printf(m, "\n");
}

static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
{
        struct task_struct *g, *p;

        SEQ_printf(m, "\n");
        SEQ_printf(m, "runnable tasks:\n");
        SEQ_printf(m, " S            task   PID         tree-key  switches  prio"
                   "     wait-time             sum-exec        sum-sleep\n");
        SEQ_printf(m, "-------------------------------------------------------"
                   "------------------------------------------------------\n");

        rcu_read_lock();
        for_each_process_thread(g, p) {
                if (task_cpu(p) != rq_cpu)
                        continue;

                print_task(m, rq, p);
        }
        rcu_read_unlock();
}

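/*
 * Print a CFS runqueue: the vruntime spread, nr_running, load and PELT
 * averages, plus group-scheduling and bandwidth state where configured.
 */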
void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
{
        s64 MIN_vruntime = -1, min_vruntime, max_vruntime = -1,
                spread, rq0_min_vruntime, spread0;
        struct rq *rq = cpu_rq(cpu);
        struct sched_entity *last;
        unsigned long flags;

#ifdef CONFIG_FAIR_GROUP_SCHED
        SEQ_printf(m, "\n");
        SEQ_printf(m, "cfs_rq[%d]:%s\n", cpu, task_group_path(cfs_rq->tg));
#else
        SEQ_printf(m, "\n");
        SEQ_printf(m, "cfs_rq[%d]:\n", cpu);
#endif
        SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "exec_clock",
                        SPLIT_NS(cfs_rq->exec_clock));

        raw_spin_lock_irqsave(&rq->lock, flags);
        if (rb_first_cached(&cfs_rq->tasks_timeline))
                MIN_vruntime = (__pick_first_entity(cfs_rq))->vruntime;
        last = __pick_last_entity(cfs_rq);
        if (last)
                max_vruntime = last->vruntime;
        min_vruntime = cfs_rq->min_vruntime;
        rq0_min_vruntime = cpu_rq(0)->cfs.min_vruntime;
        raw_spin_unlock_irqrestore(&rq->lock, flags);
        SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "MIN_vruntime",
                        SPLIT_NS(MIN_vruntime));
        SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "min_vruntime",
                        SPLIT_NS(min_vruntime));
        SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "max_vruntime",
                        SPLIT_NS(max_vruntime));
        spread = max_vruntime - MIN_vruntime;
        SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread",
                        SPLIT_NS(spread));
        spread0 = min_vruntime - rq0_min_vruntime;
        SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread0",
                        SPLIT_NS(spread0));
        SEQ_printf(m, "  .%-30s: %d\n", "nr_spread_over",
                        cfs_rq->nr_spread_over);
        SEQ_printf(m, "  .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
        SEQ_printf(m, "  .%-30s: %ld\n", "load", cfs_rq->load.weight);
#ifdef CONFIG_SMP
        SEQ_printf(m, "  .%-30s: %lu\n", "load_avg",
                        cfs_rq->avg.load_avg);
        SEQ_printf(m, "  .%-30s: %lu\n", "runnable_avg",
                        cfs_rq->avg.runnable_avg);
        SEQ_printf(m, "  .%-30s: %lu\n", "util_avg",
                        cfs_rq->avg.util_avg);
        SEQ_printf(m, "  .%-30s: %u\n", "util_est_enqueued",
                        cfs_rq->avg.util_est.enqueued);
        SEQ_printf(m, "  .%-30s: %ld\n", "removed.load_avg",
                        cfs_rq->removed.load_avg);
        SEQ_printf(m, "  .%-30s: %ld\n", "removed.util_avg",
                        cfs_rq->removed.util_avg);
        SEQ_printf(m, "  .%-30s: %ld\n", "removed.runnable_avg",
                        cfs_rq->removed.runnable_avg);
#ifdef CONFIG_FAIR_GROUP_SCHED
        SEQ_printf(m, "  .%-30s: %lu\n", "tg_load_avg_contrib",
                        cfs_rq->tg_load_avg_contrib);
        SEQ_printf(m, "  .%-30s: %ld\n", "tg_load_avg",
                        atomic_long_read(&cfs_rq->tg->load_avg));
#endif
#endif
#ifdef CONFIG_CFS_BANDWIDTH
        SEQ_printf(m, "  .%-30s: %d\n", "throttled",
                        cfs_rq->throttled);
        SEQ_printf(m, "  .%-30s: %d\n", "throttle_count",
                        cfs_rq->throttle_count);
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
        print_cfs_group_stats(m, cpu, cfs_rq->tg);
#endif
}

void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
{
#ifdef CONFIG_RT_GROUP_SCHED
        SEQ_printf(m, "\n");
        SEQ_printf(m, "rt_rq[%d]:%s\n", cpu, task_group_path(rt_rq->tg));
#else
        SEQ_printf(m, "\n");
        SEQ_printf(m, "rt_rq[%d]:\n", cpu);
#endif

#define P(x) \
        SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rt_rq->x))
#define PU(x) \
        SEQ_printf(m, "  .%-30s: %lu\n", #x, (unsigned long)(rt_rq->x))
#define PN(x) \
        SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rt_rq->x))

        PU(rt_nr_running);
#ifdef CONFIG_SMP
        PU(rt_nr_migratory);
#endif
        P(rt_throttled);
        PN(rt_time);
        PN(rt_runtime);

#undef PN
#undef PU
#undef P
}

void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq)
{
        struct dl_bw *dl_bw;

        SEQ_printf(m, "\n");
        SEQ_printf(m, "dl_rq[%d]:\n", cpu);

#define PU(x) \
        SEQ_printf(m, "  .%-30s: %lu\n", #x, (unsigned long)(dl_rq->x))

        PU(dl_nr_running);
#ifdef CONFIG_SMP
        PU(dl_nr_migratory);
        dl_bw = &cpu_rq(cpu)->rd->dl_bw;
#else
        dl_bw = &dl_rq->dl_bw;
#endif
        SEQ_printf(m, "  .%-30s: %lld\n", "dl_bw->bw", dl_bw->bw);
        SEQ_printf(m, "  .%-30s: %lld\n", "dl_bw->total_bw", dl_bw->total_bw);

#undef PU
}

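/*
 * Print everything known about one CPU: clocks and runqueue counters,
 * then, under sched_debug_lock, its CFS/RT/DL runqueues and tasks.
 */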
static void print_cpu(struct seq_file *m, int cpu)
{
        struct rq *rq = cpu_rq(cpu);
        unsigned long flags;

#ifdef CONFIG_X86
        {
                unsigned int freq = cpu_khz ? : 1;

                SEQ_printf(m, "cpu#%d, %u.%03u MHz\n",
                           cpu, freq / 1000, (freq % 1000));
        }
#else
        SEQ_printf(m, "cpu#%d\n", cpu);
#endif

#define P(x)                                                            \
do {                                                                    \
        if (sizeof(rq->x) == 4)                                         \
                SEQ_printf(m, "  .%-30s: %ld\n", #x, (long)(rq->x));    \
        else                                                            \
                SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rq->x));\
} while (0)

#define PN(x) \
        SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rq->x))

        P(nr_running);
        P(nr_switches);
        P(nr_uninterruptible);
        PN(next_balance);
        SEQ_printf(m, "  .%-30s: %ld\n", "curr->pid", (long)(task_pid_nr(rq->curr)));
        PN(clock);
        PN(clock_task);
#undef P
#undef PN

#ifdef CONFIG_SMP
#define P64(n) SEQ_printf(m, "  .%-30s: %Ld\n", #n, rq->n);
        P64(avg_idle);
        P64(max_idle_balance_cost);
#undef P64
#endif

#define P(n) SEQ_printf(m, "  .%-30s: %d\n", #n, schedstat_val(rq->n));
        if (schedstat_enabled()) {
                P(yld_count);
                P(sched_count);
                P(sched_goidle);
                P(ttwu_count);
                P(ttwu_local);
        }
#undef P

        spin_lock_irqsave(&sched_debug_lock, flags);
        print_cfs_stats(m, cpu);
        print_rt_stats(m, cpu);
        print_dl_stats(m, cpu);

        print_rq(m, rq, cpu);
        spin_unlock_irqrestore(&sched_debug_lock, flags);
        SEQ_printf(m, "\n");
}

static const char *sched_tunable_scaling_names[] = {
        "none",
        "logarithmic",
        "linear"
};

static void sched_debug_header(struct seq_file *m)
{
        u64 ktime, sched_clk, cpu_clk;
        unsigned long flags;

        local_irq_save(flags);
        ktime = ktime_to_ns(ktime_get());
        sched_clk = sched_clock();
        cpu_clk = local_clock();
        local_irq_restore(flags);

        SEQ_printf(m, "Sched Debug Version: v0.11, %s %.*s\n",
                init_utsname()->release,
                (int)strcspn(init_utsname()->version, " "),
                init_utsname()->version);

#define P(x) \
        SEQ_printf(m, "%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
        SEQ_printf(m, "%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
        PN(ktime);
        PN(sched_clk);
        PN(cpu_clk);
        P(jiffies);
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
        P(sched_clock_stable());
#endif
#undef PN
#undef P

        SEQ_printf(m, "\n");
        SEQ_printf(m, "sysctl_sched\n");

#define P(x) \
        SEQ_printf(m, "  .%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
        SEQ_printf(m, "  .%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
        PN(sysctl_sched_latency);
        PN(sysctl_sched_min_granularity);
        PN(sysctl_sched_wakeup_granularity);
        P(sysctl_sched_child_runs_first);
        P(sysctl_sched_features);
#undef PN
#undef P

        SEQ_printf(m, "  .%-40s: %d (%s)\n",
                "sysctl_sched_tunable_scaling",
                sysctl_sched_tunable_scaling,
                sched_tunable_scaling_names[sysctl_sched_tunable_scaling]);
        SEQ_printf(m, "\n");
}

static int sched_debug_show(struct seq_file *m, void *v)
{
        int cpu = (unsigned long)(v - 2);

        if (cpu != -1)
                print_cpu(m, cpu);
        else
                sched_debug_header(m);

        return 0;
}

void sysrq_sched_debug_show(void)
{
        int cpu;

        sched_debug_header(NULL);
        for_each_online_cpu(cpu) {
                /*
                 * Need to reset softlockup watchdogs on all CPUs, because
                 * another CPU might be blocked waiting for us to process
                 * an IPI or stop_machine.
                 */
                touch_nmi_watchdog();
                touch_all_softlockup_watchdogs();
                print_cpu(NULL, cpu);
        }
}

/*
 * This iterator needs some explanation.
 * It returns 1 for the header position.
 * This means 2 is CPU 0.
 * In a hotplugged system some CPUs, including CPU 0, may be missing so we have
 * to use cpumask_* to iterate over the CPUs.
 */
static void *sched_debug_start(struct seq_file *file, loff_t *offset)
{
        unsigned long n = *offset;

        if (n == 0)
                return (void *) 1;

        n--;

        if (n > 0)
                n = cpumask_next(n - 1, cpu_online_mask);
        else
                n = cpumask_first(cpu_online_mask);

        *offset = n + 1;

        if (n < nr_cpu_ids)
                return (void *)(unsigned long)(n + 2);

        return NULL;
}

static void *sched_debug_next(struct seq_file *file, void *data, loff_t *offset)
{
        (*offset)++;
        return sched_debug_start(file, offset);
}

static void sched_debug_stop(struct seq_file *file, void *data)
{
}

static const struct seq_operations sched_debug_sops = {
        .start          = sched_debug_start,
        .next           = sched_debug_next,
        .stop           = sched_debug_stop,
        .show           = sched_debug_show,
};

static int __init init_sched_debug_procfs(void)
{
        if (!proc_create_seq("sched_debug", 0444, NULL, &sched_debug_sops))
                return -ENOMEM;
        return 0;
}

__initcall(init_sched_debug_procfs);

#define __PS(S, F) SEQ_printf(m, "%-45s:%21Ld\n", S, (long long)(F))
#define __P(F) __PS(#F, F)
#define   P(F) __PS(#F, p->F)
#define __PSN(S, F) SEQ_printf(m, "%-45s:%14Ld.%06ld\n", S, SPLIT_NS((long long)(F)))
#define __PN(F) __PSN(#F, F)
#define   PN(F) __PSN(#F, p->F)


#ifdef CONFIG_NUMA_BALANCING
void print_numa_stats(struct seq_file *m, int node, unsigned long tsf,
                unsigned long tpf, unsigned long gsf, unsigned long gpf)
{
        SEQ_printf(m, "numa_faults node=%d ", node);
        SEQ_printf(m, "task_private=%lu task_shared=%lu ", tpf, tsf);
        SEQ_printf(m, "group_private=%lu group_shared=%lu\n", gpf, gsf);
}
#endif


static void sched_show_numa(struct task_struct *p, struct seq_file *m)
{
#ifdef CONFIG_NUMA_BALANCING
        struct mempolicy *pol;

        if (p->mm)
                P(mm->numa_scan_seq);

        task_lock(p);
        pol = p->mempolicy;
        if (pol && !(pol->flags & MPOL_F_MORON))
                pol = NULL;
        mpol_get(pol);
        task_unlock(p);

        P(numa_pages_migrated);
        P(numa_preferred_nid);
        P(total_numa_faults);
        SEQ_printf(m, "current_node=%d, numa_group_id=%d\n",
                        task_node(p), task_numa_group_id(p));
        show_numa_stats(p, m);
        mpol_put(pol);
#endif
}

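/*
 * Backs /proc/<pid>/sched: dump one task's scheduling state, schedstats,
 * PELT averages and (where configured) uclamp and NUMA details.
 */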
void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
                          struct seq_file *m)
{
        unsigned long nr_switches;

        SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, task_pid_nr_ns(p, ns),
                get_nr_threads(p));
        SEQ_printf(m,
                "---------------------------------------------------------"
                "----------\n");

#define P_SCHEDSTAT(F)  __PS(#F, schedstat_val(p->F))
#define PN_SCHEDSTAT(F) __PSN(#F, schedstat_val(p->F))

        PN(se.exec_start);
        PN(se.vruntime);
        PN(se.sum_exec_runtime);

        nr_switches = p->nvcsw + p->nivcsw;

        P(se.nr_migrations);

        if (schedstat_enabled()) {
                u64 avg_atom, avg_per_cpu;

                PN_SCHEDSTAT(se.statistics.sum_sleep_runtime);
                PN_SCHEDSTAT(se.statistics.wait_start);
                PN_SCHEDSTAT(se.statistics.sleep_start);
                PN_SCHEDSTAT(se.statistics.block_start);
                PN_SCHEDSTAT(se.statistics.sleep_max);
                PN_SCHEDSTAT(se.statistics.block_max);
                PN_SCHEDSTAT(se.statistics.exec_max);
                PN_SCHEDSTAT(se.statistics.slice_max);
                PN_SCHEDSTAT(se.statistics.wait_max);
                PN_SCHEDSTAT(se.statistics.wait_sum);
                P_SCHEDSTAT(se.statistics.wait_count);
                PN_SCHEDSTAT(se.statistics.iowait_sum);
                P_SCHEDSTAT(se.statistics.iowait_count);
                P_SCHEDSTAT(se.statistics.nr_migrations_cold);
                P_SCHEDSTAT(se.statistics.nr_failed_migrations_affine);
                P_SCHEDSTAT(se.statistics.nr_failed_migrations_running);
                P_SCHEDSTAT(se.statistics.nr_failed_migrations_hot);
                P_SCHEDSTAT(se.statistics.nr_forced_migrations);
                P_SCHEDSTAT(se.statistics.nr_wakeups);
                P_SCHEDSTAT(se.statistics.nr_wakeups_sync);
                P_SCHEDSTAT(se.statistics.nr_wakeups_migrate);
                P_SCHEDSTAT(se.statistics.nr_wakeups_local);
                P_SCHEDSTAT(se.statistics.nr_wakeups_remote);
                P_SCHEDSTAT(se.statistics.nr_wakeups_affine);
                P_SCHEDSTAT(se.statistics.nr_wakeups_affine_attempts);
                P_SCHEDSTAT(se.statistics.nr_wakeups_passive);
                P_SCHEDSTAT(se.statistics.nr_wakeups_idle);

                avg_atom = p->se.sum_exec_runtime;
                if (nr_switches)
                        avg_atom = div64_ul(avg_atom, nr_switches);
                else
                        avg_atom = -1LL;

                avg_per_cpu = p->se.sum_exec_runtime;
                if (p->se.nr_migrations) {
                        avg_per_cpu = div64_u64(avg_per_cpu,
                                                p->se.nr_migrations);
                } else {
                        avg_per_cpu = -1LL;
                }

                __PN(avg_atom);
                __PN(avg_per_cpu);
        }

        __P(nr_switches);
        __PS("nr_voluntary_switches", p->nvcsw);
        __PS("nr_involuntary_switches", p->nivcsw);

        P(se.load.weight);
#ifdef CONFIG_SMP
        P(se.avg.load_sum);
        P(se.avg.runnable_sum);
        P(se.avg.util_sum);
        P(se.avg.load_avg);
        P(se.avg.runnable_avg);
        P(se.avg.util_avg);
        P(se.avg.last_update_time);
        P(se.avg.util_est.ewma);
        P(se.avg.util_est.enqueued);
#endif
#ifdef CONFIG_UCLAMP_TASK
        __PS("uclamp.min", p->uclamp_req[UCLAMP_MIN].value);
        __PS("uclamp.max", p->uclamp_req[UCLAMP_MAX].value);
        __PS("effective uclamp.min", uclamp_eff_value(p, UCLAMP_MIN));
        __PS("effective uclamp.max", uclamp_eff_value(p, UCLAMP_MAX));
#endif
        P(policy);
        P(prio);
        if (task_has_dl_policy(p)) {
                P(dl.runtime);
                P(dl.deadline);
        }
#undef PN_SCHEDSTAT
#undef P_SCHEDSTAT

        {
                unsigned int this_cpu = raw_smp_processor_id();
                u64 t0, t1;

                t0 = cpu_clock(this_cpu);
                t1 = cpu_clock(this_cpu);
                __PS("clock-delta", t1-t0);
        }

        sched_show_numa(p, m);
}

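/*
 * Clear the task's accumulated schedstats; expected to be invoked from
 * the /proc/<pid>/sched write handler.
 */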
void proc_sched_set_task(struct task_struct *p)
{
#ifdef CONFIG_SCHEDSTATS
        memset(&p->se.statistics, 0, sizeof(p->se.statistics));
#endif
}