/*
 * kernel/sched/debug.c
 *
 * Print the CFS rbtree
 *
 * Copyright(C) 2007, Red Hat, Inc., Ingo Molnar
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/proc_fs.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/kallsyms.h>
#include <linux/utsname.h>
#include <linux/mempolicy.h>

#include "sched.h"

static DEFINE_SPINLOCK(sched_debug_lock);

/*
 * This allows printing both to /proc/sched_debug and
 * to the console
 */
#define SEQ_printf(m, x...)			\
 do {						\
	if (m)					\
		seq_printf(m, x);		\
	else					\
		printk(x);			\
 } while (0)
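
/*
 * Example: print_cpu(m, cpu) reaches SEQ_printf() with a non-NULL
 * seq_file when invoked through /proc/sched_debug, so output is
 * buffered for the reader; sysrq_sched_debug_show() below passes
 * m == NULL, and the very same format strings then go to the
 * console through printk().
 */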

/*
 * Ease the printing of nsec fields:
 */
static long long nsec_high(unsigned long long nsec)
{
	if ((long long)nsec < 0) {
		nsec = -nsec;
		do_div(nsec, 1000000);
		return -nsec;
	}
	do_div(nsec, 1000000);

	return nsec;
}

static unsigned long nsec_low(unsigned long long nsec)
{
	if ((long long)nsec < 0)
		nsec = -nsec;

	return do_div(nsec, 1000000);
}

#define SPLIT_NS(x) nsec_high(x), nsec_low(x)
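
/*
 * Worked example: for nsec == 1234567890, nsec_high() returns 1234
 * (the millisecond part) and nsec_low() returns the remainder
 * 567890, so a "%Ld.%06ld" format fed with SPLIT_NS() prints
 * "1234.567890".  do_div() is used rather than plain '/' and '%'
 * so the 64-bit division also builds on 32-bit architectures.
 */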

#ifdef CONFIG_FAIR_GROUP_SCHED
static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group *tg)
{
	struct sched_entity *se = tg->se[cpu];

#define P(F) \
	SEQ_printf(m, "  .%-30s: %lld\n", #F, (long long)F)
#define PN(F) \
	SEQ_printf(m, "  .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)F))

	if (!se) {
		struct sched_avg *avg = &cpu_rq(cpu)->avg;
		P(avg->runnable_avg_sum);
		P(avg->runnable_avg_period);
		return;
	}

	PN(se->exec_start);
	PN(se->vruntime);
	PN(se->sum_exec_runtime);
#ifdef CONFIG_SCHEDSTATS
	PN(se->statistics.wait_start);
	PN(se->statistics.sleep_start);
	PN(se->statistics.block_start);
	PN(se->statistics.sleep_max);
	PN(se->statistics.block_max);
	PN(se->statistics.exec_max);
	PN(se->statistics.slice_max);
	PN(se->statistics.wait_max);
	PN(se->statistics.wait_sum);
	P(se->statistics.wait_count);
#endif
	P(se->load.weight);
#ifdef CONFIG_SMP
	P(se->avg.runnable_avg_sum);
	P(se->avg.runnable_avg_period);
	P(se->avg.load_avg_contrib);
	P(se->avg.decay_count);
#endif
#undef PN
#undef P
}
#endif

#ifdef CONFIG_CGROUP_SCHED
static char group_path[PATH_MAX];

static char *task_group_path(struct task_group *tg)
{
	if (autogroup_path(tg, group_path, PATH_MAX))
		return group_path;

	return cgroup_path(tg->css.cgroup, group_path, PATH_MAX);
}
#endif

static void
print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
{
	if (rq->curr == p)
		SEQ_printf(m, "R");
	else
		SEQ_printf(m, " ");

	SEQ_printf(m, "%15s %5d %9Ld.%06ld %9Ld %5d ",
		p->comm, task_pid_nr(p),
		SPLIT_NS(p->se.vruntime),
		(long long)(p->nvcsw + p->nivcsw),
		p->prio);
#ifdef CONFIG_SCHEDSTATS
	SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld",
		SPLIT_NS(p->se.vruntime),
		SPLIT_NS(p->se.sum_exec_runtime),
		SPLIT_NS(p->se.statistics.sum_sleep_runtime));
#else
	SEQ_printf(m, "%15Ld %15Ld %15Ld.%06ld %15Ld.%06ld %15Ld.%06ld",
		0LL, 0LL, 0LL, 0L, 0LL, 0L, 0LL, 0L);
#endif
#ifdef CONFIG_NUMA_BALANCING
	SEQ_printf(m, " %d", task_node(p));
#endif
#ifdef CONFIG_CGROUP_SCHED
	SEQ_printf(m, " %s", task_group_path(task_group(p)));
#endif

	SEQ_printf(m, "\n");
}

static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
{
	struct task_struct *g, *p;
	unsigned long flags;

	SEQ_printf(m,
	"\nrunnable tasks:\n"
	"            task   PID         tree-key  switches  prio"
	"     exec-runtime         sum-exec        sum-sleep\n"
	"------------------------------------------------------"
	"----------------------------------------------------\n");

	read_lock_irqsave(&tasklist_lock, flags);

	do_each_thread(g, p) {
		if (task_cpu(p) != rq_cpu)
			continue;

		print_task(m, rq, p);
	} while_each_thread(g, p);

	read_unlock_irqrestore(&tasklist_lock, flags);
}

void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
{
	s64 MIN_vruntime = -1, min_vruntime, max_vruntime = -1,
		spread, rq0_min_vruntime, spread0;
	struct rq *rq = cpu_rq(cpu);
	struct sched_entity *last;
	unsigned long flags;

#ifdef CONFIG_FAIR_GROUP_SCHED
	SEQ_printf(m, "\ncfs_rq[%d]:%s\n", cpu, task_group_path(cfs_rq->tg));
#else
	SEQ_printf(m, "\ncfs_rq[%d]:\n", cpu);
#endif
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "exec_clock",
			SPLIT_NS(cfs_rq->exec_clock));

	raw_spin_lock_irqsave(&rq->lock, flags);
	if (cfs_rq->rb_leftmost)
		MIN_vruntime = (__pick_first_entity(cfs_rq))->vruntime;
	last = __pick_last_entity(cfs_rq);
	if (last)
		max_vruntime = last->vruntime;
	min_vruntime = cfs_rq->min_vruntime;
	rq0_min_vruntime = cpu_rq(0)->cfs.min_vruntime;
	raw_spin_unlock_irqrestore(&rq->lock, flags);
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "MIN_vruntime",
			SPLIT_NS(MIN_vruntime));
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "min_vruntime",
			SPLIT_NS(min_vruntime));
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "max_vruntime",
			SPLIT_NS(max_vruntime));
	spread = max_vruntime - MIN_vruntime;
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread",
			SPLIT_NS(spread));
	spread0 = min_vruntime - rq0_min_vruntime;
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread0",
			SPLIT_NS(spread0));
	SEQ_printf(m, "  .%-30s: %d\n", "nr_spread_over",
			cfs_rq->nr_spread_over);
	SEQ_printf(m, "  .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
	SEQ_printf(m, "  .%-30s: %ld\n", "load", cfs_rq->load.weight);
#ifdef CONFIG_SMP
	SEQ_printf(m, "  .%-30s: %ld\n", "runnable_load_avg",
			cfs_rq->runnable_load_avg);
	SEQ_printf(m, "  .%-30s: %ld\n", "blocked_load_avg",
			cfs_rq->blocked_load_avg);
#ifdef CONFIG_FAIR_GROUP_SCHED
	SEQ_printf(m, "  .%-30s: %ld\n", "tg_load_contrib",
			cfs_rq->tg_load_contrib);
	SEQ_printf(m, "  .%-30s: %d\n", "tg_runnable_contrib",
			cfs_rq->tg_runnable_contrib);
	SEQ_printf(m, "  .%-30s: %ld\n", "tg_load_avg",
			atomic_long_read(&cfs_rq->tg->load_avg));
	SEQ_printf(m, "  .%-30s: %d\n", "tg->runnable_avg",
			atomic_read(&cfs_rq->tg->runnable_avg));
#endif
#endif
#ifdef CONFIG_CFS_BANDWIDTH
	SEQ_printf(m, "  .%-30s: %d\n", "tg->cfs_bandwidth.timer_active",
			cfs_rq->tg->cfs_bandwidth.timer_active);
	SEQ_printf(m, "  .%-30s: %d\n", "throttled",
			cfs_rq->throttled);
	SEQ_printf(m, "  .%-30s: %d\n", "throttle_count",
			cfs_rq->throttle_count);
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	print_cfs_group_stats(m, cpu, cfs_rq->tg);
#endif
}

void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
{
#ifdef CONFIG_RT_GROUP_SCHED
	SEQ_printf(m, "\nrt_rq[%d]:%s\n", cpu, task_group_path(rt_rq->tg));
#else
	SEQ_printf(m, "\nrt_rq[%d]:\n", cpu);
#endif

#define P(x) \
	SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rt_rq->x))
#define PN(x) \
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rt_rq->x))

	P(rt_nr_running);
	P(rt_throttled);
	PN(rt_time);
	PN(rt_runtime);

#undef PN
#undef P
}

extern __read_mostly int sched_clock_running;

static void print_cpu(struct seq_file *m, int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long flags;

#ifdef CONFIG_X86
	{
		unsigned int freq = cpu_khz ? : 1;

		SEQ_printf(m, "cpu#%d, %u.%03u MHz\n",
			   cpu, freq / 1000, (freq % 1000));
	}
#else
	SEQ_printf(m, "cpu#%d\n", cpu);
#endif

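/*
 * rq fields mix 32-bit and 64-bit types, so P() below dispatches on
 * sizeof() at compile time and picks a matching printf length
 * modifier; e.g. the u64 nr_switches always takes the %Ld branch,
 * while the unsigned long cpu_load[] entries take the %ld branch
 * on 32-bit builds.
 */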
#define P(x) \
do { \
	if (sizeof(rq->x) == 4) \
		SEQ_printf(m, "  .%-30s: %ld\n", #x, (long)(rq->x)); \
	else \
		SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rq->x));\
} while (0)

#define PN(x) \
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rq->x))

	P(nr_running);
	SEQ_printf(m, "  .%-30s: %lu\n", "load",
		   rq->load.weight);
	P(nr_switches);
	P(nr_load_updates);
	P(nr_uninterruptible);
	PN(next_balance);
	SEQ_printf(m, "  .%-30s: %ld\n", "curr->pid", (long)(task_pid_nr(rq->curr)));
	PN(clock);
	P(cpu_load[0]);
	P(cpu_load[1]);
	P(cpu_load[2]);
	P(cpu_load[3]);
	P(cpu_load[4]);
#undef P
#undef PN

#ifdef CONFIG_SCHEDSTATS
#define P(n) SEQ_printf(m, "  .%-30s: %d\n", #n, rq->n);
#define P64(n) SEQ_printf(m, "  .%-30s: %Ld\n", #n, rq->n);

	P(yld_count);

	P(sched_count);
	P(sched_goidle);
#ifdef CONFIG_SMP
	P64(avg_idle);
	P64(max_idle_balance_cost);
#endif

	P(ttwu_count);
	P(ttwu_local);

#undef P
#undef P64
#endif
	spin_lock_irqsave(&sched_debug_lock, flags);
	print_cfs_stats(m, cpu);
	print_rt_stats(m, cpu);

	rcu_read_lock();
	print_rq(m, rq, cpu);
	rcu_read_unlock();
	spin_unlock_irqrestore(&sched_debug_lock, flags);
	SEQ_printf(m, "\n");
}

static const char *sched_tunable_scaling_names[] = {
	"none",
	"logarithmic",
	"linear"
};

static void sched_debug_header(struct seq_file *m)
{
	u64 ktime, sched_clk, cpu_clk;
	unsigned long flags;

	local_irq_save(flags);
	ktime = ktime_to_ns(ktime_get());
	sched_clk = sched_clock();
	cpu_clk = local_clock();
	local_irq_restore(flags);

	SEQ_printf(m, "Sched Debug Version: v0.11, %s %.*s\n",
		init_utsname()->release,
		(int)strcspn(init_utsname()->version, " "),
		init_utsname()->version);

#define P(x) \
	SEQ_printf(m, "%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
	SEQ_printf(m, "%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
	PN(ktime);
	PN(sched_clk);
	PN(cpu_clk);
	P(jiffies);
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
	P(sched_clock_stable());
#endif
#undef PN
#undef P

	SEQ_printf(m, "\n");
	SEQ_printf(m, "sysctl_sched\n");

#define P(x) \
	SEQ_printf(m, "  .%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
	SEQ_printf(m, "  .%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
	PN(sysctl_sched_latency);
	PN(sysctl_sched_min_granularity);
	PN(sysctl_sched_wakeup_granularity);
	P(sysctl_sched_child_runs_first);
	P(sysctl_sched_features);
#undef PN
#undef P

	SEQ_printf(m, "  .%-40s: %d (%s)\n",
		"sysctl_sched_tunable_scaling",
		sysctl_sched_tunable_scaling,
		sched_tunable_scaling_names[sysctl_sched_tunable_scaling]);
	SEQ_printf(m, "\n");
}

static int sched_debug_show(struct seq_file *m, void *v)
{
	int cpu = (unsigned long)(v - 2);

	if (cpu != -1)
		print_cpu(m, cpu);
	else
		sched_debug_header(m);

	return 0;
}

void sysrq_sched_debug_show(void)
{
	int cpu;

	sched_debug_header(NULL);
	for_each_online_cpu(cpu)
		print_cpu(NULL, cpu);
}

/*
 * This iterator needs some explanation.
 * It returns 1 for the header position.
 * This means 2 is cpu 0.
 * In a hotplugged system some cpus, including cpu 0, may be missing so we have
 * to use cpumask_* to iterate over the cpus.
 */
static void *sched_debug_start(struct seq_file *file, loff_t *offset)
{
	unsigned long n = *offset;

	if (n == 0)
		return (void *) 1;

	n--;

	if (n > 0)
		n = cpumask_next(n - 1, cpu_online_mask);
	else
		n = cpumask_first(cpu_online_mask);

	*offset = n + 1;

	if (n < nr_cpu_ids)
		return (void *)(unsigned long)(n + 2);
	return NULL;
}

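/*
 * Worked example: with cpus 0 and 1 online, *offset == 0 returns
 * (void *)1 (the header record; sched_debug_show() turns it back
 * into cpu == -1), *offset == 1 returns (void *)2 for cpu 0,
 * *offset == 2 returns (void *)3 for cpu 1, and *offset == 3 runs
 * off cpu_online_mask and ends the sequence with NULL.
 */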
static void *sched_debug_next(struct seq_file *file, void *data, loff_t *offset)
{
	(*offset)++;
	return sched_debug_start(file, offset);
}

static void sched_debug_stop(struct seq_file *file, void *data)
{
}

static const struct seq_operations sched_debug_sops = {
	.start = sched_debug_start,
	.next = sched_debug_next,
	.stop = sched_debug_stop,
	.show = sched_debug_show,
};

static int sched_debug_release(struct inode *inode, struct file *file)
{
	return seq_release(inode, file);
}

static int sched_debug_open(struct inode *inode, struct file *filp)
{
	return seq_open(filp, &sched_debug_sops);
}

static const struct file_operations sched_debug_fops = {
	.open = sched_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = sched_debug_release,
};

static int __init init_sched_debug_procfs(void)
{
	struct proc_dir_entry *pe;

	pe = proc_create("sched_debug", 0444, NULL, &sched_debug_fops);
	if (!pe)
		return -ENOMEM;
	return 0;
}

__initcall(init_sched_debug_procfs);

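/*
 * Usage note: once registered, the dump is read with e.g.
 *   cat /proc/sched_debug
 * The 0444 mode makes the file world-readable; the sysrq path
 * above emits the same report on the console without going
 * through the seq_file machinery.
 */
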
#define __P(F) \
	SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)F)
#define P(F) \
	SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)p->F)
#define __PN(F) \
	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)F))
#define PN(F) \
	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)p->F))

static void sched_show_numa(struct task_struct *p, struct seq_file *m)
{
#ifdef CONFIG_NUMA_BALANCING
	struct mempolicy *pol;
	int node, i;

	if (p->mm)
		P(mm->numa_scan_seq);

	task_lock(p);
	pol = p->mempolicy;
	if (pol && !(pol->flags & MPOL_F_MORON))
		pol = NULL;
	mpol_get(pol);
	task_unlock(p);

	SEQ_printf(m, "numa_migrations, %ld\n", xchg(&p->numa_pages_migrated, 0));

	for_each_online_node(node) {
		for (i = 0; i < 2; i++) {
			unsigned long nr_faults = -1;
			int cpu_current, home_node;

			if (p->numa_faults_memory)
				nr_faults = p->numa_faults_memory[2*node + i];

			cpu_current = !i ? (task_node(p) == node) :
				      (pol && node_isset(node, pol->v.nodes));

			home_node = (p->numa_preferred_nid == node);

			SEQ_printf(m, "numa_faults_memory, %d, %d, %d, %d, %ld\n",
				i, node, cpu_current, home_node, nr_faults);
		}
	}

	mpol_put(pol);
#endif
}

void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
{
	unsigned long nr_switches;

	SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, task_pid_nr(p),
						get_nr_threads(p));
	SEQ_printf(m,
		"---------------------------------------------------------"
		"----------\n");
#define __P(F) \
	SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)F)
#define P(F) \
	SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)p->F)
#define __PN(F) \
	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)F))
#define PN(F) \
	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)p->F))

	PN(se.exec_start);
	PN(se.vruntime);
	PN(se.sum_exec_runtime);

	nr_switches = p->nvcsw + p->nivcsw;

#ifdef CONFIG_SCHEDSTATS
	PN(se.statistics.wait_start);
	PN(se.statistics.sleep_start);
	PN(se.statistics.block_start);
	PN(se.statistics.sleep_max);
	PN(se.statistics.block_max);
	PN(se.statistics.exec_max);
	PN(se.statistics.slice_max);
	PN(se.statistics.wait_max);
	PN(se.statistics.wait_sum);
	P(se.statistics.wait_count);
	PN(se.statistics.iowait_sum);
	P(se.statistics.iowait_count);
	P(se.nr_migrations);
	P(se.statistics.nr_migrations_cold);
	P(se.statistics.nr_failed_migrations_affine);
	P(se.statistics.nr_failed_migrations_running);
	P(se.statistics.nr_failed_migrations_hot);
	P(se.statistics.nr_forced_migrations);
	P(se.statistics.nr_wakeups);
	P(se.statistics.nr_wakeups_sync);
	P(se.statistics.nr_wakeups_migrate);
	P(se.statistics.nr_wakeups_local);
	P(se.statistics.nr_wakeups_remote);
	P(se.statistics.nr_wakeups_affine);
	P(se.statistics.nr_wakeups_affine_attempts);
	P(se.statistics.nr_wakeups_passive);
	P(se.statistics.nr_wakeups_idle);

	{
		u64 avg_atom, avg_per_cpu;

		avg_atom = p->se.sum_exec_runtime;
		if (nr_switches)
			do_div(avg_atom, nr_switches);
		else
			avg_atom = -1LL;

		avg_per_cpu = p->se.sum_exec_runtime;
		if (p->se.nr_migrations) {
			avg_per_cpu = div64_u64(avg_per_cpu,
						p->se.nr_migrations);
		} else {
			avg_per_cpu = -1LL;
		}

		__PN(avg_atom);
		__PN(avg_per_cpu);
	}
#endif
	__P(nr_switches);
	SEQ_printf(m, "%-45s:%21Ld\n",
		   "nr_voluntary_switches", (long long)p->nvcsw);
	SEQ_printf(m, "%-45s:%21Ld\n",
		   "nr_involuntary_switches", (long long)p->nivcsw);

	P(se.load.weight);
#ifdef CONFIG_SMP
	P(se.avg.runnable_avg_sum);
	P(se.avg.runnable_avg_period);
	P(se.avg.load_avg_contrib);
	P(se.avg.decay_count);
#endif
	P(policy);
	P(prio);
#undef PN
#undef __PN
#undef P
#undef __P

	{
		unsigned int this_cpu = raw_smp_processor_id();
		u64 t0, t1;

		t0 = cpu_clock(this_cpu);
		t1 = cpu_clock(this_cpu);
		SEQ_printf(m, "%-45s:%21Ld\n",
			   "clock-delta", (long long)(t1-t0));
	}

	sched_show_numa(p, m);
}

void proc_sched_set_task(struct task_struct *p)
{
#ifdef CONFIG_SCHEDSTATS
	memset(&p->se.statistics, 0, sizeof(p->se.statistics));
#endif
}
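
/*
 * Usage note: these two entry points back the per-task
 * /proc/<pid>/sched file; reading it calls proc_sched_show_task(),
 * and any write to it, e.g.
 *   echo 0 > /proc/1/sched
 * resets the schedstats counters via proc_sched_set_task().
 */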