/*
 * kernel/sched/debug.c
 *
 * Print the CFS rbtree
 *
 * Copyright(C) 2007, Red Hat, Inc., Ingo Molnar
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/proc_fs.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/kallsyms.h>
#include <linux/utsname.h>

#include "sched.h"

static DEFINE_SPINLOCK(sched_debug_lock);

/*
 * This allows printing both to /proc/sched_debug and
 * to the console
 */
#define SEQ_printf(m, x...)			\
 do {						\
	if (m)					\
		seq_printf(m, x);		\
	else					\
		printk(x);			\
 } while (0)
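
/*
 * Everything below prints through SEQ_printf(), so the same code serves
 * both the /proc/sched_debug seq_file reader (m is a valid seq_file)
 * and the sysrq console dump (m is NULL, in which case output goes to
 * the kernel log via printk()), e.g.:
 *
 *	SEQ_printf(m, "cpu#%d\n", cpu);
 */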

/*
 * Ease the printing of nsec fields:
 */
static long long nsec_high(unsigned long long nsec)
{
	if ((long long)nsec < 0) {
		nsec = -nsec;
		do_div(nsec, 1000000);
		return -nsec;
	}
	do_div(nsec, 1000000);

	return nsec;
}

static unsigned long nsec_low(unsigned long long nsec)
{
	if ((long long)nsec < 0)
		nsec = -nsec;

	return do_div(nsec, 1000000);
}

#define SPLIT_NS(x) nsec_high(x), nsec_low(x)
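
/*
 * SPLIT_NS() expands to two arguments for a "%Ld.%06ld" format pair:
 * the quotient and remainder of a divide by 10^6, i.e. whole and
 * fractional milliseconds. E.g. 3141592653 ns prints as "3141.592653".
 * do_div() divides its first argument in place and returns the
 * remainder, which is why nsec_high() and nsec_low() each work on a
 * by-value copy.
 */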

#ifdef CONFIG_FAIR_GROUP_SCHED
static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group *tg)
{
	struct sched_entity *se = tg->se[cpu];
	if (!se)
		return;

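	/*
	 * #F stringifies the expression, so e.g. P(se->load.weight)
	 * prints the label "se->load.weight" followed by its value;
	 * the labels always match the code.
	 */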
#define P(F) \
	SEQ_printf(m, "  .%-30s: %lld\n", #F, (long long)F)
#define PN(F) \
	SEQ_printf(m, "  .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)F))

	PN(se->exec_start);
	PN(se->vruntime);
	PN(se->sum_exec_runtime);
#ifdef CONFIG_SCHEDSTATS
	PN(se->statistics.wait_start);
	PN(se->statistics.sleep_start);
	PN(se->statistics.block_start);
	PN(se->statistics.sleep_max);
	PN(se->statistics.block_max);
	PN(se->statistics.exec_max);
	PN(se->statistics.slice_max);
	PN(se->statistics.wait_max);
	PN(se->statistics.wait_sum);
	P(se->statistics.wait_count);
#endif
	P(se->load.weight);
#undef PN
#undef P
}
#endif

#ifdef CONFIG_CGROUP_SCHED
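/*
 * group_path is a single static scratch buffer; everything that calls
 * task_group_path() runs under sched_debug_lock (taken in print_cpu()),
 * which is what keeps concurrent readers of /proc/sched_debug from
 * trampling it.
 */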
static char group_path[PATH_MAX];

static char *task_group_path(struct task_group *tg)
{
	if (autogroup_path(tg, group_path, PATH_MAX))
		return group_path;

	/*
	 * May be NULL if the underlying cgroup isn't fully-created yet
	 */
	if (!tg->css.cgroup) {
		group_path[0] = '\0';
		return group_path;
	}
	cgroup_path(tg->css.cgroup, group_path, PATH_MAX);
	return group_path;
}
#endif

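/*
 * One line per runnable task, matching the header printed by
 * print_rq(): an "R" marker for the task currently on the CPU, then
 * comm, PID, tree-key (vruntime), switch count and prio, plus the
 * exec/sleep runtimes when schedstats are enabled. With illustrative
 * values, a line might look like:
 *
 * R           bash  2145    205.022924       102   120 ...
 */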
static void
print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
{
	if (rq->curr == p)
		SEQ_printf(m, "R");
	else
		SEQ_printf(m, " ");

	SEQ_printf(m, "%15s %5d %9Ld.%06ld %9Ld %5d ",
		p->comm, p->pid,
		SPLIT_NS(p->se.vruntime),
		(long long)(p->nvcsw + p->nivcsw),
		p->prio);
#ifdef CONFIG_SCHEDSTATS
	SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld",
		SPLIT_NS(p->se.vruntime),
		SPLIT_NS(p->se.sum_exec_runtime),
		SPLIT_NS(p->se.statistics.sum_sleep_runtime));
#else
	SEQ_printf(m, "%15Ld %15Ld %15Ld.%06ld %15Ld.%06ld %15Ld.%06ld",
		0LL, 0LL, 0LL, 0L, 0LL, 0L, 0LL, 0L);
#endif
#ifdef CONFIG_CGROUP_SCHED
	SEQ_printf(m, " %s", task_group_path(task_group(p)));
#endif

	SEQ_printf(m, "\n");
}

static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
{
	struct task_struct *g, *p;
	unsigned long flags;

	SEQ_printf(m,
	"\nrunnable tasks:\n"
	"            task   PID         tree-key  switches  prio"
	"     exec-runtime         sum-exec        sum-sleep\n"
	"------------------------------------------------------"
	"----------------------------------------------------\n");

	read_lock_irqsave(&tasklist_lock, flags);

	do_each_thread(g, p) {
		if (!p->on_rq || task_cpu(p) != rq_cpu)
			continue;

		print_task(m, rq, p);
	} while_each_thread(g, p);

	read_unlock_irqrestore(&tasklist_lock, flags);
}

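/*
 * MIN_vruntime is the vruntime of the leftmost (next-to-run) entity in
 * the CFS rbtree, max_vruntime that of the rightmost; "spread" is their
 * difference, a measure of how far apart the queued entities have
 * drifted. spread0 compares this runqueue's min_vruntime with CPU 0's
 * as a rough cross-CPU reference.
 */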
void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
{
	s64 MIN_vruntime = -1, min_vruntime, max_vruntime = -1,
		spread, rq0_min_vruntime, spread0;
	struct rq *rq = cpu_rq(cpu);
	struct sched_entity *last;
	unsigned long flags;

#ifdef CONFIG_FAIR_GROUP_SCHED
	SEQ_printf(m, "\ncfs_rq[%d]:%s\n", cpu, task_group_path(cfs_rq->tg));
#else
	SEQ_printf(m, "\ncfs_rq[%d]:\n", cpu);
#endif
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "exec_clock",
			SPLIT_NS(cfs_rq->exec_clock));

	raw_spin_lock_irqsave(&rq->lock, flags);
	if (cfs_rq->rb_leftmost)
		MIN_vruntime = (__pick_first_entity(cfs_rq))->vruntime;
	last = __pick_last_entity(cfs_rq);
	if (last)
		max_vruntime = last->vruntime;
	min_vruntime = cfs_rq->min_vruntime;
	rq0_min_vruntime = cpu_rq(0)->cfs.min_vruntime;
	raw_spin_unlock_irqrestore(&rq->lock, flags);
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "MIN_vruntime",
			SPLIT_NS(MIN_vruntime));
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "min_vruntime",
			SPLIT_NS(min_vruntime));
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "max_vruntime",
			SPLIT_NS(max_vruntime));
	spread = max_vruntime - MIN_vruntime;
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread",
			SPLIT_NS(spread));
	spread0 = min_vruntime - rq0_min_vruntime;
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread0",
			SPLIT_NS(spread0));
	SEQ_printf(m, "  .%-30s: %d\n", "nr_spread_over",
			cfs_rq->nr_spread_over);
	SEQ_printf(m, "  .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
	SEQ_printf(m, "  .%-30s: %ld\n", "load", cfs_rq->load.weight);
#ifdef CONFIG_FAIR_GROUP_SCHED
#ifdef CONFIG_SMP
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "load_avg",
			SPLIT_NS(cfs_rq->load_avg));
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "load_period",
			SPLIT_NS(cfs_rq->load_period));
	SEQ_printf(m, "  .%-30s: %ld\n", "load_contrib",
			cfs_rq->load_contribution);
	SEQ_printf(m, "  .%-30s: %d\n", "load_tg",
			atomic_read(&cfs_rq->tg->load_weight));
#endif

	print_cfs_group_stats(m, cpu, cfs_rq->tg);
#endif
}

void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
{
#ifdef CONFIG_RT_GROUP_SCHED
	SEQ_printf(m, "\nrt_rq[%d]:%s\n", cpu, task_group_path(rt_rq->tg));
#else
	SEQ_printf(m, "\nrt_rq[%d]:\n", cpu);
#endif

#define P(x) \
	SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rt_rq->x))
#define PN(x) \
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rt_rq->x))

	P(rt_nr_running);
	P(rt_throttled);
	PN(rt_time);
	PN(rt_runtime);

#undef PN
#undef P
}

extern __read_mostly int sched_clock_running;

static void print_cpu(struct seq_file *m, int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long flags;

#ifdef CONFIG_X86
	{
		unsigned int freq = cpu_khz ? : 1;

		SEQ_printf(m, "\ncpu#%d, %u.%03u MHz\n",
			   cpu, freq / 1000, (freq % 1000));
	}
#else
	SEQ_printf(m, "\ncpu#%d\n", cpu);
#endif

#define P(x)								\
do {									\
	if (sizeof(rq->x) == 4)						\
		SEQ_printf(m, "  .%-30s: %ld\n", #x, (long)(rq->x));	\
	else								\
		SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rq->x));\
} while (0)
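
/*
 * The sizeof() check above picks a cast and format width that match the
 * field at compile time, so the same macro prints both 32-bit and
 * 64-bit rq members correctly.
 */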

#define PN(x) \
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rq->x))

	P(nr_running);
	SEQ_printf(m, "  .%-30s: %lu\n", "load",
		   rq->load.weight);
	P(nr_switches);
	P(nr_load_updates);
	P(nr_uninterruptible);
	PN(next_balance);
	P(curr->pid);
	PN(clock);
	P(cpu_load[0]);
	P(cpu_load[1]);
	P(cpu_load[2]);
	P(cpu_load[3]);
	P(cpu_load[4]);
#undef P
#undef PN

#ifdef CONFIG_SCHEDSTATS
#define P(n) SEQ_printf(m, "  .%-30s: %d\n", #n, rq->n);
#define P64(n) SEQ_printf(m, "  .%-30s: %Ld\n", #n, rq->n);

	P(yld_count);

	P(sched_count);
	P(sched_goidle);
#ifdef CONFIG_SMP
	P64(avg_idle);
#endif

	P(ttwu_count);
	P(ttwu_local);

#undef P
#undef P64
#endif
	spin_lock_irqsave(&sched_debug_lock, flags);
	print_cfs_stats(m, cpu);
	print_rt_stats(m, cpu);

	rcu_read_lock();
	print_rq(m, rq, cpu);
	rcu_read_unlock();
	spin_unlock_irqrestore(&sched_debug_lock, flags);
}

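/*
 * Indexed by sysctl_sched_tunable_scaling; the order must match the
 * SCHED_TUNABLESCALING_* values (none, logarithmic, linear).
 */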
static const char *sched_tunable_scaling_names[] = {
	"none",
	"logarithmic",
	"linear"
};

static int sched_debug_show(struct seq_file *m, void *v)
{
	u64 ktime, sched_clk, cpu_clk;
	unsigned long flags;
	int cpu;

	local_irq_save(flags);
	ktime = ktime_to_ns(ktime_get());
	sched_clk = sched_clock();
	cpu_clk = local_clock();
	local_irq_restore(flags);
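	/*
	 * The three clocks are sampled back-to-back with interrupts
	 * off, so they describe (nearly) the same instant and can be
	 * meaningfully compared in the output.
	 */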

	SEQ_printf(m, "Sched Debug Version: v0.10, %s %.*s\n",
		init_utsname()->release,
		(int)strcspn(init_utsname()->version, " "),
		init_utsname()->version);

#define P(x) \
	SEQ_printf(m, "%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
	SEQ_printf(m, "%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
	PN(ktime);
	PN(sched_clk);
	PN(cpu_clk);
	P(jiffies);
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
	P(sched_clock_stable);
#endif
#undef PN
#undef P

	SEQ_printf(m, "\n");
	SEQ_printf(m, "sysctl_sched\n");

#define P(x) \
	SEQ_printf(m, "  .%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
	SEQ_printf(m, "  .%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
	PN(sysctl_sched_latency);
	PN(sysctl_sched_min_granularity);
	PN(sysctl_sched_wakeup_granularity);
	P(sysctl_sched_child_runs_first);
	P(sysctl_sched_features);
#undef PN
#undef P

	SEQ_printf(m, "  .%-40s: %d (%s)\n", "sysctl_sched_tunable_scaling",
		sysctl_sched_tunable_scaling,
		sched_tunable_scaling_names[sysctl_sched_tunable_scaling]);

	for_each_online_cpu(cpu)
		print_cpu(m, cpu);

	SEQ_printf(m, "\n");

	return 0;
}

void sysrq_sched_debug_show(void)
{
	sched_debug_show(NULL, NULL);
}

static int sched_debug_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, sched_debug_show, NULL);
}

static const struct file_operations sched_debug_fops = {
	.open		= sched_debug_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init init_sched_debug_procfs(void)
{
	struct proc_dir_entry *pe;

	pe = proc_create("sched_debug", 0444, NULL, &sched_debug_fops);
	if (!pe)
		return -ENOMEM;
	return 0;
}

__initcall(init_sched_debug_procfs);
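
/*
 * With the procfs entry registered, the whole report can be read from
 * userspace:
 *
 *	$ cat /proc/sched_debug
 *
 * and sysrq_sched_debug_show() dumps the same report to the console.
 * The functions below back the per-task /proc/<pid>/sched file.
 */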

void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
{
	unsigned long nr_switches;

	SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, p->pid,
						get_nr_threads(p));
	SEQ_printf(m,
		"---------------------------------------------------------\n");
#define __P(F) \
	SEQ_printf(m, "%-35s:%21Ld\n", #F, (long long)F)
#define P(F) \
	SEQ_printf(m, "%-35s:%21Ld\n", #F, (long long)p->F)
#define __PN(F) \
	SEQ_printf(m, "%-35s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)F))
#define PN(F) \
	SEQ_printf(m, "%-35s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)p->F))

	PN(se.exec_start);
	PN(se.vruntime);
	PN(se.sum_exec_runtime);

	nr_switches = p->nvcsw + p->nivcsw;

#ifdef CONFIG_SCHEDSTATS
	PN(se.statistics.wait_start);
	PN(se.statistics.sleep_start);
	PN(se.statistics.block_start);
	PN(se.statistics.sleep_max);
	PN(se.statistics.block_max);
	PN(se.statistics.exec_max);
	PN(se.statistics.slice_max);
	PN(se.statistics.wait_max);
	PN(se.statistics.wait_sum);
	P(se.statistics.wait_count);
	PN(se.statistics.iowait_sum);
	P(se.statistics.iowait_count);
	P(se.nr_migrations);
	P(se.statistics.nr_migrations_cold);
	P(se.statistics.nr_failed_migrations_affine);
	P(se.statistics.nr_failed_migrations_running);
	P(se.statistics.nr_failed_migrations_hot);
	P(se.statistics.nr_forced_migrations);
	P(se.statistics.nr_wakeups);
	P(se.statistics.nr_wakeups_sync);
	P(se.statistics.nr_wakeups_migrate);
	P(se.statistics.nr_wakeups_local);
	P(se.statistics.nr_wakeups_remote);
	P(se.statistics.nr_wakeups_affine);
	P(se.statistics.nr_wakeups_affine_attempts);
	P(se.statistics.nr_wakeups_passive);
	P(se.statistics.nr_wakeups_idle);

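	/*
	 * avg_atom is the mean exec time between context switches,
	 * avg_per_cpu the mean exec time between migrations; both fall
	 * back to -1 when the divisor is zero.
	 */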
	{
		u64 avg_atom, avg_per_cpu;

		avg_atom = p->se.sum_exec_runtime;
		if (nr_switches)
			do_div(avg_atom, nr_switches);
		else
			avg_atom = -1LL;

		avg_per_cpu = p->se.sum_exec_runtime;
		if (p->se.nr_migrations) {
			avg_per_cpu = div64_u64(avg_per_cpu,
						p->se.nr_migrations);
		} else {
			avg_per_cpu = -1LL;
		}

		__PN(avg_atom);
		__PN(avg_per_cpu);
	}
#endif
	__P(nr_switches);
	SEQ_printf(m, "%-35s:%21Ld\n",
		   "nr_voluntary_switches", (long long)p->nvcsw);
	SEQ_printf(m, "%-35s:%21Ld\n",
		   "nr_involuntary_switches", (long long)p->nivcsw);

	P(se.load.weight);
	P(policy);
	P(prio);
#undef PN
#undef __PN
#undef P
#undef __P

	{
		unsigned int this_cpu = raw_smp_processor_id();
		u64 t0, t1;

		t0 = cpu_clock(this_cpu);
		t1 = cpu_clock(this_cpu);
		SEQ_printf(m, "%-35s:%21Ld\n",
			   "clock-delta", (long long)(t1-t0));
	}
}

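/*
 * Called when /proc/<pid>/sched is written to: clear the schedstats
 * counters so a fresh measurement baseline starts here.
 */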
void proc_sched_set_task(struct task_struct *p)
{
#ifdef CONFIG_SCHEDSTATS
	memset(&p->se.statistics, 0, sizeof(p->se.statistics));
#endif
}