v4.10.11
 
  1/*
  2 * kernel/sched/debug.c
  3 *
  4 * Print the CFS rbtree
  5 *
  6 * Copyright(C) 2007, Red Hat, Inc., Ingo Molnar
  7 *
  8 * This program is free software; you can redistribute it and/or modify
  9 * it under the terms of the GNU General Public License version 2 as
 10 * published by the Free Software Foundation.
 11 */
 12
 13#include <linux/proc_fs.h>
 14#include <linux/sched.h>
 15#include <linux/seq_file.h>
 16#include <linux/kallsyms.h>
 17#include <linux/utsname.h>
 18#include <linux/mempolicy.h>
 19#include <linux/debugfs.h>
 20
 21#include "sched.h"
 22
 23static DEFINE_SPINLOCK(sched_debug_lock);
 24
 25/*
 26 * This allows printing both to /proc/sched_debug and
 27 * to the console
 28 */
 29#define SEQ_printf(m, x...)			\
 30 do {						\
 31	if (m)					\
 32		seq_printf(m, x);		\
 33	else					\
 34		printk(x);			\
 35 } while (0)
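/*
 * Example of the two sinks (both callers live in this file):
 * sched_debug_show() passes a real seq_file, so SEQ_printf() feeds
 * /proc/sched_debug, while sysrq_sched_debug_show() calls
 * print_cpu(NULL, cpu) and the same output falls through to printk()
 * on the console.
 */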
 36
 37/*
 38 * Ease the printing of nsec fields:
 39 */
 40static long long nsec_high(unsigned long long nsec)
 41{
 42	if ((long long)nsec < 0) {
 43		nsec = -nsec;
 44		do_div(nsec, 1000000);
 45		return -nsec;
 46	}
 47	do_div(nsec, 1000000);
 48
 49	return nsec;
 50}
 51
 52static unsigned long nsec_low(unsigned long long nsec)
 53{
 54	if ((long long)nsec < 0)
 55		nsec = -nsec;
 56
 57	return do_div(nsec, 1000000);
 58}
 59
 60#define SPLIT_NS(x) nsec_high(x), nsec_low(x)
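/*
 * Worked example: for x == 1234567890 (nsec), nsec_high() returns 1234
 * and nsec_low() returns the remainder 567890, so
 *	SEQ_printf(m, "%Ld.%06ld\n", SPLIT_NS(1234567890ULL));
 * prints "1234.567890", i.e. milliseconds with six fractional digits.
 */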
 61
 62#define SCHED_FEAT(name, enabled)	\
 63	#name ,
 64
 65static const char * const sched_feat_names[] = {
 66#include "features.h"
 67};
 68
 69#undef SCHED_FEAT
 70
 71static int sched_feat_show(struct seq_file *m, void *v)
 72{
 73	int i;
 74
 75	for (i = 0; i < __SCHED_FEAT_NR; i++) {
 76		if (!(sysctl_sched_features & (1UL << i)))
 77			seq_puts(m, "NO_");
 78		seq_printf(m, "%s ", sched_feat_names[i]);
 79	}
 80	seq_puts(m, "\n");
 81
 82	return 0;
 83}
 84
 85#ifdef HAVE_JUMP_LABEL
 86
 87#define jump_label_key__true  STATIC_KEY_INIT_TRUE
 88#define jump_label_key__false STATIC_KEY_INIT_FALSE
 89
 90#define SCHED_FEAT(name, enabled)	\
 91	jump_label_key__##enabled ,
 92
 93struct static_key sched_feat_keys[__SCHED_FEAT_NR] = {
 94#include "features.h"
 95};
 96
 97#undef SCHED_FEAT
 98
 99static void sched_feat_disable(int i)
100{
101	static_key_disable(&sched_feat_keys[i]);
102}
103
104static void sched_feat_enable(int i)
105{
106	static_key_enable(&sched_feat_keys[i]);
107}
108#else
109static void sched_feat_disable(int i) { };
110static void sched_feat_enable(int i) { };
111#endif /* HAVE_JUMP_LABEL */
112
113static int sched_feat_set(char *cmp)
114{
115	int i;
116	int neg = 0;
117
118	if (strncmp(cmp, "NO_", 3) == 0) {
119		neg = 1;
120		cmp += 3;
121	}
122
123	for (i = 0; i < __SCHED_FEAT_NR; i++) {
124		if (strcmp(cmp, sched_feat_names[i]) == 0) {
125			if (neg) {
126				sysctl_sched_features &= ~(1UL << i);
127				sched_feat_disable(i);
128			} else {
129				sysctl_sched_features |= (1UL << i);
130				sched_feat_enable(i);
131			}
132			break;
133		}
134	}
135
136	return i;
137}
138
139static ssize_t
140sched_feat_write(struct file *filp, const char __user *ubuf,
141		size_t cnt, loff_t *ppos)
142{
143	char buf[64];
144	char *cmp;
145	int i;
146	struct inode *inode;
147
148	if (cnt > 63)
149		cnt = 63;
150
151	if (copy_from_user(&buf, ubuf, cnt))
152		return -EFAULT;
153
154	buf[cnt] = 0;
155	cmp = strstrip(buf);
156
157	/* Ensure the static_key remains in a consistent state */
158	inode = file_inode(filp);
159	inode_lock(inode);
160	i = sched_feat_set(cmp);
161	inode_unlock(inode);
162	if (i == __SCHED_FEAT_NR)
163		return -EINVAL;
164
165	*ppos += cnt;
166
167	return cnt;
168}
169
170static int sched_feat_open(struct inode *inode, struct file *filp)
171{
172	return single_open(filp, sched_feat_show, NULL);
173}
174
175static const struct file_operations sched_feat_fops = {
176	.open		= sched_feat_open,
177	.write		= sched_feat_write,
178	.read		= seq_read,
179	.llseek		= seq_lseek,
180	.release	= single_release,
181};
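/*
 * Usage sketch from userspace (feature names come from features.h;
 * GENTLE_FAIR_SLEEPERS is used here only as an assumed example):
 *	# cat /sys/kernel/debug/sched_features
 *	GENTLE_FAIR_SLEEPERS START_DEBIT ...
 *	# echo NO_GENTLE_FAIR_SLEEPERS > /sys/kernel/debug/sched_features
 * Writing "NO_<name>" clears the feature bit, a bare "<name>" sets it.
 */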
182
183static __init int sched_init_debug(void)
184{
185	debugfs_create_file("sched_features", 0644, NULL, NULL,
186			&sched_feat_fops);
187
188	return 0;
189}
190late_initcall(sched_init_debug);
191
192#ifdef CONFIG_SMP
193
194#ifdef CONFIG_SYSCTL
195
196static struct ctl_table sd_ctl_dir[] = {
197	{
198		.procname	= "sched_domain",
199		.mode		= 0555,
200	},
201	{}
202};
203
204static struct ctl_table sd_ctl_root[] = {
205	{
206		.procname	= "kernel",
207		.mode		= 0555,
208		.child		= sd_ctl_dir,
209	},
210	{}
211};
212
213static struct ctl_table *sd_alloc_ctl_entry(int n)
214{
215	struct ctl_table *entry =
216		kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);
217
218	return entry;
219}
220
221static void sd_free_ctl_entry(struct ctl_table **tablep)
222{
223	struct ctl_table *entry;
224
225	/*
226	 * In the intermediate directories, both the child directory and
227	 * procname are dynamically allocated and could fail but the mode
228	 * will always be set. In the lowest directory the names are
229	 * static strings and all have proc handlers.
230	 */
231	for (entry = *tablep; entry->mode; entry++) {
232		if (entry->child)
233			sd_free_ctl_entry(&entry->child);
234		if (entry->proc_handler == NULL)
235			kfree(entry->procname);
236	}
237
238	kfree(*tablep);
239	*tablep = NULL;
240}
241
242static int min_load_idx = 0;
243static int max_load_idx = CPU_LOAD_IDX_MAX-1;
244
245static void
246set_table_entry(struct ctl_table *entry,
247		const char *procname, void *data, int maxlen,
248		umode_t mode, proc_handler *proc_handler,
249		bool load_idx)
250{
251	entry->procname = procname;
252	entry->data = data;
253	entry->maxlen = maxlen;
254	entry->mode = mode;
255	entry->proc_handler = proc_handler;
256
257	if (load_idx) {
258		entry->extra1 = &min_load_idx;
259		entry->extra2 = &max_load_idx;
260	}
261}
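/*
 * For entries created with load_idx == true, extra1/extra2 make
 * proc_dointvec_minmax() clamp writes to [0, CPU_LOAD_IDX_MAX - 1],
 * keeping the value a valid index into rq->cpu_load[].
 */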
262
263static struct ctl_table *
264sd_alloc_ctl_domain_table(struct sched_domain *sd)
265{
266	struct ctl_table *table = sd_alloc_ctl_entry(14);
267
268	if (table == NULL)
269		return NULL;
270
271	set_table_entry(&table[0], "min_interval", &sd->min_interval,
272		sizeof(long), 0644, proc_doulongvec_minmax, false);
273	set_table_entry(&table[1], "max_interval", &sd->max_interval,
274		sizeof(long), 0644, proc_doulongvec_minmax, false);
275	set_table_entry(&table[2], "busy_idx", &sd->busy_idx,
276		sizeof(int), 0644, proc_dointvec_minmax, true);
277	set_table_entry(&table[3], "idle_idx", &sd->idle_idx,
278		sizeof(int), 0644, proc_dointvec_minmax, true);
279	set_table_entry(&table[4], "newidle_idx", &sd->newidle_idx,
280		sizeof(int), 0644, proc_dointvec_minmax, true);
281	set_table_entry(&table[5], "wake_idx", &sd->wake_idx,
282		sizeof(int), 0644, proc_dointvec_minmax, true);
283	set_table_entry(&table[6], "forkexec_idx", &sd->forkexec_idx,
284		sizeof(int), 0644, proc_dointvec_minmax, true);
285	set_table_entry(&table[7], "busy_factor", &sd->busy_factor,
286		sizeof(int), 0644, proc_dointvec_minmax, false);
287	set_table_entry(&table[8], "imbalance_pct", &sd->imbalance_pct,
288		sizeof(int), 0644, proc_dointvec_minmax, false);
289	set_table_entry(&table[9], "cache_nice_tries",
290		&sd->cache_nice_tries,
291		sizeof(int), 0644, proc_dointvec_minmax, false);
292	set_table_entry(&table[10], "flags", &sd->flags,
293		sizeof(int), 0644, proc_dointvec_minmax, false);
294	set_table_entry(&table[11], "max_newidle_lb_cost",
295		&sd->max_newidle_lb_cost,
296		sizeof(long), 0644, proc_doulongvec_minmax, false);
297	set_table_entry(&table[12], "name", sd->name,
298		CORENAME_MAX_SIZE, 0444, proc_dostring, false);
299	/* &table[13] is terminator */
300
301	return table;
302}
303
304static struct ctl_table *sd_alloc_ctl_cpu_table(int cpu)
305{
306	struct ctl_table *entry, *table;
307	struct sched_domain *sd;
308	int domain_num = 0, i;
309	char buf[32];
310
311	for_each_domain(cpu, sd)
312		domain_num++;
313	entry = table = sd_alloc_ctl_entry(domain_num + 1);
314	if (table == NULL)
315		return NULL;
316
317	i = 0;
318	for_each_domain(cpu, sd) {
319		snprintf(buf, 32, "domain%d", i);
320		entry->procname = kstrdup(buf, GFP_KERNEL);
321		entry->mode = 0555;
322		entry->child = sd_alloc_ctl_domain_table(sd);
323		entry++;
324		i++;
325	}
326	return table;
327}
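/*
 * Sketch of the resulting hierarchy once registered below (the path
 * follows sd_ctl_root -> sd_ctl_dir -> "cpu%d" -> "domain%d"):
 *	/proc/sys/kernel/sched_domain/cpu0/domain0/min_interval
 *	/proc/sys/kernel/sched_domain/cpu0/domain0/imbalance_pct
 *	...
 */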
328
329static struct ctl_table_header *sd_sysctl_header;
330void register_sched_domain_sysctl(void)
331{
332	int i, cpu_num = num_possible_cpus();
333	struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
334	char buf[32];
335
336	WARN_ON(sd_ctl_dir[0].child);
337	sd_ctl_dir[0].child = entry;
338
339	if (entry == NULL)
340		return;
341
342	for_each_possible_cpu(i) {
343		snprintf(buf, 32, "cpu%d", i);
344		entry->procname = kstrdup(buf, GFP_KERNEL);
345		entry->mode = 0555;
346		entry->child = sd_alloc_ctl_cpu_table(i);
347		entry++;
348	}
349
350	WARN_ON(sd_sysctl_header);
351	sd_sysctl_header = register_sysctl_table(sd_ctl_root);
352}
353
354/* may be called multiple times per register */
355void unregister_sched_domain_sysctl(void)
356{
357	unregister_sysctl_table(sd_sysctl_header);
358	sd_sysctl_header = NULL;
359	if (sd_ctl_dir[0].child)
360		sd_free_ctl_entry(&sd_ctl_dir[0].child);
361}
362#endif /* CONFIG_SYSCTL */
363#endif /* CONFIG_SMP */
364
365#ifdef CONFIG_FAIR_GROUP_SCHED
366static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group *tg)
367{
368	struct sched_entity *se = tg->se[cpu];
369
370#define P(F) \
371	SEQ_printf(m, "  .%-30s: %lld\n", #F, (long long)F)
372#define P_SCHEDSTAT(F) \
373	SEQ_printf(m, "  .%-30s: %lld\n", #F, (long long)schedstat_val(F))
374#define PN(F) \
375	SEQ_printf(m, "  .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)F))
376#define PN_SCHEDSTAT(F) \
377	SEQ_printf(m, "  .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)schedstat_val(F)))
378
379	if (!se)
380		return;
381
382	PN(se->exec_start);
383	PN(se->vruntime);
384	PN(se->sum_exec_runtime);
385	if (schedstat_enabled()) {
386		PN_SCHEDSTAT(se->statistics.wait_start);
387		PN_SCHEDSTAT(se->statistics.sleep_start);
388		PN_SCHEDSTAT(se->statistics.block_start);
389		PN_SCHEDSTAT(se->statistics.sleep_max);
390		PN_SCHEDSTAT(se->statistics.block_max);
391		PN_SCHEDSTAT(se->statistics.exec_max);
392		PN_SCHEDSTAT(se->statistics.slice_max);
393		PN_SCHEDSTAT(se->statistics.wait_max);
394		PN_SCHEDSTAT(se->statistics.wait_sum);
395		P_SCHEDSTAT(se->statistics.wait_count);
396	}
397	P(se->load.weight);
398#ifdef CONFIG_SMP
399	P(se->avg.load_avg);
400	P(se->avg.util_avg);
401#endif
402
403#undef PN_SCHEDSTAT
404#undef PN
405#undef P_SCHEDSTAT
406#undef P
407}
408#endif
409
410#ifdef CONFIG_CGROUP_SCHED
411static char group_path[PATH_MAX];
412
413static char *task_group_path(struct task_group *tg)
414{
415	if (autogroup_path(tg, group_path, PATH_MAX))
416		return group_path;
417
418	cgroup_path(tg->css.cgroup, group_path, PATH_MAX);
419	return group_path;
420}
421#endif
422
423static void
424print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
425{
426	if (rq->curr == p)
427		SEQ_printf(m, "R");
428	else
429		SEQ_printf(m, " ");
430
431	SEQ_printf(m, "%15s %5d %9Ld.%06ld %9Ld %5d ",
432		p->comm, task_pid_nr(p),
433		SPLIT_NS(p->se.vruntime),
434		(long long)(p->nvcsw + p->nivcsw),
435		p->prio);
436
437	SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld",
438		SPLIT_NS(schedstat_val_or_zero(p->se.statistics.wait_sum)),
439		SPLIT_NS(p->se.sum_exec_runtime),
440		SPLIT_NS(schedstat_val_or_zero(p->se.statistics.sum_sleep_runtime)));
441
442#ifdef CONFIG_NUMA_BALANCING
443	SEQ_printf(m, " %d %d", task_node(p), task_numa_group_id(p));
444#endif
445#ifdef CONFIG_CGROUP_SCHED
446	SEQ_printf(m, " %s", task_group_path(task_group(p)));
447#endif
448
449	SEQ_printf(m, "\n");
450}
451
452static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
453{
454	struct task_struct *g, *p;
455
456	SEQ_printf(m,
457	"\nrunnable tasks:\n"
458	"            task   PID         tree-key  switches  prio"
459	"     wait-time             sum-exec        sum-sleep\n"
460	"------------------------------------------------------"
461	"----------------------------------------------------\n");
462
463	rcu_read_lock();
464	for_each_process_thread(g, p) {
465		if (task_cpu(p) != rq_cpu)
466			continue;
467
468		print_task(m, rq, p);
469	}
470	rcu_read_unlock();
471}
472
473void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
474{
475	s64 MIN_vruntime = -1, min_vruntime, max_vruntime = -1,
476		spread, rq0_min_vruntime, spread0;
477	struct rq *rq = cpu_rq(cpu);
478	struct sched_entity *last;
479	unsigned long flags;
480
481#ifdef CONFIG_FAIR_GROUP_SCHED
482	SEQ_printf(m, "\ncfs_rq[%d]:%s\n", cpu, task_group_path(cfs_rq->tg));
483#else
484	SEQ_printf(m, "\ncfs_rq[%d]:\n", cpu);
485#endif
486	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "exec_clock",
487			SPLIT_NS(cfs_rq->exec_clock));
488
489	raw_spin_lock_irqsave(&rq->lock, flags);
490	if (cfs_rq->rb_leftmost)
491		MIN_vruntime = (__pick_first_entity(cfs_rq))->vruntime;
492	last = __pick_last_entity(cfs_rq);
493	if (last)
494		max_vruntime = last->vruntime;
495	min_vruntime = cfs_rq->min_vruntime;
496	rq0_min_vruntime = cpu_rq(0)->cfs.min_vruntime;
497	raw_spin_unlock_irqrestore(&rq->lock, flags);
498	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "MIN_vruntime",
499			SPLIT_NS(MIN_vruntime));
500	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "min_vruntime",
501			SPLIT_NS(min_vruntime));
502	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "max_vruntime",
503			SPLIT_NS(max_vruntime));
504	spread = max_vruntime - MIN_vruntime;
505	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread",
506			SPLIT_NS(spread));
507	spread0 = min_vruntime - rq0_min_vruntime;
508	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread0",
509			SPLIT_NS(spread0));
510	SEQ_printf(m, "  .%-30s: %d\n", "nr_spread_over",
511			cfs_rq->nr_spread_over);
512	SEQ_printf(m, "  .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
513	SEQ_printf(m, "  .%-30s: %ld\n", "load", cfs_rq->load.weight);
514#ifdef CONFIG_SMP
515	SEQ_printf(m, "  .%-30s: %lu\n", "load_avg",
516			cfs_rq->avg.load_avg);
517	SEQ_printf(m, "  .%-30s: %lu\n", "runnable_load_avg",
518			cfs_rq->runnable_load_avg);
519	SEQ_printf(m, "  .%-30s: %lu\n", "util_avg",
520			cfs_rq->avg.util_avg);
521	SEQ_printf(m, "  .%-30s: %ld\n", "removed_load_avg",
522			atomic_long_read(&cfs_rq->removed_load_avg));
523	SEQ_printf(m, "  .%-30s: %ld\n", "removed_util_avg",
524			atomic_long_read(&cfs_rq->removed_util_avg));
525#ifdef CONFIG_FAIR_GROUP_SCHED
526	SEQ_printf(m, "  .%-30s: %lu\n", "tg_load_avg_contrib",
527			cfs_rq->tg_load_avg_contrib);
528	SEQ_printf(m, "  .%-30s: %ld\n", "tg_load_avg",
529			atomic_long_read(&cfs_rq->tg->load_avg));
530#endif
531#endif
532#ifdef CONFIG_CFS_BANDWIDTH
533	SEQ_printf(m, "  .%-30s: %d\n", "throttled",
534			cfs_rq->throttled);
535	SEQ_printf(m, "  .%-30s: %d\n", "throttle_count",
536			cfs_rq->throttle_count);
537#endif
538
539#ifdef CONFIG_FAIR_GROUP_SCHED
540	print_cfs_group_stats(m, cpu, cfs_rq->tg);
541#endif
542}
543
544void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
545{
546#ifdef CONFIG_RT_GROUP_SCHED
547	SEQ_printf(m, "\nrt_rq[%d]:%s\n", cpu, task_group_path(rt_rq->tg));
548#else
549	SEQ_printf(m, "\nrt_rq[%d]:\n", cpu);
550#endif
551
552#define P(x) \
553	SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rt_rq->x))
554#define PN(x) \
555	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rt_rq->x))
556
557	P(rt_nr_running);
558	P(rt_throttled);
559	PN(rt_time);
560	PN(rt_runtime);
561
562#undef PN
563#undef P
564}
565
566void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq)
567{
568	struct dl_bw *dl_bw;
569
570	SEQ_printf(m, "\ndl_rq[%d]:\n", cpu);
571	SEQ_printf(m, "  .%-30s: %ld\n", "dl_nr_running", dl_rq->dl_nr_running);
572#ifdef CONFIG_SMP
573	dl_bw = &cpu_rq(cpu)->rd->dl_bw;
574#else
575	dl_bw = &dl_rq->dl_bw;
576#endif
577	SEQ_printf(m, "  .%-30s: %lld\n", "dl_bw->bw", dl_bw->bw);
578	SEQ_printf(m, "  .%-30s: %lld\n", "dl_bw->total_bw", dl_bw->total_bw);
579}
580
581extern __read_mostly int sched_clock_running;
582
583static void print_cpu(struct seq_file *m, int cpu)
584{
585	struct rq *rq = cpu_rq(cpu);
586	unsigned long flags;
587
588#ifdef CONFIG_X86
589	{
590		unsigned int freq = cpu_khz ? : 1;
591
592		SEQ_printf(m, "cpu#%d, %u.%03u MHz\n",
593			   cpu, freq / 1000, (freq % 1000));
594	}
595#else
596	SEQ_printf(m, "cpu#%d\n", cpu);
597#endif
598
599#define P(x)								\
600do {									\
601	if (sizeof(rq->x) == 4)						\
602		SEQ_printf(m, "  .%-30s: %ld\n", #x, (long)(rq->x));	\
603	else								\
604		SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rq->x));\
605} while (0)
606
607#define PN(x) \
608	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rq->x))
609
610	P(nr_running);
611	SEQ_printf(m, "  .%-30s: %lu\n", "load",
612		   rq->load.weight);
613	P(nr_switches);
614	P(nr_load_updates);
615	P(nr_uninterruptible);
616	PN(next_balance);
617	SEQ_printf(m, "  .%-30s: %ld\n", "curr->pid", (long)(task_pid_nr(rq->curr)));
618	PN(clock);
619	PN(clock_task);
620	P(cpu_load[0]);
621	P(cpu_load[1]);
622	P(cpu_load[2]);
623	P(cpu_load[3]);
624	P(cpu_load[4]);
625#undef P
626#undef PN
627
628#ifdef CONFIG_SMP
629#define P64(n) SEQ_printf(m, "  .%-30s: %Ld\n", #n, rq->n);
630	P64(avg_idle);
631	P64(max_idle_balance_cost);
632#undef P64
633#endif
634
635#define P(n) SEQ_printf(m, "  .%-30s: %d\n", #n, schedstat_val(rq->n));
636	if (schedstat_enabled()) {
637		P(yld_count);
638		P(sched_count);
639		P(sched_goidle);
640		P(ttwu_count);
641		P(ttwu_local);
642	}
643#undef P
644
645	spin_lock_irqsave(&sched_debug_lock, flags);
646	print_cfs_stats(m, cpu);
647	print_rt_stats(m, cpu);
648	print_dl_stats(m, cpu);
649
650	print_rq(m, rq, cpu);
651	spin_unlock_irqrestore(&sched_debug_lock, flags);
652	SEQ_printf(m, "\n");
653}
654
655static const char *sched_tunable_scaling_names[] = {
656	"none",
 657	"logarithmic",
658	"linear"
659};
660
661static void sched_debug_header(struct seq_file *m)
662{
663	u64 ktime, sched_clk, cpu_clk;
664	unsigned long flags;
665
666	local_irq_save(flags);
667	ktime = ktime_to_ns(ktime_get());
668	sched_clk = sched_clock();
669	cpu_clk = local_clock();
670	local_irq_restore(flags);
671
672	SEQ_printf(m, "Sched Debug Version: v0.11, %s %.*s\n",
673		init_utsname()->release,
674		(int)strcspn(init_utsname()->version, " "),
675		init_utsname()->version);
676
677#define P(x) \
678	SEQ_printf(m, "%-40s: %Ld\n", #x, (long long)(x))
679#define PN(x) \
680	SEQ_printf(m, "%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
681	PN(ktime);
682	PN(sched_clk);
683	PN(cpu_clk);
684	P(jiffies);
685#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
686	P(sched_clock_stable());
687#endif
688#undef PN
689#undef P
690
691	SEQ_printf(m, "\n");
692	SEQ_printf(m, "sysctl_sched\n");
693
694#define P(x) \
695	SEQ_printf(m, "  .%-40s: %Ld\n", #x, (long long)(x))
696#define PN(x) \
697	SEQ_printf(m, "  .%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
698	PN(sysctl_sched_latency);
699	PN(sysctl_sched_min_granularity);
700	PN(sysctl_sched_wakeup_granularity);
701	P(sysctl_sched_child_runs_first);
702	P(sysctl_sched_features);
703#undef PN
704#undef P
705
706	SEQ_printf(m, "  .%-40s: %d (%s)\n",
707		"sysctl_sched_tunable_scaling",
708		sysctl_sched_tunable_scaling,
709		sched_tunable_scaling_names[sysctl_sched_tunable_scaling]);
710	SEQ_printf(m, "\n");
711}
712
713static int sched_debug_show(struct seq_file *m, void *v)
714{
715	int cpu = (unsigned long)(v - 2);
716
717	if (cpu != -1)
718		print_cpu(m, cpu);
719	else
720		sched_debug_header(m);
721
722	return 0;
723}
724
725void sysrq_sched_debug_show(void)
726{
727	int cpu;
728
729	sched_debug_header(NULL);
730	for_each_online_cpu(cpu)
731		print_cpu(NULL, cpu);
732
733}
734
735/*
 736 * This iterator needs some explanation.
737 * It returns 1 for the header position.
738 * This means 2 is cpu 0.
739 * In a hotplugged system some cpus, including cpu 0, may be missing so we have
740 * to use cpumask_* to iterate over the cpus.
741 */
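/*
 * Worked example with CPUs {0, 1} online:
 *	*offset == 0 -> returns (void *)1                       (header)
 *	*offset == 1 -> cpumask_first() == 0, returns (void *)2 (cpu 0)
 *	*offset == 2 -> cpumask_next(0) == 1, returns (void *)3 (cpu 1)
 *	*offset == 3 -> cpumask_next(1) >= nr_cpu_ids, returns NULL (EOF)
 * sched_debug_show() recovers the cpu as (unsigned long)(v - 2), so the
 * header position maps to -1.
 */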
742static void *sched_debug_start(struct seq_file *file, loff_t *offset)
743{
744	unsigned long n = *offset;
745
746	if (n == 0)
747		return (void *) 1;
748
749	n--;
750
751	if (n > 0)
752		n = cpumask_next(n - 1, cpu_online_mask);
753	else
754		n = cpumask_first(cpu_online_mask);
755
756	*offset = n + 1;
757
758	if (n < nr_cpu_ids)
759		return (void *)(unsigned long)(n + 2);
760	return NULL;
761}
762
763static void *sched_debug_next(struct seq_file *file, void *data, loff_t *offset)
764{
765	(*offset)++;
766	return sched_debug_start(file, offset);
767}
768
769static void sched_debug_stop(struct seq_file *file, void *data)
770{
771}
772
773static const struct seq_operations sched_debug_sops = {
774	.start = sched_debug_start,
775	.next = sched_debug_next,
776	.stop = sched_debug_stop,
777	.show = sched_debug_show,
778};
779
780static int sched_debug_release(struct inode *inode, struct file *file)
781{
782	seq_release(inode, file);
783
784	return 0;
785}
786
787static int sched_debug_open(struct inode *inode, struct file *filp)
788{
789	int ret = 0;
790
791	ret = seq_open(filp, &sched_debug_sops);
792
793	return ret;
794}
795
796static const struct file_operations sched_debug_fops = {
797	.open		= sched_debug_open,
798	.read		= seq_read,
799	.llseek		= seq_lseek,
800	.release	= sched_debug_release,
801};
802
803static int __init init_sched_debug_procfs(void)
804{
805	struct proc_dir_entry *pe;
806
807	pe = proc_create("sched_debug", 0444, NULL, &sched_debug_fops);
808	if (!pe)
809		return -ENOMEM;
810	return 0;
811}
812
813__initcall(init_sched_debug_procfs);
814
815#define __P(F) \
816	SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)F)
817#define P(F) \
818	SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)p->F)
819#define __PN(F) \
820	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)F))
821#define PN(F) \
822	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)p->F))
823
824
825#ifdef CONFIG_NUMA_BALANCING
826void print_numa_stats(struct seq_file *m, int node, unsigned long tsf,
827		unsigned long tpf, unsigned long gsf, unsigned long gpf)
828{
829	SEQ_printf(m, "numa_faults node=%d ", node);
 830	SEQ_printf(m, "task_private=%lu task_shared=%lu ", tpf, tsf);
 831	SEQ_printf(m, "group_private=%lu group_shared=%lu\n", gpf, gsf);
832}
833#endif
834
835
836static void sched_show_numa(struct task_struct *p, struct seq_file *m)
837{
838#ifdef CONFIG_NUMA_BALANCING
839	struct mempolicy *pol;
840
841	if (p->mm)
842		P(mm->numa_scan_seq);
843
844	task_lock(p);
845	pol = p->mempolicy;
846	if (pol && !(pol->flags & MPOL_F_MORON))
847		pol = NULL;
848	mpol_get(pol);
849	task_unlock(p);
850
851	P(numa_pages_migrated);
852	P(numa_preferred_nid);
853	P(total_numa_faults);
854	SEQ_printf(m, "current_node=%d, numa_group_id=%d\n",
855			task_node(p), task_numa_group_id(p));
856	show_numa_stats(p, m);
857	mpol_put(pol);
858#endif
859}
860
861void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
862{
863	unsigned long nr_switches;
864
865	SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, task_pid_nr(p),
866						get_nr_threads(p));
867	SEQ_printf(m,
868		"---------------------------------------------------------"
869		"----------\n");
870#define __P(F) \
871	SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)F)
872#define P(F) \
873	SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)p->F)
874#define P_SCHEDSTAT(F) \
875	SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)schedstat_val(p->F))
876#define __PN(F) \
877	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)F))
878#define PN(F) \
879	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)p->F))
880#define PN_SCHEDSTAT(F) \
881	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)schedstat_val(p->F)))
882
883	PN(se.exec_start);
884	PN(se.vruntime);
885	PN(se.sum_exec_runtime);
886
887	nr_switches = p->nvcsw + p->nivcsw;
888
889	P(se.nr_migrations);
890
891	if (schedstat_enabled()) {
892		u64 avg_atom, avg_per_cpu;
893
894		PN_SCHEDSTAT(se.statistics.sum_sleep_runtime);
895		PN_SCHEDSTAT(se.statistics.wait_start);
896		PN_SCHEDSTAT(se.statistics.sleep_start);
897		PN_SCHEDSTAT(se.statistics.block_start);
898		PN_SCHEDSTAT(se.statistics.sleep_max);
899		PN_SCHEDSTAT(se.statistics.block_max);
900		PN_SCHEDSTAT(se.statistics.exec_max);
901		PN_SCHEDSTAT(se.statistics.slice_max);
902		PN_SCHEDSTAT(se.statistics.wait_max);
903		PN_SCHEDSTAT(se.statistics.wait_sum);
904		P_SCHEDSTAT(se.statistics.wait_count);
905		PN_SCHEDSTAT(se.statistics.iowait_sum);
906		P_SCHEDSTAT(se.statistics.iowait_count);
907		P_SCHEDSTAT(se.statistics.nr_migrations_cold);
908		P_SCHEDSTAT(se.statistics.nr_failed_migrations_affine);
909		P_SCHEDSTAT(se.statistics.nr_failed_migrations_running);
910		P_SCHEDSTAT(se.statistics.nr_failed_migrations_hot);
911		P_SCHEDSTAT(se.statistics.nr_forced_migrations);
912		P_SCHEDSTAT(se.statistics.nr_wakeups);
913		P_SCHEDSTAT(se.statistics.nr_wakeups_sync);
914		P_SCHEDSTAT(se.statistics.nr_wakeups_migrate);
915		P_SCHEDSTAT(se.statistics.nr_wakeups_local);
916		P_SCHEDSTAT(se.statistics.nr_wakeups_remote);
917		P_SCHEDSTAT(se.statistics.nr_wakeups_affine);
918		P_SCHEDSTAT(se.statistics.nr_wakeups_affine_attempts);
919		P_SCHEDSTAT(se.statistics.nr_wakeups_passive);
920		P_SCHEDSTAT(se.statistics.nr_wakeups_idle);
921
922		avg_atom = p->se.sum_exec_runtime;
923		if (nr_switches)
924			avg_atom = div64_ul(avg_atom, nr_switches);
925		else
926			avg_atom = -1LL;
927
928		avg_per_cpu = p->se.sum_exec_runtime;
929		if (p->se.nr_migrations) {
930			avg_per_cpu = div64_u64(avg_per_cpu,
931						p->se.nr_migrations);
932		} else {
933			avg_per_cpu = -1LL;
934		}
935
936		__PN(avg_atom);
937		__PN(avg_per_cpu);
938	}
939
940	__P(nr_switches);
941	SEQ_printf(m, "%-45s:%21Ld\n",
942		   "nr_voluntary_switches", (long long)p->nvcsw);
943	SEQ_printf(m, "%-45s:%21Ld\n",
944		   "nr_involuntary_switches", (long long)p->nivcsw);
945
946	P(se.load.weight);
947#ifdef CONFIG_SMP
948	P(se.avg.load_sum);
949	P(se.avg.util_sum);
950	P(se.avg.load_avg);
951	P(se.avg.util_avg);
952	P(se.avg.last_update_time);
953#endif
954	P(policy);
955	P(prio);
956#undef PN_SCHEDSTAT
957#undef PN
958#undef __PN
959#undef P_SCHEDSTAT
960#undef P
961#undef __P
962
963	{
964		unsigned int this_cpu = raw_smp_processor_id();
965		u64 t0, t1;
966
967		t0 = cpu_clock(this_cpu);
968		t1 = cpu_clock(this_cpu);
969		SEQ_printf(m, "%-45s:%21Ld\n",
970			   "clock-delta", (long long)(t1-t0));
971	}
972
973	sched_show_numa(p, m);
974}
975
976void proc_sched_set_task(struct task_struct *p)
977{
978#ifdef CONFIG_SCHEDSTATS
979	memset(&p->se.statistics, 0, sizeof(p->se.statistics));
980#endif
981}
v5.9
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * kernel/sched/debug.c
  4 *
  5 * Print the CFS rbtree and other debugging details
  6 *
  7 * Copyright(C) 2007, Red Hat, Inc., Ingo Molnar
  8 */
  9#include "sched.h"
 10
 11static DEFINE_SPINLOCK(sched_debug_lock);
 12
 13/*
 14 * This allows printing both to /proc/sched_debug and
 15 * to the console
 16 */
 17#define SEQ_printf(m, x...)			\
 18 do {						\
 19	if (m)					\
 20		seq_printf(m, x);		\
 21	else					\
 22		pr_cont(x);			\
 23 } while (0)
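/*
 * Same dual-sink idea as in v4.10.11, but the console path now uses
 * pr_cont(), which continues the current console line instead of
 * starting a new record for every fragment.
 */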
 24
 25/*
 26 * Ease the printing of nsec fields:
 27 */
 28static long long nsec_high(unsigned long long nsec)
 29{
 30	if ((long long)nsec < 0) {
 31		nsec = -nsec;
 32		do_div(nsec, 1000000);
 33		return -nsec;
 34	}
 35	do_div(nsec, 1000000);
 36
 37	return nsec;
 38}
 39
 40static unsigned long nsec_low(unsigned long long nsec)
 41{
 42	if ((long long)nsec < 0)
 43		nsec = -nsec;
 44
 45	return do_div(nsec, 1000000);
 46}
 47
 48#define SPLIT_NS(x) nsec_high(x), nsec_low(x)
 49
 50#define SCHED_FEAT(name, enabled)	\
 51	#name ,
 52
 53static const char * const sched_feat_names[] = {
 54#include "features.h"
 55};
 56
 57#undef SCHED_FEAT
 58
 59static int sched_feat_show(struct seq_file *m, void *v)
 60{
 61	int i;
 62
 63	for (i = 0; i < __SCHED_FEAT_NR; i++) {
 64		if (!(sysctl_sched_features & (1UL << i)))
 65			seq_puts(m, "NO_");
 66		seq_printf(m, "%s ", sched_feat_names[i]);
 67	}
 68	seq_puts(m, "\n");
 69
 70	return 0;
 71}
 72
 73#ifdef CONFIG_JUMP_LABEL
 74
 75#define jump_label_key__true  STATIC_KEY_INIT_TRUE
 76#define jump_label_key__false STATIC_KEY_INIT_FALSE
 77
 78#define SCHED_FEAT(name, enabled)	\
 79	jump_label_key__##enabled ,
 80
 81struct static_key sched_feat_keys[__SCHED_FEAT_NR] = {
 82#include "features.h"
 83};
 84
 85#undef SCHED_FEAT
 86
 87static void sched_feat_disable(int i)
 88{
 89	static_key_disable_cpuslocked(&sched_feat_keys[i]);
 90}
 91
 92static void sched_feat_enable(int i)
 93{
 94	static_key_enable_cpuslocked(&sched_feat_keys[i]);
 95}
 96#else
 97static void sched_feat_disable(int i) { };
 98static void sched_feat_enable(int i) { };
 99#endif /* CONFIG_JUMP_LABEL */
100
101static int sched_feat_set(char *cmp)
102{
103	int i;
104	int neg = 0;
105
106	if (strncmp(cmp, "NO_", 3) == 0) {
107		neg = 1;
108		cmp += 3;
109	}
110
111	i = match_string(sched_feat_names, __SCHED_FEAT_NR, cmp);
112	if (i < 0)
113		return i;
114
115	if (neg) {
116		sysctl_sched_features &= ~(1UL << i);
117		sched_feat_disable(i);
118	} else {
119		sysctl_sched_features |= (1UL << i);
120		sched_feat_enable(i);
121	}
122
123	return 0;
124}
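/*
 * match_string() replaces the open-coded lookup loop of the older
 * version above: it returns the index of the matching entry in
 * sched_feat_names[], or -EINVAL when nothing matches, and that error
 * is propagated to the writer below.
 */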
125
126static ssize_t
127sched_feat_write(struct file *filp, const char __user *ubuf,
128		size_t cnt, loff_t *ppos)
129{
130	char buf[64];
131	char *cmp;
132	int ret;
133	struct inode *inode;
134
135	if (cnt > 63)
136		cnt = 63;
137
138	if (copy_from_user(&buf, ubuf, cnt))
139		return -EFAULT;
140
141	buf[cnt] = 0;
142	cmp = strstrip(buf);
143
144	/* Ensure the static_key remains in a consistent state */
145	inode = file_inode(filp);
146	cpus_read_lock();
147	inode_lock(inode);
148	ret = sched_feat_set(cmp);
149	inode_unlock(inode);
150	cpus_read_unlock();
151	if (ret < 0)
152		return ret;
153
154	*ppos += cnt;
155
156	return cnt;
157}
158
159static int sched_feat_open(struct inode *inode, struct file *filp)
160{
161	return single_open(filp, sched_feat_show, NULL);
162}
163
164static const struct file_operations sched_feat_fops = {
165	.open		= sched_feat_open,
166	.write		= sched_feat_write,
167	.read		= seq_read,
168	.llseek		= seq_lseek,
169	.release	= single_release,
170};
171
172__read_mostly bool sched_debug_enabled;
173
174static __init int sched_init_debug(void)
175{
176	debugfs_create_file("sched_features", 0644, NULL, NULL,
177			&sched_feat_fops);
178
179	debugfs_create_bool("sched_debug", 0644, NULL,
180			&sched_debug_enabled);
181
182	return 0;
183}
184late_initcall(sched_init_debug);
185
186#ifdef CONFIG_SMP
187
188#ifdef CONFIG_SYSCTL
189
190static struct ctl_table sd_ctl_dir[] = {
191	{
192		.procname	= "sched_domain",
193		.mode		= 0555,
194	},
195	{}
196};
197
198static struct ctl_table sd_ctl_root[] = {
199	{
200		.procname	= "kernel",
201		.mode		= 0555,
202		.child		= sd_ctl_dir,
203	},
204	{}
205};
206
207static struct ctl_table *sd_alloc_ctl_entry(int n)
208{
209	struct ctl_table *entry =
210		kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);
211
212	return entry;
213}
214
215static void sd_free_ctl_entry(struct ctl_table **tablep)
216{
217	struct ctl_table *entry;
218
219	/*
220	 * In the intermediate directories, both the child directory and
221	 * procname are dynamically allocated and could fail but the mode
222	 * will always be set. In the lowest directory the names are
223	 * static strings and all have proc handlers.
224	 */
225	for (entry = *tablep; entry->mode; entry++) {
226		if (entry->child)
227			sd_free_ctl_entry(&entry->child);
228		if (entry->proc_handler == NULL)
229			kfree(entry->procname);
230	}
231
232	kfree(*tablep);
233	*tablep = NULL;
234}
235
236static void
237set_table_entry(struct ctl_table *entry,
238		const char *procname, void *data, int maxlen,
239		umode_t mode, proc_handler *proc_handler)
240{
241	entry->procname = procname;
242	entry->data = data;
243	entry->maxlen = maxlen;
244	entry->mode = mode;
245	entry->proc_handler = proc_handler;
246}
247
248static struct ctl_table *
249sd_alloc_ctl_domain_table(struct sched_domain *sd)
250{
251	struct ctl_table *table = sd_alloc_ctl_entry(9);
252
253	if (table == NULL)
254		return NULL;
255
256	set_table_entry(&table[0], "min_interval",	  &sd->min_interval,	    sizeof(long), 0644, proc_doulongvec_minmax);
257	set_table_entry(&table[1], "max_interval",	  &sd->max_interval,	    sizeof(long), 0644, proc_doulongvec_minmax);
258	set_table_entry(&table[2], "busy_factor",	  &sd->busy_factor,	    sizeof(int),  0644, proc_dointvec_minmax);
259	set_table_entry(&table[3], "imbalance_pct",	  &sd->imbalance_pct,	    sizeof(int),  0644, proc_dointvec_minmax);
260	set_table_entry(&table[4], "cache_nice_tries",	  &sd->cache_nice_tries,    sizeof(int),  0644, proc_dointvec_minmax);
261	set_table_entry(&table[5], "flags",		  &sd->flags,		    sizeof(int),  0444, proc_dointvec_minmax);
262	set_table_entry(&table[6], "max_newidle_lb_cost", &sd->max_newidle_lb_cost, sizeof(long), 0644, proc_doulongvec_minmax);
263	set_table_entry(&table[7], "name",		  sd->name,	       CORENAME_MAX_SIZE, 0444, proc_dostring);
264	/* &table[8] is terminator */
265
266	return table;
267}
268
269static struct ctl_table *sd_alloc_ctl_cpu_table(int cpu)
270{
271	struct ctl_table *entry, *table;
272	struct sched_domain *sd;
273	int domain_num = 0, i;
274	char buf[32];
275
276	for_each_domain(cpu, sd)
277		domain_num++;
278	entry = table = sd_alloc_ctl_entry(domain_num + 1);
279	if (table == NULL)
280		return NULL;
281
282	i = 0;
283	for_each_domain(cpu, sd) {
284		snprintf(buf, 32, "domain%d", i);
285		entry->procname = kstrdup(buf, GFP_KERNEL);
286		entry->mode = 0555;
287		entry->child = sd_alloc_ctl_domain_table(sd);
288		entry++;
289		i++;
290	}
291	return table;
292}
293
294static cpumask_var_t		sd_sysctl_cpus;
295static struct ctl_table_header	*sd_sysctl_header;
296
297void register_sched_domain_sysctl(void)
298{
299	static struct ctl_table *cpu_entries;
300	static struct ctl_table **cpu_idx;
301	static bool init_done = false;
302	char buf[32];
303	int i;
304
305	if (!cpu_entries) {
306		cpu_entries = sd_alloc_ctl_entry(num_possible_cpus() + 1);
307		if (!cpu_entries)
308			return;
309
310		WARN_ON(sd_ctl_dir[0].child);
311		sd_ctl_dir[0].child = cpu_entries;
312	}
313
314	if (!cpu_idx) {
315		struct ctl_table *e = cpu_entries;
316
317		cpu_idx = kcalloc(nr_cpu_ids, sizeof(struct ctl_table*), GFP_KERNEL);
318		if (!cpu_idx)
319			return;
320
321		/* deal with sparse possible map */
322		for_each_possible_cpu(i) {
323			cpu_idx[i] = e;
324			e++;
325		}
326	}
327
328	if (!cpumask_available(sd_sysctl_cpus)) {
329		if (!alloc_cpumask_var(&sd_sysctl_cpus, GFP_KERNEL))
330			return;
331	}
332
333	if (!init_done) {
334		init_done = true;
335		/* init to possible to not have holes in @cpu_entries */
336		cpumask_copy(sd_sysctl_cpus, cpu_possible_mask);
337	}
338
339	for_each_cpu(i, sd_sysctl_cpus) {
340		struct ctl_table *e = cpu_idx[i];
341
342		if (e->child)
343			sd_free_ctl_entry(&e->child);
344
345		if (!e->procname) {
346			snprintf(buf, 32, "cpu%d", i);
347			e->procname = kstrdup(buf, GFP_KERNEL);
348		}
349		e->mode = 0555;
350		e->child = sd_alloc_ctl_cpu_table(i);
351
352		__cpumask_clear_cpu(i, sd_sysctl_cpus);
353	}
354
355	WARN_ON(sd_sysctl_header);
356	sd_sysctl_header = register_sysctl_table(sd_ctl_root);
357}
358
359void dirty_sched_domain_sysctl(int cpu)
360{
361	if (cpumask_available(sd_sysctl_cpus))
362		__cpumask_set_cpu(cpu, sd_sysctl_cpus);
363}
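/*
 * Rebuild protocol, as implemented above: callers elsewhere in the
 * scheduler mark a CPU with dirty_sched_domain_sysctl() when its
 * domains change; the next register_sched_domain_sysctl() then frees
 * and rebuilds only the marked "cpu%d" subtrees instead of
 * reallocating the whole table.
 */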
364
365/* may be called multiple times per register */
366void unregister_sched_domain_sysctl(void)
367{
368	unregister_sysctl_table(sd_sysctl_header);
369	sd_sysctl_header = NULL;
370}
371#endif /* CONFIG_SYSCTL */
372#endif /* CONFIG_SMP */
373
374#ifdef CONFIG_FAIR_GROUP_SCHED
375static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group *tg)
376{
377	struct sched_entity *se = tg->se[cpu];
378
379#define P(F)		SEQ_printf(m, "  .%-30s: %lld\n",	#F, (long long)F)
380#define P_SCHEDSTAT(F)	SEQ_printf(m, "  .%-30s: %lld\n",	#F, (long long)schedstat_val(F))
381#define PN(F)		SEQ_printf(m, "  .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)F))
382#define PN_SCHEDSTAT(F)	SEQ_printf(m, "  .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)schedstat_val(F)))
383
384	if (!se)
385		return;
386
387	PN(se->exec_start);
388	PN(se->vruntime);
389	PN(se->sum_exec_runtime);
390
391	if (schedstat_enabled()) {
392		PN_SCHEDSTAT(se->statistics.wait_start);
393		PN_SCHEDSTAT(se->statistics.sleep_start);
394		PN_SCHEDSTAT(se->statistics.block_start);
395		PN_SCHEDSTAT(se->statistics.sleep_max);
396		PN_SCHEDSTAT(se->statistics.block_max);
397		PN_SCHEDSTAT(se->statistics.exec_max);
398		PN_SCHEDSTAT(se->statistics.slice_max);
399		PN_SCHEDSTAT(se->statistics.wait_max);
400		PN_SCHEDSTAT(se->statistics.wait_sum);
401		P_SCHEDSTAT(se->statistics.wait_count);
402	}
403
404	P(se->load.weight);
405#ifdef CONFIG_SMP
406	P(se->avg.load_avg);
407	P(se->avg.util_avg);
408	P(se->avg.runnable_avg);
409#endif
410
411#undef PN_SCHEDSTAT
412#undef PN
413#undef P_SCHEDSTAT
414#undef P
415}
416#endif
417
418#ifdef CONFIG_CGROUP_SCHED
419static char group_path[PATH_MAX];
420
421static char *task_group_path(struct task_group *tg)
422{
423	if (autogroup_path(tg, group_path, PATH_MAX))
424		return group_path;
425
426	cgroup_path(tg->css.cgroup, group_path, PATH_MAX);
427
428	return group_path;
429}
430#endif
431
432static void
433print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
434{
435	if (rq->curr == p)
436		SEQ_printf(m, ">R");
437	else
438		SEQ_printf(m, " %c", task_state_to_char(p));
439
440	SEQ_printf(m, " %15s %5d %9Ld.%06ld %9Ld %5d ",
441		p->comm, task_pid_nr(p),
442		SPLIT_NS(p->se.vruntime),
443		(long long)(p->nvcsw + p->nivcsw),
444		p->prio);
445
446	SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld",
447		SPLIT_NS(schedstat_val_or_zero(p->se.statistics.wait_sum)),
448		SPLIT_NS(p->se.sum_exec_runtime),
449		SPLIT_NS(schedstat_val_or_zero(p->se.statistics.sum_sleep_runtime)));
450
451#ifdef CONFIG_NUMA_BALANCING
452	SEQ_printf(m, " %d %d", task_node(p), task_numa_group_id(p));
453#endif
454#ifdef CONFIG_CGROUP_SCHED
455	SEQ_printf(m, " %s", task_group_path(task_group(p)));
456#endif
457
458	SEQ_printf(m, "\n");
459}
460
461static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
462{
463	struct task_struct *g, *p;
464
465	SEQ_printf(m, "\n");
466	SEQ_printf(m, "runnable tasks:\n");
467	SEQ_printf(m, " S            task   PID         tree-key  switches  prio"
468		   "     wait-time             sum-exec        sum-sleep\n");
469	SEQ_printf(m, "-------------------------------------------------------"
470		   "------------------------------------------------------\n");
471
472	rcu_read_lock();
473	for_each_process_thread(g, p) {
474		if (task_cpu(p) != rq_cpu)
475			continue;
476
477		print_task(m, rq, p);
478	}
479	rcu_read_unlock();
480}
481
482void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
483{
484	s64 MIN_vruntime = -1, min_vruntime, max_vruntime = -1,
485		spread, rq0_min_vruntime, spread0;
486	struct rq *rq = cpu_rq(cpu);
487	struct sched_entity *last;
488	unsigned long flags;
489
490#ifdef CONFIG_FAIR_GROUP_SCHED
491	SEQ_printf(m, "\n");
492	SEQ_printf(m, "cfs_rq[%d]:%s\n", cpu, task_group_path(cfs_rq->tg));
493#else
494	SEQ_printf(m, "\n");
495	SEQ_printf(m, "cfs_rq[%d]:\n", cpu);
496#endif
497	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "exec_clock",
498			SPLIT_NS(cfs_rq->exec_clock));
499
500	raw_spin_lock_irqsave(&rq->lock, flags);
501	if (rb_first_cached(&cfs_rq->tasks_timeline))
502		MIN_vruntime = (__pick_first_entity(cfs_rq))->vruntime;
503	last = __pick_last_entity(cfs_rq);
504	if (last)
505		max_vruntime = last->vruntime;
506	min_vruntime = cfs_rq->min_vruntime;
507	rq0_min_vruntime = cpu_rq(0)->cfs.min_vruntime;
508	raw_spin_unlock_irqrestore(&rq->lock, flags);
509	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "MIN_vruntime",
510			SPLIT_NS(MIN_vruntime));
511	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "min_vruntime",
512			SPLIT_NS(min_vruntime));
513	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "max_vruntime",
514			SPLIT_NS(max_vruntime));
515	spread = max_vruntime - MIN_vruntime;
516	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread",
517			SPLIT_NS(spread));
518	spread0 = min_vruntime - rq0_min_vruntime;
519	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread0",
520			SPLIT_NS(spread0));
521	SEQ_printf(m, "  .%-30s: %d\n", "nr_spread_over",
522			cfs_rq->nr_spread_over);
523	SEQ_printf(m, "  .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
524	SEQ_printf(m, "  .%-30s: %ld\n", "load", cfs_rq->load.weight);
525#ifdef CONFIG_SMP
526	SEQ_printf(m, "  .%-30s: %lu\n", "load_avg",
527			cfs_rq->avg.load_avg);
528	SEQ_printf(m, "  .%-30s: %lu\n", "runnable_avg",
529			cfs_rq->avg.runnable_avg);
530	SEQ_printf(m, "  .%-30s: %lu\n", "util_avg",
531			cfs_rq->avg.util_avg);
532	SEQ_printf(m, "  .%-30s: %u\n", "util_est_enqueued",
533			cfs_rq->avg.util_est.enqueued);
534	SEQ_printf(m, "  .%-30s: %ld\n", "removed.load_avg",
535			cfs_rq->removed.load_avg);
536	SEQ_printf(m, "  .%-30s: %ld\n", "removed.util_avg",
537			cfs_rq->removed.util_avg);
538	SEQ_printf(m, "  .%-30s: %ld\n", "removed.runnable_avg",
539			cfs_rq->removed.runnable_avg);
540#ifdef CONFIG_FAIR_GROUP_SCHED
541	SEQ_printf(m, "  .%-30s: %lu\n", "tg_load_avg_contrib",
542			cfs_rq->tg_load_avg_contrib);
543	SEQ_printf(m, "  .%-30s: %ld\n", "tg_load_avg",
544			atomic_long_read(&cfs_rq->tg->load_avg));
545#endif
546#endif
547#ifdef CONFIG_CFS_BANDWIDTH
548	SEQ_printf(m, "  .%-30s: %d\n", "throttled",
549			cfs_rq->throttled);
550	SEQ_printf(m, "  .%-30s: %d\n", "throttle_count",
551			cfs_rq->throttle_count);
552#endif
553
554#ifdef CONFIG_FAIR_GROUP_SCHED
555	print_cfs_group_stats(m, cpu, cfs_rq->tg);
556#endif
557}
558
559void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
560{
561#ifdef CONFIG_RT_GROUP_SCHED
562	SEQ_printf(m, "\n");
563	SEQ_printf(m, "rt_rq[%d]:%s\n", cpu, task_group_path(rt_rq->tg));
564#else
565	SEQ_printf(m, "\n");
566	SEQ_printf(m, "rt_rq[%d]:\n", cpu);
567#endif
568
569#define P(x) \
570	SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rt_rq->x))
571#define PU(x) \
572	SEQ_printf(m, "  .%-30s: %lu\n", #x, (unsigned long)(rt_rq->x))
573#define PN(x) \
574	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rt_rq->x))
575
576	PU(rt_nr_running);
577#ifdef CONFIG_SMP
578	PU(rt_nr_migratory);
579#endif
580	P(rt_throttled);
581	PN(rt_time);
582	PN(rt_runtime);
583
584#undef PN
585#undef PU
586#undef P
587}
588
589void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq)
590{
591	struct dl_bw *dl_bw;
592
593	SEQ_printf(m, "\n");
594	SEQ_printf(m, "dl_rq[%d]:\n", cpu);
595
596#define PU(x) \
597	SEQ_printf(m, "  .%-30s: %lu\n", #x, (unsigned long)(dl_rq->x))
598
599	PU(dl_nr_running);
600#ifdef CONFIG_SMP
601	PU(dl_nr_migratory);
602	dl_bw = &cpu_rq(cpu)->rd->dl_bw;
603#else
604	dl_bw = &dl_rq->dl_bw;
605#endif
606	SEQ_printf(m, "  .%-30s: %lld\n", "dl_bw->bw", dl_bw->bw);
607	SEQ_printf(m, "  .%-30s: %lld\n", "dl_bw->total_bw", dl_bw->total_bw);
608
609#undef PU
610}
611
612static void print_cpu(struct seq_file *m, int cpu)
613{
614	struct rq *rq = cpu_rq(cpu);
615	unsigned long flags;
616
617#ifdef CONFIG_X86
618	{
619		unsigned int freq = cpu_khz ? : 1;
620
621		SEQ_printf(m, "cpu#%d, %u.%03u MHz\n",
622			   cpu, freq / 1000, (freq % 1000));
623	}
624#else
625	SEQ_printf(m, "cpu#%d\n", cpu);
626#endif
627
628#define P(x)								\
629do {									\
630	if (sizeof(rq->x) == 4)						\
631		SEQ_printf(m, "  .%-30s: %ld\n", #x, (long)(rq->x));	\
632	else								\
633		SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rq->x));\
634} while (0)
635
636#define PN(x) \
637	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rq->x))
638
639	P(nr_running);
640	P(nr_switches);
641	P(nr_uninterruptible);
642	PN(next_balance);
643	SEQ_printf(m, "  .%-30s: %ld\n", "curr->pid", (long)(task_pid_nr(rq->curr)));
644	PN(clock);
645	PN(clock_task);
646#undef P
647#undef PN
648
649#ifdef CONFIG_SMP
650#define P64(n) SEQ_printf(m, "  .%-30s: %Ld\n", #n, rq->n);
651	P64(avg_idle);
652	P64(max_idle_balance_cost);
653#undef P64
654#endif
655
656#define P(n) SEQ_printf(m, "  .%-30s: %d\n", #n, schedstat_val(rq->n));
657	if (schedstat_enabled()) {
658		P(yld_count);
659		P(sched_count);
660		P(sched_goidle);
661		P(ttwu_count);
662		P(ttwu_local);
663	}
664#undef P
665
666	spin_lock_irqsave(&sched_debug_lock, flags);
667	print_cfs_stats(m, cpu);
668	print_rt_stats(m, cpu);
669	print_dl_stats(m, cpu);
670
671	print_rq(m, rq, cpu);
672	spin_unlock_irqrestore(&sched_debug_lock, flags);
673	SEQ_printf(m, "\n");
674}
675
676static const char *sched_tunable_scaling_names[] = {
677	"none",
678	"logarithmic",
679	"linear"
680};
681
682static void sched_debug_header(struct seq_file *m)
683{
684	u64 ktime, sched_clk, cpu_clk;
685	unsigned long flags;
686
687	local_irq_save(flags);
688	ktime = ktime_to_ns(ktime_get());
689	sched_clk = sched_clock();
690	cpu_clk = local_clock();
691	local_irq_restore(flags);
692
693	SEQ_printf(m, "Sched Debug Version: v0.11, %s %.*s\n",
694		init_utsname()->release,
695		(int)strcspn(init_utsname()->version, " "),
696		init_utsname()->version);
697
698#define P(x) \
699	SEQ_printf(m, "%-40s: %Ld\n", #x, (long long)(x))
700#define PN(x) \
701	SEQ_printf(m, "%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
702	PN(ktime);
703	PN(sched_clk);
704	PN(cpu_clk);
705	P(jiffies);
706#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
707	P(sched_clock_stable());
708#endif
709#undef PN
710#undef P
711
712	SEQ_printf(m, "\n");
713	SEQ_printf(m, "sysctl_sched\n");
714
715#define P(x) \
716	SEQ_printf(m, "  .%-40s: %Ld\n", #x, (long long)(x))
717#define PN(x) \
718	SEQ_printf(m, "  .%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
719	PN(sysctl_sched_latency);
720	PN(sysctl_sched_min_granularity);
721	PN(sysctl_sched_wakeup_granularity);
722	P(sysctl_sched_child_runs_first);
723	P(sysctl_sched_features);
724#undef PN
725#undef P
726
727	SEQ_printf(m, "  .%-40s: %d (%s)\n",
728		"sysctl_sched_tunable_scaling",
729		sysctl_sched_tunable_scaling,
730		sched_tunable_scaling_names[sysctl_sched_tunable_scaling]);
731	SEQ_printf(m, "\n");
732}
733
734static int sched_debug_show(struct seq_file *m, void *v)
735{
736	int cpu = (unsigned long)(v - 2);
737
738	if (cpu != -1)
739		print_cpu(m, cpu);
740	else
741		sched_debug_header(m);
742
743	return 0;
744}
745
746void sysrq_sched_debug_show(void)
747{
748	int cpu;
749
750	sched_debug_header(NULL);
751	for_each_online_cpu(cpu) {
752		/*
753		 * Need to reset softlockup watchdogs on all CPUs, because
754		 * another CPU might be blocked waiting for us to process
755		 * an IPI or stop_machine.
756		 */
757		touch_nmi_watchdog();
758		touch_all_softlockup_watchdogs();
759		print_cpu(NULL, cpu);
760	}
761}
762
763/*
 764 * This iterator needs some explanation.
765 * It returns 1 for the header position.
766 * This means 2 is CPU 0.
767 * In a hotplugged system some CPUs, including CPU 0, may be missing so we have
768 * to use cpumask_* to iterate over the CPUs.
769 */
770static void *sched_debug_start(struct seq_file *file, loff_t *offset)
771{
772	unsigned long n = *offset;
773
774	if (n == 0)
775		return (void *) 1;
776
777	n--;
778
779	if (n > 0)
780		n = cpumask_next(n - 1, cpu_online_mask);
781	else
782		n = cpumask_first(cpu_online_mask);
783
784	*offset = n + 1;
785
786	if (n < nr_cpu_ids)
787		return (void *)(unsigned long)(n + 2);
788
789	return NULL;
790}
791
792static void *sched_debug_next(struct seq_file *file, void *data, loff_t *offset)
793{
794	(*offset)++;
795	return sched_debug_start(file, offset);
796}
797
798static void sched_debug_stop(struct seq_file *file, void *data)
799{
800}
801
802static const struct seq_operations sched_debug_sops = {
803	.start		= sched_debug_start,
804	.next		= sched_debug_next,
805	.stop		= sched_debug_stop,
806	.show		= sched_debug_show,
807};
808
809static int __init init_sched_debug_procfs(void)
810{
811	if (!proc_create_seq("sched_debug", 0444, NULL, &sched_debug_sops))
812		return -ENOMEM;
813	return 0;
814}
815
816__initcall(init_sched_debug_procfs);
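/*
 * proc_create_seq() attaches sched_debug_sops directly to the proc
 * entry, replacing the hand-rolled open/release file_operations kept
 * in the v4.10.11 version of this file above.
 */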
817
818#define __PS(S, F) SEQ_printf(m, "%-45s:%21Ld\n", S, (long long)(F))
819#define __P(F) __PS(#F, F)
820#define   P(F) __PS(#F, p->F)
821#define __PSN(S, F) SEQ_printf(m, "%-45s:%14Ld.%06ld\n", S, SPLIT_NS((long long)(F)))
822#define __PN(F) __PSN(#F, F)
823#define   PN(F) __PSN(#F, p->F)
824
825
826#ifdef CONFIG_NUMA_BALANCING
827void print_numa_stats(struct seq_file *m, int node, unsigned long tsf,
828		unsigned long tpf, unsigned long gsf, unsigned long gpf)
829{
830	SEQ_printf(m, "numa_faults node=%d ", node);
831	SEQ_printf(m, "task_private=%lu task_shared=%lu ", tpf, tsf);
832	SEQ_printf(m, "group_private=%lu group_shared=%lu\n", gpf, gsf);
833}
834#endif
835
836
837static void sched_show_numa(struct task_struct *p, struct seq_file *m)
838{
839#ifdef CONFIG_NUMA_BALANCING
840	struct mempolicy *pol;
841
842	if (p->mm)
843		P(mm->numa_scan_seq);
844
845	task_lock(p);
846	pol = p->mempolicy;
847	if (pol && !(pol->flags & MPOL_F_MORON))
848		pol = NULL;
849	mpol_get(pol);
850	task_unlock(p);
851
852	P(numa_pages_migrated);
853	P(numa_preferred_nid);
854	P(total_numa_faults);
855	SEQ_printf(m, "current_node=%d, numa_group_id=%d\n",
856			task_node(p), task_numa_group_id(p));
857	show_numa_stats(p, m);
858	mpol_put(pol);
859#endif
860}
861
862void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
863						  struct seq_file *m)
864{
865	unsigned long nr_switches;
866
867	SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, task_pid_nr_ns(p, ns),
868						get_nr_threads(p));
869	SEQ_printf(m,
870		"---------------------------------------------------------"
871		"----------\n");
872
873#define P_SCHEDSTAT(F)  __PS(#F, schedstat_val(p->F))
874#define PN_SCHEDSTAT(F) __PSN(#F, schedstat_val(p->F))
875
876	PN(se.exec_start);
877	PN(se.vruntime);
878	PN(se.sum_exec_runtime);
879
880	nr_switches = p->nvcsw + p->nivcsw;
881
882	P(se.nr_migrations);
883
884	if (schedstat_enabled()) {
885		u64 avg_atom, avg_per_cpu;
886
887		PN_SCHEDSTAT(se.statistics.sum_sleep_runtime);
888		PN_SCHEDSTAT(se.statistics.wait_start);
889		PN_SCHEDSTAT(se.statistics.sleep_start);
890		PN_SCHEDSTAT(se.statistics.block_start);
891		PN_SCHEDSTAT(se.statistics.sleep_max);
892		PN_SCHEDSTAT(se.statistics.block_max);
893		PN_SCHEDSTAT(se.statistics.exec_max);
894		PN_SCHEDSTAT(se.statistics.slice_max);
895		PN_SCHEDSTAT(se.statistics.wait_max);
896		PN_SCHEDSTAT(se.statistics.wait_sum);
897		P_SCHEDSTAT(se.statistics.wait_count);
898		PN_SCHEDSTAT(se.statistics.iowait_sum);
899		P_SCHEDSTAT(se.statistics.iowait_count);
900		P_SCHEDSTAT(se.statistics.nr_migrations_cold);
901		P_SCHEDSTAT(se.statistics.nr_failed_migrations_affine);
902		P_SCHEDSTAT(se.statistics.nr_failed_migrations_running);
903		P_SCHEDSTAT(se.statistics.nr_failed_migrations_hot);
904		P_SCHEDSTAT(se.statistics.nr_forced_migrations);
905		P_SCHEDSTAT(se.statistics.nr_wakeups);
906		P_SCHEDSTAT(se.statistics.nr_wakeups_sync);
907		P_SCHEDSTAT(se.statistics.nr_wakeups_migrate);
908		P_SCHEDSTAT(se.statistics.nr_wakeups_local);
909		P_SCHEDSTAT(se.statistics.nr_wakeups_remote);
910		P_SCHEDSTAT(se.statistics.nr_wakeups_affine);
911		P_SCHEDSTAT(se.statistics.nr_wakeups_affine_attempts);
912		P_SCHEDSTAT(se.statistics.nr_wakeups_passive);
913		P_SCHEDSTAT(se.statistics.nr_wakeups_idle);
914
915		avg_atom = p->se.sum_exec_runtime;
916		if (nr_switches)
917			avg_atom = div64_ul(avg_atom, nr_switches);
918		else
919			avg_atom = -1LL;
920
921		avg_per_cpu = p->se.sum_exec_runtime;
922		if (p->se.nr_migrations) {
923			avg_per_cpu = div64_u64(avg_per_cpu,
924						p->se.nr_migrations);
925		} else {
926			avg_per_cpu = -1LL;
927		}
928
929		__PN(avg_atom);
930		__PN(avg_per_cpu);
931	}
932
933	__P(nr_switches);
934	__PS("nr_voluntary_switches", p->nvcsw);
935	__PS("nr_involuntary_switches", p->nivcsw);
936
937	P(se.load.weight);
938#ifdef CONFIG_SMP
939	P(se.avg.load_sum);
940	P(se.avg.runnable_sum);
941	P(se.avg.util_sum);
942	P(se.avg.load_avg);
943	P(se.avg.runnable_avg);
944	P(se.avg.util_avg);
945	P(se.avg.last_update_time);
946	P(se.avg.util_est.ewma);
947	P(se.avg.util_est.enqueued);
948#endif
949#ifdef CONFIG_UCLAMP_TASK
950	__PS("uclamp.min", p->uclamp_req[UCLAMP_MIN].value);
951	__PS("uclamp.max", p->uclamp_req[UCLAMP_MAX].value);
952	__PS("effective uclamp.min", uclamp_eff_value(p, UCLAMP_MIN));
953	__PS("effective uclamp.max", uclamp_eff_value(p, UCLAMP_MAX));
954#endif
955	P(policy);
956	P(prio);
957	if (task_has_dl_policy(p)) {
958		P(dl.runtime);
959		P(dl.deadline);
960	}
961#undef PN_SCHEDSTAT
962#undef P_SCHEDSTAT
963
964	{
965		unsigned int this_cpu = raw_smp_processor_id();
966		u64 t0, t1;
967
968		t0 = cpu_clock(this_cpu);
969		t1 = cpu_clock(this_cpu);
970		__PS("clock-delta", t1-t0);
971	}
972
973	sched_show_numa(p, m);
974}
975
976void proc_sched_set_task(struct task_struct *p)
977{
978#ifdef CONFIG_SCHEDSTATS
979	memset(&p->se.statistics, 0, sizeof(p->se.statistics));
980#endif
981}