v5.4
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * kernel/sched/debug.c
  4 *
  5 * Print the CFS rbtree and other debugging details
  6 *
  7 * Copyright(C) 2007, Red Hat, Inc., Ingo Molnar
  8 */
  9#include "sched.h"
 10
 11static DEFINE_SPINLOCK(sched_debug_lock);
 12
 13/*
 14 * This allows printing both to /proc/sched_debug and
 15 * to the console
 16 */
 17#define SEQ_printf(m, x...)			\
 18 do {						\
 19	if (m)					\
 20		seq_printf(m, x);		\
 21	else					\
 22		pr_cont(x);			\
 23 } while (0)
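/*
 * Editor's note (illustrative, not part of the source): the same call
 * site serves both sinks, depending on whether a seq_file was passed in:
 *
 *	SEQ_printf(m,    "cpu#%d\n", cpu);   // m != NULL: buffered for /proc/sched_debug
 *	SEQ_printf(NULL, "cpu#%d\n", cpu);   // m == NULL: pr_cont() onto the console
 */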
 24
 25/*
 26 * Ease the printing of nsec fields:
 27 */
 28static long long nsec_high(unsigned long long nsec)
 29{
 30	if ((long long)nsec < 0) {
 31		nsec = -nsec;
 32		do_div(nsec, 1000000);
 33		return -nsec;
 34	}
 35	do_div(nsec, 1000000);
 36
 37	return nsec;
 38}
 39
 40static unsigned long nsec_low(unsigned long long nsec)
 41{
 42	if ((long long)nsec < 0)
 43		nsec = -nsec;
 44
 45	return do_div(nsec, 1000000);
 46}
 47
 48#define SPLIT_NS(x) nsec_high(x), nsec_low(x)
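/*
 * Editor's note (worked example): for nsec = 1234567890, nsec_high()
 * yields 1234 and nsec_low() yields 567890, so a "%Ld.%06ld" format paired
 * with SPLIT_NS() prints "1234.567890" -- the value in milliseconds with
 * six fractional digits. Negative values keep their sign in the high part.
 */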
 49
 50#define SCHED_FEAT(name, enabled)	\
 51	#name ,
 52
 53static const char * const sched_feat_names[] = {
 54#include "features.h"
 55};
 56
 57#undef SCHED_FEAT
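/*
 * Editor's note (sketch): features.h holds one SCHED_FEAT(name, enabled)
 * line per feature, e.g. SCHED_FEAT(GENTLE_FAIR_SLEEPERS, true). With the
 * #name stringizing definition above, including it here expands to
 *
 *	static const char * const sched_feat_names[] = {
 *		"GENTLE_FAIR_SLEEPERS",
 *		...
 *	};
 *
 * keeping the name table in lockstep with the __SCHED_FEAT_* enum.
 */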
 58
 59static int sched_feat_show(struct seq_file *m, void *v)
 60{
 61	int i;
 62
 63	for (i = 0; i < __SCHED_FEAT_NR; i++) {
 64		if (!(sysctl_sched_features & (1UL << i)))
 65			seq_puts(m, "NO_");
 66		seq_printf(m, "%s ", sched_feat_names[i]);
 67	}
 68	seq_puts(m, "\n");
 69
 70	return 0;
 71}
 72
 73#ifdef CONFIG_JUMP_LABEL
 74
 75#define jump_label_key__true  STATIC_KEY_INIT_TRUE
 76#define jump_label_key__false STATIC_KEY_INIT_FALSE
 77
 78#define SCHED_FEAT(name, enabled)	\
 79	jump_label_key__##enabled ,
 80
 81struct static_key sched_feat_keys[__SCHED_FEAT_NR] = {
 82#include "features.h"
 83};
 84
 85#undef SCHED_FEAT
 86
 87static void sched_feat_disable(int i)
 88{
 89	static_key_disable_cpuslocked(&sched_feat_keys[i]);
 90}
 91
 92static void sched_feat_enable(int i)
 93{
 94	static_key_enable_cpuslocked(&sched_feat_keys[i]);
 95}
 96#else
 97static void sched_feat_disable(int i) { };
 98static void sched_feat_enable(int i) { };
 99#endif /* CONFIG_JUMP_LABEL */
100
101static int sched_feat_set(char *cmp)
102{
103	int i;
104	int neg = 0;
105
106	if (strncmp(cmp, "NO_", 3) == 0) {
107		neg = 1;
108		cmp += 3;
109	}
110
111	i = match_string(sched_feat_names, __SCHED_FEAT_NR, cmp);
112	if (i < 0)
113		return i;
114
115	if (neg) {
116		sysctl_sched_features &= ~(1UL << i);
117		sched_feat_disable(i);
118	} else {
119		sysctl_sched_features |= (1UL << i);
120		sched_feat_enable(i);
121	}
122
123	return 0;
124}
125
126static ssize_t
127sched_feat_write(struct file *filp, const char __user *ubuf,
128		size_t cnt, loff_t *ppos)
129{
130	char buf[64];
131	char *cmp;
132	int ret;
133	struct inode *inode;
134
135	if (cnt > 63)
136		cnt = 63;
137
138	if (copy_from_user(&buf, ubuf, cnt))
139		return -EFAULT;
140
141	buf[cnt] = 0;
142	cmp = strstrip(buf);
143
144	/* Ensure the static_key remains in a consistent state */
145	inode = file_inode(filp);
146	cpus_read_lock();
147	inode_lock(inode);
148	ret = sched_feat_set(cmp);
149	inode_unlock(inode);
150	cpus_read_unlock();
151	if (ret < 0)
152		return ret;
153
154	*ppos += cnt;
155
156	return cnt;
157}
158
159static int sched_feat_open(struct inode *inode, struct file *filp)
160{
161	return single_open(filp, sched_feat_show, NULL);
162}
163
164static const struct file_operations sched_feat_fops = {
165	.open		= sched_feat_open,
166	.write		= sched_feat_write,
167	.read		= seq_read,
168	.llseek		= seq_lseek,
169	.release	= single_release,
170};
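/*
 * Editor's note (illustrative usage): with debugfs mounted in the usual
 * place, features can be listed and toggled from a shell:
 *
 *	# cat /sys/kernel/debug/sched_features
 *	GENTLE_FAIR_SLEEPERS START_DEBIT ...
 *	# echo NO_GENTLE_FAIR_SLEEPERS > /sys/kernel/debug/sched_features
 *
 * The "NO_" prefix is parsed by sched_feat_set() above to clear the bit
 * and flip the matching static key.
 */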
171
172__read_mostly bool sched_debug_enabled;
173
174static __init int sched_init_debug(void)
175{
176	debugfs_create_file("sched_features", 0644, NULL, NULL,
177			&sched_feat_fops);
178
179	debugfs_create_bool("sched_debug", 0644, NULL,
180			&sched_debug_enabled);
181
182	return 0;
183}
184late_initcall(sched_init_debug);
185
186#ifdef CONFIG_SMP
187
188#ifdef CONFIG_SYSCTL
189
190static struct ctl_table sd_ctl_dir[] = {
191	{
192		.procname	= "sched_domain",
193		.mode		= 0555,
194	},
195	{}
196};
197
198static struct ctl_table sd_ctl_root[] = {
199	{
200		.procname	= "kernel",
201		.mode		= 0555,
202		.child		= sd_ctl_dir,
203	},
204	{}
205};
206
207static struct ctl_table *sd_alloc_ctl_entry(int n)
208{
209	struct ctl_table *entry =
210		kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);
211
212	return entry;
213}
214
215static void sd_free_ctl_entry(struct ctl_table **tablep)
216{
217	struct ctl_table *entry;
218
219	/*
220	 * In the intermediate directories, both the child directory and
221	 * procname are dynamically allocated and could fail but the mode
222	 * will always be set. In the lowest directory the names are
223	 * static strings and all have proc handlers.
224	 */
225	for (entry = *tablep; entry->mode; entry++) {
226		if (entry->child)
227			sd_free_ctl_entry(&entry->child);
228		if (entry->proc_handler == NULL)
229			kfree(entry->procname);
230	}
231
232	kfree(*tablep);
233	*tablep = NULL;
234}
235
236static void
237set_table_entry(struct ctl_table *entry,
238		const char *procname, void *data, int maxlen,
239		umode_t mode, proc_handler *proc_handler)
240{
241	entry->procname = procname;
242	entry->data = data;
243	entry->maxlen = maxlen;
244	entry->mode = mode;
245	entry->proc_handler = proc_handler;
246}
247
248static struct ctl_table *
249sd_alloc_ctl_domain_table(struct sched_domain *sd)
250{
251	struct ctl_table *table = sd_alloc_ctl_entry(9);
252
253	if (table == NULL)
254		return NULL;
255
256	set_table_entry(&table[0], "min_interval",	  &sd->min_interval,	    sizeof(long), 0644, proc_doulongvec_minmax);
257	set_table_entry(&table[1], "max_interval",	  &sd->max_interval,	    sizeof(long), 0644, proc_doulongvec_minmax);
258	set_table_entry(&table[2], "busy_factor",	  &sd->busy_factor,	    sizeof(int),  0644, proc_dointvec_minmax);
259	set_table_entry(&table[3], "imbalance_pct",	  &sd->imbalance_pct,	    sizeof(int),  0644, proc_dointvec_minmax);
260	set_table_entry(&table[4], "cache_nice_tries",	  &sd->cache_nice_tries,    sizeof(int),  0644, proc_dointvec_minmax);
261	set_table_entry(&table[5], "flags",		  &sd->flags,		    sizeof(int),  0644, proc_dointvec_minmax);
262	set_table_entry(&table[6], "max_newidle_lb_cost", &sd->max_newidle_lb_cost, sizeof(long), 0644, proc_doulongvec_minmax);
263	set_table_entry(&table[7], "name",		  sd->name,	       CORENAME_MAX_SIZE, 0444, proc_dostring);
264	/* &table[8] is terminator */
265
266	return table;
267}
268
269static struct ctl_table *sd_alloc_ctl_cpu_table(int cpu)
270{
271	struct ctl_table *entry, *table;
272	struct sched_domain *sd;
273	int domain_num = 0, i;
274	char buf[32];
275
276	for_each_domain(cpu, sd)
277		domain_num++;
278	entry = table = sd_alloc_ctl_entry(domain_num + 1);
279	if (table == NULL)
280		return NULL;
281
282	i = 0;
283	for_each_domain(cpu, sd) {
284		snprintf(buf, 32, "domain%d", i);
285		entry->procname = kstrdup(buf, GFP_KERNEL);
286		entry->mode = 0555;
287		entry->child = sd_alloc_ctl_domain_table(sd);
288		entry++;
289		i++;
290	}
291	return table;
292}
293
294static cpumask_var_t		sd_sysctl_cpus;
295static struct ctl_table_header	*sd_sysctl_header;
296
297void register_sched_domain_sysctl(void)
298{
299	static struct ctl_table *cpu_entries;
300	static struct ctl_table **cpu_idx;
301	static bool init_done = false;
302	char buf[32];
303	int i;
304
305	if (!cpu_entries) {
306		cpu_entries = sd_alloc_ctl_entry(num_possible_cpus() + 1);
307		if (!cpu_entries)
308			return;
309
310		WARN_ON(sd_ctl_dir[0].child);
311		sd_ctl_dir[0].child = cpu_entries;
312	}
313
314	if (!cpu_idx) {
315		struct ctl_table *e = cpu_entries;
316
317		cpu_idx = kcalloc(nr_cpu_ids, sizeof(struct ctl_table*), GFP_KERNEL);
318		if (!cpu_idx)
319			return;
320
321		/* deal with sparse possible map */
322		for_each_possible_cpu(i) {
323			cpu_idx[i] = e;
324			e++;
325		}
326	}
327
328	if (!cpumask_available(sd_sysctl_cpus)) {
329		if (!alloc_cpumask_var(&sd_sysctl_cpus, GFP_KERNEL))
330			return;
331	}
332
333	if (!init_done) {
334		init_done = true;
335		/* init to possible to not have holes in @cpu_entries */
336		cpumask_copy(sd_sysctl_cpus, cpu_possible_mask);
337	}
338
339	for_each_cpu(i, sd_sysctl_cpus) {
340		struct ctl_table *e = cpu_idx[i];
341
342		if (e->child)
343			sd_free_ctl_entry(&e->child);
344
345		if (!e->procname) {
346			snprintf(buf, 32, "cpu%d", i);
347			e->procname = kstrdup(buf, GFP_KERNEL);
348		}
349		e->mode = 0555;
350		e->child = sd_alloc_ctl_cpu_table(i);
351
352		__cpumask_clear_cpu(i, sd_sysctl_cpus);
353	}
354
355	WARN_ON(sd_sysctl_header);
356	sd_sysctl_header = register_sysctl_table(sd_ctl_root);
357}
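/*
 * Editor's note (sketch of the result): the tables built above surface as
 * a per-CPU, per-domain tree under /proc/sys, for example:
 *
 *	/proc/sys/kernel/sched_domain/cpu0/domain0/min_interval
 *	/proc/sys/kernel/sched_domain/cpu0/domain1/imbalance_pct
 *
 * one cpuN directory per possible CPU, one domainN level for each
 * sched_domain above that CPU.
 */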
358
359void dirty_sched_domain_sysctl(int cpu)
360{
361	if (cpumask_available(sd_sysctl_cpus))
362		__cpumask_set_cpu(cpu, sd_sysctl_cpus);
363}
364
365/* may be called multiple times per register */
366void unregister_sched_domain_sysctl(void)
367{
368	unregister_sysctl_table(sd_sysctl_header);
369	sd_sysctl_header = NULL;
370}
371#endif /* CONFIG_SYSCTL */
372#endif /* CONFIG_SMP */
373
374#ifdef CONFIG_FAIR_GROUP_SCHED
375static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group *tg)
376{
377	struct sched_entity *se = tg->se[cpu];
378
379#define P(F)		SEQ_printf(m, "  .%-30s: %lld\n",	#F, (long long)F)
380#define P_SCHEDSTAT(F)	SEQ_printf(m, "  .%-30s: %lld\n",	#F, (long long)schedstat_val(F))
381#define PN(F)		SEQ_printf(m, "  .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)F))
382#define PN_SCHEDSTAT(F)	SEQ_printf(m, "  .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)schedstat_val(F)))
383
384	if (!se)
385		return;
386
387	PN(se->exec_start);
388	PN(se->vruntime);
389	PN(se->sum_exec_runtime);
390
391	if (schedstat_enabled()) {
392		PN_SCHEDSTAT(se->statistics.wait_start);
393		PN_SCHEDSTAT(se->statistics.sleep_start);
394		PN_SCHEDSTAT(se->statistics.block_start);
395		PN_SCHEDSTAT(se->statistics.sleep_max);
396		PN_SCHEDSTAT(se->statistics.block_max);
397		PN_SCHEDSTAT(se->statistics.exec_max);
398		PN_SCHEDSTAT(se->statistics.slice_max);
399		PN_SCHEDSTAT(se->statistics.wait_max);
400		PN_SCHEDSTAT(se->statistics.wait_sum);
401		P_SCHEDSTAT(se->statistics.wait_count);
402	}
403
404	P(se->load.weight);
405	P(se->runnable_weight);
406#ifdef CONFIG_SMP
407	P(se->avg.load_avg);
408	P(se->avg.util_avg);
409	P(se->avg.runnable_load_avg);
410#endif
411
412#undef PN_SCHEDSTAT
413#undef PN
414#undef P_SCHEDSTAT
415#undef P
416}
417#endif
418
419#ifdef CONFIG_CGROUP_SCHED
420static char group_path[PATH_MAX];
421
422static char *task_group_path(struct task_group *tg)
423{
424	if (autogroup_path(tg, group_path, PATH_MAX))
425		return group_path;
426
427	cgroup_path(tg->css.cgroup, group_path, PATH_MAX);
428
429	return group_path;
430}
431#endif
432
433static void
434print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
435{
436	if (rq->curr == p)
437		SEQ_printf(m, ">R");
438	else
439		SEQ_printf(m, " %c", task_state_to_char(p));
440
441	SEQ_printf(m, "%15s %5d %9Ld.%06ld %9Ld %5d ",
442		p->comm, task_pid_nr(p),
443		SPLIT_NS(p->se.vruntime),
444		(long long)(p->nvcsw + p->nivcsw),
445		p->prio);
446
447	SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld",
448		SPLIT_NS(schedstat_val_or_zero(p->se.statistics.wait_sum)),
449		SPLIT_NS(p->se.sum_exec_runtime),
450		SPLIT_NS(schedstat_val_or_zero(p->se.statistics.sum_sleep_runtime)));
451
452#ifdef CONFIG_NUMA_BALANCING
453	SEQ_printf(m, " %d %d", task_node(p), task_numa_group_id(p));
454#endif
455#ifdef CONFIG_CGROUP_SCHED
456	SEQ_printf(m, " %s", task_group_path(task_group(p)));
457#endif
458
459	SEQ_printf(m, "\n");
460}
461
462static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
463{
464	struct task_struct *g, *p;
465
466	SEQ_printf(m, "\n");
467	SEQ_printf(m, "runnable tasks:\n");
468	SEQ_printf(m, " S           task   PID         tree-key  switches  prio"
469		   "     wait-time             sum-exec        sum-sleep\n");
470	SEQ_printf(m, "-------------------------------------------------------"
471		   "----------------------------------------------------\n");
472
473	rcu_read_lock();
474	for_each_process_thread(g, p) {
475		if (task_cpu(p) != rq_cpu)
476			continue;
477
478		print_task(m, rq, p);
479	}
480	rcu_read_unlock();
481}
482
483void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
484{
485	s64 MIN_vruntime = -1, min_vruntime, max_vruntime = -1,
486		spread, rq0_min_vruntime, spread0;
487	struct rq *rq = cpu_rq(cpu);
488	struct sched_entity *last;
489	unsigned long flags;
490
491#ifdef CONFIG_FAIR_GROUP_SCHED
492	SEQ_printf(m, "\n");
493	SEQ_printf(m, "cfs_rq[%d]:%s\n", cpu, task_group_path(cfs_rq->tg));
494#else
495	SEQ_printf(m, "\n");
496	SEQ_printf(m, "cfs_rq[%d]:\n", cpu);
497#endif
498	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "exec_clock",
499			SPLIT_NS(cfs_rq->exec_clock));
500
501	raw_spin_lock_irqsave(&rq->lock, flags);
502	if (rb_first_cached(&cfs_rq->tasks_timeline))
503		MIN_vruntime = (__pick_first_entity(cfs_rq))->vruntime;
504	last = __pick_last_entity(cfs_rq);
505	if (last)
506		max_vruntime = last->vruntime;
507	min_vruntime = cfs_rq->min_vruntime;
508	rq0_min_vruntime = cpu_rq(0)->cfs.min_vruntime;
509	raw_spin_unlock_irqrestore(&rq->lock, flags);
510	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "MIN_vruntime",
511			SPLIT_NS(MIN_vruntime));
512	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "min_vruntime",
513			SPLIT_NS(min_vruntime));
514	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "max_vruntime",
515			SPLIT_NS(max_vruntime));
516	spread = max_vruntime - MIN_vruntime;
517	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread",
518			SPLIT_NS(spread));
519	spread0 = min_vruntime - rq0_min_vruntime;
520	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread0",
521			SPLIT_NS(spread0));
522	SEQ_printf(m, "  .%-30s: %d\n", "nr_spread_over",
523			cfs_rq->nr_spread_over);
524	SEQ_printf(m, "  .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
525	SEQ_printf(m, "  .%-30s: %ld\n", "load", cfs_rq->load.weight);
526#ifdef CONFIG_SMP
527	SEQ_printf(m, "  .%-30s: %ld\n", "runnable_weight", cfs_rq->runnable_weight);
528	SEQ_printf(m, "  .%-30s: %lu\n", "load_avg",
529			cfs_rq->avg.load_avg);
530	SEQ_printf(m, "  .%-30s: %lu\n", "runnable_load_avg",
531			cfs_rq->avg.runnable_load_avg);
532	SEQ_printf(m, "  .%-30s: %lu\n", "util_avg",
533			cfs_rq->avg.util_avg);
534	SEQ_printf(m, "  .%-30s: %u\n", "util_est_enqueued",
535			cfs_rq->avg.util_est.enqueued);
536	SEQ_printf(m, "  .%-30s: %ld\n", "removed.load_avg",
537			cfs_rq->removed.load_avg);
538	SEQ_printf(m, "  .%-30s: %ld\n", "removed.util_avg",
539			cfs_rq->removed.util_avg);
540	SEQ_printf(m, "  .%-30s: %ld\n", "removed.runnable_sum",
541			cfs_rq->removed.runnable_sum);
542#ifdef CONFIG_FAIR_GROUP_SCHED
543	SEQ_printf(m, "  .%-30s: %lu\n", "tg_load_avg_contrib",
544			cfs_rq->tg_load_avg_contrib);
545	SEQ_printf(m, "  .%-30s: %ld\n", "tg_load_avg",
546			atomic_long_read(&cfs_rq->tg->load_avg));
547#endif
548#endif
549#ifdef CONFIG_CFS_BANDWIDTH
550	SEQ_printf(m, "  .%-30s: %d\n", "throttled",
551			cfs_rq->throttled);
552	SEQ_printf(m, "  .%-30s: %d\n", "throttle_count",
553			cfs_rq->throttle_count);
554#endif
555
556#ifdef CONFIG_FAIR_GROUP_SCHED
557	print_cfs_group_stats(m, cpu, cfs_rq->tg);
558#endif
559}
560
561void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
562{
563#ifdef CONFIG_RT_GROUP_SCHED
564	SEQ_printf(m, "\n");
565	SEQ_printf(m, "rt_rq[%d]:%s\n", cpu, task_group_path(rt_rq->tg));
566#else
567	SEQ_printf(m, "\n");
568	SEQ_printf(m, "rt_rq[%d]:\n", cpu);
569#endif
570
571#define P(x) \
572	SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rt_rq->x))
573#define PU(x) \
574	SEQ_printf(m, "  .%-30s: %lu\n", #x, (unsigned long)(rt_rq->x))
575#define PN(x) \
576	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rt_rq->x))
577
578	PU(rt_nr_running);
579#ifdef CONFIG_SMP
580	PU(rt_nr_migratory);
581#endif
582	P(rt_throttled);
583	PN(rt_time);
584	PN(rt_runtime);
585
586#undef PN
587#undef PU
588#undef P
589}
590
591void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq)
592{
593	struct dl_bw *dl_bw;
594
595	SEQ_printf(m, "\n");
596	SEQ_printf(m, "dl_rq[%d]:\n", cpu);
597
598#define PU(x) \
599	SEQ_printf(m, "  .%-30s: %lu\n", #x, (unsigned long)(dl_rq->x))
600
601	PU(dl_nr_running);
602#ifdef CONFIG_SMP
603	PU(dl_nr_migratory);
604	dl_bw = &cpu_rq(cpu)->rd->dl_bw;
605#else
606	dl_bw = &dl_rq->dl_bw;
607#endif
608	SEQ_printf(m, "  .%-30s: %lld\n", "dl_bw->bw", dl_bw->bw);
609	SEQ_printf(m, "  .%-30s: %lld\n", "dl_bw->total_bw", dl_bw->total_bw);
610
611#undef PU
612}
613
614static void print_cpu(struct seq_file *m, int cpu)
615{
616	struct rq *rq = cpu_rq(cpu);
617	unsigned long flags;
618
619#ifdef CONFIG_X86
620	{
621		unsigned int freq = cpu_khz ? : 1;
622
623		SEQ_printf(m, "cpu#%d, %u.%03u MHz\n",
624			   cpu, freq / 1000, (freq % 1000));
625	}
626#else
627	SEQ_printf(m, "cpu#%d\n", cpu);
628#endif
629
630#define P(x)								\
631do {									\
632	if (sizeof(rq->x) == 4)						\
633		SEQ_printf(m, "  .%-30s: %ld\n", #x, (long)(rq->x));	\
634	else								\
635		SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rq->x));\
636} while (0)
637
638#define PN(x) \
639	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rq->x))
640
641	P(nr_running);
642	P(nr_switches);
643	P(nr_load_updates);
644	P(nr_uninterruptible);
645	PN(next_balance);
646	SEQ_printf(m, "  .%-30s: %ld\n", "curr->pid", (long)(task_pid_nr(rq->curr)));
647	PN(clock);
648	PN(clock_task);
649#undef P
650#undef PN
651
652#ifdef CONFIG_SMP
653#define P64(n) SEQ_printf(m, "  .%-30s: %Ld\n", #n, rq->n);
654	P64(avg_idle);
655	P64(max_idle_balance_cost);
656#undef P64
657#endif
658
659#define P(n) SEQ_printf(m, "  .%-30s: %d\n", #n, schedstat_val(rq->n));
660	if (schedstat_enabled()) {
661		P(yld_count);
662		P(sched_count);
663		P(sched_goidle);
664		P(ttwu_count);
665		P(ttwu_local);
666	}
667#undef P
668
669	spin_lock_irqsave(&sched_debug_lock, flags);
670	print_cfs_stats(m, cpu);
671	print_rt_stats(m, cpu);
672	print_dl_stats(m, cpu);
673
674	print_rq(m, rq, cpu);
675	spin_unlock_irqrestore(&sched_debug_lock, flags);
676	SEQ_printf(m, "\n");
677}
678
679static const char *sched_tunable_scaling_names[] = {
680	"none",
681	"logarithmic",
682	"linear"
683};
684
685static void sched_debug_header(struct seq_file *m)
686{
687	u64 ktime, sched_clk, cpu_clk;
688	unsigned long flags;
689
690	local_irq_save(flags);
691	ktime = ktime_to_ns(ktime_get());
692	sched_clk = sched_clock();
693	cpu_clk = local_clock();
694	local_irq_restore(flags);
695
696	SEQ_printf(m, "Sched Debug Version: v0.11, %s %.*s\n",
697		init_utsname()->release,
698		(int)strcspn(init_utsname()->version, " "),
699		init_utsname()->version);
700
701#define P(x) \
702	SEQ_printf(m, "%-40s: %Ld\n", #x, (long long)(x))
703#define PN(x) \
704	SEQ_printf(m, "%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
705	PN(ktime);
706	PN(sched_clk);
707	PN(cpu_clk);
708	P(jiffies);
709#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
710	P(sched_clock_stable());
711#endif
712#undef PN
713#undef P
714
715	SEQ_printf(m, "\n");
716	SEQ_printf(m, "sysctl_sched\n");
717
718#define P(x) \
719	SEQ_printf(m, "  .%-40s: %Ld\n", #x, (long long)(x))
720#define PN(x) \
721	SEQ_printf(m, "  .%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
722	PN(sysctl_sched_latency);
723	PN(sysctl_sched_min_granularity);
724	PN(sysctl_sched_wakeup_granularity);
725	P(sysctl_sched_child_runs_first);
726	P(sysctl_sched_features);
727#undef PN
728#undef P
729
730	SEQ_printf(m, "  .%-40s: %d (%s)\n",
731		"sysctl_sched_tunable_scaling",
732		sysctl_sched_tunable_scaling,
733		sched_tunable_scaling_names[sysctl_sched_tunable_scaling]);
734	SEQ_printf(m, "\n");
735}
736
737static int sched_debug_show(struct seq_file *m, void *v)
738{
739	int cpu = (unsigned long)(v - 2);
740
741	if (cpu != -1)
742		print_cpu(m, cpu);
743	else
744		sched_debug_header(m);
745
746	return 0;
747}
748
749void sysrq_sched_debug_show(void)
750{
751	int cpu;
752
753	sched_debug_header(NULL);
754	for_each_online_cpu(cpu)
755		print_cpu(NULL, cpu);
756
757}
758
759/*
760 * This iterator needs some explanation.
761 * It returns 1 for the header position.
762 * This means 2 is CPU 0.
763 * In a hotplugged system some CPUs, including CPU 0, may be missing so we have
764 * to use cpumask_* to iterate over the CPUs.
765 */
766static void *sched_debug_start(struct seq_file *file, loff_t *offset)
767{
768	unsigned long n = *offset;
769
770	if (n == 0)
771		return (void *) 1;
772
773	n--;
774
775	if (n > 0)
776		n = cpumask_next(n - 1, cpu_online_mask);
777	else
778		n = cpumask_first(cpu_online_mask);
779
780	*offset = n + 1;
781
782	if (n < nr_cpu_ids)
783		return (void *)(unsigned long)(n + 2);
784
785	return NULL;
786}
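/*
 * Editor's note (worked trace, assuming CPUs {0, 2, 3} are online):
 * *offset == 0 returns (void *)1, the header slot; *offset == 1 maps to
 * cpumask_first() == 0 and returns (void *)2 for CPU 0; *offset == 2 maps
 * to cpumask_next(0) == 2 and returns (void *)4 for CPU 2; once
 * cpumask_next() runs past the online mask, n >= nr_cpu_ids and the
 * iteration ends with NULL.
 */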
787
788static void *sched_debug_next(struct seq_file *file, void *data, loff_t *offset)
789{
790	(*offset)++;
791	return sched_debug_start(file, offset);
792}
793
794static void sched_debug_stop(struct seq_file *file, void *data)
795{
796}
797
798static const struct seq_operations sched_debug_sops = {
799	.start		= sched_debug_start,
800	.next		= sched_debug_next,
801	.stop		= sched_debug_stop,
802	.show		= sched_debug_show,
803};
804
805static int __init init_sched_debug_procfs(void)
806{
807	if (!proc_create_seq("sched_debug", 0444, NULL, &sched_debug_sops))
808		return -ENOMEM;
809	return 0;
810}
811
812__initcall(init_sched_debug_procfs);
813
814#define __P(F)	SEQ_printf(m, "%-45s:%21Ld\n",	     #F, (long long)F)
815#define   P(F)	SEQ_printf(m, "%-45s:%21Ld\n",	     #F, (long long)p->F)
816#define __PN(F)	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)F))
817#define   PN(F)	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)p->F))
818
819
820#ifdef CONFIG_NUMA_BALANCING
821void print_numa_stats(struct seq_file *m, int node, unsigned long tsf,
822		unsigned long tpf, unsigned long gsf, unsigned long gpf)
823{
824	SEQ_printf(m, "numa_faults node=%d ", node);
825	SEQ_printf(m, "task_private=%lu task_shared=%lu ", tpf, tsf);
826	SEQ_printf(m, "group_private=%lu group_shared=%lu\n", gpf, gsf);
827}
828#endif
829
830
831static void sched_show_numa(struct task_struct *p, struct seq_file *m)
832{
833#ifdef CONFIG_NUMA_BALANCING
834	struct mempolicy *pol;
835
836	if (p->mm)
837		P(mm->numa_scan_seq);
838
839	task_lock(p);
840	pol = p->mempolicy;
841	if (pol && !(pol->flags & MPOL_F_MORON))
842		pol = NULL;
843	mpol_get(pol);
844	task_unlock(p);
845
846	P(numa_pages_migrated);
847	P(numa_preferred_nid);
848	P(total_numa_faults);
849	SEQ_printf(m, "current_node=%d, numa_group_id=%d\n",
850			task_node(p), task_numa_group_id(p));
851	show_numa_stats(p, m);
852	mpol_put(pol);
853#endif
854}
855
856void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
857						  struct seq_file *m)
858{
859	unsigned long nr_switches;
860
861	SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, task_pid_nr_ns(p, ns),
862						get_nr_threads(p));
863	SEQ_printf(m,
864		"---------------------------------------------------------"
865		"----------\n");
866#define __P(F) \
867	SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)F)
868#define P(F) \
869	SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)p->F)
870#define P_SCHEDSTAT(F) \
871	SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)schedstat_val(p->F))
872#define __PN(F) \
873	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)F))
874#define PN(F) \
875	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)p->F))
876#define PN_SCHEDSTAT(F) \
877	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)schedstat_val(p->F)))
878
879	PN(se.exec_start);
880	PN(se.vruntime);
881	PN(se.sum_exec_runtime);
882
883	nr_switches = p->nvcsw + p->nivcsw;
884
885	P(se.nr_migrations);
886
887	if (schedstat_enabled()) {
888		u64 avg_atom, avg_per_cpu;
889
890		PN_SCHEDSTAT(se.statistics.sum_sleep_runtime);
891		PN_SCHEDSTAT(se.statistics.wait_start);
892		PN_SCHEDSTAT(se.statistics.sleep_start);
893		PN_SCHEDSTAT(se.statistics.block_start);
894		PN_SCHEDSTAT(se.statistics.sleep_max);
895		PN_SCHEDSTAT(se.statistics.block_max);
896		PN_SCHEDSTAT(se.statistics.exec_max);
897		PN_SCHEDSTAT(se.statistics.slice_max);
898		PN_SCHEDSTAT(se.statistics.wait_max);
899		PN_SCHEDSTAT(se.statistics.wait_sum);
900		P_SCHEDSTAT(se.statistics.wait_count);
901		PN_SCHEDSTAT(se.statistics.iowait_sum);
902		P_SCHEDSTAT(se.statistics.iowait_count);
903		P_SCHEDSTAT(se.statistics.nr_migrations_cold);
904		P_SCHEDSTAT(se.statistics.nr_failed_migrations_affine);
905		P_SCHEDSTAT(se.statistics.nr_failed_migrations_running);
906		P_SCHEDSTAT(se.statistics.nr_failed_migrations_hot);
907		P_SCHEDSTAT(se.statistics.nr_forced_migrations);
908		P_SCHEDSTAT(se.statistics.nr_wakeups);
909		P_SCHEDSTAT(se.statistics.nr_wakeups_sync);
910		P_SCHEDSTAT(se.statistics.nr_wakeups_migrate);
911		P_SCHEDSTAT(se.statistics.nr_wakeups_local);
912		P_SCHEDSTAT(se.statistics.nr_wakeups_remote);
913		P_SCHEDSTAT(se.statistics.nr_wakeups_affine);
914		P_SCHEDSTAT(se.statistics.nr_wakeups_affine_attempts);
915		P_SCHEDSTAT(se.statistics.nr_wakeups_passive);
916		P_SCHEDSTAT(se.statistics.nr_wakeups_idle);
917
918		avg_atom = p->se.sum_exec_runtime;
919		if (nr_switches)
920			avg_atom = div64_ul(avg_atom, nr_switches);
921		else
922			avg_atom = -1LL;
923
924		avg_per_cpu = p->se.sum_exec_runtime;
925		if (p->se.nr_migrations) {
926			avg_per_cpu = div64_u64(avg_per_cpu,
927						p->se.nr_migrations);
928		} else {
929			avg_per_cpu = -1LL;
930		}
931
932		__PN(avg_atom);
933		__PN(avg_per_cpu);
934	}
935
936	__P(nr_switches);
937	SEQ_printf(m, "%-45s:%21Ld\n",
938		   "nr_voluntary_switches", (long long)p->nvcsw);
939	SEQ_printf(m, "%-45s:%21Ld\n",
940		   "nr_involuntary_switches", (long long)p->nivcsw);
941
942	P(se.load.weight);
943	P(se.runnable_weight);
944#ifdef CONFIG_SMP
945	P(se.avg.load_sum);
946	P(se.avg.runnable_load_sum);
947	P(se.avg.util_sum);
948	P(se.avg.load_avg);
949	P(se.avg.runnable_load_avg);
950	P(se.avg.util_avg);
951	P(se.avg.last_update_time);
952	P(se.avg.util_est.ewma);
953	P(se.avg.util_est.enqueued);
954#endif
955	P(policy);
956	P(prio);
957	if (task_has_dl_policy(p)) {
958		P(dl.runtime);
959		P(dl.deadline);
960	}
961#undef PN_SCHEDSTAT
962#undef PN
963#undef __PN
964#undef P_SCHEDSTAT
965#undef P
966#undef __P
967
968	{
969		unsigned int this_cpu = raw_smp_processor_id();
970		u64 t0, t1;
971
972		t0 = cpu_clock(this_cpu);
973		t1 = cpu_clock(this_cpu);
974		SEQ_printf(m, "%-45s:%21Ld\n",
975			   "clock-delta", (long long)(t1-t0));
976	}
977
978	sched_show_numa(p, m);
979}
980
981void proc_sched_set_task(struct task_struct *p)
982{
983#ifdef CONFIG_SCHEDSTATS
984	memset(&p->se.statistics, 0, sizeof(p->se.statistics));
985#endif
986}
v6.9.4
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * kernel/sched/debug.c
   4 *
   5 * Print the CFS rbtree and other debugging details
   6 *
   7 * Copyright(C) 2007, Red Hat, Inc., Ingo Molnar
   8 */
   9
  10/*
  11 * This allows printing both to /sys/kernel/debug/sched/debug and
  12 * to the console
  13 */
  14#define SEQ_printf(m, x...)			\
  15 do {						\
  16	if (m)					\
  17		seq_printf(m, x);		\
  18	else					\
  19		pr_cont(x);			\
  20 } while (0)
  21
  22/*
  23 * Ease the printing of nsec fields:
  24 */
  25static long long nsec_high(unsigned long long nsec)
  26{
  27	if ((long long)nsec < 0) {
  28		nsec = -nsec;
  29		do_div(nsec, 1000000);
  30		return -nsec;
  31	}
  32	do_div(nsec, 1000000);
  33
  34	return nsec;
  35}
  36
  37static unsigned long nsec_low(unsigned long long nsec)
  38{
  39	if ((long long)nsec < 0)
  40		nsec = -nsec;
  41
  42	return do_div(nsec, 1000000);
  43}
  44
  45#define SPLIT_NS(x) nsec_high(x), nsec_low(x)
  46
  47#define SCHED_FEAT(name, enabled)	\
  48	#name ,
  49
  50static const char * const sched_feat_names[] = {
  51#include "features.h"
  52};
  53
  54#undef SCHED_FEAT
  55
  56static int sched_feat_show(struct seq_file *m, void *v)
  57{
  58	int i;
  59
  60	for (i = 0; i < __SCHED_FEAT_NR; i++) {
  61		if (!(sysctl_sched_features & (1UL << i)))
  62			seq_puts(m, "NO_");
  63		seq_printf(m, "%s ", sched_feat_names[i]);
  64	}
  65	seq_puts(m, "\n");
  66
  67	return 0;
  68}
  69
  70#ifdef CONFIG_JUMP_LABEL
  71
  72#define jump_label_key__true  STATIC_KEY_INIT_TRUE
  73#define jump_label_key__false STATIC_KEY_INIT_FALSE
  74
  75#define SCHED_FEAT(name, enabled)	\
  76	jump_label_key__##enabled ,
  77
  78struct static_key sched_feat_keys[__SCHED_FEAT_NR] = {
  79#include "features.h"
  80};
  81
  82#undef SCHED_FEAT
  83
  84static void sched_feat_disable(int i)
  85{
  86	static_key_disable_cpuslocked(&sched_feat_keys[i]);
  87}
  88
  89static void sched_feat_enable(int i)
  90{
  91	static_key_enable_cpuslocked(&sched_feat_keys[i]);
  92}
  93#else
  94static void sched_feat_disable(int i) { };
  95static void sched_feat_enable(int i) { };
  96#endif /* CONFIG_JUMP_LABEL */
  97
  98static int sched_feat_set(char *cmp)
  99{
 100	int i;
 101	int neg = 0;
 102
 103	if (strncmp(cmp, "NO_", 3) == 0) {
 104		neg = 1;
 105		cmp += 3;
 106	}
 107
 108	i = match_string(sched_feat_names, __SCHED_FEAT_NR, cmp);
 109	if (i < 0)
 110		return i;
 111
 112	if (neg) {
 113		sysctl_sched_features &= ~(1UL << i);
 114		sched_feat_disable(i);
 115	} else {
 116		sysctl_sched_features |= (1UL << i);
 117		sched_feat_enable(i);
 118	}
 119
 120	return 0;
 121}
 122
 123static ssize_t
 124sched_feat_write(struct file *filp, const char __user *ubuf,
 125		size_t cnt, loff_t *ppos)
 126{
 127	char buf[64];
 128	char *cmp;
 129	int ret;
 130	struct inode *inode;
 131
 132	if (cnt > 63)
 133		cnt = 63;
 134
 135	if (copy_from_user(&buf, ubuf, cnt))
 136		return -EFAULT;
 137
 138	buf[cnt] = 0;
 139	cmp = strstrip(buf);
 140
 141	/* Ensure the static_key remains in a consistent state */
 142	inode = file_inode(filp);
 143	cpus_read_lock();
 144	inode_lock(inode);
 145	ret = sched_feat_set(cmp);
 146	inode_unlock(inode);
 147	cpus_read_unlock();
 148	if (ret < 0)
 149		return ret;
 150
 151	*ppos += cnt;
 152
 153	return cnt;
 154}
 155
 156static int sched_feat_open(struct inode *inode, struct file *filp)
 157{
 158	return single_open(filp, sched_feat_show, NULL);
 159}
 160
 161static const struct file_operations sched_feat_fops = {
 162	.open		= sched_feat_open,
 163	.write		= sched_feat_write,
 164	.read		= seq_read,
 165	.llseek		= seq_lseek,
 166	.release	= single_release,
 167};
 168
 169#ifdef CONFIG_SMP
 170
 171static ssize_t sched_scaling_write(struct file *filp, const char __user *ubuf,
 172				   size_t cnt, loff_t *ppos)
 173{
 174	char buf[16];
 175	unsigned int scaling;
 176
 177	if (cnt > 15)
 178		cnt = 15;
 179
 180	if (copy_from_user(&buf, ubuf, cnt))
 181		return -EFAULT;
 182	buf[cnt] = '\0';
 183
 184	if (kstrtouint(buf, 10, &scaling))
 185		return -EINVAL;
 186
 187	if (scaling >= SCHED_TUNABLESCALING_END)
 188		return -EINVAL;
 189
 190	sysctl_sched_tunable_scaling = scaling;
 191	if (sched_update_scaling())
 192		return -EINVAL;
 193
 194	*ppos += cnt;
 195	return cnt;
 196}
 197
 198static int sched_scaling_show(struct seq_file *m, void *v)
 199{
 200	seq_printf(m, "%d\n", sysctl_sched_tunable_scaling);
 201	return 0;
 202}
 203
 204static int sched_scaling_open(struct inode *inode, struct file *filp)
 205{
 206	return single_open(filp, sched_scaling_show, NULL);
 207}
 208
 209static const struct file_operations sched_scaling_fops = {
 210	.open		= sched_scaling_open,
 211	.write		= sched_scaling_write,
 212	.read		= seq_read,
 213	.llseek		= seq_lseek,
 214	.release	= single_release,
 215};
 216
 217#endif /* SMP */
 218
 219#ifdef CONFIG_PREEMPT_DYNAMIC
 220
 221static ssize_t sched_dynamic_write(struct file *filp, const char __user *ubuf,
 222				   size_t cnt, loff_t *ppos)
 223{
 224	char buf[16];
 225	int mode;
 226
 227	if (cnt > 15)
 228		cnt = 15;
 229
 230	if (copy_from_user(&buf, ubuf, cnt))
 231		return -EFAULT;
 232
 233	buf[cnt] = 0;
 234	mode = sched_dynamic_mode(strstrip(buf));
 235	if (mode < 0)
 236		return mode;
 237
 238	sched_dynamic_update(mode);
 239
 240	*ppos += cnt;
 241
 242	return cnt;
 243}
 244
 245static int sched_dynamic_show(struct seq_file *m, void *v)
 246{
 247	static const char * preempt_modes[] = {
 248		"none", "voluntary", "full"
 249	};
 250	int i;
 251
 252	for (i = 0; i < ARRAY_SIZE(preempt_modes); i++) {
 253		if (preempt_dynamic_mode == i)
 254			seq_puts(m, "(");
 255		seq_puts(m, preempt_modes[i]);
 256		if (preempt_dynamic_mode == i)
 257			seq_puts(m, ")");
 258
 259		seq_puts(m, " ");
 260	}
 261
 262	seq_puts(m, "\n");
 263	return 0;
 264}
 265
 266static int sched_dynamic_open(struct inode *inode, struct file *filp)
 267{
 268	return single_open(filp, sched_dynamic_show, NULL);
 269}
 270
 271static const struct file_operations sched_dynamic_fops = {
 272	.open		= sched_dynamic_open,
 273	.write		= sched_dynamic_write,
 274	.read		= seq_read,
 275	.llseek		= seq_lseek,
 276	.release	= single_release,
 277};
 278
 279#endif /* CONFIG_PREEMPT_DYNAMIC */
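/*
 * Editor's note (illustrative usage): the active mode is shown bracketed,
 * and a new one takes effect immediately on write:
 *
 *	# cat /sys/kernel/debug/sched/preempt
 *	none voluntary (full)
 *	# echo voluntary > /sys/kernel/debug/sched/preempt
 *
 * sched_dynamic_mode() parses the string and sched_dynamic_update()
 * switches the preemption entry points accordingly.
 */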
 280
 281__read_mostly bool sched_debug_verbose;
 282
 283#ifdef CONFIG_SMP
 284static struct dentry           *sd_dentry;
 285
 286
 287static ssize_t sched_verbose_write(struct file *filp, const char __user *ubuf,
 288				  size_t cnt, loff_t *ppos)
 289{
 290	ssize_t result;
 291	bool orig;
 292
 293	cpus_read_lock();
 294	mutex_lock(&sched_domains_mutex);
 295
 296	orig = sched_debug_verbose;
 297	result = debugfs_write_file_bool(filp, ubuf, cnt, ppos);
 298
 299	if (sched_debug_verbose && !orig)
 300		update_sched_domain_debugfs();
 301	else if (!sched_debug_verbose && orig) {
 302		debugfs_remove(sd_dentry);
 303		sd_dentry = NULL;
 304	}
 305
 306	mutex_unlock(&sched_domains_mutex);
 307	cpus_read_unlock();
 308
 309	return result;
 310}
 311#else
 312#define sched_verbose_write debugfs_write_file_bool
 313#endif
 314
 315static const struct file_operations sched_verbose_fops = {
 316	.read =         debugfs_read_file_bool,
 317	.write =        sched_verbose_write,
 318	.open =         simple_open,
 319	.llseek =       default_llseek,
 320};
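/*
 * Editor's note: writing "1" to /sys/kernel/debug/sched/verbose populates
 * the domains/ directory via update_sched_domain_debugfs(); writing "0"
 * removes it again. Taking cpus_read_lock() and sched_domains_mutex above
 * keeps that rebuild from racing with a hotplug-driven domain rebuild.
 */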
 321
 322static const struct seq_operations sched_debug_sops;
 323
 324static int sched_debug_open(struct inode *inode, struct file *filp)
 325{
 326	return seq_open(filp, &sched_debug_sops);
 327}
 328
 329static const struct file_operations sched_debug_fops = {
 330	.open		= sched_debug_open,
 331	.read		= seq_read,
 332	.llseek		= seq_lseek,
 333	.release	= seq_release,
 334};
 335
 336static struct dentry *debugfs_sched;
 337
 338static __init int sched_init_debug(void)
 339{
 340	struct dentry __maybe_unused *numa;
 341
 342	debugfs_sched = debugfs_create_dir("sched", NULL);
 343
 344	debugfs_create_file("features", 0644, debugfs_sched, NULL, &sched_feat_fops);
 345	debugfs_create_file_unsafe("verbose", 0644, debugfs_sched, &sched_debug_verbose, &sched_verbose_fops);
 346#ifdef CONFIG_PREEMPT_DYNAMIC
 347	debugfs_create_file("preempt", 0644, debugfs_sched, NULL, &sched_dynamic_fops);
 348#endif
 349
 350	debugfs_create_u32("base_slice_ns", 0644, debugfs_sched, &sysctl_sched_base_slice);
 351
 352	debugfs_create_u32("latency_warn_ms", 0644, debugfs_sched, &sysctl_resched_latency_warn_ms);
 353	debugfs_create_u32("latency_warn_once", 0644, debugfs_sched, &sysctl_resched_latency_warn_once);
 354
 355#ifdef CONFIG_SMP
 356	debugfs_create_file("tunable_scaling", 0644, debugfs_sched, NULL, &sched_scaling_fops);
 357	debugfs_create_u32("migration_cost_ns", 0644, debugfs_sched, &sysctl_sched_migration_cost);
 358	debugfs_create_u32("nr_migrate", 0644, debugfs_sched, &sysctl_sched_nr_migrate);
 359
 360	mutex_lock(&sched_domains_mutex);
 361	update_sched_domain_debugfs();
 362	mutex_unlock(&sched_domains_mutex);
 363#endif
 364
 365#ifdef CONFIG_NUMA_BALANCING
 366	numa = debugfs_create_dir("numa_balancing", debugfs_sched);
 367
 368	debugfs_create_u32("scan_delay_ms", 0644, numa, &sysctl_numa_balancing_scan_delay);
 369	debugfs_create_u32("scan_period_min_ms", 0644, numa, &sysctl_numa_balancing_scan_period_min);
 370	debugfs_create_u32("scan_period_max_ms", 0644, numa, &sysctl_numa_balancing_scan_period_max);
 371	debugfs_create_u32("scan_size_mb", 0644, numa, &sysctl_numa_balancing_scan_size);
 372	debugfs_create_u32("hot_threshold_ms", 0644, numa, &sysctl_numa_balancing_hot_threshold);
 373#endif
 374
 375	debugfs_create_file("debug", 0444, debugfs_sched, NULL, &sched_debug_fops);
 376
 377	return 0;
 378}
 379late_initcall(sched_init_debug);
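/*
 * Editor's note (sketch): after this late_initcall, the knobs live under
 * a single debugfs directory:
 *
 *	/sys/kernel/debug/sched/
 *	├── features
 *	├── verbose
 *	├── preempt              (CONFIG_PREEMPT_DYNAMIC)
 *	├── base_slice_ns
 *	├── latency_warn_ms
 *	├── tunable_scaling ...  (CONFIG_SMP)
 *	├── numa_balancing/      (CONFIG_NUMA_BALANCING)
 *	├── domains/             (filled by update_sched_domain_debugfs())
 *	└── debug
 */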
 380
 381#ifdef CONFIG_SMP
 382
 383static cpumask_var_t		sd_sysctl_cpus;
 384
 385static int sd_flags_show(struct seq_file *m, void *v)
 386{
 387	unsigned long flags = *(unsigned int *)m->private;
 388	int idx;
 389
 390	for_each_set_bit(idx, &flags, __SD_FLAG_CNT) {
 391		seq_puts(m, sd_flag_debug[idx].name);
 392		seq_puts(m, " ");
 393	}
 394	seq_puts(m, "\n");
 395
 396	return 0;
 397}
 398
 399static int sd_flags_open(struct inode *inode, struct file *file)
 400{
 401	return single_open(file, sd_flags_show, inode->i_private);
 402}
 403
 404static const struct file_operations sd_flags_fops = {
 405	.open		= sd_flags_open,
 406	.read		= seq_read,
 407	.llseek		= seq_lseek,
 408	.release	= single_release,
 409};
 410
 411static void register_sd(struct sched_domain *sd, struct dentry *parent)
 412{
 413#define SDM(type, mode, member)	\
 414	debugfs_create_##type(#member, mode, parent, &sd->member)
 415
 416	SDM(ulong, 0644, min_interval);
 417	SDM(ulong, 0644, max_interval);
 418	SDM(u64,   0644, max_newidle_lb_cost);
 419	SDM(u32,   0644, busy_factor);
 420	SDM(u32,   0644, imbalance_pct);
 421	SDM(u32,   0644, cache_nice_tries);
 422	SDM(str,   0444, name);
 423
 424#undef SDM
 425
 426	debugfs_create_file("flags", 0444, parent, &sd->flags, &sd_flags_fops);
 427	debugfs_create_file("groups_flags", 0444, parent, &sd->groups->flags, &sd_flags_fops);
 428}
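/*
 * Editor's note (sketch): one SDM() line expands to a typed debugfs
 * helper call, e.g.
 *
 *	SDM(ulong, 0644, min_interval);
 * becomes
 *	debugfs_create_ulong("min_interval", 0644, parent, &sd->min_interval);
 *
 * so each tunable gets a correctly-typed file without repeating its name.
 */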
 429
 430void update_sched_domain_debugfs(void)
 431{
 432	int cpu, i;
 433
 434	/*
 435	 * This can unfortunately be invoked before sched_debug_init() creates
 436	 * the debug directory. Don't touch sd_sysctl_cpus until then.
 437	 */
 438	if (!debugfs_sched)
 439		return;
 440
 441	if (!sched_debug_verbose)
 442		return;
 443
 444	if (!cpumask_available(sd_sysctl_cpus)) {
 445		if (!alloc_cpumask_var(&sd_sysctl_cpus, GFP_KERNEL))
 446			return;
 447		cpumask_copy(sd_sysctl_cpus, cpu_possible_mask);
 448	}
 449
 450	if (!sd_dentry) {
 451		sd_dentry = debugfs_create_dir("domains", debugfs_sched);
 452
 453		/* rebuild sd_sysctl_cpus if empty since it gets cleared below */
 454		if (cpumask_empty(sd_sysctl_cpus))
 455			cpumask_copy(sd_sysctl_cpus, cpu_online_mask);
 456	}
 457
 458	for_each_cpu(cpu, sd_sysctl_cpus) {
 459		struct sched_domain *sd;
 460		struct dentry *d_cpu;
 461		char buf[32];
 462
 463		snprintf(buf, sizeof(buf), "cpu%d", cpu);
 464		debugfs_lookup_and_remove(buf, sd_dentry);
 465		d_cpu = debugfs_create_dir(buf, sd_dentry);
 466
 467		i = 0;
 468		for_each_domain(cpu, sd) {
 469			struct dentry *d_sd;
 470
 471			snprintf(buf, sizeof(buf), "domain%d", i);
 472			d_sd = debugfs_create_dir(buf, d_cpu);
 473
 474			register_sd(sd, d_sd);
 475			i++;
 476		}
 477
 478		__cpumask_clear_cpu(cpu, sd_sysctl_cpus);
 479	}
 480}
 481
 482void dirty_sched_domain_sysctl(int cpu)
 483{
 484	if (cpumask_available(sd_sysctl_cpus))
 485		__cpumask_set_cpu(cpu, sd_sysctl_cpus);
 486}
 487
 488#endif /* CONFIG_SMP */
 489
 490#ifdef CONFIG_FAIR_GROUP_SCHED
 491static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group *tg)
 492{
 493	struct sched_entity *se = tg->se[cpu];
 494
 495#define P(F)		SEQ_printf(m, "  .%-30s: %lld\n",	#F, (long long)F)
 496#define P_SCHEDSTAT(F)	SEQ_printf(m, "  .%-30s: %lld\n",	\
 497		#F, (long long)schedstat_val(stats->F))
 498#define PN(F)		SEQ_printf(m, "  .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)F))
 499#define PN_SCHEDSTAT(F)	SEQ_printf(m, "  .%-30s: %lld.%06ld\n", \
 500		#F, SPLIT_NS((long long)schedstat_val(stats->F)))
 501
 502	if (!se)
 503		return;
 504
 505	PN(se->exec_start);
 506	PN(se->vruntime);
 507	PN(se->sum_exec_runtime);
 508
 509	if (schedstat_enabled()) {
 510		struct sched_statistics *stats;
 511		stats = __schedstats_from_se(se);
 512
 513		PN_SCHEDSTAT(wait_start);
 514		PN_SCHEDSTAT(sleep_start);
 515		PN_SCHEDSTAT(block_start);
 516		PN_SCHEDSTAT(sleep_max);
 517		PN_SCHEDSTAT(block_max);
 518		PN_SCHEDSTAT(exec_max);
 519		PN_SCHEDSTAT(slice_max);
 520		PN_SCHEDSTAT(wait_max);
 521		PN_SCHEDSTAT(wait_sum);
 522		P_SCHEDSTAT(wait_count);
 523	}
 524
 525	P(se->load.weight);
 526#ifdef CONFIG_SMP
 527	P(se->avg.load_avg);
 528	P(se->avg.util_avg);
 529	P(se->avg.runnable_avg);
 530#endif
 531
 532#undef PN_SCHEDSTAT
 533#undef PN
 534#undef P_SCHEDSTAT
 535#undef P
 536}
 537#endif
 538
 539#ifdef CONFIG_CGROUP_SCHED
 540static DEFINE_SPINLOCK(sched_debug_lock);
 541static char group_path[PATH_MAX];
 542
 543static void task_group_path(struct task_group *tg, char *path, int plen)
 544{
 545	if (autogroup_path(tg, path, plen))
 546		return;
 547
 548	cgroup_path(tg->css.cgroup, path, plen);
 549}
 550
 551/*
 552 * Only 1 SEQ_printf_task_group_path() caller can use the full length
 553 * group_path[] for cgroup path. Other simultaneous callers will have
 554 * to use a shorter stack buffer. A "..." suffix is appended at the end
 555 * of the stack buffer so that it will show up in case the output length
 556 * matches the given buffer size to indicate possible path name truncation.
 557 */
 558#define SEQ_printf_task_group_path(m, tg, fmt...)			\
 559{									\
 560	if (spin_trylock(&sched_debug_lock)) {				\
 561		task_group_path(tg, group_path, sizeof(group_path));	\
 562		SEQ_printf(m, fmt, group_path);				\
 563		spin_unlock(&sched_debug_lock);				\
 564	} else {							\
 565		char buf[128];						\
 566		char *bufend = buf + sizeof(buf) - 3;			\
 567		task_group_path(tg, buf, bufend - buf);			\
 568		strcpy(bufend - 1, "...");				\
 569		SEQ_printf(m, fmt, buf);				\
 570	}								\
 571}
 572#endif
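/*
 * Editor's note on the fallback path above: the "..." is copied to a
 * fixed position at the tail of the 128-byte stack buffer, so it stays
 * hidden behind the NUL of a short path and only becomes visible when the
 * rendered path reaches the end of the buffer -- i.e. exactly when
 * truncation may have occurred.
 */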
 573
 574static void
 575print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
 576{
 577	if (task_current(rq, p))
 578		SEQ_printf(m, ">R");
 579	else
 580		SEQ_printf(m, " %c", task_state_to_char(p));
 581
 582	SEQ_printf(m, "%15s %5d %9Ld.%06ld %c %9Ld.%06ld %9Ld.%06ld %9Ld.%06ld %9Ld %5d ",
 583		p->comm, task_pid_nr(p),
 584		SPLIT_NS(p->se.vruntime),
 585		entity_eligible(cfs_rq_of(&p->se), &p->se) ? 'E' : 'N',
 586		SPLIT_NS(p->se.deadline),
 587		SPLIT_NS(p->se.slice),
 588		SPLIT_NS(p->se.sum_exec_runtime),
 589		(long long)(p->nvcsw + p->nivcsw),
 590		p->prio);
 591
 592	SEQ_printf(m, "%9lld.%06ld %9lld.%06ld %9lld.%06ld %9lld.%06ld",
 593		SPLIT_NS(schedstat_val_or_zero(p->stats.wait_sum)),
 594		SPLIT_NS(p->se.sum_exec_runtime),
 595		SPLIT_NS(schedstat_val_or_zero(p->stats.sum_sleep_runtime)),
 596		SPLIT_NS(schedstat_val_or_zero(p->stats.sum_block_runtime)));
 597
 598#ifdef CONFIG_NUMA_BALANCING
 599	SEQ_printf(m, " %d %d", task_node(p), task_numa_group_id(p));
 600#endif
 601#ifdef CONFIG_CGROUP_SCHED
 602	SEQ_printf_task_group_path(m, task_group(p), " %s")
 603#endif
 604
 605	SEQ_printf(m, "\n");
 606}
 607
 608static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
 609{
 610	struct task_struct *g, *p;
 611
 612	SEQ_printf(m, "\n");
 613	SEQ_printf(m, "runnable tasks:\n");
 614	SEQ_printf(m, " S            task   PID         tree-key  switches  prio"
 615		   "     wait-time             sum-exec        sum-sleep\n");
 616	SEQ_printf(m, "-------------------------------------------------------"
 617		   "------------------------------------------------------\n");
 618
 619	rcu_read_lock();
 620	for_each_process_thread(g, p) {
 621		if (task_cpu(p) != rq_cpu)
 622			continue;
 623
 624		print_task(m, rq, p);
 625	}
 626	rcu_read_unlock();
 627}
 628
 629void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 630{
 631	s64 left_vruntime = -1, min_vruntime, right_vruntime = -1, left_deadline = -1, spread;
 632	struct sched_entity *last, *first, *root;
 633	struct rq *rq = cpu_rq(cpu);
 634	unsigned long flags;
 635
 636#ifdef CONFIG_FAIR_GROUP_SCHED
 637	SEQ_printf(m, "\n");
 638	SEQ_printf_task_group_path(m, cfs_rq->tg, "cfs_rq[%d]:%s\n", cpu);
 639#else
 640	SEQ_printf(m, "\n");
 641	SEQ_printf(m, "cfs_rq[%d]:\n", cpu);
 642#endif
 643	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "exec_clock",
 644			SPLIT_NS(cfs_rq->exec_clock));
 645
 646	raw_spin_rq_lock_irqsave(rq, flags);
 647	root = __pick_root_entity(cfs_rq);
 648	if (root)
 649		left_vruntime = root->min_vruntime;
 650	first = __pick_first_entity(cfs_rq);
 651	if (first)
 652		left_deadline = first->deadline;
 653	last = __pick_last_entity(cfs_rq);
 654	if (last)
 655		right_vruntime = last->vruntime;
 656	min_vruntime = cfs_rq->min_vruntime;
 657	raw_spin_rq_unlock_irqrestore(rq, flags);
 658
 659	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "left_deadline",
 660			SPLIT_NS(left_deadline));
 661	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "left_vruntime",
 662			SPLIT_NS(left_vruntime));
 663	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "min_vruntime",
 664			SPLIT_NS(min_vruntime));
 665	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "avg_vruntime",
 666			SPLIT_NS(avg_vruntime(cfs_rq)));
 667	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "right_vruntime",
 668			SPLIT_NS(right_vruntime));
 669	spread = right_vruntime - left_vruntime;
 670	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread", SPLIT_NS(spread));
 671	SEQ_printf(m, "  .%-30s: %d\n", "nr_spread_over",
 672			cfs_rq->nr_spread_over);
 673	SEQ_printf(m, "  .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
 674	SEQ_printf(m, "  .%-30s: %d\n", "h_nr_running", cfs_rq->h_nr_running);
 675	SEQ_printf(m, "  .%-30s: %d\n", "idle_nr_running",
 676			cfs_rq->idle_nr_running);
 677	SEQ_printf(m, "  .%-30s: %d\n", "idle_h_nr_running",
 678			cfs_rq->idle_h_nr_running);
 679	SEQ_printf(m, "  .%-30s: %ld\n", "load", cfs_rq->load.weight);
 680#ifdef CONFIG_SMP
 681	SEQ_printf(m, "  .%-30s: %lu\n", "load_avg",
 682			cfs_rq->avg.load_avg);
 683	SEQ_printf(m, "  .%-30s: %lu\n", "runnable_avg",
 684			cfs_rq->avg.runnable_avg);
 685	SEQ_printf(m, "  .%-30s: %lu\n", "util_avg",
 686			cfs_rq->avg.util_avg);
 687	SEQ_printf(m, "  .%-30s: %u\n", "util_est",
 688			cfs_rq->avg.util_est);
 689	SEQ_printf(m, "  .%-30s: %ld\n", "removed.load_avg",
 690			cfs_rq->removed.load_avg);
 691	SEQ_printf(m, "  .%-30s: %ld\n", "removed.util_avg",
 692			cfs_rq->removed.util_avg);
 693	SEQ_printf(m, "  .%-30s: %ld\n", "removed.runnable_avg",
 694			cfs_rq->removed.runnable_avg);
 695#ifdef CONFIG_FAIR_GROUP_SCHED
 696	SEQ_printf(m, "  .%-30s: %lu\n", "tg_load_avg_contrib",
 697			cfs_rq->tg_load_avg_contrib);
 698	SEQ_printf(m, "  .%-30s: %ld\n", "tg_load_avg",
 699			atomic_long_read(&cfs_rq->tg->load_avg));
 700#endif
 701#endif
 702#ifdef CONFIG_CFS_BANDWIDTH
 703	SEQ_printf(m, "  .%-30s: %d\n", "throttled",
 704			cfs_rq->throttled);
 705	SEQ_printf(m, "  .%-30s: %d\n", "throttle_count",
 706			cfs_rq->throttle_count);
 707#endif
 708
 709#ifdef CONFIG_FAIR_GROUP_SCHED
 710	print_cfs_group_stats(m, cpu, cfs_rq->tg);
 711#endif
 712}
 713
 714void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
 715{
 716#ifdef CONFIG_RT_GROUP_SCHED
 717	SEQ_printf(m, "\n");
 718	SEQ_printf_task_group_path(m, rt_rq->tg, "rt_rq[%d]:%s\n", cpu);
 719#else
 720	SEQ_printf(m, "\n");
 721	SEQ_printf(m, "rt_rq[%d]:\n", cpu);
 722#endif
 723
 724#define P(x) \
 725	SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rt_rq->x))
 726#define PU(x) \
 727	SEQ_printf(m, "  .%-30s: %lu\n", #x, (unsigned long)(rt_rq->x))
 728#define PN(x) \
 729	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rt_rq->x))
 730
 731	PU(rt_nr_running);
 732	P(rt_throttled);
 733	PN(rt_time);
 734	PN(rt_runtime);
 735
 736#undef PN
 737#undef PU
 738#undef P
 739}
 740
 741void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq)
 742{
 743	struct dl_bw *dl_bw;
 744
 745	SEQ_printf(m, "\n");
 746	SEQ_printf(m, "dl_rq[%d]:\n", cpu);
 747
 748#define PU(x) \
 749	SEQ_printf(m, "  .%-30s: %lu\n", #x, (unsigned long)(dl_rq->x))
 750
 751	PU(dl_nr_running);
 752#ifdef CONFIG_SMP
 753	dl_bw = &cpu_rq(cpu)->rd->dl_bw;
 754#else
 755	dl_bw = &dl_rq->dl_bw;
 756#endif
 757	SEQ_printf(m, "  .%-30s: %lld\n", "dl_bw->bw", dl_bw->bw);
 758	SEQ_printf(m, "  .%-30s: %lld\n", "dl_bw->total_bw", dl_bw->total_bw);
 759
 760#undef PU
 761}
 762
 763static void print_cpu(struct seq_file *m, int cpu)
 764{
 765	struct rq *rq = cpu_rq(cpu);
 766
 767#ifdef CONFIG_X86
 768	{
 769		unsigned int freq = cpu_khz ? : 1;
 770
 771		SEQ_printf(m, "cpu#%d, %u.%03u MHz\n",
 772			   cpu, freq / 1000, (freq % 1000));
 773	}
 774#else
 775	SEQ_printf(m, "cpu#%d\n", cpu);
 776#endif
 777
 778#define P(x)								\
 779do {									\
 780	if (sizeof(rq->x) == 4)						\
 781		SEQ_printf(m, "  .%-30s: %d\n", #x, (int)(rq->x));	\
 782	else								\
 783		SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rq->x));\
 784} while (0)
 785
 786#define PN(x) \
 787	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rq->x))
 788
 789	P(nr_running);
 790	P(nr_switches);
 791	P(nr_uninterruptible);
 792	PN(next_balance);
 793	SEQ_printf(m, "  .%-30s: %ld\n", "curr->pid", (long)(task_pid_nr(rq->curr)));
 794	PN(clock);
 795	PN(clock_task);
 796#undef P
 797#undef PN
 798
 799#ifdef CONFIG_SMP
 800#define P64(n) SEQ_printf(m, "  .%-30s: %Ld\n", #n, rq->n);
 801	P64(avg_idle);
 802	P64(max_idle_balance_cost);
 803#undef P64
 804#endif
 805
 806#define P(n) SEQ_printf(m, "  .%-30s: %d\n", #n, schedstat_val(rq->n));
 807	if (schedstat_enabled()) {
 808		P(yld_count);
 809		P(sched_count);
 810		P(sched_goidle);
 811		P(ttwu_count);
 812		P(ttwu_local);
 813	}
 814#undef P
 815
 816	print_cfs_stats(m, cpu);
 817	print_rt_stats(m, cpu);
 818	print_dl_stats(m, cpu);
 819
 820	print_rq(m, rq, cpu);
 821	SEQ_printf(m, "\n");
 822}
 823
 824static const char *sched_tunable_scaling_names[] = {
 825	"none",
 826	"logarithmic",
 827	"linear"
 828};
 829
 830static void sched_debug_header(struct seq_file *m)
 831{
 832	u64 ktime, sched_clk, cpu_clk;
 833	unsigned long flags;
 834
 835	local_irq_save(flags);
 836	ktime = ktime_to_ns(ktime_get());
 837	sched_clk = sched_clock();
 838	cpu_clk = local_clock();
 839	local_irq_restore(flags);
 840
 841	SEQ_printf(m, "Sched Debug Version: v0.11, %s %.*s\n",
 842		init_utsname()->release,
 843		(int)strcspn(init_utsname()->version, " "),
 844		init_utsname()->version);
 845
 846#define P(x) \
 847	SEQ_printf(m, "%-40s: %Ld\n", #x, (long long)(x))
 848#define PN(x) \
 849	SEQ_printf(m, "%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
 850	PN(ktime);
 851	PN(sched_clk);
 852	PN(cpu_clk);
 853	P(jiffies);
 854#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
 855	P(sched_clock_stable());
 856#endif
 857#undef PN
 858#undef P
 859
 860	SEQ_printf(m, "\n");
 861	SEQ_printf(m, "sysctl_sched\n");
 862
 863#define P(x) \
 864	SEQ_printf(m, "  .%-40s: %Ld\n", #x, (long long)(x))
 865#define PN(x) \
 866	SEQ_printf(m, "  .%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
 867	PN(sysctl_sched_base_slice);
 868	P(sysctl_sched_features);
 869#undef PN
 870#undef P
 871
 872	SEQ_printf(m, "  .%-40s: %d (%s)\n",
 873		"sysctl_sched_tunable_scaling",
 874		sysctl_sched_tunable_scaling,
 875		sched_tunable_scaling_names[sysctl_sched_tunable_scaling]);
 876	SEQ_printf(m, "\n");
 877}
 878
 879static int sched_debug_show(struct seq_file *m, void *v)
 880{
 881	int cpu = (unsigned long)(v - 2);
 882
 883	if (cpu != -1)
 884		print_cpu(m, cpu);
 885	else
 886		sched_debug_header(m);
 887
 888	return 0;
 889}
 890
 891void sysrq_sched_debug_show(void)
 892{
 893	int cpu;
 894
 895	sched_debug_header(NULL);
 896	for_each_online_cpu(cpu) {
 897		/*
 898		 * Need to reset softlockup watchdogs on all CPUs, because
 899		 * another CPU might be blocked waiting for us to process
 900		 * an IPI or stop_machine.
 901		 */
 902		touch_nmi_watchdog();
 903		touch_all_softlockup_watchdogs();
 904		print_cpu(NULL, cpu);
 905	}
 906}
 907
 908/*
 909 * This iterator needs some explanation.
 910 * It returns 1 for the header position.
 911 * This means 2 is CPU 0.
 912 * In a hotplugged system some CPUs, including CPU 0, may be missing so we have
 913 * to use cpumask_* to iterate over the CPUs.
 914 */
 915static void *sched_debug_start(struct seq_file *file, loff_t *offset)
 916{
 917	unsigned long n = *offset;
 918
 919	if (n == 0)
 920		return (void *) 1;
 921
 922	n--;
 923
 924	if (n > 0)
 925		n = cpumask_next(n - 1, cpu_online_mask);
 926	else
 927		n = cpumask_first(cpu_online_mask);
 928
 929	*offset = n + 1;
 930
 931	if (n < nr_cpu_ids)
 932		return (void *)(unsigned long)(n + 2);
 933
 934	return NULL;
 935}
 936
 937static void *sched_debug_next(struct seq_file *file, void *data, loff_t *offset)
 938{
 939	(*offset)++;
 940	return sched_debug_start(file, offset);
 941}
 942
 943static void sched_debug_stop(struct seq_file *file, void *data)
 944{
 945}
 946
 947static const struct seq_operations sched_debug_sops = {
 948	.start		= sched_debug_start,
 949	.next		= sched_debug_next,
 950	.stop		= sched_debug_stop,
 951	.show		= sched_debug_show,
 952};
 953
 954#define __PS(S, F) SEQ_printf(m, "%-45s:%21Ld\n", S, (long long)(F))
 955#define __P(F) __PS(#F, F)
 956#define   P(F) __PS(#F, p->F)
 957#define   PM(F, M) __PS(#F, p->F & (M))
 958#define __PSN(S, F) SEQ_printf(m, "%-45s:%14Ld.%06ld\n", S, SPLIT_NS((long long)(F)))
 959#define __PN(F) __PSN(#F, F)
 960#define   PN(F) __PSN(#F, p->F)
 961
 962
 963#ifdef CONFIG_NUMA_BALANCING
 964void print_numa_stats(struct seq_file *m, int node, unsigned long tsf,
 965		unsigned long tpf, unsigned long gsf, unsigned long gpf)
 966{
 967	SEQ_printf(m, "numa_faults node=%d ", node);
 968	SEQ_printf(m, "task_private=%lu task_shared=%lu ", tpf, tsf);
 969	SEQ_printf(m, "group_private=%lu group_shared=%lu\n", gpf, gsf);
 970}
 971#endif
 972
 973
 974static void sched_show_numa(struct task_struct *p, struct seq_file *m)
 975{
 976#ifdef CONFIG_NUMA_BALANCING
 977	if (p->mm)
 978		P(mm->numa_scan_seq);
 979
 980	P(numa_pages_migrated);
 981	P(numa_preferred_nid);
 982	P(total_numa_faults);
 983	SEQ_printf(m, "current_node=%d, numa_group_id=%d\n",
 984			task_node(p), task_numa_group_id(p));
 985	show_numa_stats(p, m);
 986#endif
 987}
 988
 989void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
 990						  struct seq_file *m)
 991{
 992	unsigned long nr_switches;
 993
 994	SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, task_pid_nr_ns(p, ns),
 995						get_nr_threads(p));
 996	SEQ_printf(m,
 997		"---------------------------------------------------------"
 998		"----------\n");
 999
1000#define P_SCHEDSTAT(F)  __PS(#F, schedstat_val(p->stats.F))
1001#define PN_SCHEDSTAT(F) __PSN(#F, schedstat_val(p->stats.F))
1002
1003	PN(se.exec_start);
1004	PN(se.vruntime);
1005	PN(se.sum_exec_runtime);
1006
1007	nr_switches = p->nvcsw + p->nivcsw;
1008
1009	P(se.nr_migrations);
1010
1011	if (schedstat_enabled()) {
1012		u64 avg_atom, avg_per_cpu;
1013
1014		PN_SCHEDSTAT(sum_sleep_runtime);
1015		PN_SCHEDSTAT(sum_block_runtime);
1016		PN_SCHEDSTAT(wait_start);
1017		PN_SCHEDSTAT(sleep_start);
1018		PN_SCHEDSTAT(block_start);
1019		PN_SCHEDSTAT(sleep_max);
1020		PN_SCHEDSTAT(block_max);
1021		PN_SCHEDSTAT(exec_max);
1022		PN_SCHEDSTAT(slice_max);
1023		PN_SCHEDSTAT(wait_max);
1024		PN_SCHEDSTAT(wait_sum);
1025		P_SCHEDSTAT(wait_count);
1026		PN_SCHEDSTAT(iowait_sum);
1027		P_SCHEDSTAT(iowait_count);
1028		P_SCHEDSTAT(nr_migrations_cold);
1029		P_SCHEDSTAT(nr_failed_migrations_affine);
1030		P_SCHEDSTAT(nr_failed_migrations_running);
1031		P_SCHEDSTAT(nr_failed_migrations_hot);
1032		P_SCHEDSTAT(nr_forced_migrations);
1033		P_SCHEDSTAT(nr_wakeups);
1034		P_SCHEDSTAT(nr_wakeups_sync);
1035		P_SCHEDSTAT(nr_wakeups_migrate);
1036		P_SCHEDSTAT(nr_wakeups_local);
1037		P_SCHEDSTAT(nr_wakeups_remote);
1038		P_SCHEDSTAT(nr_wakeups_affine);
1039		P_SCHEDSTAT(nr_wakeups_affine_attempts);
1040		P_SCHEDSTAT(nr_wakeups_passive);
1041		P_SCHEDSTAT(nr_wakeups_idle);
1042
1043		avg_atom = p->se.sum_exec_runtime;
1044		if (nr_switches)
1045			avg_atom = div64_ul(avg_atom, nr_switches);
1046		else
1047			avg_atom = -1LL;
1048
1049		avg_per_cpu = p->se.sum_exec_runtime;
1050		if (p->se.nr_migrations) {
1051			avg_per_cpu = div64_u64(avg_per_cpu,
1052						p->se.nr_migrations);
1053		} else {
1054			avg_per_cpu = -1LL;
1055		}
1056
1057		__PN(avg_atom);
1058		__PN(avg_per_cpu);
1059
1060#ifdef CONFIG_SCHED_CORE
1061		PN_SCHEDSTAT(core_forceidle_sum);
1062#endif
1063	}
1064
1065	__P(nr_switches);
1066	__PS("nr_voluntary_switches", p->nvcsw);
1067	__PS("nr_involuntary_switches", p->nivcsw);
1068
1069	P(se.load.weight);
1070#ifdef CONFIG_SMP
1071	P(se.avg.load_sum);
1072	P(se.avg.runnable_sum);
1073	P(se.avg.util_sum);
1074	P(se.avg.load_avg);
1075	P(se.avg.runnable_avg);
1076	P(se.avg.util_avg);
1077	P(se.avg.last_update_time);
1078	PM(se.avg.util_est, ~UTIL_AVG_UNCHANGED);
1079#endif
1080#ifdef CONFIG_UCLAMP_TASK
1081	__PS("uclamp.min", p->uclamp_req[UCLAMP_MIN].value);
1082	__PS("uclamp.max", p->uclamp_req[UCLAMP_MAX].value);
1083	__PS("effective uclamp.min", uclamp_eff_value(p, UCLAMP_MIN));
1084	__PS("effective uclamp.max", uclamp_eff_value(p, UCLAMP_MAX));
1085#endif
1086	P(policy);
1087	P(prio);
1088	if (task_has_dl_policy(p)) {
1089		P(dl.runtime);
1090		P(dl.deadline);
1091	}
1092#undef PN_SCHEDSTAT
1093#undef P_SCHEDSTAT
1094
1095	{
1096		unsigned int this_cpu = raw_smp_processor_id();
1097		u64 t0, t1;
1098
1099		t0 = cpu_clock(this_cpu);
1100		t1 = cpu_clock(this_cpu);
1101		__PS("clock-delta", t1-t0);
1102	}
1103
1104	sched_show_numa(p, m);
1105}
1106
1107void proc_sched_set_task(struct task_struct *p)
1108{
1109#ifdef CONFIG_SCHEDSTATS
1110	memset(&p->stats, 0, sizeof(p->stats));
1111#endif
1112}
1113
1114void resched_latency_warn(int cpu, u64 latency)
1115{
1116	static DEFINE_RATELIMIT_STATE(latency_check_ratelimit, 60 * 60 * HZ, 1);
1117
1118	WARN(__ratelimit(&latency_check_ratelimit),
1119	     "sched: CPU %d need_resched set for > %llu ns (%d ticks) "
1120	     "without schedule\n",
1121	     cpu, latency, cpu_rq(cpu)->ticks_without_resched);
1122}