v4.17
   1/*
   2 * kernel/sched/debug.c
   3 *
   4 * Print the CFS rbtree and other debugging details
   5 *
   6 * Copyright(C) 2007, Red Hat, Inc., Ingo Molnar
   7 *
   8 * This program is free software; you can redistribute it and/or modify
   9 * it under the terms of the GNU General Public License version 2 as
  10 * published by the Free Software Foundation.
  11 */
  12#include "sched.h"
  13
  14static DEFINE_SPINLOCK(sched_debug_lock);
  15
  16/*
  17 * This allows printing both to /proc/sched_debug and
  18 * to the console
  19 */
  20#define SEQ_printf(m, x...)			\
  21 do {						\
  22	if (m)					\
  23		seq_printf(m, x);		\
  24	else					\
  25		pr_cont(x);			\
  26 } while (0)
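/* Editor's note: the m == NULL branch is what lets sysrq_sched_debug_show()
 * further down call sched_debug_header(NULL) and print_cpu(NULL, cpu),
 * routing the very same report to the console via pr_cont(). */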
  27
  28/*
  29 * Ease the printing of nsec fields:
  30 */
  31static long long nsec_high(unsigned long long nsec)
  32{
  33	if ((long long)nsec < 0) {
  34		nsec = -nsec;
  35		do_div(nsec, 1000000);
  36		return -nsec;
  37	}
  38	do_div(nsec, 1000000);
  39
  40	return nsec;
  41}
  42
  43static unsigned long nsec_low(unsigned long long nsec)
  44{
  45	if ((long long)nsec < 0)
  46		nsec = -nsec;
  47
  48	return do_div(nsec, 1000000);
  49}
  50
  51#define SPLIT_NS(x) nsec_high(x), nsec_low(x)
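/* Editor's sketch (not part of the kernel source): the SPLIT_NS idiom above
 * re-implemented as a standalone userspace program, with plain '/' and '%'
 * standing in for do_div(). It shows how one nanosecond value prints as
 * "<milliseconds>.<6-digit remainder>" through a "%Ld.%06ld" format pair. */
#include <stdio.h>

static long long demo_nsec_high(unsigned long long nsec)
{
	if ((long long)nsec < 0)
		return -(long long)(-(long long)nsec / 1000000);
	return (long long)(nsec / 1000000);
}

static unsigned long demo_nsec_low(unsigned long long nsec)
{
	if ((long long)nsec < 0)
		nsec = -(long long)nsec;
	return (unsigned long)(nsec % 1000000);
}

#define DEMO_SPLIT_NS(x) demo_nsec_high(x), demo_nsec_low(x)

int main(void)
{
	/* 1234567890 ns -> "1234.567890" (1234 ms, 567890 ns left over) */
	printf("%lld.%06ld\n", DEMO_SPLIT_NS(1234567890ULL));
	/* negative values keep their sign in the high part: "-5.000001" */
	printf("%lld.%06ld\n", DEMO_SPLIT_NS((unsigned long long)-5000001LL));
	return 0;
}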
  52
  53#define SCHED_FEAT(name, enabled)	\
  54	#name ,
  55
  56static const char * const sched_feat_names[] = {
  57#include "features.h"
  58};
  59
  60#undef SCHED_FEAT
  61
  62static int sched_feat_show(struct seq_file *m, void *v)
  63{
  64	int i;
  65
  66	for (i = 0; i < __SCHED_FEAT_NR; i++) {
  67		if (!(sysctl_sched_features & (1UL << i)))
  68			seq_puts(m, "NO_");
  69		seq_printf(m, "%s ", sched_feat_names[i]);
  70	}
  71	seq_puts(m, "\n");
  72
  73	return 0;
  74}
  75
  76#ifdef HAVE_JUMP_LABEL
  77
  78#define jump_label_key__true  STATIC_KEY_INIT_TRUE
  79#define jump_label_key__false STATIC_KEY_INIT_FALSE
  80
  81#define SCHED_FEAT(name, enabled)	\
  82	jump_label_key__##enabled ,
  83
  84struct static_key sched_feat_keys[__SCHED_FEAT_NR] = {
  85#include "features.h"
  86};
  87
  88#undef SCHED_FEAT
  89
  90static void sched_feat_disable(int i)
  91{
  92	static_key_disable(&sched_feat_keys[i]);
  93}
  94
  95static void sched_feat_enable(int i)
  96{
  97	static_key_enable(&sched_feat_keys[i]);
  98}
  99#else
 100static void sched_feat_disable(int i) { };
 101static void sched_feat_enable(int i) { };
 102#endif /* HAVE_JUMP_LABEL */
 103
 104static int sched_feat_set(char *cmp)
 105{
 106	int i;
 107	int neg = 0;
 108
 109	if (strncmp(cmp, "NO_", 3) == 0) {
 110		neg = 1;
 111		cmp += 3;
 112	}
 113
 114	for (i = 0; i < __SCHED_FEAT_NR; i++) {
 115		if (strcmp(cmp, sched_feat_names[i]) == 0) {
 116			if (neg) {
 117				sysctl_sched_features &= ~(1UL << i);
 118				sched_feat_disable(i);
 119			} else {
 120				sysctl_sched_features |= (1UL << i);
 121				sched_feat_enable(i);
 122			}
 123			break;
 124		}
 125	}
 126
 127	return i;
 128}
 129
 130static ssize_t
 131sched_feat_write(struct file *filp, const char __user *ubuf,
 132		size_t cnt, loff_t *ppos)
 133{
 134	char buf[64];
 135	char *cmp;
 136	int i;
 137	struct inode *inode;
 138
 139	if (cnt > 63)
 140		cnt = 63;
 141
 142	if (copy_from_user(&buf, ubuf, cnt))
 143		return -EFAULT;
 144
 145	buf[cnt] = 0;
 146	cmp = strstrip(buf);
 147
 148	/* Ensure the static_key remains in a consistent state */
 149	inode = file_inode(filp);
 150	inode_lock(inode);
 151	i = sched_feat_set(cmp);
 152	inode_unlock(inode);
 153	if (i == __SCHED_FEAT_NR)
 154		return -EINVAL;
 155
 156	*ppos += cnt;
 157
 158	return cnt;
 159}
 160
 161static int sched_feat_open(struct inode *inode, struct file *filp)
 162{
 163	return single_open(filp, sched_feat_show, NULL);
 164}
 165
 166static const struct file_operations sched_feat_fops = {
 167	.open		= sched_feat_open,
 168	.write		= sched_feat_write,
 169	.read		= seq_read,
 170	.llseek		= seq_lseek,
 171	.release	= single_release,
 172};
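/* Editor's sketch (assumes debugfs is mounted at /sys/kernel/debug):
 * toggling a scheduler feature from userspace through the file registered
 * above. Writing a plain name sets the feature bit and a "NO_" prefix
 * clears it; TTWU_QUEUE is one of the names generated from features.h. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *cmd = "NO_TTWU_QUEUE";
	int fd = open("/sys/kernel/debug/sched_features", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, cmd, strlen(cmd)) != (ssize_t)strlen(cmd))
		perror("write");
	close(fd);
	return 0;
}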
 173
 174__read_mostly bool sched_debug_enabled;
 175
 176static __init int sched_init_debug(void)
 177{
 178	debugfs_create_file("sched_features", 0644, NULL, NULL,
 179			&sched_feat_fops);
 180
 181	debugfs_create_bool("sched_debug", 0644, NULL,
 182			&sched_debug_enabled);
 183
 184	return 0;
 185}
 186late_initcall(sched_init_debug);
 187
 188#ifdef CONFIG_SMP
 189
 190#ifdef CONFIG_SYSCTL
 191
 192static struct ctl_table sd_ctl_dir[] = {
 193	{
 194		.procname	= "sched_domain",
 195		.mode		= 0555,
 196	},
 197	{}
 198};
 199
 200static struct ctl_table sd_ctl_root[] = {
 201	{
 202		.procname	= "kernel",
 203		.mode		= 0555,
 204		.child		= sd_ctl_dir,
 205	},
 206	{}
 207};
 208
 209static struct ctl_table *sd_alloc_ctl_entry(int n)
 210{
 211	struct ctl_table *entry =
 212		kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);
 213
 214	return entry;
 215}
 216
 217static void sd_free_ctl_entry(struct ctl_table **tablep)
 218{
 219	struct ctl_table *entry;
 220
 221	/*
 222	 * In the intermediate directories, both the child directory and
 223	 * procname are dynamically allocated and could fail but the mode
 224	 * will always be set. In the lowest directory the names are
 225	 * static strings and all have proc handlers.
 226	 */
 227	for (entry = *tablep; entry->mode; entry++) {
 228		if (entry->child)
 229			sd_free_ctl_entry(&entry->child);
 230		if (entry->proc_handler == NULL)
 231			kfree(entry->procname);
 232	}
 233
 234	kfree(*tablep);
 235	*tablep = NULL;
 236}
 237
 238static int min_load_idx = 0;
 239static int max_load_idx = CPU_LOAD_IDX_MAX-1;
 240
 241static void
 242set_table_entry(struct ctl_table *entry,
 243		const char *procname, void *data, int maxlen,
 244		umode_t mode, proc_handler *proc_handler,
 245		bool load_idx)
 246{
 247	entry->procname = procname;
 248	entry->data = data;
 249	entry->maxlen = maxlen;
 250	entry->mode = mode;
 251	entry->proc_handler = proc_handler;
 252
 253	if (load_idx) {
 254		entry->extra1 = &min_load_idx;
 255		entry->extra2 = &max_load_idx;
 256	}
 257}
 258
 259static struct ctl_table *
 260sd_alloc_ctl_domain_table(struct sched_domain *sd)
 261{
 262	struct ctl_table *table = sd_alloc_ctl_entry(14);
 263
 264	if (table == NULL)
 265		return NULL;
 266
 267	set_table_entry(&table[0] , "min_interval",	   &sd->min_interval,	     sizeof(long), 0644, proc_doulongvec_minmax, false);
 268	set_table_entry(&table[1] , "max_interval",	   &sd->max_interval,	     sizeof(long), 0644, proc_doulongvec_minmax, false);
 269	set_table_entry(&table[2] , "busy_idx",		   &sd->busy_idx,	     sizeof(int) , 0644, proc_dointvec_minmax,   true );
 270	set_table_entry(&table[3] , "idle_idx",		   &sd->idle_idx,	     sizeof(int) , 0644, proc_dointvec_minmax,   true );
 271	set_table_entry(&table[4] , "newidle_idx",	   &sd->newidle_idx,	     sizeof(int) , 0644, proc_dointvec_minmax,   true );
 272	set_table_entry(&table[5] , "wake_idx",		   &sd->wake_idx,	     sizeof(int) , 0644, proc_dointvec_minmax,   true );
 273	set_table_entry(&table[6] , "forkexec_idx",	   &sd->forkexec_idx,	     sizeof(int) , 0644, proc_dointvec_minmax,   true );
 274	set_table_entry(&table[7] , "busy_factor",	   &sd->busy_factor,	     sizeof(int) , 0644, proc_dointvec_minmax,   false);
 275	set_table_entry(&table[8] , "imbalance_pct",	   &sd->imbalance_pct,	     sizeof(int) , 0644, proc_dointvec_minmax,   false);
 276	set_table_entry(&table[9] , "cache_nice_tries",	   &sd->cache_nice_tries,    sizeof(int) , 0644, proc_dointvec_minmax,   false);
 277	set_table_entry(&table[10], "flags",		   &sd->flags,		     sizeof(int) , 0644, proc_dointvec_minmax,   false);
 278	set_table_entry(&table[11], "max_newidle_lb_cost", &sd->max_newidle_lb_cost, sizeof(long), 0644, proc_doulongvec_minmax, false);
 279	set_table_entry(&table[12], "name",		   sd->name,		CORENAME_MAX_SIZE, 0444, proc_dostring,		 false);
 280	/* &table[13] is terminator */
 281
 282	return table;
 283}
 284
 285static struct ctl_table *sd_alloc_ctl_cpu_table(int cpu)
 286{
 287	struct ctl_table *entry, *table;
 288	struct sched_domain *sd;
 289	int domain_num = 0, i;
 290	char buf[32];
 291
 292	for_each_domain(cpu, sd)
 293		domain_num++;
 294	entry = table = sd_alloc_ctl_entry(domain_num + 1);
 295	if (table == NULL)
 296		return NULL;
 297
 298	i = 0;
 299	for_each_domain(cpu, sd) {
 300		snprintf(buf, 32, "domain%d", i);
 301		entry->procname = kstrdup(buf, GFP_KERNEL);
 302		entry->mode = 0555;
 303		entry->child = sd_alloc_ctl_domain_table(sd);
 304		entry++;
 305		i++;
 306	}
 307	return table;
 308}
 309
 310static cpumask_var_t		sd_sysctl_cpus;
 311static struct ctl_table_header	*sd_sysctl_header;
 312
 313void register_sched_domain_sysctl(void)
 314{
 315	static struct ctl_table *cpu_entries;
 316	static struct ctl_table **cpu_idx;
 317	char buf[32];
 318	int i;
 319
 320	if (!cpu_entries) {
 321		cpu_entries = sd_alloc_ctl_entry(num_possible_cpus() + 1);
 322		if (!cpu_entries)
 323			return;
 324
 325		WARN_ON(sd_ctl_dir[0].child);
 326		sd_ctl_dir[0].child = cpu_entries;
 327	}
 328
 329	if (!cpu_idx) {
 330		struct ctl_table *e = cpu_entries;
 331
 332		cpu_idx = kcalloc(nr_cpu_ids, sizeof(struct ctl_table*), GFP_KERNEL);
 333		if (!cpu_idx)
 334			return;
 335
 336		/* deal with sparse possible map */
 337		for_each_possible_cpu(i) {
 338			cpu_idx[i] = e;
 339			e++;
 340		}
 341	}
 342
 343	if (!cpumask_available(sd_sysctl_cpus)) {
 344		if (!alloc_cpumask_var(&sd_sysctl_cpus, GFP_KERNEL))
 345			return;
 346
 347		/* init to possible to not have holes in @cpu_entries */
 348		cpumask_copy(sd_sysctl_cpus, cpu_possible_mask);
 349	}
 350
 351	for_each_cpu(i, sd_sysctl_cpus) {
 352		struct ctl_table *e = cpu_idx[i];
 353
 354		if (e->child)
 355			sd_free_ctl_entry(&e->child);
 356
 357		if (!e->procname) {
 358			snprintf(buf, 32, "cpu%d", i);
 359			e->procname = kstrdup(buf, GFP_KERNEL);
 360		}
 361		e->mode = 0555;
 362		e->child = sd_alloc_ctl_cpu_table(i);
 363
 364		__cpumask_clear_cpu(i, sd_sysctl_cpus);
 365	}
 366
 367	WARN_ON(sd_sysctl_header);
 368	sd_sysctl_header = register_sysctl_table(sd_ctl_root);
 369}
 370
 371void dirty_sched_domain_sysctl(int cpu)
 372{
 373	if (cpumask_available(sd_sysctl_cpus))
 374		__cpumask_set_cpu(cpu, sd_sysctl_cpus);
 375}
 376
 377/* may be called multiple times per register */
 378void unregister_sched_domain_sysctl(void)
 379{
 380	unregister_sysctl_table(sd_sysctl_header);
 381	sd_sysctl_header = NULL;
 382}
 383#endif /* CONFIG_SYSCTL */
 384#endif /* CONFIG_SMP */
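/* Editor's note: this whole CONFIG_SYSCTL block (sd_alloc_ctl_entry() and
 * the register/unregister_sched_domain_sysctl() machinery) is gone in the
 * v5.14.15 listing below, where update_sched_domain_debugfs() exposes the
 * same per-domain knobs as files under debugfs instead. */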
 385
 386#ifdef CONFIG_FAIR_GROUP_SCHED
 387static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group *tg)
 388{
 389	struct sched_entity *se = tg->se[cpu];
 390
 391#define P(F)		SEQ_printf(m, "  .%-30s: %lld\n",	#F, (long long)F)
 392#define P_SCHEDSTAT(F)	SEQ_printf(m, "  .%-30s: %lld\n",	#F, (long long)schedstat_val(F))
 393#define PN(F)		SEQ_printf(m, "  .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)F))
 394#define PN_SCHEDSTAT(F)	SEQ_printf(m, "  .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)schedstat_val(F)))
 395
 396	if (!se)
 397		return;
 398
 399	PN(se->exec_start);
 400	PN(se->vruntime);
 401	PN(se->sum_exec_runtime);
 402
 403	if (schedstat_enabled()) {
 404		PN_SCHEDSTAT(se->statistics.wait_start);
 405		PN_SCHEDSTAT(se->statistics.sleep_start);
 406		PN_SCHEDSTAT(se->statistics.block_start);
 407		PN_SCHEDSTAT(se->statistics.sleep_max);
 408		PN_SCHEDSTAT(se->statistics.block_max);
 409		PN_SCHEDSTAT(se->statistics.exec_max);
 410		PN_SCHEDSTAT(se->statistics.slice_max);
 411		PN_SCHEDSTAT(se->statistics.wait_max);
 412		PN_SCHEDSTAT(se->statistics.wait_sum);
 413		P_SCHEDSTAT(se->statistics.wait_count);
 414	}
 415
 416	P(se->load.weight);
 417	P(se->runnable_weight);
 418#ifdef CONFIG_SMP
 419	P(se->avg.load_avg);
 420	P(se->avg.util_avg);
 421	P(se->avg.runnable_load_avg);
 422#endif
 423
 424#undef PN_SCHEDSTAT
 425#undef PN
 426#undef P_SCHEDSTAT
 427#undef P
 428}
 429#endif
 430
 431#ifdef CONFIG_CGROUP_SCHED
 432static char group_path[PATH_MAX];
 433
 434static char *task_group_path(struct task_group *tg)
 435{
 436	if (autogroup_path(tg, group_path, PATH_MAX))
 437		return group_path;
 438
 439	cgroup_path(tg->css.cgroup, group_path, PATH_MAX);
 440
 441	return group_path;
 442}
 443#endif
 444
 445static void
 446print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
 447{
 448	if (rq->curr == p)
 449		SEQ_printf(m, ">R");
 450	else
 451		SEQ_printf(m, " %c", task_state_to_char(p));
 452
 453	SEQ_printf(m, "%15s %5d %9Ld.%06ld %9Ld %5d ",
 454		p->comm, task_pid_nr(p),
 455		SPLIT_NS(p->se.vruntime),
 456		(long long)(p->nvcsw + p->nivcsw),
 457		p->prio);
 458
 459	SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld",
 460		SPLIT_NS(schedstat_val_or_zero(p->se.statistics.wait_sum)),
 461		SPLIT_NS(p->se.sum_exec_runtime),
 462		SPLIT_NS(schedstat_val_or_zero(p->se.statistics.sum_sleep_runtime)));
 463
 464#ifdef CONFIG_NUMA_BALANCING
 465	SEQ_printf(m, " %d %d", task_node(p), task_numa_group_id(p));
 466#endif
 467#ifdef CONFIG_CGROUP_SCHED
 468	SEQ_printf(m, " %s", task_group_path(task_group(p)));
 469#endif
 470
 471	SEQ_printf(m, "\n");
 472}
 473
 474static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
 475{
 476	struct task_struct *g, *p;
 477
 478	SEQ_printf(m, "\n");
 479	SEQ_printf(m, "runnable tasks:\n");
 480	SEQ_printf(m, " S           task   PID         tree-key  switches  prio"
 481		   "     wait-time             sum-exec        sum-sleep\n");
 482	SEQ_printf(m, "-------------------------------------------------------"
 483		   "----------------------------------------------------\n");
 484
 485	rcu_read_lock();
 486	for_each_process_thread(g, p) {
 487		if (task_cpu(p) != rq_cpu)
 488			continue;
 489
 490		print_task(m, rq, p);
 491	}
 492	rcu_read_unlock();
 493}
 494
 495void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 496{
 497	s64 MIN_vruntime = -1, min_vruntime, max_vruntime = -1,
 498		spread, rq0_min_vruntime, spread0;
 499	struct rq *rq = cpu_rq(cpu);
 500	struct sched_entity *last;
 501	unsigned long flags;
 502
 503#ifdef CONFIG_FAIR_GROUP_SCHED
 504	SEQ_printf(m, "\n");
 505	SEQ_printf(m, "cfs_rq[%d]:%s\n", cpu, task_group_path(cfs_rq->tg));
 506#else
 507	SEQ_printf(m, "\n");
 508	SEQ_printf(m, "cfs_rq[%d]:\n", cpu);
 509#endif
 510	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "exec_clock",
 511			SPLIT_NS(cfs_rq->exec_clock));
 512
 513	raw_spin_lock_irqsave(&rq->lock, flags);
 514	if (rb_first_cached(&cfs_rq->tasks_timeline))
 515		MIN_vruntime = (__pick_first_entity(cfs_rq))->vruntime;
 516	last = __pick_last_entity(cfs_rq);
 517	if (last)
 518		max_vruntime = last->vruntime;
 519	min_vruntime = cfs_rq->min_vruntime;
 520	rq0_min_vruntime = cpu_rq(0)->cfs.min_vruntime;
 521	raw_spin_unlock_irqrestore(&rq->lock, flags);
 522	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "MIN_vruntime",
 523			SPLIT_NS(MIN_vruntime));
 524	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "min_vruntime",
 525			SPLIT_NS(min_vruntime));
 526	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "max_vruntime",
 527			SPLIT_NS(max_vruntime));
 528	spread = max_vruntime - MIN_vruntime;
 529	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread",
 530			SPLIT_NS(spread));
 531	spread0 = min_vruntime - rq0_min_vruntime;
 532	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread0",
 533			SPLIT_NS(spread0));
 534	SEQ_printf(m, "  .%-30s: %d\n", "nr_spread_over",
 535			cfs_rq->nr_spread_over);
 536	SEQ_printf(m, "  .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
 537	SEQ_printf(m, "  .%-30s: %ld\n", "load", cfs_rq->load.weight);
 538#ifdef CONFIG_SMP
 539	SEQ_printf(m, "  .%-30s: %ld\n", "runnable_weight", cfs_rq->runnable_weight);
 540	SEQ_printf(m, "  .%-30s: %lu\n", "load_avg",
 541			cfs_rq->avg.load_avg);
 542	SEQ_printf(m, "  .%-30s: %lu\n", "runnable_load_avg",
 543			cfs_rq->avg.runnable_load_avg);
 544	SEQ_printf(m, "  .%-30s: %lu\n", "util_avg",
 545			cfs_rq->avg.util_avg);
 546	SEQ_printf(m, "  .%-30s: %u\n", "util_est_enqueued",
 547			cfs_rq->avg.util_est.enqueued);
 548	SEQ_printf(m, "  .%-30s: %ld\n", "removed.load_avg",
 549			cfs_rq->removed.load_avg);
 550	SEQ_printf(m, "  .%-30s: %ld\n", "removed.util_avg",
 551			cfs_rq->removed.util_avg);
 552	SEQ_printf(m, "  .%-30s: %ld\n", "removed.runnable_sum",
 553			cfs_rq->removed.runnable_sum);
 554#ifdef CONFIG_FAIR_GROUP_SCHED
 555	SEQ_printf(m, "  .%-30s: %lu\n", "tg_load_avg_contrib",
 556			cfs_rq->tg_load_avg_contrib);
 557	SEQ_printf(m, "  .%-30s: %ld\n", "tg_load_avg",
 558			atomic_long_read(&cfs_rq->tg->load_avg));
 559#endif
 560#endif
 561#ifdef CONFIG_CFS_BANDWIDTH
 562	SEQ_printf(m, "  .%-30s: %d\n", "throttled",
 563			cfs_rq->throttled);
 564	SEQ_printf(m, "  .%-30s: %d\n", "throttle_count",
 565			cfs_rq->throttle_count);
 566#endif
 567
 568#ifdef CONFIG_FAIR_GROUP_SCHED
 569	print_cfs_group_stats(m, cpu, cfs_rq->tg);
 570#endif
 571}
 572
 573void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
 574{
 575#ifdef CONFIG_RT_GROUP_SCHED
 576	SEQ_printf(m, "\n");
 577	SEQ_printf(m, "rt_rq[%d]:%s\n", cpu, task_group_path(rt_rq->tg));
 578#else
 579	SEQ_printf(m, "\n");
 580	SEQ_printf(m, "rt_rq[%d]:\n", cpu);
 581#endif
 582
 583#define P(x) \
 584	SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rt_rq->x))
 585#define PU(x) \
 586	SEQ_printf(m, "  .%-30s: %lu\n", #x, (unsigned long)(rt_rq->x))
 587#define PN(x) \
 588	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rt_rq->x))
 589
 590	PU(rt_nr_running);
 591#ifdef CONFIG_SMP
 592	PU(rt_nr_migratory);
 593#endif
 594	P(rt_throttled);
 595	PN(rt_time);
 596	PN(rt_runtime);
 597
 598#undef PN
 599#undef PU
 600#undef P
 601}
 602
 603void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq)
 604{
 605	struct dl_bw *dl_bw;
 606
 607	SEQ_printf(m, "\n");
 608	SEQ_printf(m, "dl_rq[%d]:\n", cpu);
 609
 610#define PU(x) \
 611	SEQ_printf(m, "  .%-30s: %lu\n", #x, (unsigned long)(dl_rq->x))
 612
 613	PU(dl_nr_running);
 614#ifdef CONFIG_SMP
 615	PU(dl_nr_migratory);
 616	dl_bw = &cpu_rq(cpu)->rd->dl_bw;
 617#else
 618	dl_bw = &dl_rq->dl_bw;
 619#endif
 620	SEQ_printf(m, "  .%-30s: %lld\n", "dl_bw->bw", dl_bw->bw);
 621	SEQ_printf(m, "  .%-30s: %lld\n", "dl_bw->total_bw", dl_bw->total_bw);
 622
 623#undef PU
 624}
 625
 626extern __read_mostly int sched_clock_running;
 627
 628static void print_cpu(struct seq_file *m, int cpu)
 629{
 630	struct rq *rq = cpu_rq(cpu);
 631	unsigned long flags;
 632
 633#ifdef CONFIG_X86
 634	{
 635		unsigned int freq = cpu_khz ? : 1;
 636
 637		SEQ_printf(m, "cpu#%d, %u.%03u MHz\n",
 638			   cpu, freq / 1000, (freq % 1000));
 639	}
 640#else
 641	SEQ_printf(m, "cpu#%d\n", cpu);
 642#endif
 643
 644#define P(x)								\
 645do {									\
 646	if (sizeof(rq->x) == 4)						\
 647		SEQ_printf(m, "  .%-30s: %ld\n", #x, (long)(rq->x));	\
 648	else								\
 649		SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rq->x));\
 650} while (0)
 651
 652#define PN(x) \
 653	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rq->x))
 654
 655	P(nr_running);
 656	SEQ_printf(m, "  .%-30s: %lu\n", "load",
 657		   rq->load.weight);
 658	P(nr_switches);
 659	P(nr_load_updates);
 660	P(nr_uninterruptible);
 661	PN(next_balance);
 662	SEQ_printf(m, "  .%-30s: %ld\n", "curr->pid", (long)(task_pid_nr(rq->curr)));
 663	PN(clock);
 664	PN(clock_task);
 665	P(cpu_load[0]);
 666	P(cpu_load[1]);
 667	P(cpu_load[2]);
 668	P(cpu_load[3]);
 669	P(cpu_load[4]);
 670#undef P
 671#undef PN
 672
 673#ifdef CONFIG_SMP
 674#define P64(n) SEQ_printf(m, "  .%-30s: %Ld\n", #n, rq->n);
 675	P64(avg_idle);
 676	P64(max_idle_balance_cost);
 677#undef P64
 678#endif
 679
 680#define P(n) SEQ_printf(m, "  .%-30s: %d\n", #n, schedstat_val(rq->n));
 681	if (schedstat_enabled()) {
 682		P(yld_count);
 683		P(sched_count);
 684		P(sched_goidle);
 685		P(ttwu_count);
 686		P(ttwu_local);
 687	}
 688#undef P
 689
 690	spin_lock_irqsave(&sched_debug_lock, flags);
 691	print_cfs_stats(m, cpu);
 692	print_rt_stats(m, cpu);
 693	print_dl_stats(m, cpu);
 694
 695	print_rq(m, rq, cpu);
 696	spin_unlock_irqrestore(&sched_debug_lock, flags);
 697	SEQ_printf(m, "\n");
 698}
 699
 700static const char *sched_tunable_scaling_names[] = {
 701	"none",
  702	"logarithmic",
 703	"linear"
 704};
 705
 706static void sched_debug_header(struct seq_file *m)
 707{
 708	u64 ktime, sched_clk, cpu_clk;
 709	unsigned long flags;
 710
 711	local_irq_save(flags);
 712	ktime = ktime_to_ns(ktime_get());
 713	sched_clk = sched_clock();
 714	cpu_clk = local_clock();
 715	local_irq_restore(flags);
 716
 717	SEQ_printf(m, "Sched Debug Version: v0.11, %s %.*s\n",
 718		init_utsname()->release,
 719		(int)strcspn(init_utsname()->version, " "),
 720		init_utsname()->version);
 721
 722#define P(x) \
 723	SEQ_printf(m, "%-40s: %Ld\n", #x, (long long)(x))
 724#define PN(x) \
 725	SEQ_printf(m, "%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
 726	PN(ktime);
 727	PN(sched_clk);
 728	PN(cpu_clk);
 729	P(jiffies);
 730#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
 731	P(sched_clock_stable());
 732#endif
 733#undef PN
 734#undef P
 735
 736	SEQ_printf(m, "\n");
 737	SEQ_printf(m, "sysctl_sched\n");
 738
 739#define P(x) \
 740	SEQ_printf(m, "  .%-40s: %Ld\n", #x, (long long)(x))
 741#define PN(x) \
 742	SEQ_printf(m, "  .%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
 743	PN(sysctl_sched_latency);
 744	PN(sysctl_sched_min_granularity);
 745	PN(sysctl_sched_wakeup_granularity);
 746	P(sysctl_sched_child_runs_first);
 747	P(sysctl_sched_features);
 748#undef PN
 749#undef P
 750
 751	SEQ_printf(m, "  .%-40s: %d (%s)\n",
 752		"sysctl_sched_tunable_scaling",
 753		sysctl_sched_tunable_scaling,
 754		sched_tunable_scaling_names[sysctl_sched_tunable_scaling]);
 755	SEQ_printf(m, "\n");
 756}
 757
 758static int sched_debug_show(struct seq_file *m, void *v)
 759{
 760	int cpu = (unsigned long)(v - 2);
 761
 762	if (cpu != -1)
 763		print_cpu(m, cpu);
 764	else
 765		sched_debug_header(m);
 766
 767	return 0;
 768}
 769
 770void sysrq_sched_debug_show(void)
 771{
 772	int cpu;
 773
 774	sched_debug_header(NULL);
 775	for_each_online_cpu(cpu)
 776		print_cpu(NULL, cpu);
 777
 778}
 779
 780/*
  781 * This iterator needs some explanation.
 782 * It returns 1 for the header position.
 783 * This means 2 is CPU 0.
 784 * In a hotplugged system some CPUs, including CPU 0, may be missing so we have
 785 * to use cpumask_* to iterate over the CPUs.
 786 */
 787static void *sched_debug_start(struct seq_file *file, loff_t *offset)
 788{
 789	unsigned long n = *offset;
 790
 791	if (n == 0)
 792		return (void *) 1;
 793
 794	n--;
 795
 796	if (n > 0)
 797		n = cpumask_next(n - 1, cpu_online_mask);
 798	else
 799		n = cpumask_first(cpu_online_mask);
 800
 801	*offset = n + 1;
 802
 803	if (n < nr_cpu_ids)
 804		return (void *)(unsigned long)(n + 2);
 805
 806	return NULL;
 807}
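/* Editor's note -- a worked trace of sched_debug_start(), assuming CPUs
 * {0, 1, 3} are online and nr_cpu_ids == 4:
 *   start(*offset == 0) -> returns 1               (sched_debug_show() sees cpu == -1: header)
 *   start(*offset == 1) -> returns 2               (cpumask_first() == 0: CPU 0)
 *   start(*offset == 2) -> returns 3               (cpumask_next(0) == 1: CPU 1)
 *   start(*offset == 3) -> returns 5, *offset = 4  (cpumask_next(1) == 3: CPU 3, hole skipped)
 *   start(*offset == 5) -> returns NULL            (cpumask_next(3) >= nr_cpu_ids: done)
 * sched_debug_next() simply bumps *offset and calls sched_debug_start() again. */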
 808
 809static void *sched_debug_next(struct seq_file *file, void *data, loff_t *offset)
 810{
 811	(*offset)++;
 812	return sched_debug_start(file, offset);
 813}
 814
 815static void sched_debug_stop(struct seq_file *file, void *data)
 816{
 817}
 818
 819static const struct seq_operations sched_debug_sops = {
 820	.start		= sched_debug_start,
 821	.next		= sched_debug_next,
 822	.stop		= sched_debug_stop,
 823	.show		= sched_debug_show,
 824};
 825
 826static int sched_debug_release(struct inode *inode, struct file *file)
 827{
 828	seq_release(inode, file);
 829
 830	return 0;
 831}
 832
 833static int sched_debug_open(struct inode *inode, struct file *filp)
 834{
 835	int ret = 0;
 836
 837	ret = seq_open(filp, &sched_debug_sops);
 838
 839	return ret;
 840}
 841
 842static const struct file_operations sched_debug_fops = {
 843	.open		= sched_debug_open,
 844	.read		= seq_read,
 845	.llseek		= seq_lseek,
 846	.release	= sched_debug_release,
 847};
 848
 849static int __init init_sched_debug_procfs(void)
 850{
 851	struct proc_dir_entry *pe;
 852
 853	pe = proc_create("sched_debug", 0444, NULL, &sched_debug_fops);
 854	if (!pe)
 855		return -ENOMEM;
 856	return 0;
 857}
 858
 859__initcall(init_sched_debug_procfs);
 860
 861#define __P(F)	SEQ_printf(m, "%-45s:%21Ld\n",	     #F, (long long)F)
 862#define   P(F)	SEQ_printf(m, "%-45s:%21Ld\n",	     #F, (long long)p->F)
 863#define __PN(F)	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)F))
 864#define   PN(F)	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)p->F))
 865
 866
 867#ifdef CONFIG_NUMA_BALANCING
 868void print_numa_stats(struct seq_file *m, int node, unsigned long tsf,
 869		unsigned long tpf, unsigned long gsf, unsigned long gpf)
 870{
 871	SEQ_printf(m, "numa_faults node=%d ", node);
 872	SEQ_printf(m, "task_private=%lu task_shared=%lu ", tsf, tpf);
 873	SEQ_printf(m, "group_private=%lu group_shared=%lu\n", gsf, gpf);
 874}
 875#endif
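/* Editor's note: reading tsf/tpf as task shared/private faults, this v4.17
 * version prints the shared counts (tsf, gsf) under the
 * task_private/group_private labels and vice versa; the v5.14.15 listing
 * below passes tpf and gpf first, so values and labels line up. */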
 876
 877
 878static void sched_show_numa(struct task_struct *p, struct seq_file *m)
 879{
 880#ifdef CONFIG_NUMA_BALANCING
 881	struct mempolicy *pol;
 882
 883	if (p->mm)
 884		P(mm->numa_scan_seq);
 885
 886	task_lock(p);
 887	pol = p->mempolicy;
 888	if (pol && !(pol->flags & MPOL_F_MORON))
 889		pol = NULL;
 890	mpol_get(pol);
 891	task_unlock(p);
 892
 893	P(numa_pages_migrated);
 894	P(numa_preferred_nid);
 895	P(total_numa_faults);
 896	SEQ_printf(m, "current_node=%d, numa_group_id=%d\n",
 897			task_node(p), task_numa_group_id(p));
 898	show_numa_stats(p, m);
 899	mpol_put(pol);
 900#endif
 901}
 902
 903void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
 904						  struct seq_file *m)
 905{
 906	unsigned long nr_switches;
 907
 908	SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, task_pid_nr_ns(p, ns),
 909						get_nr_threads(p));
 910	SEQ_printf(m,
 911		"---------------------------------------------------------"
 912		"----------\n");
 913#define __P(F) \
 914	SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)F)
 915#define P(F) \
 916	SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)p->F)
 917#define P_SCHEDSTAT(F) \
 918	SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)schedstat_val(p->F))
 919#define __PN(F) \
 920	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)F))
 921#define PN(F) \
 922	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)p->F))
 923#define PN_SCHEDSTAT(F) \
 924	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)schedstat_val(p->F)))
 925
 926	PN(se.exec_start);
 927	PN(se.vruntime);
 928	PN(se.sum_exec_runtime);
 929
 930	nr_switches = p->nvcsw + p->nivcsw;
 931
 932	P(se.nr_migrations);
 933
 934	if (schedstat_enabled()) {
 935		u64 avg_atom, avg_per_cpu;
 936
 937		PN_SCHEDSTAT(se.statistics.sum_sleep_runtime);
 938		PN_SCHEDSTAT(se.statistics.wait_start);
 939		PN_SCHEDSTAT(se.statistics.sleep_start);
 940		PN_SCHEDSTAT(se.statistics.block_start);
 941		PN_SCHEDSTAT(se.statistics.sleep_max);
 942		PN_SCHEDSTAT(se.statistics.block_max);
 943		PN_SCHEDSTAT(se.statistics.exec_max);
 944		PN_SCHEDSTAT(se.statistics.slice_max);
 945		PN_SCHEDSTAT(se.statistics.wait_max);
 946		PN_SCHEDSTAT(se.statistics.wait_sum);
 947		P_SCHEDSTAT(se.statistics.wait_count);
 948		PN_SCHEDSTAT(se.statistics.iowait_sum);
 949		P_SCHEDSTAT(se.statistics.iowait_count);
 950		P_SCHEDSTAT(se.statistics.nr_migrations_cold);
 951		P_SCHEDSTAT(se.statistics.nr_failed_migrations_affine);
 952		P_SCHEDSTAT(se.statistics.nr_failed_migrations_running);
 953		P_SCHEDSTAT(se.statistics.nr_failed_migrations_hot);
 954		P_SCHEDSTAT(se.statistics.nr_forced_migrations);
 955		P_SCHEDSTAT(se.statistics.nr_wakeups);
 956		P_SCHEDSTAT(se.statistics.nr_wakeups_sync);
 957		P_SCHEDSTAT(se.statistics.nr_wakeups_migrate);
 958		P_SCHEDSTAT(se.statistics.nr_wakeups_local);
 959		P_SCHEDSTAT(se.statistics.nr_wakeups_remote);
 960		P_SCHEDSTAT(se.statistics.nr_wakeups_affine);
 961		P_SCHEDSTAT(se.statistics.nr_wakeups_affine_attempts);
 962		P_SCHEDSTAT(se.statistics.nr_wakeups_passive);
 963		P_SCHEDSTAT(se.statistics.nr_wakeups_idle);
 964
 965		avg_atom = p->se.sum_exec_runtime;
 966		if (nr_switches)
 967			avg_atom = div64_ul(avg_atom, nr_switches);
 968		else
 969			avg_atom = -1LL;
 970
 971		avg_per_cpu = p->se.sum_exec_runtime;
 972		if (p->se.nr_migrations) {
 973			avg_per_cpu = div64_u64(avg_per_cpu,
 974						p->se.nr_migrations);
 975		} else {
 976			avg_per_cpu = -1LL;
 977		}
 978
 979		__PN(avg_atom);
 980		__PN(avg_per_cpu);
 981	}
 982
 983	__P(nr_switches);
 984	SEQ_printf(m, "%-45s:%21Ld\n",
 985		   "nr_voluntary_switches", (long long)p->nvcsw);
 986	SEQ_printf(m, "%-45s:%21Ld\n",
 987		   "nr_involuntary_switches", (long long)p->nivcsw);
 988
 989	P(se.load.weight);
 990	P(se.runnable_weight);
 991#ifdef CONFIG_SMP
 992	P(se.avg.load_sum);
 993	P(se.avg.runnable_load_sum);
 994	P(se.avg.util_sum);
 995	P(se.avg.load_avg);
 996	P(se.avg.runnable_load_avg);
 997	P(se.avg.util_avg);
 998	P(se.avg.last_update_time);
 999	P(se.avg.util_est.ewma);
1000	P(se.avg.util_est.enqueued);
1001#endif
1002	P(policy);
1003	P(prio);
1004	if (p->policy == SCHED_DEADLINE) {
1005		P(dl.runtime);
1006		P(dl.deadline);
1007	}
1008#undef PN_SCHEDSTAT
1009#undef PN
1010#undef __PN
1011#undef P_SCHEDSTAT
1012#undef P
1013#undef __P
1014
1015	{
1016		unsigned int this_cpu = raw_smp_processor_id();
1017		u64 t0, t1;
1018
1019		t0 = cpu_clock(this_cpu);
1020		t1 = cpu_clock(this_cpu);
1021		SEQ_printf(m, "%-45s:%21Ld\n",
1022			   "clock-delta", (long long)(t1-t0));
1023	}
1024
1025	sched_show_numa(p, m);
1026}
1027
1028void proc_sched_set_task(struct task_struct *p)
1029{
1030#ifdef CONFIG_SCHEDSTATS
1031	memset(&p->se.statistics, 0, sizeof(p->se.statistics));
1032#endif
1033}
v5.14.15
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * kernel/sched/debug.c
   4 *
   5 * Print the CFS rbtree and other debugging details
   6 *
   7 * Copyright(C) 2007, Red Hat, Inc., Ingo Molnar
   8 */
   9#include "sched.h"
  10
  11/*
  12 * This allows printing both to /proc/sched_debug and
  13 * to the console
  14 */
  15#define SEQ_printf(m, x...)			\
  16 do {						\
  17	if (m)					\
  18		seq_printf(m, x);		\
  19	else					\
  20		pr_cont(x);			\
  21 } while (0)
  22
  23/*
  24 * Ease the printing of nsec fields:
  25 */
  26static long long nsec_high(unsigned long long nsec)
  27{
  28	if ((long long)nsec < 0) {
  29		nsec = -nsec;
  30		do_div(nsec, 1000000);
  31		return -nsec;
  32	}
  33	do_div(nsec, 1000000);
  34
  35	return nsec;
  36}
  37
  38static unsigned long nsec_low(unsigned long long nsec)
  39{
  40	if ((long long)nsec < 0)
  41		nsec = -nsec;
  42
  43	return do_div(nsec, 1000000);
  44}
  45
  46#define SPLIT_NS(x) nsec_high(x), nsec_low(x)
  47
  48#define SCHED_FEAT(name, enabled)	\
  49	#name ,
  50
  51static const char * const sched_feat_names[] = {
  52#include "features.h"
  53};
  54
  55#undef SCHED_FEAT
  56
  57static int sched_feat_show(struct seq_file *m, void *v)
  58{
  59	int i;
  60
  61	for (i = 0; i < __SCHED_FEAT_NR; i++) {
  62		if (!(sysctl_sched_features & (1UL << i)))
  63			seq_puts(m, "NO_");
  64		seq_printf(m, "%s ", sched_feat_names[i]);
  65	}
  66	seq_puts(m, "\n");
  67
  68	return 0;
  69}
  70
  71#ifdef CONFIG_JUMP_LABEL
  72
  73#define jump_label_key__true  STATIC_KEY_INIT_TRUE
  74#define jump_label_key__false STATIC_KEY_INIT_FALSE
  75
  76#define SCHED_FEAT(name, enabled)	\
  77	jump_label_key__##enabled ,
  78
  79struct static_key sched_feat_keys[__SCHED_FEAT_NR] = {
  80#include "features.h"
  81};
  82
  83#undef SCHED_FEAT
  84
  85static void sched_feat_disable(int i)
  86{
  87	static_key_disable_cpuslocked(&sched_feat_keys[i]);
  88}
  89
  90static void sched_feat_enable(int i)
  91{
  92	static_key_enable_cpuslocked(&sched_feat_keys[i]);
  93}
  94#else
  95static void sched_feat_disable(int i) { };
  96static void sched_feat_enable(int i) { };
  97#endif /* CONFIG_JUMP_LABEL */
  98
  99static int sched_feat_set(char *cmp)
 100{
 101	int i;
 102	int neg = 0;
 103
 104	if (strncmp(cmp, "NO_", 3) == 0) {
 105		neg = 1;
 106		cmp += 3;
 107	}
 108
 109	i = match_string(sched_feat_names, __SCHED_FEAT_NR, cmp);
 110	if (i < 0)
 111		return i;
 112
 113	if (neg) {
 114		sysctl_sched_features &= ~(1UL << i);
 115		sched_feat_disable(i);
 116	} else {
 117		sysctl_sched_features |= (1UL << i);
 118		sched_feat_enable(i);
 119	}
 120
 121	return 0;
 122}
 123
 124static ssize_t
 125sched_feat_write(struct file *filp, const char __user *ubuf,
 126		size_t cnt, loff_t *ppos)
 127{
 128	char buf[64];
 129	char *cmp;
 130	int ret;
 131	struct inode *inode;
 132
 133	if (cnt > 63)
 134		cnt = 63;
 135
 136	if (copy_from_user(&buf, ubuf, cnt))
 137		return -EFAULT;
 138
 139	buf[cnt] = 0;
 140	cmp = strstrip(buf);
 141
 142	/* Ensure the static_key remains in a consistent state */
 143	inode = file_inode(filp);
 144	cpus_read_lock();
 145	inode_lock(inode);
 146	ret = sched_feat_set(cmp);
 147	inode_unlock(inode);
 148	cpus_read_unlock();
 149	if (ret < 0)
 150		return ret;
 151
 152	*ppos += cnt;
 153
 154	return cnt;
 155}
 156
 157static int sched_feat_open(struct inode *inode, struct file *filp)
 158{
 159	return single_open(filp, sched_feat_show, NULL);
 160}
 161
 162static const struct file_operations sched_feat_fops = {
 163	.open		= sched_feat_open,
 164	.write		= sched_feat_write,
 165	.read		= seq_read,
 166	.llseek		= seq_lseek,
 167	.release	= single_release,
 168};
 169
 170#ifdef CONFIG_SMP
 171
 172static ssize_t sched_scaling_write(struct file *filp, const char __user *ubuf,
 173				   size_t cnt, loff_t *ppos)
 174{
 175	char buf[16];
 176	unsigned int scaling;
 177
 178	if (cnt > 15)
 179		cnt = 15;
 180
 181	if (copy_from_user(&buf, ubuf, cnt))
 182		return -EFAULT;
 183	buf[cnt] = '\0';
 184
 185	if (kstrtouint(buf, 10, &scaling))
 186		return -EINVAL;
 187
 188	if (scaling >= SCHED_TUNABLESCALING_END)
 189		return -EINVAL;
 190
 191	sysctl_sched_tunable_scaling = scaling;
 192	if (sched_update_scaling())
 193		return -EINVAL;
 194
 195	*ppos += cnt;
 196	return cnt;
 197}
 198
 199static int sched_scaling_show(struct seq_file *m, void *v)
 200{
 201	seq_printf(m, "%d\n", sysctl_sched_tunable_scaling);
 202	return 0;
 203}
 204
 205static int sched_scaling_open(struct inode *inode, struct file *filp)
 206{
 207	return single_open(filp, sched_scaling_show, NULL);
 208}
 209
 210static const struct file_operations sched_scaling_fops = {
 211	.open		= sched_scaling_open,
 212	.write		= sched_scaling_write,
 213	.read		= seq_read,
 214	.llseek		= seq_lseek,
 215	.release	= single_release,
 216};
 217
 218#endif /* SMP */
 219
 220#ifdef CONFIG_PREEMPT_DYNAMIC
 221
 222static ssize_t sched_dynamic_write(struct file *filp, const char __user *ubuf,
 223				   size_t cnt, loff_t *ppos)
 224{
 225	char buf[16];
 226	int mode;
 227
 228	if (cnt > 15)
 229		cnt = 15;
 230
 231	if (copy_from_user(&buf, ubuf, cnt))
 232		return -EFAULT;
 233
 234	buf[cnt] = 0;
 235	mode = sched_dynamic_mode(strstrip(buf));
 236	if (mode < 0)
 237		return mode;
 238
 239	sched_dynamic_update(mode);
 240
 241	*ppos += cnt;
 242
 243	return cnt;
 244}
 245
 246static int sched_dynamic_show(struct seq_file *m, void *v)
 247{
 248	static const char * preempt_modes[] = {
 249		"none", "voluntary", "full"
 250	};
 251	int i;
 252
 253	for (i = 0; i < ARRAY_SIZE(preempt_modes); i++) {
 254		if (preempt_dynamic_mode == i)
 255			seq_puts(m, "(");
 256		seq_puts(m, preempt_modes[i]);
 257		if (preempt_dynamic_mode == i)
 258			seq_puts(m, ")");
 259
 260		seq_puts(m, " ");
 261	}
 262
 263	seq_puts(m, "\n");
 264	return 0;
 265}
 266
 267static int sched_dynamic_open(struct inode *inode, struct file *filp)
 268{
 269	return single_open(filp, sched_dynamic_show, NULL);
 270}
 271
 272static const struct file_operations sched_dynamic_fops = {
 273	.open		= sched_dynamic_open,
 274	.write		= sched_dynamic_write,
 275	.read		= seq_read,
 276	.llseek		= seq_lseek,
 277	.release	= single_release,
 278};
 279
 280#endif /* CONFIG_PREEMPT_DYNAMIC */
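/* Editor's sketch (assumes debugfs is mounted at /sys/kernel/debug):
 * reading the preemption model back through the "preempt" file that
 * sched_init_debug() below creates under the "sched" directory. The active
 * mode is printed in parentheses, e.g. "none voluntary (full)". */
#include <stdio.h>

int main(void)
{
	char line[64];
	FILE *f = fopen("/sys/kernel/debug/sched/preempt", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}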
 281
 282__read_mostly bool sched_debug_verbose;
 283
 284static const struct seq_operations sched_debug_sops;
 285
 286static int sched_debug_open(struct inode *inode, struct file *filp)
 287{
 288	return seq_open(filp, &sched_debug_sops);
 289}
 290
 291static const struct file_operations sched_debug_fops = {
 292	.open		= sched_debug_open,
 293	.read		= seq_read,
 294	.llseek		= seq_lseek,
 295	.release	= seq_release,
 296};
 297
 298static struct dentry *debugfs_sched;
 299
 300static __init int sched_init_debug(void)
 301{
 302	struct dentry __maybe_unused *numa;
 303
 304	debugfs_sched = debugfs_create_dir("sched", NULL);
 305
 306	debugfs_create_file("features", 0644, debugfs_sched, NULL, &sched_feat_fops);
 307	debugfs_create_bool("verbose", 0644, debugfs_sched, &sched_debug_verbose);
 308#ifdef CONFIG_PREEMPT_DYNAMIC
 309	debugfs_create_file("preempt", 0644, debugfs_sched, NULL, &sched_dynamic_fops);
 310#endif
 311
 312	debugfs_create_u32("latency_ns", 0644, debugfs_sched, &sysctl_sched_latency);
 313	debugfs_create_u32("min_granularity_ns", 0644, debugfs_sched, &sysctl_sched_min_granularity);
 314	debugfs_create_u32("wakeup_granularity_ns", 0644, debugfs_sched, &sysctl_sched_wakeup_granularity);
 315
 316	debugfs_create_u32("latency_warn_ms", 0644, debugfs_sched, &sysctl_resched_latency_warn_ms);
 317	debugfs_create_u32("latency_warn_once", 0644, debugfs_sched, &sysctl_resched_latency_warn_once);
 318
 319#ifdef CONFIG_SMP
 320	debugfs_create_file("tunable_scaling", 0644, debugfs_sched, NULL, &sched_scaling_fops);
 321	debugfs_create_u32("migration_cost_ns", 0644, debugfs_sched, &sysctl_sched_migration_cost);
 322	debugfs_create_u32("nr_migrate", 0644, debugfs_sched, &sysctl_sched_nr_migrate);
 323
 324	mutex_lock(&sched_domains_mutex);
 325	update_sched_domain_debugfs();
 326	mutex_unlock(&sched_domains_mutex);
 327#endif
 328
 329#ifdef CONFIG_NUMA_BALANCING
 330	numa = debugfs_create_dir("numa_balancing", debugfs_sched);
 331
 332	debugfs_create_u32("scan_delay_ms", 0644, numa, &sysctl_numa_balancing_scan_delay);
 333	debugfs_create_u32("scan_period_min_ms", 0644, numa, &sysctl_numa_balancing_scan_period_min);
 334	debugfs_create_u32("scan_period_max_ms", 0644, numa, &sysctl_numa_balancing_scan_period_max);
 335	debugfs_create_u32("scan_size_mb", 0644, numa, &sysctl_numa_balancing_scan_size);
 336#endif
 337
 338	debugfs_create_file("debug", 0444, debugfs_sched, NULL, &sched_debug_fops);
 339
 340	return 0;
 341}
 342late_initcall(sched_init_debug);
 343
 344#ifdef CONFIG_SMP
 345
 346static cpumask_var_t		sd_sysctl_cpus;
 347static struct dentry		*sd_dentry;
 348
 349static int sd_flags_show(struct seq_file *m, void *v)
 350{
 351	unsigned long flags = *(unsigned int *)m->private;
 352	int idx;
 353
 354	for_each_set_bit(idx, &flags, __SD_FLAG_CNT) {
 355		seq_puts(m, sd_flag_debug[idx].name);
 356		seq_puts(m, " ");
 357	}
 358	seq_puts(m, "\n");
 359
 360	return 0;
 361}
 362
 363static int sd_flags_open(struct inode *inode, struct file *file)
 364{
 365	return single_open(file, sd_flags_show, inode->i_private);
 366}
 367
 368static const struct file_operations sd_flags_fops = {
 369	.open		= sd_flags_open,
 370	.read		= seq_read,
 371	.llseek		= seq_lseek,
 372	.release	= single_release,
 373};
 374
 375static void register_sd(struct sched_domain *sd, struct dentry *parent)
 376{
 377#define SDM(type, mode, member)	\
 378	debugfs_create_##type(#member, mode, parent, &sd->member)
 379
 380	SDM(ulong, 0644, min_interval);
 381	SDM(ulong, 0644, max_interval);
 382	SDM(u64,   0644, max_newidle_lb_cost);
 383	SDM(u32,   0644, busy_factor);
 384	SDM(u32,   0644, imbalance_pct);
 385	SDM(u32,   0644, cache_nice_tries);
 386	SDM(str,   0444, name);
 387
 388#undef SDM
 389
 390	debugfs_create_file("flags", 0444, parent, &sd->flags, &sd_flags_fops);
 391}
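/* Editor's note: SDM() token-pastes the type into the debugfs helper name,
 * so e.g. SDM(ulong, 0644, min_interval) above expands to:
 *   debugfs_create_ulong("min_interval", 0644, parent, &sd->min_interval);
 */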
 392
 393void update_sched_domain_debugfs(void)
 394{
 395	int cpu, i;
 396
 397	/*
 398	 * This can unfortunately be invoked before sched_debug_init() creates
 399	 * the debug directory. Don't touch sd_sysctl_cpus until then.
 400	 */
 401	if (!debugfs_sched)
 402		return;
 403
 404	if (!cpumask_available(sd_sysctl_cpus)) {
 405		if (!alloc_cpumask_var(&sd_sysctl_cpus, GFP_KERNEL))
 406			return;
 407		cpumask_copy(sd_sysctl_cpus, cpu_possible_mask);
 408	}
 409
 410	if (!sd_dentry)
 411		sd_dentry = debugfs_create_dir("domains", debugfs_sched);
 412
 413	for_each_cpu(cpu, sd_sysctl_cpus) {
 414		struct sched_domain *sd;
 415		struct dentry *d_cpu;
 416		char buf[32];
 417
 418		snprintf(buf, sizeof(buf), "cpu%d", cpu);
 419		debugfs_remove(debugfs_lookup(buf, sd_dentry));
 420		d_cpu = debugfs_create_dir(buf, sd_dentry);
 421
 422		i = 0;
 423		for_each_domain(cpu, sd) {
 424			struct dentry *d_sd;
 425
 426			snprintf(buf, sizeof(buf), "domain%d", i);
 427			d_sd = debugfs_create_dir(buf, d_cpu);
 428
 429			register_sd(sd, d_sd);
 430			i++;
 431		}
 432
 433		__cpumask_clear_cpu(cpu, sd_sysctl_cpus);
 434	}
 435}
 436
 437void dirty_sched_domain_sysctl(int cpu)
 438{
 439	if (cpumask_available(sd_sysctl_cpus))
 440		__cpumask_set_cpu(cpu, sd_sysctl_cpus);
 441}
 442
 443#endif /* CONFIG_SMP */
 444
 445#ifdef CONFIG_FAIR_GROUP_SCHED
 446static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group *tg)
 447{
 448	struct sched_entity *se = tg->se[cpu];
 449
 450#define P(F)		SEQ_printf(m, "  .%-30s: %lld\n",	#F, (long long)F)
 451#define P_SCHEDSTAT(F)	SEQ_printf(m, "  .%-30s: %lld\n",	#F, (long long)schedstat_val(F))
 452#define PN(F)		SEQ_printf(m, "  .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)F))
 453#define PN_SCHEDSTAT(F)	SEQ_printf(m, "  .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)schedstat_val(F)))
 454
 455	if (!se)
 456		return;
 457
 458	PN(se->exec_start);
 459	PN(se->vruntime);
 460	PN(se->sum_exec_runtime);
 461
 462	if (schedstat_enabled()) {
 463		PN_SCHEDSTAT(se->statistics.wait_start);
 464		PN_SCHEDSTAT(se->statistics.sleep_start);
 465		PN_SCHEDSTAT(se->statistics.block_start);
 466		PN_SCHEDSTAT(se->statistics.sleep_max);
 467		PN_SCHEDSTAT(se->statistics.block_max);
 468		PN_SCHEDSTAT(se->statistics.exec_max);
 469		PN_SCHEDSTAT(se->statistics.slice_max);
 470		PN_SCHEDSTAT(se->statistics.wait_max);
 471		PN_SCHEDSTAT(se->statistics.wait_sum);
 472		P_SCHEDSTAT(se->statistics.wait_count);
 473	}
 474
 475	P(se->load.weight);
 476#ifdef CONFIG_SMP
 477	P(se->avg.load_avg);
 478	P(se->avg.util_avg);
 479	P(se->avg.runnable_avg);
 480#endif
 481
 482#undef PN_SCHEDSTAT
 483#undef PN
 484#undef P_SCHEDSTAT
 485#undef P
 486}
 487#endif
 488
 489#ifdef CONFIG_CGROUP_SCHED
 490static DEFINE_SPINLOCK(sched_debug_lock);
 491static char group_path[PATH_MAX];
 492
 493static void task_group_path(struct task_group *tg, char *path, int plen)
 494{
 495	if (autogroup_path(tg, path, plen))
 496		return;
 497
 498	cgroup_path(tg->css.cgroup, path, plen);
 499}
 500
 501/*
 502 * Only 1 SEQ_printf_task_group_path() caller can use the full length
 503 * group_path[] for cgroup path. Other simultaneous callers will have
 504 * to use a shorter stack buffer. A "..." suffix is appended at the end
 505 * of the stack buffer so that it will show up in case the output length
 506 * matches the given buffer size to indicate possible path name truncation.
 507 */
 508#define SEQ_printf_task_group_path(m, tg, fmt...)			\
 509{									\
 510	if (spin_trylock(&sched_debug_lock)) {				\
 511		task_group_path(tg, group_path, sizeof(group_path));	\
 512		SEQ_printf(m, fmt, group_path);				\
 513		spin_unlock(&sched_debug_lock);				\
 514	} else {							\
 515		char buf[128];						\
 516		char *bufend = buf + sizeof(buf) - 3;			\
 517		task_group_path(tg, buf, bufend - buf);			\
 518		strcpy(bufend - 1, "...");				\
 519		SEQ_printf(m, fmt, buf);				\
 520	}								\
 521}
 522#endif
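/* Editor's note: in the trylock-failure branch above, bufend == buf + 125,
 * so task_group_path() writes at most 124 path characters plus a NUL; the
 * "..." copied to buf + 124 sits behind that NUL and only becomes visible
 * when the path really filled the buffer, which is what "matches the given
 * buffer size" means in the comment. */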
 523
 524static void
 525print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
 526{
 527	if (task_current(rq, p))
 528		SEQ_printf(m, ">R");
 529	else
 530		SEQ_printf(m, " %c", task_state_to_char(p));
 531
 532	SEQ_printf(m, " %15s %5d %9Ld.%06ld %9Ld %5d ",
 533		p->comm, task_pid_nr(p),
 534		SPLIT_NS(p->se.vruntime),
 535		(long long)(p->nvcsw + p->nivcsw),
 536		p->prio);
 537
 538	SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld",
 539		SPLIT_NS(schedstat_val_or_zero(p->se.statistics.wait_sum)),
 540		SPLIT_NS(p->se.sum_exec_runtime),
 541		SPLIT_NS(schedstat_val_or_zero(p->se.statistics.sum_sleep_runtime)));
 542
 543#ifdef CONFIG_NUMA_BALANCING
 544	SEQ_printf(m, " %d %d", task_node(p), task_numa_group_id(p));
 545#endif
 546#ifdef CONFIG_CGROUP_SCHED
 547	SEQ_printf_task_group_path(m, task_group(p), " %s")
 548#endif
 549
 550	SEQ_printf(m, "\n");
 551}
 552
 553static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
 554{
 555	struct task_struct *g, *p;
 556
 557	SEQ_printf(m, "\n");
 558	SEQ_printf(m, "runnable tasks:\n");
 559	SEQ_printf(m, " S            task   PID         tree-key  switches  prio"
 560		   "     wait-time             sum-exec        sum-sleep\n");
 561	SEQ_printf(m, "-------------------------------------------------------"
 562		   "------------------------------------------------------\n");
 563
 564	rcu_read_lock();
 565	for_each_process_thread(g, p) {
 566		if (task_cpu(p) != rq_cpu)
 567			continue;
 568
 569		print_task(m, rq, p);
 570	}
 571	rcu_read_unlock();
 572}
 573
 574void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 575{
 576	s64 MIN_vruntime = -1, min_vruntime, max_vruntime = -1,
 577		spread, rq0_min_vruntime, spread0;
 578	struct rq *rq = cpu_rq(cpu);
 579	struct sched_entity *last;
 580	unsigned long flags;
 581
 582#ifdef CONFIG_FAIR_GROUP_SCHED
 583	SEQ_printf(m, "\n");
 584	SEQ_printf_task_group_path(m, cfs_rq->tg, "cfs_rq[%d]:%s\n", cpu);
 585#else
 586	SEQ_printf(m, "\n");
 587	SEQ_printf(m, "cfs_rq[%d]:\n", cpu);
 588#endif
 589	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "exec_clock",
 590			SPLIT_NS(cfs_rq->exec_clock));
 591
 592	raw_spin_rq_lock_irqsave(rq, flags);
 593	if (rb_first_cached(&cfs_rq->tasks_timeline))
 594		MIN_vruntime = (__pick_first_entity(cfs_rq))->vruntime;
 595	last = __pick_last_entity(cfs_rq);
 596	if (last)
 597		max_vruntime = last->vruntime;
 598	min_vruntime = cfs_rq->min_vruntime;
 599	rq0_min_vruntime = cpu_rq(0)->cfs.min_vruntime;
 600	raw_spin_rq_unlock_irqrestore(rq, flags);
 601	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "MIN_vruntime",
 602			SPLIT_NS(MIN_vruntime));
 603	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "min_vruntime",
 604			SPLIT_NS(min_vruntime));
 605	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "max_vruntime",
 606			SPLIT_NS(max_vruntime));
 607	spread = max_vruntime - MIN_vruntime;
 608	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread",
 609			SPLIT_NS(spread));
 610	spread0 = min_vruntime - rq0_min_vruntime;
 611	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread0",
 612			SPLIT_NS(spread0));
 613	SEQ_printf(m, "  .%-30s: %d\n", "nr_spread_over",
 614			cfs_rq->nr_spread_over);
 615	SEQ_printf(m, "  .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
 616	SEQ_printf(m, "  .%-30s: %ld\n", "load", cfs_rq->load.weight);
 617#ifdef CONFIG_SMP
 618	SEQ_printf(m, "  .%-30s: %lu\n", "load_avg",
 619			cfs_rq->avg.load_avg);
 620	SEQ_printf(m, "  .%-30s: %lu\n", "runnable_avg",
 621			cfs_rq->avg.runnable_avg);
 622	SEQ_printf(m, "  .%-30s: %lu\n", "util_avg",
 623			cfs_rq->avg.util_avg);
 624	SEQ_printf(m, "  .%-30s: %u\n", "util_est_enqueued",
 625			cfs_rq->avg.util_est.enqueued);
 626	SEQ_printf(m, "  .%-30s: %ld\n", "removed.load_avg",
 627			cfs_rq->removed.load_avg);
 628	SEQ_printf(m, "  .%-30s: %ld\n", "removed.util_avg",
 629			cfs_rq->removed.util_avg);
 630	SEQ_printf(m, "  .%-30s: %ld\n", "removed.runnable_avg",
 631			cfs_rq->removed.runnable_avg);
 632#ifdef CONFIG_FAIR_GROUP_SCHED
 633	SEQ_printf(m, "  .%-30s: %lu\n", "tg_load_avg_contrib",
 634			cfs_rq->tg_load_avg_contrib);
 635	SEQ_printf(m, "  .%-30s: %ld\n", "tg_load_avg",
 636			atomic_long_read(&cfs_rq->tg->load_avg));
 637#endif
 638#endif
 639#ifdef CONFIG_CFS_BANDWIDTH
 640	SEQ_printf(m, "  .%-30s: %d\n", "throttled",
 641			cfs_rq->throttled);
 642	SEQ_printf(m, "  .%-30s: %d\n", "throttle_count",
 643			cfs_rq->throttle_count);
 644#endif
 645
 646#ifdef CONFIG_FAIR_GROUP_SCHED
 647	print_cfs_group_stats(m, cpu, cfs_rq->tg);
 648#endif
 649}
 650
 651void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
 652{
 653#ifdef CONFIG_RT_GROUP_SCHED
 654	SEQ_printf(m, "\n");
 655	SEQ_printf_task_group_path(m, rt_rq->tg, "rt_rq[%d]:%s\n", cpu);
 656#else
 657	SEQ_printf(m, "\n");
 658	SEQ_printf(m, "rt_rq[%d]:\n", cpu);
 659#endif
 660
 661#define P(x) \
 662	SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rt_rq->x))
 663#define PU(x) \
 664	SEQ_printf(m, "  .%-30s: %lu\n", #x, (unsigned long)(rt_rq->x))
 665#define PN(x) \
 666	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rt_rq->x))
 667
 668	PU(rt_nr_running);
 669#ifdef CONFIG_SMP
 670	PU(rt_nr_migratory);
 671#endif
 672	P(rt_throttled);
 673	PN(rt_time);
 674	PN(rt_runtime);
 675
 676#undef PN
 677#undef PU
 678#undef P
 679}
 680
 681void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq)
 682{
 683	struct dl_bw *dl_bw;
 684
 685	SEQ_printf(m, "\n");
 686	SEQ_printf(m, "dl_rq[%d]:\n", cpu);
 687
 688#define PU(x) \
 689	SEQ_printf(m, "  .%-30s: %lu\n", #x, (unsigned long)(dl_rq->x))
 690
 691	PU(dl_nr_running);
 692#ifdef CONFIG_SMP
 693	PU(dl_nr_migratory);
 694	dl_bw = &cpu_rq(cpu)->rd->dl_bw;
 695#else
 696	dl_bw = &dl_rq->dl_bw;
 697#endif
 698	SEQ_printf(m, "  .%-30s: %lld\n", "dl_bw->bw", dl_bw->bw);
 699	SEQ_printf(m, "  .%-30s: %lld\n", "dl_bw->total_bw", dl_bw->total_bw);
 700
 701#undef PU
 702}
 703
 704static void print_cpu(struct seq_file *m, int cpu)
 705{
 706	struct rq *rq = cpu_rq(cpu);
 707
 708#ifdef CONFIG_X86
 709	{
 710		unsigned int freq = cpu_khz ? : 1;
 711
 712		SEQ_printf(m, "cpu#%d, %u.%03u MHz\n",
 713			   cpu, freq / 1000, (freq % 1000));
 714	}
 715#else
 716	SEQ_printf(m, "cpu#%d\n", cpu);
 717#endif
 718
 719#define P(x)								\
 720do {									\
 721	if (sizeof(rq->x) == 4)						\
 722		SEQ_printf(m, "  .%-30s: %ld\n", #x, (long)(rq->x));	\
 723	else								\
 724		SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rq->x));\
 725} while (0)
 726
 727#define PN(x) \
 728	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rq->x))
 729
 730	P(nr_running);
 731	P(nr_switches);
 732	P(nr_uninterruptible);
 733	PN(next_balance);
 734	SEQ_printf(m, "  .%-30s: %ld\n", "curr->pid", (long)(task_pid_nr(rq->curr)));
 735	PN(clock);
 736	PN(clock_task);
 737#undef P
 738#undef PN
 739
 740#ifdef CONFIG_SMP
 741#define P64(n) SEQ_printf(m, "  .%-30s: %Ld\n", #n, rq->n);
 742	P64(avg_idle);
 743	P64(max_idle_balance_cost);
 744#undef P64
 745#endif
 746
 747#define P(n) SEQ_printf(m, "  .%-30s: %d\n", #n, schedstat_val(rq->n));
 748	if (schedstat_enabled()) {
 749		P(yld_count);
 750		P(sched_count);
 751		P(sched_goidle);
 752		P(ttwu_count);
 753		P(ttwu_local);
 754	}
 755#undef P
 756
 757	print_cfs_stats(m, cpu);
 758	print_rt_stats(m, cpu);
 759	print_dl_stats(m, cpu);
 760
 761	print_rq(m, rq, cpu);
 762	SEQ_printf(m, "\n");
 763}
 764
 765static const char *sched_tunable_scaling_names[] = {
 766	"none",
 767	"logarithmic",
 768	"linear"
 769};
 770
 771static void sched_debug_header(struct seq_file *m)
 772{
 773	u64 ktime, sched_clk, cpu_clk;
 774	unsigned long flags;
 775
 776	local_irq_save(flags);
 777	ktime = ktime_to_ns(ktime_get());
 778	sched_clk = sched_clock();
 779	cpu_clk = local_clock();
 780	local_irq_restore(flags);
 781
 782	SEQ_printf(m, "Sched Debug Version: v0.11, %s %.*s\n",
 783		init_utsname()->release,
 784		(int)strcspn(init_utsname()->version, " "),
 785		init_utsname()->version);
 786
 787#define P(x) \
 788	SEQ_printf(m, "%-40s: %Ld\n", #x, (long long)(x))
 789#define PN(x) \
 790	SEQ_printf(m, "%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
 791	PN(ktime);
 792	PN(sched_clk);
 793	PN(cpu_clk);
 794	P(jiffies);
 795#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
 796	P(sched_clock_stable());
 797#endif
 798#undef PN
 799#undef P
 800
 801	SEQ_printf(m, "\n");
 802	SEQ_printf(m, "sysctl_sched\n");
 803
 804#define P(x) \
 805	SEQ_printf(m, "  .%-40s: %Ld\n", #x, (long long)(x))
 806#define PN(x) \
 807	SEQ_printf(m, "  .%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
 808	PN(sysctl_sched_latency);
 809	PN(sysctl_sched_min_granularity);
 810	PN(sysctl_sched_wakeup_granularity);
 811	P(sysctl_sched_child_runs_first);
 812	P(sysctl_sched_features);
 813#undef PN
 814#undef P
 815
 816	SEQ_printf(m, "  .%-40s: %d (%s)\n",
 817		"sysctl_sched_tunable_scaling",
 818		sysctl_sched_tunable_scaling,
 819		sched_tunable_scaling_names[sysctl_sched_tunable_scaling]);
 820	SEQ_printf(m, "\n");
 821}
 822
 823static int sched_debug_show(struct seq_file *m, void *v)
 824{
 825	int cpu = (unsigned long)(v - 2);
 826
 827	if (cpu != -1)
 828		print_cpu(m, cpu);
 829	else
 830		sched_debug_header(m);
 831
 832	return 0;
 833}
 834
 835void sysrq_sched_debug_show(void)
 836{
 837	int cpu;
 838
 839	sched_debug_header(NULL);
 840	for_each_online_cpu(cpu) {
 841		/*
 842		 * Need to reset softlockup watchdogs on all CPUs, because
 843		 * another CPU might be blocked waiting for us to process
 844		 * an IPI or stop_machine.
 845		 */
 846		touch_nmi_watchdog();
 847		touch_all_softlockup_watchdogs();
 848		print_cpu(NULL, cpu);
 849	}
 850}
 851
 852/*
 853 * This iterator needs some explanation.
 854 * It returns 1 for the header position.
 855 * This means 2 is CPU 0.
 856 * In a hotplugged system some CPUs, including CPU 0, may be missing so we have
 857 * to use cpumask_* to iterate over the CPUs.
 858 */
 859static void *sched_debug_start(struct seq_file *file, loff_t *offset)
 860{
 861	unsigned long n = *offset;
 862
 863	if (n == 0)
 864		return (void *) 1;
 865
 866	n--;
 867
 868	if (n > 0)
 869		n = cpumask_next(n - 1, cpu_online_mask);
 870	else
 871		n = cpumask_first(cpu_online_mask);
 872
 873	*offset = n + 1;
 874
 875	if (n < nr_cpu_ids)
 876		return (void *)(unsigned long)(n + 2);
 877
 878	return NULL;
 879}
 880
 881static void *sched_debug_next(struct seq_file *file, void *data, loff_t *offset)
 882{
 883	(*offset)++;
 884	return sched_debug_start(file, offset);
 885}
 886
 887static void sched_debug_stop(struct seq_file *file, void *data)
 888{
 889}
 890
 891static const struct seq_operations sched_debug_sops = {
 892	.start		= sched_debug_start,
 893	.next		= sched_debug_next,
 894	.stop		= sched_debug_stop,
 895	.show		= sched_debug_show,
 896};
 897
 898#define __PS(S, F) SEQ_printf(m, "%-45s:%21Ld\n", S, (long long)(F))
 899#define __P(F) __PS(#F, F)
 900#define   P(F) __PS(#F, p->F)
 901#define   PM(F, M) __PS(#F, p->F & (M))
 902#define __PSN(S, F) SEQ_printf(m, "%-45s:%14Ld.%06ld\n", S, SPLIT_NS((long long)(F)))
 903#define __PN(F) __PSN(#F, F)
 904#define   PN(F) __PSN(#F, p->F)
 905
 906
 907#ifdef CONFIG_NUMA_BALANCING
 908void print_numa_stats(struct seq_file *m, int node, unsigned long tsf,
 909		unsigned long tpf, unsigned long gsf, unsigned long gpf)
 910{
 911	SEQ_printf(m, "numa_faults node=%d ", node);
 912	SEQ_printf(m, "task_private=%lu task_shared=%lu ", tpf, tsf);
 913	SEQ_printf(m, "group_private=%lu group_shared=%lu\n", gpf, gsf);
 914}
 915#endif
 916
 917
 918static void sched_show_numa(struct task_struct *p, struct seq_file *m)
 919{
 920#ifdef CONFIG_NUMA_BALANCING
 921	struct mempolicy *pol;
 922
 923	if (p->mm)
 924		P(mm->numa_scan_seq);
 925
 926	task_lock(p);
 927	pol = p->mempolicy;
 928	if (pol && !(pol->flags & MPOL_F_MORON))
 929		pol = NULL;
 930	mpol_get(pol);
 931	task_unlock(p);
 932
 933	P(numa_pages_migrated);
 934	P(numa_preferred_nid);
 935	P(total_numa_faults);
 936	SEQ_printf(m, "current_node=%d, numa_group_id=%d\n",
 937			task_node(p), task_numa_group_id(p));
 938	show_numa_stats(p, m);
 939	mpol_put(pol);
 940#endif
 941}
 942
 943void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
 944						  struct seq_file *m)
 945{
 946	unsigned long nr_switches;
 947
 948	SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, task_pid_nr_ns(p, ns),
 949						get_nr_threads(p));
 950	SEQ_printf(m,
 951		"---------------------------------------------------------"
 952		"----------\n");
 953
 954#define P_SCHEDSTAT(F)  __PS(#F, schedstat_val(p->F))
 955#define PN_SCHEDSTAT(F) __PSN(#F, schedstat_val(p->F))
 956
 957	PN(se.exec_start);
 958	PN(se.vruntime);
 959	PN(se.sum_exec_runtime);
 960
 961	nr_switches = p->nvcsw + p->nivcsw;
 962
 963	P(se.nr_migrations);
 964
 965	if (schedstat_enabled()) {
 966		u64 avg_atom, avg_per_cpu;
 967
 968		PN_SCHEDSTAT(se.statistics.sum_sleep_runtime);
 969		PN_SCHEDSTAT(se.statistics.wait_start);
 970		PN_SCHEDSTAT(se.statistics.sleep_start);
 971		PN_SCHEDSTAT(se.statistics.block_start);
 972		PN_SCHEDSTAT(se.statistics.sleep_max);
 973		PN_SCHEDSTAT(se.statistics.block_max);
 974		PN_SCHEDSTAT(se.statistics.exec_max);
 975		PN_SCHEDSTAT(se.statistics.slice_max);
 976		PN_SCHEDSTAT(se.statistics.wait_max);
 977		PN_SCHEDSTAT(se.statistics.wait_sum);
 978		P_SCHEDSTAT(se.statistics.wait_count);
 979		PN_SCHEDSTAT(se.statistics.iowait_sum);
 980		P_SCHEDSTAT(se.statistics.iowait_count);
 981		P_SCHEDSTAT(se.statistics.nr_migrations_cold);
 982		P_SCHEDSTAT(se.statistics.nr_failed_migrations_affine);
 983		P_SCHEDSTAT(se.statistics.nr_failed_migrations_running);
 984		P_SCHEDSTAT(se.statistics.nr_failed_migrations_hot);
 985		P_SCHEDSTAT(se.statistics.nr_forced_migrations);
 986		P_SCHEDSTAT(se.statistics.nr_wakeups);
 987		P_SCHEDSTAT(se.statistics.nr_wakeups_sync);
 988		P_SCHEDSTAT(se.statistics.nr_wakeups_migrate);
 989		P_SCHEDSTAT(se.statistics.nr_wakeups_local);
 990		P_SCHEDSTAT(se.statistics.nr_wakeups_remote);
 991		P_SCHEDSTAT(se.statistics.nr_wakeups_affine);
 992		P_SCHEDSTAT(se.statistics.nr_wakeups_affine_attempts);
 993		P_SCHEDSTAT(se.statistics.nr_wakeups_passive);
 994		P_SCHEDSTAT(se.statistics.nr_wakeups_idle);
 995
 996		avg_atom = p->se.sum_exec_runtime;
 997		if (nr_switches)
 998			avg_atom = div64_ul(avg_atom, nr_switches);
 999		else
1000			avg_atom = -1LL;
1001
1002		avg_per_cpu = p->se.sum_exec_runtime;
1003		if (p->se.nr_migrations) {
1004			avg_per_cpu = div64_u64(avg_per_cpu,
1005						p->se.nr_migrations);
1006		} else {
1007			avg_per_cpu = -1LL;
1008		}
1009
1010		__PN(avg_atom);
1011		__PN(avg_per_cpu);
1012	}
1013
1014	__P(nr_switches);
1015	__PS("nr_voluntary_switches", p->nvcsw);
1016	__PS("nr_involuntary_switches", p->nivcsw);
1017
1018	P(se.load.weight);
1019#ifdef CONFIG_SMP
1020	P(se.avg.load_sum);
1021	P(se.avg.runnable_sum);
1022	P(se.avg.util_sum);
1023	P(se.avg.load_avg);
1024	P(se.avg.runnable_avg);
1025	P(se.avg.util_avg);
1026	P(se.avg.last_update_time);
1027	P(se.avg.util_est.ewma);
1028	PM(se.avg.util_est.enqueued, ~UTIL_AVG_UNCHANGED);
1029#endif
1030#ifdef CONFIG_UCLAMP_TASK
1031	__PS("uclamp.min", p->uclamp_req[UCLAMP_MIN].value);
1032	__PS("uclamp.max", p->uclamp_req[UCLAMP_MAX].value);
1033	__PS("effective uclamp.min", uclamp_eff_value(p, UCLAMP_MIN));
1034	__PS("effective uclamp.max", uclamp_eff_value(p, UCLAMP_MAX));
1035#endif
1036	P(policy);
1037	P(prio);
1038	if (task_has_dl_policy(p)) {
1039		P(dl.runtime);
1040		P(dl.deadline);
1041	}
1042#undef PN_SCHEDSTAT
1043#undef P_SCHEDSTAT
1044
1045	{
1046		unsigned int this_cpu = raw_smp_processor_id();
1047		u64 t0, t1;
1048
1049		t0 = cpu_clock(this_cpu);
1050		t1 = cpu_clock(this_cpu);
1051		__PS("clock-delta", t1-t0);
1052	}
1053
1054	sched_show_numa(p, m);
1055}
1056
1057void proc_sched_set_task(struct task_struct *p)
1058{
1059#ifdef CONFIG_SCHEDSTATS
1060	memset(&p->se.statistics, 0, sizeof(p->se.statistics));
1061#endif
1062}
1063
1064void resched_latency_warn(int cpu, u64 latency)
1065{
1066	static DEFINE_RATELIMIT_STATE(latency_check_ratelimit, 60 * 60 * HZ, 1);
1067
1068	WARN(__ratelimit(&latency_check_ratelimit),
1069	     "sched: CPU %d need_resched set for > %llu ns (%d ticks) "
1070	     "without schedule\n",
1071	     cpu, latency, cpu_rq(cpu)->ticks_without_resched);
1072}
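/* Editor's note: DEFINE_RATELIMIT_STATE(..., 60 * 60 * HZ, 1) means an
 * interval of one hour (in jiffies) with a burst of 1, and WARN() only
 * fires while __ratelimit() returns nonzero, so this warning is emitted at
 * most once per hour. */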