/*
 * Scraped page chrome (not part of the kernel source):
 *   "Linux Audio" / "Check our new training course" / "Loading..." / "v6.13.7"
 * The code below is kernel/sched/stats.c as of v6.13.7.
 */
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * /proc/schedstat implementation
  4 */
  5
  6void __update_stats_wait_start(struct rq *rq, struct task_struct *p,
  7			       struct sched_statistics *stats)
  8{
  9	u64 wait_start, prev_wait_start;
 10
 11	wait_start = rq_clock(rq);
 12	prev_wait_start = schedstat_val(stats->wait_start);
 13
 14	if (p && likely(wait_start > prev_wait_start))
 15		wait_start -= prev_wait_start;
 16
 17	__schedstat_set(stats->wait_start, wait_start);
 18}
 19
 20void __update_stats_wait_end(struct rq *rq, struct task_struct *p,
 21			     struct sched_statistics *stats)
 22{
 23	u64 delta = rq_clock(rq) - schedstat_val(stats->wait_start);
 24
 25	if (p) {
 26		if (task_on_rq_migrating(p)) {
 27			/*
 28			 * Preserve migrating task's wait time so wait_start
 29			 * time stamp can be adjusted to accumulate wait time
 30			 * prior to migration.
 31			 */
 32			__schedstat_set(stats->wait_start, delta);
 33
 34			return;
 35		}
 36
 37		trace_sched_stat_wait(p, delta);
 38	}
 39
 40	__schedstat_set(stats->wait_max,
 41			max(schedstat_val(stats->wait_max), delta));
 42	__schedstat_inc(stats->wait_count);
 43	__schedstat_add(stats->wait_sum, delta);
 44	__schedstat_set(stats->wait_start, 0);
 45}
 46
/*
 * Account sleep and block time for a task being (re-)enqueued.
 *
 * A non-zero sleep_start/block_start stamp marks when the entity went to
 * sleep / was blocked; both stamps are cleared here after the elapsed
 * time is folded into the statistics.  @p may be NULL (group entities);
 * in that case only the raw counters are updated, with no tracepoints or
 * latency accounting.
 */
void __update_stats_enqueue_sleeper(struct rq *rq, struct task_struct *p,
				    struct sched_statistics *stats)
{
	u64 sleep_start, block_start;

	sleep_start = schedstat_val(stats->sleep_start);
	block_start = schedstat_val(stats->block_start);

	if (sleep_start) {
		u64 delta = rq_clock(rq) - sleep_start;

		/* Defensively clamp an apparently negative interval to 0. */
		if ((s64)delta < 0)
			delta = 0;

		if (unlikely(delta > schedstat_val(stats->sleep_max)))
			__schedstat_set(stats->sleep_max, delta);

		__schedstat_set(stats->sleep_start, 0);
		__schedstat_add(stats->sum_sleep_runtime, delta);

		if (p) {
			/* delta >> 10: ns -> ~us for latencytop. */
			account_scheduler_latency(p, delta >> 10, 1);
			trace_sched_stat_sleep(p, delta);
		}
	}

	if (block_start) {
		u64 delta = rq_clock(rq) - block_start;

		/* Same clamp as above for the blocked interval. */
		if ((s64)delta < 0)
			delta = 0;

		if (unlikely(delta > schedstat_val(stats->block_max)))
			__schedstat_set(stats->block_max, delta);

		__schedstat_set(stats->block_start, 0);
		/* Blocked time counts toward total sleep as well. */
		__schedstat_add(stats->sum_sleep_runtime, delta);
		__schedstat_add(stats->sum_block_runtime, delta);

		if (p) {
			/* Track iowait separately when blocked on I/O. */
			if (p->in_iowait) {
				__schedstat_add(stats->iowait_sum, delta);
				__schedstat_inc(stats->iowait_count);
				trace_sched_stat_iowait(p, delta);
			}

			trace_sched_stat_blocked(p, delta);

			account_scheduler_latency(p, delta >> 10, 0);
		}
	}
}
 99
100/*
101 * Current schedstat API version.
102 *
103 * Bump this up when changing the output format or the meaning of an existing
104 * format, so that tools can adapt (or abort)
105 */
106#define SCHEDSTAT_VERSION 16
107
/*
 * Emit one /proc/schedstat record.
 *
 * @v is the cookie produced by schedstat_start()/schedstat_next():
 * (void *)1 selects the header (version + timestamp); any other value
 * encodes cpu + 2 and selects that CPU's runqueue (and, on SMP, its
 * sched-domain) statistics.  The output format is ABI for userspace
 * tools; see SCHEDSTAT_VERSION above.
 */
static int show_schedstat(struct seq_file *seq, void *v)
{
	int cpu;

	if (v == (void *)1) {
		seq_printf(seq, "version %d\n", SCHEDSTAT_VERSION);
		seq_printf(seq, "timestamp %lu\n", jiffies);
	} else {
		struct rq *rq;
#ifdef CONFIG_SMP
		struct sched_domain *sd;
		int dcount = 0;
#endif
		cpu = (unsigned long)(v - 2);	/* undo the +2 from the iterator */
		rq = cpu_rq(cpu);

		/* runqueue-specific stats */
		seq_printf(seq,
		    "cpu%d %u 0 %u %u %u %u %llu %llu %lu",
		    cpu, rq->yld_count,
		    rq->sched_count, rq->sched_goidle,
		    rq->ttwu_count, rq->ttwu_local,
		    rq->rq_cpu_time,
		    rq->rq_sched_info.run_delay, rq->rq_sched_info.pcount);

		seq_printf(seq, "\n");

#ifdef CONFIG_SMP
		/* domain-specific stats */
		rcu_read_lock();
		for_each_domain(cpu, sd) {
			enum cpu_idle_type itype;

			/* %*pb prints the domain's CPU span as a bitmask. */
			seq_printf(seq, "domain%d %*pb", dcount++,
				   cpumask_pr_args(sched_domain_span(sd)));
			for (itype = 0; itype < CPU_MAX_IDLE_TYPES; itype++) {
				seq_printf(seq, " %u %u %u %u %u %u %u %u",
				    sd->lb_count[itype],
				    sd->lb_balanced[itype],
				    sd->lb_failed[itype],
				    sd->lb_imbalance[itype],
				    sd->lb_gained[itype],
				    sd->lb_hot_gained[itype],
				    sd->lb_nobusyq[itype],
				    sd->lb_nobusyg[itype]);
			}
			seq_printf(seq,
				   " %u %u %u %u %u %u %u %u %u %u %u %u\n",
			    sd->alb_count, sd->alb_failed, sd->alb_pushed,
			    sd->sbe_count, sd->sbe_balanced, sd->sbe_pushed,
			    sd->sbf_count, sd->sbf_balanced, sd->sbf_pushed,
			    sd->ttwu_wake_remote, sd->ttwu_move_affine,
			    sd->ttwu_move_balance);
		}
		rcu_read_unlock();
#endif
	}
	return 0;
}
167
168/*
169 * This iterator needs some explanation.
170 * It returns 1 for the header position.
171 * This means 2 is cpu 0.
172 * In a hotplugged system some CPUs, including cpu 0, may be missing so we have
173 * to use cpumask_* to iterate over the CPUs.
174 */
/*
 * This iterator needs some explanation.
 * It returns 1 for the header position.
 * This means 2 is cpu 0.
 * In a hotplugged system some CPUs, including cpu 0, may be missing so we have
 * to use cpumask_* to iterate over the CPUs.
 *
 * Mapping: *offset 0 -> header cookie (void *)1; *offset k (k >= 1) ->
 * the k-th online CPU, returned as cpu + 2 so the cookie is never NULL
 * or 1.  *offset is rewritten to cpu + 1 so the next call resumes the
 * online-mask walk after the CPU just returned.
 */
static void *schedstat_start(struct seq_file *file, loff_t *offset)
{
	unsigned long n = *offset;

	if (n == 0)
		return (void *) 1;

	n--;	/* n is now a 0-based position into the online CPUs */

	if (n > 0)
		/* resume scanning after the previously returned CPU */
		n = cpumask_next(n - 1, cpu_online_mask);
	else
		n = cpumask_first(cpu_online_mask);

	*offset = n + 1;

	/* cpumask_next/first return >= nr_cpu_ids when the mask is done */
	if (n < nr_cpu_ids)
		return (void *)(unsigned long)(n + 2);

	return NULL;
}
196
197static void *schedstat_next(struct seq_file *file, void *data, loff_t *offset)
198{
199	(*offset)++;
200
201	return schedstat_start(file, offset);
202}
203
/* Nothing to release: the iterator holds no locks or allocations. */
static void schedstat_stop(struct seq_file *file, void *data)
{
}
207
/* seq_file operations backing /proc/schedstat. */
static const struct seq_operations schedstat_sops = {
	.start = schedstat_start,
	.next  = schedstat_next,
	.stop  = schedstat_stop,
	.show  = show_schedstat,
};
214
/* Register /proc/schedstat (read-only, world-readable: mode 0 -> 0444). */
static int __init proc_schedstat_init(void)
{
	proc_create_seq("schedstat", 0, NULL, &schedstat_sops);
	return 0;
}
subsys_initcall(proc_schedstat_init);
/*
 * "v3.5.6" — scraper marker: everything below is an OLDER copy of this
 * file (kernel/sched/stats.c as of v3.5.6) appended by the page scrape.
 * It duplicates SCHEDSTAT_VERSION and several definitions above.
 */
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  1
  2#include <linux/slab.h>
  3#include <linux/fs.h>
  4#include <linux/seq_file.h>
  5#include <linux/proc_fs.h>
  6
  7#include "sched.h"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  8
  9/*
 10 * bump this up when changing the output format or the meaning of an existing
 
 
 11 * format, so that tools can adapt (or abort)
 12 */
 13#define SCHEDSTAT_VERSION 15
 14
/*
 * Render the whole of /proc/schedstat in one pass (this version predates
 * the per-CPU seq_file iterator): header first, then one "cpu%d" line
 * per online CPU, followed on SMP by one "domain%d" line per sched
 * domain.  The output format is ABI for userspace tools.
 */
static int show_schedstat(struct seq_file *seq, void *v)
{
	int cpu;
	/* 9 chars per 32-bit word ("%08x," incl. separator) + NUL slack */
	int mask_len = DIV_ROUND_UP(NR_CPUS, 32) * 9;
	char *mask_str = kmalloc(mask_len, GFP_KERNEL);

	if (mask_str == NULL)
		return -ENOMEM;

	seq_printf(seq, "version %d\n", SCHEDSTAT_VERSION);
	seq_printf(seq, "timestamp %lu\n", jiffies);
	for_each_online_cpu(cpu) {
		struct rq *rq = cpu_rq(cpu);
#ifdef CONFIG_SMP
		struct sched_domain *sd;
		int dcount = 0;
#endif

		/* runqueue-specific stats */
		seq_printf(seq,
		    "cpu%d %u 0 %u %u %u %u %llu %llu %lu",
		    cpu, rq->yld_count,
		    rq->sched_count, rq->sched_goidle,
		    rq->ttwu_count, rq->ttwu_local,
		    rq->rq_cpu_time,
		    rq->rq_sched_info.run_delay, rq->rq_sched_info.pcount);

		seq_printf(seq, "\n");

#ifdef CONFIG_SMP
		/* domain-specific stats */
		rcu_read_lock();
		for_each_domain(cpu, sd) {
			enum cpu_idle_type itype;

			/* format the domain's CPU span into mask_str */
			cpumask_scnprintf(mask_str, mask_len,
					  sched_domain_span(sd));
			seq_printf(seq, "domain%d %s", dcount++, mask_str);
			for (itype = CPU_IDLE; itype < CPU_MAX_IDLE_TYPES;
					itype++) {
				seq_printf(seq, " %u %u %u %u %u %u %u %u",
				    sd->lb_count[itype],
				    sd->lb_balanced[itype],
				    sd->lb_failed[itype],
				    sd->lb_imbalance[itype],
				    sd->lb_gained[itype],
				    sd->lb_hot_gained[itype],
				    sd->lb_nobusyq[itype],
				    sd->lb_nobusyg[itype]);
			}
			seq_printf(seq,
				   " %u %u %u %u %u %u %u %u %u %u %u %u\n",
			    sd->alb_count, sd->alb_failed, sd->alb_pushed,
			    sd->sbe_count, sd->sbe_balanced, sd->sbe_pushed,
			    sd->sbf_count, sd->sbf_balanced, sd->sbf_pushed,
			    sd->ttwu_wake_remote, sd->ttwu_move_affine,
			    sd->ttwu_move_balance);
		}
		rcu_read_unlock();
#endif
	}
	kfree(mask_str);
	return 0;
}
 79
/*
 * Open /proc/schedstat via single_open(), but pre-size the seq_file
 * buffer so the whole report fits in one pass: single_open() would
 * otherwise start at one page and retry with doubled buffers, and the
 * output grows with the number of online CPUs.  The heuristic adds one
 * extra page per 32 CPUs — presumably sized for the per-CPU/domain line
 * lengths above; NOTE(review): confirm it still holds if those lines grow.
 */
static int schedstat_open(struct inode *inode, struct file *file)
{
	unsigned int size = PAGE_SIZE * (1 + num_online_cpus() / 32);
	char *buf = kmalloc(size, GFP_KERNEL);
	struct seq_file *m;
	int res;

	if (!buf)
		return -ENOMEM;
	res = single_open(file, show_schedstat, NULL);
	if (!res) {
		/* hand our larger buffer to the seq_file we just opened */
		m = file->private_data;
		m->buf = buf;
		m->size = size;
	} else
		kfree(buf);	/* open failed: we still own the buffer */
	return res;
}
 98
/* file_operations backing /proc/schedstat (single_open-based). */
static const struct file_operations proc_schedstat_operations = {
	.open    = schedstat_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};
105
/* Register /proc/schedstat (read-only; mode 0 defaults to 0444). */
static int __init proc_schedstat_init(void)
{
	proc_create("schedstat", 0, NULL, &proc_schedstat_operations);
	return 0;
}
module_init(proc_schedstat_init);