1// SPDX-License-Identifier: GPL-2.0
2/*
3 * /proc/schedstat implementation
4 */
5#include "sched.h"
6
7/*
8 * Current schedstat API version.
9 *
10 * Bump this up when changing the output format or the meaning of an existing
11 * format, so that tools can adapt (or abort)
12 */
13#define SCHEDSTAT_VERSION 15
14
/*
 * seq_file ->show handler for /proc/schedstat.
 *
 * v == (void *)1 is the header position (version + timestamp); any other
 * value is an iterator cookie encoding "cpu + 2", as produced by
 * schedstat_start()/schedstat_next() below.
 */
static int show_schedstat(struct seq_file *seq, void *v)
{
	int cpu;

	if (v == (void *)1) {
		/* Header: version first so tools can adapt (or abort). */
		seq_printf(seq, "version %d\n", SCHEDSTAT_VERSION);
		seq_printf(seq, "timestamp %lu\n", jiffies);
	} else {
		struct rq *rq;
#ifdef CONFIG_SMP
		struct sched_domain *sd;
		int dcount = 0;
#endif
		/* Decode the cpu number from the iterator cookie (cpu + 2). */
		cpu = (unsigned long)(v - 2);
		rq = cpu_rq(cpu);

		/* runqueue-specific stats */
		seq_printf(seq,
		    "cpu%d %u 0 %u %u %u %u %llu %llu %lu",
		    cpu, rq->yld_count,
		    rq->sched_count, rq->sched_goidle,
		    rq->ttwu_count, rq->ttwu_local,
		    rq->rq_cpu_time,
		    rq->rq_sched_info.run_delay, rq->rq_sched_info.pcount);

		seq_printf(seq, "\n");

#ifdef CONFIG_SMP
		/* domain-specific stats, one line per sched domain */
		rcu_read_lock();
		for_each_domain(cpu, sd) {
			enum cpu_idle_type itype;

			seq_printf(seq, "domain%d %*pb", dcount++,
				   cpumask_pr_args(sched_domain_span(sd)));
			/* Eight load-balance counters per idle type. */
			for (itype = CPU_IDLE; itype < CPU_MAX_IDLE_TYPES;
					itype++) {
				seq_printf(seq, " %u %u %u %u %u %u %u %u",
				    sd->lb_count[itype],
				    sd->lb_balanced[itype],
				    sd->lb_failed[itype],
				    sd->lb_imbalance[itype],
				    sd->lb_gained[itype],
				    sd->lb_hot_gained[itype],
				    sd->lb_nobusyq[itype],
				    sd->lb_nobusyg[itype]);
			}
			/* Active-balance / wakeup-placement counters. */
			seq_printf(seq,
				   " %u %u %u %u %u %u %u %u %u %u %u %u\n",
			    sd->alb_count, sd->alb_failed, sd->alb_pushed,
			    sd->sbe_count, sd->sbe_balanced, sd->sbe_pushed,
			    sd->sbf_count, sd->sbf_balanced, sd->sbf_pushed,
			    sd->ttwu_wake_remote, sd->ttwu_move_affine,
			    sd->ttwu_move_balance);
		}
		rcu_read_unlock();
#endif
	}
	return 0;
}
75
76/*
77 * This itererator needs some explanation.
78 * It returns 1 for the header position.
79 * This means 2 is cpu 0.
80 * In a hotplugged system some CPUs, including cpu 0, may be missing so we have
81 * to use cpumask_* to iterate over the CPUs.
82 */
83static void *schedstat_start(struct seq_file *file, loff_t *offset)
84{
85 unsigned long n = *offset;
86
87 if (n == 0)
88 return (void *) 1;
89
90 n--;
91
92 if (n > 0)
93 n = cpumask_next(n - 1, cpu_online_mask);
94 else
95 n = cpumask_first(cpu_online_mask);
96
97 *offset = n + 1;
98
99 if (n < nr_cpu_ids)
100 return (void *)(unsigned long)(n + 2);
101
102 return NULL;
103}
104
105static void *schedstat_next(struct seq_file *file, void *data, loff_t *offset)
106{
107 (*offset)++;
108
109 return schedstat_start(file, offset);
110}
111
/* seq_file ->stop: nothing to tear down; no state is held across shows. */
static void schedstat_stop(struct seq_file *file, void *data)
{
}
115
/* seq_file iterator operations backing /proc/schedstat. */
static const struct seq_operations schedstat_sops = {
	.start = schedstat_start,
	.next = schedstat_next,
	.stop = schedstat_stop,
	.show = show_schedstat,
};
122
/* ->open for /proc/schedstat: attach the seq_file iterator to the file. */
static int schedstat_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &schedstat_sops);
}
127
/* file_operations for /proc/schedstat; all I/O goes through seq_file. */
static const struct file_operations proc_schedstat_operations = {
	.open = schedstat_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};
134
/*
 * Register /proc/schedstat at boot.  The proc_create() return value is
 * not checked here; failure simply leaves the file absent.
 */
static int __init proc_schedstat_init(void)
{
	proc_create("schedstat", 0, NULL, &proc_schedstat_operations);

	return 0;
}
subsys_initcall(proc_schedstat_init);
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * /proc/schedstat implementation
4 */
5
/*
 * Start timing a wait on the runqueue for @p (or for the rq itself when
 * @p is NULL).
 *
 * For a task, a non-zero prior wait_start holds wait time accumulated
 * before a migration (stashed by __update_stats_wait_end()); subtract it
 * from the current clock so the stored stamp yields the combined wait
 * when the wait eventually ends.
 */
void __update_stats_wait_start(struct rq *rq, struct task_struct *p,
			       struct sched_statistics *stats)
{
	u64 wait_start, prev_wait_start;

	wait_start = rq_clock(rq);
	prev_wait_start = schedstat_val(stats->wait_start);

	/* Fold in wait time carried over from before a migration. */
	if (p && likely(wait_start > prev_wait_start))
		wait_start -= prev_wait_start;

	__schedstat_set(stats->wait_start, wait_start);
}
19
/*
 * Finish timing a wait started by __update_stats_wait_start() and fold
 * the elapsed time into wait_max/wait_count/wait_sum.
 *
 * A migrating task takes an early-return path: the elapsed wait is
 * stashed in wait_start instead of being accounted, so the destination
 * CPU can accumulate it (see __update_stats_wait_start()).
 */
void __update_stats_wait_end(struct rq *rq, struct task_struct *p,
			     struct sched_statistics *stats)
{
	u64 delta = rq_clock(rq) - schedstat_val(stats->wait_start);

	if (p) {
		if (task_on_rq_migrating(p)) {
			/*
			 * Preserve migrating task's wait time so wait_start
			 * time stamp can be adjusted to accumulate wait time
			 * prior to migration.
			 */
			__schedstat_set(stats->wait_start, delta);

			return;
		}

		trace_sched_stat_wait(p, delta);
	}

	__schedstat_set(stats->wait_max,
			max(schedstat_val(stats->wait_max), delta));
	__schedstat_inc(stats->wait_count);
	__schedstat_add(stats->wait_sum, delta);
	/* Clear the stamp so the next wait starts from scratch. */
	__schedstat_set(stats->wait_start, 0);
}
46
/*
 * Account sleep and/or block time when a task is enqueued again.
 *
 * A non-zero sleep_start or block_start stamp selects the corresponding
 * accounting path below.  @p may be NULL; then only the aggregate
 * counters are updated and no per-task tracing or latency accounting is
 * done.
 */
void __update_stats_enqueue_sleeper(struct rq *rq, struct task_struct *p,
				    struct sched_statistics *stats)
{
	u64 sleep_start, block_start;

	sleep_start = schedstat_val(stats->sleep_start);
	block_start = schedstat_val(stats->block_start);

	if (sleep_start) {
		u64 delta = rq_clock(rq) - sleep_start;

		/* Clamp negative deltas (clock skew) to zero. */
		if ((s64)delta < 0)
			delta = 0;

		if (unlikely(delta > schedstat_val(stats->sleep_max)))
			__schedstat_set(stats->sleep_max, delta);

		__schedstat_set(stats->sleep_start, 0);
		__schedstat_add(stats->sum_sleep_runtime, delta);

		if (p) {
			/* delta >> 10: scaled down for latency accounting. */
			account_scheduler_latency(p, delta >> 10, 1);
			trace_sched_stat_sleep(p, delta);
		}
	}

	if (block_start) {
		u64 delta = rq_clock(rq) - block_start;

		/* Clamp negative deltas (clock skew) to zero. */
		if ((s64)delta < 0)
			delta = 0;

		if (unlikely(delta > schedstat_val(stats->block_max)))
			__schedstat_set(stats->block_max, delta);

		__schedstat_set(stats->block_start, 0);
		/* Blocked time counts toward both the sleep and block sums. */
		__schedstat_add(stats->sum_sleep_runtime, delta);
		__schedstat_add(stats->sum_block_runtime, delta);

		if (p) {
			if (p->in_iowait) {
				__schedstat_add(stats->iowait_sum, delta);
				__schedstat_inc(stats->iowait_count);
				trace_sched_stat_iowait(p, delta);
			}

			trace_sched_stat_blocked(p, delta);

			account_scheduler_latency(p, delta >> 10, 0);
		}
	}
}
99
100/*
101 * Current schedstat API version.
102 *
103 * Bump this up when changing the output format or the meaning of an existing
104 * format, so that tools can adapt (or abort)
105 */
106#define SCHEDSTAT_VERSION 16
107
108static int show_schedstat(struct seq_file *seq, void *v)
109{
110 int cpu;
111
112 if (v == (void *)1) {
113 seq_printf(seq, "version %d\n", SCHEDSTAT_VERSION);
114 seq_printf(seq, "timestamp %lu\n", jiffies);
115 } else {
116 struct rq *rq;
117#ifdef CONFIG_SMP
118 struct sched_domain *sd;
119 int dcount = 0;
120#endif
121 cpu = (unsigned long)(v - 2);
122 rq = cpu_rq(cpu);
123
124 /* runqueue-specific stats */
125 seq_printf(seq,
126 "cpu%d %u 0 %u %u %u %u %llu %llu %lu",
127 cpu, rq->yld_count,
128 rq->sched_count, rq->sched_goidle,
129 rq->ttwu_count, rq->ttwu_local,
130 rq->rq_cpu_time,
131 rq->rq_sched_info.run_delay, rq->rq_sched_info.pcount);
132
133 seq_printf(seq, "\n");
134
135#ifdef CONFIG_SMP
136 /* domain-specific stats */
137 rcu_read_lock();
138 for_each_domain(cpu, sd) {
139 enum cpu_idle_type itype;
140
141 seq_printf(seq, "domain%d %*pb", dcount++,
142 cpumask_pr_args(sched_domain_span(sd)));
143 for (itype = 0; itype < CPU_MAX_IDLE_TYPES; itype++) {
144 seq_printf(seq, " %u %u %u %u %u %u %u %u",
145 sd->lb_count[itype],
146 sd->lb_balanced[itype],
147 sd->lb_failed[itype],
148 sd->lb_imbalance[itype],
149 sd->lb_gained[itype],
150 sd->lb_hot_gained[itype],
151 sd->lb_nobusyq[itype],
152 sd->lb_nobusyg[itype]);
153 }
154 seq_printf(seq,
155 " %u %u %u %u %u %u %u %u %u %u %u %u\n",
156 sd->alb_count, sd->alb_failed, sd->alb_pushed,
157 sd->sbe_count, sd->sbe_balanced, sd->sbe_pushed,
158 sd->sbf_count, sd->sbf_balanced, sd->sbf_pushed,
159 sd->ttwu_wake_remote, sd->ttwu_move_affine,
160 sd->ttwu_move_balance);
161 }
162 rcu_read_unlock();
163#endif
164 }
165 return 0;
166}
167
168/*
169 * This iterator needs some explanation.
170 * It returns 1 for the header position.
171 * This means 2 is cpu 0.
172 * In a hotplugged system some CPUs, including cpu 0, may be missing so we have
173 * to use cpumask_* to iterate over the CPUs.
174 */
static void *schedstat_start(struct seq_file *file, loff_t *offset)
{
	unsigned long n = *offset;

	/* Position 0 is the "version"/"timestamp" header line. */
	if (n == 0)
		return (void *) 1;

	n--;

	/* Map the position to the n-th online CPU (ids may be sparse). */
	if (n > 0)
		n = cpumask_next(n - 1, cpu_online_mask);
	else
		n = cpumask_first(cpu_online_mask);

	/* Remember where to resume; the cookie for cpu N is N + 2. */
	*offset = n + 1;

	if (n < nr_cpu_ids)
		return (void *)(unsigned long)(n + 2);

	/* Ran past the last online CPU: end of sequence. */
	return NULL;
}
196
/*
 * seq_file ->next: advance the position and let schedstat_start()
 * re-resolve it into a header/cpu cookie.
 */
static void *schedstat_next(struct seq_file *file, void *data, loff_t *offset)
{
	(*offset)++;

	return schedstat_start(file, offset);
}
203
/* seq_file ->stop: nothing to tear down; no state is held across shows. */
static void schedstat_stop(struct seq_file *file, void *data)
{
}
207
/* seq_file iterator operations backing /proc/schedstat. */
static const struct seq_operations schedstat_sops = {
	.start = schedstat_start,
	.next = schedstat_next,
	.stop = schedstat_stop,
	.show = show_schedstat,
};
214
/*
 * Register /proc/schedstat at boot.  proc_create_seq() wires the
 * seq_operations directly, so no private file_operations are needed.
 * Its return value is not checked; failure just leaves the file absent.
 */
static int __init proc_schedstat_init(void)
{
	proc_create_seq("schedstat", 0, NULL, &schedstat_sops);
	return 0;
}
subsys_initcall(proc_schedstat_init);