#include <linux/cpumask.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/proc_fs.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/irqnr.h>
#include <asm/cputime.h>
#include <linux/tick.h>

#ifndef arch_irq_stat_cpu
#define arch_irq_stat_cpu(cpu) 0
#endif
#ifndef arch_irq_stat
#define arch_irq_stat() 0
#endif

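/*
 * Idle and iowait accounting: when the architecture provides
 * arch_idle_time(), it is folded on top of the cpustat counters
 * (into idle or iowait depending on whether the CPU is in iowait).
 * Otherwise the NO_HZ idle/iowait times from the tick code are
 * preferred, falling back to cpustat when they are unavailable.
 */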
#ifdef arch_idle_time

static cputime64_t get_idle_time(int cpu)
{
        cputime64_t idle;

        idle = kcpustat_cpu(cpu).cpustat[CPUTIME_IDLE];
        if (cpu_online(cpu) && !nr_iowait_cpu(cpu))
                idle += arch_idle_time(cpu);
        return idle;
}

static cputime64_t get_iowait_time(int cpu)
{
        cputime64_t iowait;

        iowait = kcpustat_cpu(cpu).cpustat[CPUTIME_IOWAIT];
        if (cpu_online(cpu) && nr_iowait_cpu(cpu))
                iowait += arch_idle_time(cpu);
        return iowait;
}

#else

static u64 get_idle_time(int cpu)
{
        u64 idle, idle_time = get_cpu_idle_time_us(cpu, NULL);

        if (idle_time == -1ULL)
                /* !NO_HZ so we can rely on cpustat.idle */
                idle = kcpustat_cpu(cpu).cpustat[CPUTIME_IDLE];
        else
                idle = usecs_to_cputime64(idle_time);

        return idle;
}

static u64 get_iowait_time(int cpu)
{
        u64 iowait, iowait_time = get_cpu_iowait_time_us(cpu, NULL);

        if (iowait_time == -1ULL)
                /* !NO_HZ so we can rely on cpustat.iowait */
                iowait = kcpustat_cpu(cpu).cpustat[CPUTIME_IOWAIT];
        else
                iowait = usecs_to_cputime64(iowait_time);

        return iowait;
}

#endif

static int show_stat(struct seq_file *p, void *v)
{
        int i, j;
        unsigned long jif;
        u64 user, nice, system, idle, iowait, irq, softirq, steal;
        u64 guest, guest_nice;
        u64 sum = 0;
        u64 sum_softirq = 0;
        unsigned int per_softirq_sums[NR_SOFTIRQS] = {0};
        struct timespec boottime;

        user = nice = system = idle = iowait =
                irq = softirq = steal = 0;
        guest = guest_nice = 0;
        getboottime(&boottime);
        jif = boottime.tv_sec;

        for_each_possible_cpu(i) {
                user += kcpustat_cpu(i).cpustat[CPUTIME_USER];
                nice += kcpustat_cpu(i).cpustat[CPUTIME_NICE];
                system += kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM];
                idle += get_idle_time(i);
                iowait += get_iowait_time(i);
                irq += kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
                softirq += kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
                steal += kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
                guest += kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
                guest_nice += kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
                sum += kstat_cpu_irqs_sum(i);
                sum += arch_irq_stat_cpu(i);

                for (j = 0; j < NR_SOFTIRQS; j++) {
                        unsigned int softirq_stat = kstat_softirqs_cpu(j, i);

                        per_softirq_sums[j] += softirq_stat;
                        sum_softirq += softirq_stat;
                }
        }
        sum += arch_irq_stat();

        seq_puts(p, "cpu ");
        seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(user));
        seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(nice));
        seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(system));
        seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(idle));
        seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(iowait));
        seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(irq));
        seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(softirq));
        seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(steal));
        seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(guest));
        seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(guest_nice));
        seq_putc(p, '\n');

        for_each_online_cpu(i) {
                /* Copy values here to work around gcc-2.95.3, gcc-2.96 */
                user = kcpustat_cpu(i).cpustat[CPUTIME_USER];
                nice = kcpustat_cpu(i).cpustat[CPUTIME_NICE];
                system = kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM];
                idle = get_idle_time(i);
                iowait = get_iowait_time(i);
                irq = kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
                softirq = kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
                steal = kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
                guest = kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
                guest_nice = kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
                seq_printf(p, "cpu%d", i);
                seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(user));
                seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(nice));
                seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(system));
                seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(idle));
                seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(iowait));
                seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(irq));
                seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(softirq));
                seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(steal));
                seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(guest));
                seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(guest_nice));
                seq_putc(p, '\n');
        }
        seq_printf(p, "intr %llu", (unsigned long long)sum);

        /* sum again ? it could be updated? */
        for_each_irq_nr(j)
                seq_put_decimal_ull(p, ' ', kstat_irqs(j));

        seq_printf(p,
                "\nctxt %llu\n"
                "btime %lu\n"
                "processes %lu\n"
                "procs_running %lu\n"
                "procs_blocked %lu\n",
                nr_context_switches(),
                (unsigned long)jif,
                total_forks,
                nr_running(),
                nr_iowait());

        seq_printf(p, "softirq %llu", (unsigned long long)sum_softirq);

        for (i = 0; i < NR_SOFTIRQS; i++)
                seq_put_decimal_ull(p, ' ', per_softirq_sums[i]);
        seq_putc(p, '\n');

        return 0;
}

static int stat_open(struct inode *inode, struct file *file)
{
        unsigned size = 1024 + 128 * num_possible_cpus();
        char *buf;
        struct seq_file *m;
        int res;

        /* minimum size to display an interrupt count : 2 bytes */
        size += 2 * nr_irqs;

        /* don't ask for more than the kmalloc() max size */
        if (size > KMALLOC_MAX_SIZE)
                size = KMALLOC_MAX_SIZE;
        buf = kmalloc(size, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        res = single_open(file, show_stat, NULL);
        if (!res) {
                m = file->private_data;
                m->buf = buf;
                m->size = ksize(buf);
        } else
                kfree(buf);
        return res;
}

static const struct file_operations proc_stat_operations = {
        .open = stat_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static int __init proc_stat_init(void)
{
        proc_create("stat", 0, NULL, &proc_stat_operations);
        return 0;
}
module_init(proc_stat_init);

// SPDX-License-Identifier: GPL-2.0
#include <linux/cpumask.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/proc_fs.h>
#include <linux/sched.h>
#include <linux/sched/stat.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/irqnr.h>
#include <linux/sched/cputime.h>
#include <linux/tick.h>

#ifndef arch_irq_stat_cpu
#define arch_irq_stat_cpu(cpu) 0
#endif
#ifndef arch_irq_stat
#define arch_irq_stat() 0
#endif

#ifdef arch_idle_time

static u64 get_idle_time(struct kernel_cpustat *kcs, int cpu)
{
        u64 idle;

        idle = kcs->cpustat[CPUTIME_IDLE];
        if (cpu_online(cpu) && !nr_iowait_cpu(cpu))
                idle += arch_idle_time(cpu);
        return idle;
}

static u64 get_iowait_time(struct kernel_cpustat *kcs, int cpu)
{
        u64 iowait;

        iowait = kcs->cpustat[CPUTIME_IOWAIT];
        if (cpu_online(cpu) && nr_iowait_cpu(cpu))
                iowait += arch_idle_time(cpu);
        return iowait;
}

#else

static u64 get_idle_time(struct kernel_cpustat *kcs, int cpu)
{
        u64 idle, idle_usecs = -1ULL;

        if (cpu_online(cpu))
                idle_usecs = get_cpu_idle_time_us(cpu, NULL);

        if (idle_usecs == -1ULL)
                /* !NO_HZ or cpu offline so we can rely on cpustat.idle */
                idle = kcs->cpustat[CPUTIME_IDLE];
        else
                idle = idle_usecs * NSEC_PER_USEC;

        return idle;
}

static u64 get_iowait_time(struct kernel_cpustat *kcs, int cpu)
{
        u64 iowait, iowait_usecs = -1ULL;

        if (cpu_online(cpu))
                iowait_usecs = get_cpu_iowait_time_us(cpu, NULL);

        if (iowait_usecs == -1ULL)
                /* !NO_HZ or cpu offline so we can rely on cpustat.iowait */
                iowait = kcs->cpustat[CPUTIME_IOWAIT];
        else
                iowait = iowait_usecs * NSEC_PER_USEC;

        return iowait;
}

#endif

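/*
 * The "intr" line carries one counter per interrupt number.  IRQ numbers
 * without an active descriptor are printed as zeros; show_irq_gap() emits
 * those runs of " 0" in bulk so the columns stay aligned with the IRQ
 * numbering.
 */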
static void show_irq_gap(struct seq_file *p, unsigned int gap)
{
        static const char zeros[] = " 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0";

        while (gap > 0) {
                unsigned int inc;

                inc = min_t(unsigned int, gap, ARRAY_SIZE(zeros) / 2);
                seq_write(p, zeros, 2 * inc);
                gap -= inc;
        }
}

static void show_all_irqs(struct seq_file *p)
{
        unsigned int i, next = 0;

        for_each_active_irq(i) {
                show_irq_gap(p, i - next);
                seq_put_decimal_ull(p, " ", kstat_irqs_usr(i));
                next = i + 1;
        }
        show_irq_gap(p, nr_irqs - next);
}

static int show_stat(struct seq_file *p, void *v)
{
        int i, j;
        u64 user, nice, system, idle, iowait, irq, softirq, steal;
        u64 guest, guest_nice;
        u64 sum = 0;
        u64 sum_softirq = 0;
        unsigned int per_softirq_sums[NR_SOFTIRQS] = {0};
        struct timespec64 boottime;

        user = nice = system = idle = iowait =
                irq = softirq = steal = 0;
        guest = guest_nice = 0;
        getboottime64(&boottime);

        for_each_possible_cpu(i) {
                struct kernel_cpustat *kcs = &kcpustat_cpu(i);

                user += kcs->cpustat[CPUTIME_USER];
                nice += kcs->cpustat[CPUTIME_NICE];
                system += kcs->cpustat[CPUTIME_SYSTEM];
                idle += get_idle_time(kcs, i);
                iowait += get_iowait_time(kcs, i);
                irq += kcs->cpustat[CPUTIME_IRQ];
                softirq += kcs->cpustat[CPUTIME_SOFTIRQ];
                steal += kcs->cpustat[CPUTIME_STEAL];
                guest += kcs->cpustat[CPUTIME_GUEST];
                guest_nice += kcs->cpustat[CPUTIME_GUEST_NICE];
                sum += kstat_cpu_irqs_sum(i);
                sum += arch_irq_stat_cpu(i);

                for (j = 0; j < NR_SOFTIRQS; j++) {
                        unsigned int softirq_stat = kstat_softirqs_cpu(j, i);

                        per_softirq_sums[j] += softirq_stat;
                        sum_softirq += softirq_stat;
                }
        }
        sum += arch_irq_stat();

        seq_put_decimal_ull(p, "cpu  ", nsec_to_clock_t(user));
        seq_put_decimal_ull(p, " ", nsec_to_clock_t(nice));
        seq_put_decimal_ull(p, " ", nsec_to_clock_t(system));
        seq_put_decimal_ull(p, " ", nsec_to_clock_t(idle));
        seq_put_decimal_ull(p, " ", nsec_to_clock_t(iowait));
        seq_put_decimal_ull(p, " ", nsec_to_clock_t(irq));
        seq_put_decimal_ull(p, " ", nsec_to_clock_t(softirq));
        seq_put_decimal_ull(p, " ", nsec_to_clock_t(steal));
        seq_put_decimal_ull(p, " ", nsec_to_clock_t(guest));
        seq_put_decimal_ull(p, " ", nsec_to_clock_t(guest_nice));
        seq_putc(p, '\n');

        for_each_online_cpu(i) {
                struct kernel_cpustat *kcs = &kcpustat_cpu(i);

                /* Copy values here to work around gcc-2.95.3, gcc-2.96 */
                user = kcs->cpustat[CPUTIME_USER];
                nice = kcs->cpustat[CPUTIME_NICE];
                system = kcs->cpustat[CPUTIME_SYSTEM];
                idle = get_idle_time(kcs, i);
                iowait = get_iowait_time(kcs, i);
                irq = kcs->cpustat[CPUTIME_IRQ];
                softirq = kcs->cpustat[CPUTIME_SOFTIRQ];
                steal = kcs->cpustat[CPUTIME_STEAL];
                guest = kcs->cpustat[CPUTIME_GUEST];
                guest_nice = kcs->cpustat[CPUTIME_GUEST_NICE];
                seq_printf(p, "cpu%d", i);
                seq_put_decimal_ull(p, " ", nsec_to_clock_t(user));
                seq_put_decimal_ull(p, " ", nsec_to_clock_t(nice));
                seq_put_decimal_ull(p, " ", nsec_to_clock_t(system));
                seq_put_decimal_ull(p, " ", nsec_to_clock_t(idle));
                seq_put_decimal_ull(p, " ", nsec_to_clock_t(iowait));
                seq_put_decimal_ull(p, " ", nsec_to_clock_t(irq));
                seq_put_decimal_ull(p, " ", nsec_to_clock_t(softirq));
                seq_put_decimal_ull(p, " ", nsec_to_clock_t(steal));
                seq_put_decimal_ull(p, " ", nsec_to_clock_t(guest));
                seq_put_decimal_ull(p, " ", nsec_to_clock_t(guest_nice));
                seq_putc(p, '\n');
        }
        seq_put_decimal_ull(p, "intr ", (unsigned long long)sum);

        show_all_irqs(p);

        seq_printf(p,
                "\nctxt %llu\n"
                "btime %llu\n"
                "processes %lu\n"
                "procs_running %lu\n"
                "procs_blocked %lu\n",
                nr_context_switches(),
                (unsigned long long)boottime.tv_sec,
                total_forks,
                nr_running(),
                nr_iowait());

        seq_put_decimal_ull(p, "softirq ", (unsigned long long)sum_softirq);

        for (i = 0; i < NR_SOFTIRQS; i++)
                seq_put_decimal_ull(p, " ", per_softirq_sums[i]);
        seq_putc(p, '\n');

        return 0;
}

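/*
 * Size hint for the seq_file buffer: roughly 128 bytes per online CPU
 * plus two bytes per interrupt number, handed to single_open_size() so
 * the initial allocation is usually large enough on the first pass.
 */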
static int stat_open(struct inode *inode, struct file *file)
{
        unsigned int size = 1024 + 128 * num_online_cpus();

        /* minimum size to display an interrupt count : 2 bytes */
        size += 2 * nr_irqs;
        return single_open_size(file, show_stat, NULL, size);
}

static const struct file_operations proc_stat_operations = {
        .open = stat_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static int __init proc_stat_init(void)
{
        proc_create("stat", 0, NULL, &proc_stat_operations);
        return 0;
}
fs_initcall(proc_stat_init);
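
For reference, the aggregate "cpu" line emitted by show_stat() above carries the fields user, nice, system, idle, iowait, irq, softirq, steal, guest and guest_nice, expressed in clock ticks (USER_HZ). The following is a minimal userspace sketch that reads that line; it is illustrative only and not part of the kernel source above, and the busy/idle split at the end is an assumption about how a consumer might aggregate the fields.

/* Hypothetical userspace sketch: parse the first line of /proc/stat. */
#include <stdio.h>

int main(void)
{
        unsigned long long user, nice, system, idle, iowait, irq, softirq, steal;
        FILE *f = fopen("/proc/stat", "r");

        if (!f)
                return 1;

        /* Field order matches the seq_put_decimal_ull() calls in show_stat(). */
        if (fscanf(f, "cpu %llu %llu %llu %llu %llu %llu %llu %llu",
                   &user, &nice, &system, &idle, &iowait,
                   &irq, &softirq, &steal) != 8) {
                fclose(f);
                return 1;
        }
        fclose(f);

        /* One possible aggregation: everything but idle/iowait counts as busy. */
        printf("busy=%llu ticks, idle=%llu ticks\n",
               user + nice + system + irq + softirq + steal,
               idle + iowait);
        return 0;
}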