/* delayacct.c - per-task delay accounting
 *
 * Copyright (C) Shailabh Nagar, IBM Corp. 2006
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
 * the GNU General Public License for more details.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/taskstats.h>
#include <linux/time.h>
#include <linux/sysctl.h>
#include <linux/delayacct.h>
#include <linux/module.h>

int delayacct_on __read_mostly = 1;	/* Delay accounting turned on/off */
EXPORT_SYMBOL_GPL(delayacct_on);
struct kmem_cache *delayacct_cache;

static int __init delayacct_setup_disable(char *str)
{
	delayacct_on = 0;
	return 1;
}
__setup("nodelayacct", delayacct_setup_disable);
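/*
 * Passing "nodelayacct" on the kernel command line runs the __setup()
 * handler above and turns delay accounting off; in this version it is
 * enabled by default.
 */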

void delayacct_init(void)
{
	delayacct_cache = KMEM_CACHE(task_delay_info, SLAB_PANIC|SLAB_ACCOUNT);
	delayacct_tsk_init(&init_task);
}

void __delayacct_tsk_init(struct task_struct *tsk)
{
	tsk->delays = kmem_cache_zalloc(delayacct_cache, GFP_KERNEL);
	if (tsk->delays)
		spin_lock_init(&tsk->delays->lock);
}

/*
 * Finish delay accounting for a statistic using its timestamps (@start),
 * accumulator (@total) and @count
 */
static void delayacct_end(u64 *start, u64 *total, u32 *count)
{
	s64 ns = ktime_get_ns() - *start;
	unsigned long flags;

	if (ns > 0) {
		spin_lock_irqsave(&current->delays->lock, flags);
		*total += ns;
		(*count)++;
		spin_unlock_irqrestore(&current->delays->lock, flags);
	}
}

void __delayacct_blkio_start(void)
{
	current->delays->blkio_start = ktime_get_ns();
}

void __delayacct_blkio_end(void)
{
	if (current->delays->flags & DELAYACCT_PF_SWAPIN)
		/* Swapin block I/O */
		delayacct_end(&current->delays->blkio_start,
			&current->delays->swapin_delay,
			&current->delays->swapin_count);
	else	/* Other block I/O */
		delayacct_end(&current->delays->blkio_start,
			&current->delays->blkio_delay,
			&current->delays->blkio_count);
}

int __delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk)
{
	cputime_t utime, stime, stimescaled, utimescaled;
	unsigned long long t2, t3;
	unsigned long flags, t1;
	s64 tmp;

	task_cputime(tsk, &utime, &stime);
	tmp = (s64)d->cpu_run_real_total;
	tmp += cputime_to_nsecs(utime + stime);
	d->cpu_run_real_total = (tmp < (s64)d->cpu_run_real_total) ? 0 : tmp;

	task_cputime_scaled(tsk, &utimescaled, &stimescaled);
	tmp = (s64)d->cpu_scaled_run_real_total;
	tmp += cputime_to_nsecs(utimescaled + stimescaled);
	d->cpu_scaled_run_real_total =
		(tmp < (s64)d->cpu_scaled_run_real_total) ? 0 : tmp;

	/*
	 * No locking available for sched_info (and too expensive to add one)
	 * Mitigate by taking snapshot of values
	 */
	t1 = tsk->sched_info.pcount;
	t2 = tsk->sched_info.run_delay;
	t3 = tsk->se.sum_exec_runtime;

	d->cpu_count += t1;

	tmp = (s64)d->cpu_delay_total + t2;
	d->cpu_delay_total = (tmp < (s64)d->cpu_delay_total) ? 0 : tmp;

	tmp = (s64)d->cpu_run_virtual_total + t3;
	d->cpu_run_virtual_total =
		(tmp < (s64)d->cpu_run_virtual_total) ? 0 : tmp;

	/* zero XXX_total, non-zero XXX_count implies XXX stat overflowed */

	spin_lock_irqsave(&tsk->delays->lock, flags);
	tmp = d->blkio_delay_total + tsk->delays->blkio_delay;
	d->blkio_delay_total = (tmp < d->blkio_delay_total) ? 0 : tmp;
	tmp = d->swapin_delay_total + tsk->delays->swapin_delay;
	d->swapin_delay_total = (tmp < d->swapin_delay_total) ? 0 : tmp;
	tmp = d->freepages_delay_total + tsk->delays->freepages_delay;
	d->freepages_delay_total = (tmp < d->freepages_delay_total) ? 0 : tmp;
	d->blkio_count += tsk->delays->blkio_count;
	d->swapin_count += tsk->delays->swapin_count;
	d->freepages_count += tsk->delays->freepages_count;
	spin_unlock_irqrestore(&tsk->delays->lock, flags);

	return 0;
}

__u64 __delayacct_blkio_ticks(struct task_struct *tsk)
{
	__u64 ret;
	unsigned long flags;

	spin_lock_irqsave(&tsk->delays->lock, flags);
	ret = nsec_to_clock_t(tsk->delays->blkio_delay +
				tsk->delays->swapin_delay);
	spin_unlock_irqrestore(&tsk->delays->lock, flags);
	return ret;
}

void __delayacct_freepages_start(void)
{
	current->delays->freepages_start = ktime_get_ns();
}

void __delayacct_freepages_end(void)
{
	delayacct_end(&current->delays->freepages_start,
			&current->delays->freepages_delay,
			&current->delays->freepages_count);
}

// SPDX-License-Identifier: GPL-2.0-or-later
/* delayacct.c - per-task delay accounting
 *
 * Copyright (C) Shailabh Nagar, IBM Corp. 2006
 */

#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/sched/cputime.h>
#include <linux/sched/clock.h>
#include <linux/slab.h>
#include <linux/taskstats.h>
#include <linux/sysctl.h>
#include <linux/delayacct.h>
#include <linux/module.h>

DEFINE_STATIC_KEY_FALSE(delayacct_key);
int delayacct_on __read_mostly;	/* Delay accounting turned on/off */
struct kmem_cache *delayacct_cache;

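/*
 * Switch delay accounting on or off: the static key gates the inline
 * delayacct hooks on the fast paths, while delayacct_on mirrors the
 * current state for the boot-parameter and sysctl interfaces.
 */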
static void set_delayacct(bool enabled)
{
	if (enabled) {
		static_branch_enable(&delayacct_key);
		delayacct_on = 1;
	} else {
		delayacct_on = 0;
		static_branch_disable(&delayacct_key);
	}
}

static int __init delayacct_setup_enable(char *str)
{
	delayacct_on = 1;
	return 1;
}
__setup("delayacct", delayacct_setup_enable);

void delayacct_init(void)
{
	delayacct_cache = KMEM_CACHE(task_delay_info, SLAB_PANIC|SLAB_ACCOUNT);
	delayacct_tsk_init(&init_task);
	set_delayacct(delayacct_on);
}

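/*
 * Runtime control: delay accounting now defaults to off and is enabled
 * either with the "delayacct" boot parameter (handled above) or, when
 * CONFIG_PROC_SYSCTL is set, through the kernel.task_delayacct sysctl
 * defined below. A typical way to toggle it from a root shell
 * (illustrative only, not part of this file):
 *
 *	sysctl -w kernel.task_delayacct=1
 *	echo 0 > /proc/sys/kernel/task_delayacct
 */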
#ifdef CONFIG_PROC_SYSCTL
static int sysctl_delayacct(const struct ctl_table *table, int write, void *buffer,
		     size_t *lenp, loff_t *ppos)
{
	int state = delayacct_on;
	struct ctl_table t;
	int err;

	if (write && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	t = *table;
	t.data = &state;
	err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
	if (err < 0)
		return err;
	if (write)
		set_delayacct(state);
	return err;
}

static struct ctl_table kern_delayacct_table[] = {
	{
		.procname	= "task_delayacct",
		.data		= NULL,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= sysctl_delayacct,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
};

static __init int kernel_delayacct_sysctls_init(void)
{
	register_sysctl_init("kernel", kern_delayacct_table);
	return 0;
}
late_initcall(kernel_delayacct_sysctls_init);
#endif

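/*
 * Allocate and zero the per-task delay bookkeeping. If the allocation
 * fails, tsk->delays is left NULL; users of the structure are expected
 * to tolerate that (for example, delayacct_add_tsk() below bails out
 * early when tsk->delays is NULL).
 */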
void __delayacct_tsk_init(struct task_struct *tsk)
{
	tsk->delays = kmem_cache_zalloc(delayacct_cache, GFP_KERNEL);
	if (tsk->delays)
		raw_spin_lock_init(&tsk->delays->lock);
}

/*
 * Finish delay accounting for a statistic using its timestamps (@start),
 * accumulator (@total) and @count
 */
static void delayacct_end(raw_spinlock_t *lock, u64 *start, u64 *total, u32 *count)
{
	s64 ns = local_clock() - *start;
	unsigned long flags;

	if (ns > 0) {
		raw_spin_lock_irqsave(lock, flags);
		*total += ns;
		(*count)++;
		raw_spin_unlock_irqrestore(lock, flags);
	}
}

void __delayacct_blkio_start(void)
{
	current->delays->blkio_start = local_clock();
}

/*
 * We cannot rely on the `current` macro, as we haven't yet switched back to
 * the process being woken.
 */
void __delayacct_blkio_end(struct task_struct *p)
{
	delayacct_end(&p->delays->lock,
		      &p->delays->blkio_start,
		      &p->delays->blkio_delay,
		      &p->delays->blkio_count);
}

int delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk)
{
	u64 utime, stime, stimescaled, utimescaled;
	unsigned long long t2, t3;
	unsigned long flags, t1;
	s64 tmp;

	task_cputime(tsk, &utime, &stime);
	tmp = (s64)d->cpu_run_real_total;
	tmp += utime + stime;
	d->cpu_run_real_total = (tmp < (s64)d->cpu_run_real_total) ? 0 : tmp;

	task_cputime_scaled(tsk, &utimescaled, &stimescaled);
	tmp = (s64)d->cpu_scaled_run_real_total;
	tmp += utimescaled + stimescaled;
	d->cpu_scaled_run_real_total =
		(tmp < (s64)d->cpu_scaled_run_real_total) ? 0 : tmp;

	/*
	 * No locking available for sched_info (and too expensive to add one)
	 * Mitigate by taking snapshot of values
	 */
	t1 = tsk->sched_info.pcount;
	t2 = tsk->sched_info.run_delay;
	t3 = tsk->se.sum_exec_runtime;

	d->cpu_count += t1;

	tmp = (s64)d->cpu_delay_total + t2;
	d->cpu_delay_total = (tmp < (s64)d->cpu_delay_total) ? 0 : tmp;

	tmp = (s64)d->cpu_run_virtual_total + t3;
	d->cpu_run_virtual_total =
		(tmp < (s64)d->cpu_run_virtual_total) ? 0 : tmp;

	if (!tsk->delays)
		return 0;

	/* zero XXX_total, non-zero XXX_count implies XXX stat overflowed */

	raw_spin_lock_irqsave(&tsk->delays->lock, flags);
	tmp = d->blkio_delay_total + tsk->delays->blkio_delay;
	d->blkio_delay_total = (tmp < d->blkio_delay_total) ? 0 : tmp;
	tmp = d->swapin_delay_total + tsk->delays->swapin_delay;
	d->swapin_delay_total = (tmp < d->swapin_delay_total) ? 0 : tmp;
	tmp = d->freepages_delay_total + tsk->delays->freepages_delay;
	d->freepages_delay_total = (tmp < d->freepages_delay_total) ? 0 : tmp;
	tmp = d->thrashing_delay_total + tsk->delays->thrashing_delay;
	d->thrashing_delay_total = (tmp < d->thrashing_delay_total) ? 0 : tmp;
	tmp = d->compact_delay_total + tsk->delays->compact_delay;
	d->compact_delay_total = (tmp < d->compact_delay_total) ? 0 : tmp;
	tmp = d->wpcopy_delay_total + tsk->delays->wpcopy_delay;
	d->wpcopy_delay_total = (tmp < d->wpcopy_delay_total) ? 0 : tmp;
	tmp = d->irq_delay_total + tsk->delays->irq_delay;
	d->irq_delay_total = (tmp < d->irq_delay_total) ? 0 : tmp;
	d->blkio_count += tsk->delays->blkio_count;
	d->swapin_count += tsk->delays->swapin_count;
	d->freepages_count += tsk->delays->freepages_count;
	d->thrashing_count += tsk->delays->thrashing_count;
	d->compact_count += tsk->delays->compact_count;
	d->wpcopy_count += tsk->delays->wpcopy_count;
	d->irq_count += tsk->delays->irq_count;
	raw_spin_unlock_irqrestore(&tsk->delays->lock, flags);

	return 0;
}

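/*
 * Consumer-side sketch (illustrative only, not part of this file): a
 * taskstats reader such as tools/accounting/getdelays.c can derive an
 * average delay per statistic and honour the overflow convention noted
 * above, roughly as follows:
 *
 *	static void report_blkio(const struct taskstats *ts)
 *	{
 *		if (ts->blkio_count && !ts->blkio_delay_total) {
 *			printf("blkio delay total overflowed\n");
 *			return;
 *		}
 *		if (ts->blkio_count)
 *			printf("avg blkio delay: %llu ns\n",
 *			       ts->blkio_delay_total / ts->blkio_count);
 *	}
 */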
__u64 __delayacct_blkio_ticks(struct task_struct *tsk)
{
	__u64 ret;
	unsigned long flags;

	raw_spin_lock_irqsave(&tsk->delays->lock, flags);
	ret = nsec_to_clock_t(tsk->delays->blkio_delay);
	raw_spin_unlock_irqrestore(&tsk->delays->lock, flags);
	return ret;
}

void __delayacct_freepages_start(void)
{
	current->delays->freepages_start = local_clock();
}

void __delayacct_freepages_end(void)
{
	delayacct_end(&current->delays->lock,
		      &current->delays->freepages_start,
		      &current->delays->freepages_delay,
		      &current->delays->freepages_count);
}

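/*
 * Thrashing sections can nest; only the outermost one is timed. The
 * caller-provided *in_thrashing flag remembers whether the task was
 * already inside a thrashing section when _start() ran, so the matching
 * _end() knows whether to account anything.
 */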
void __delayacct_thrashing_start(bool *in_thrashing)
{
	*in_thrashing = !!current->in_thrashing;
	if (*in_thrashing)
		return;

	current->in_thrashing = 1;
	current->delays->thrashing_start = local_clock();
}

void __delayacct_thrashing_end(bool *in_thrashing)
{
	if (*in_thrashing)
		return;

	current->in_thrashing = 0;
	delayacct_end(&current->delays->lock,
		      &current->delays->thrashing_start,
		      &current->delays->thrashing_delay,
		      &current->delays->thrashing_count);
}

void __delayacct_swapin_start(void)
{
	current->delays->swapin_start = local_clock();
}

void __delayacct_swapin_end(void)
{
	delayacct_end(&current->delays->lock,
		      &current->delays->swapin_start,
		      &current->delays->swapin_delay,
		      &current->delays->swapin_count);
}

void __delayacct_compact_start(void)
{
	current->delays->compact_start = local_clock();
}

void __delayacct_compact_end(void)
{
	delayacct_end(&current->delays->lock,
		      &current->delays->compact_start,
		      &current->delays->compact_delay,
		      &current->delays->compact_count);
}

void __delayacct_wpcopy_start(void)
{
	current->delays->wpcopy_start = local_clock();
}

void __delayacct_wpcopy_end(void)
{
	delayacct_end(&current->delays->lock,
		      &current->delays->wpcopy_start,
		      &current->delays->wpcopy_delay,
		      &current->delays->wpcopy_count);
}

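/*
 * IRQ/softirq time is accounted differently: the caller has already
 * measured the delay, so it is added directly under the lock instead of
 * going through the start/end timestamp helpers above.
 */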
void __delayacct_irq(struct task_struct *task, u32 delta)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&task->delays->lock, flags);
	task->delays->irq_delay += delta;
	task->delays->irq_count++;
	raw_spin_unlock_irqrestore(&task->delays->lock, flags);
}