// SPDX-License-Identifier: GPL-2.0-or-later
/* delayacct.c - per-task delay accounting
 *
 * Copyright (C) Shailabh Nagar, IBM Corp. 2006
 */

#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/sched/cputime.h>
#include <linux/slab.h>
#include <linux/taskstats.h>
#include <linux/time.h>
#include <linux/sysctl.h>
#include <linux/delayacct.h>
#include <linux/module.h>

int delayacct_on __read_mostly = 1;	/* Delay accounting turned on/off */
EXPORT_SYMBOL_GPL(delayacct_on);
struct kmem_cache *delayacct_cache;

static int __init delayacct_setup_disable(char *str)
{
	delayacct_on = 0;
	return 1;
}
__setup("nodelayacct", delayacct_setup_disable);
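/*
 * In this version delay accounting defaults to on; booting with the
 * "nodelayacct" kernel parameter (handled above) clears delayacct_on
 * before delayacct_init() runs, so no per-task delays are collected.
 */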

void delayacct_init(void)
{
	delayacct_cache = KMEM_CACHE(task_delay_info, SLAB_PANIC|SLAB_ACCOUNT);
	delayacct_tsk_init(&init_task);
}

void __delayacct_tsk_init(struct task_struct *tsk)
{
	tsk->delays = kmem_cache_zalloc(delayacct_cache, GFP_KERNEL);
	if (tsk->delays)
		raw_spin_lock_init(&tsk->delays->lock);
}

/*
 * Finish delay accounting for a statistic using its timestamps (@start),
 * accumulator (@total) and @count
 */
static void delayacct_end(raw_spinlock_t *lock, u64 *start, u64 *total,
			  u32 *count)
{
	s64 ns = ktime_get_ns() - *start;
	unsigned long flags;

	if (ns > 0) {
		raw_spin_lock_irqsave(lock, flags);
		*total += ns;
		(*count)++;
		raw_spin_unlock_irqrestore(lock, flags);
	}
}
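/*
 * Each delay type below follows the same pattern: a __delayacct_XXX_start()
 * helper records a timestamp in task_delay_info, and the matching end helper
 * calls delayacct_end() to fold the elapsed time into the per-task total and
 * bump the count under delays->lock.
 */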

void __delayacct_blkio_start(void)
{
	current->delays->blkio_start = ktime_get_ns();
}

/*
 * We cannot rely on the `current` macro, as we haven't yet switched back to
 * the process being woken.
 */
void __delayacct_blkio_end(struct task_struct *p)
{
	struct task_delay_info *delays = p->delays;
	u64 *total;
	u32 *count;

	if (p->delays->flags & DELAYACCT_PF_SWAPIN) {
		total = &delays->swapin_delay;
		count = &delays->swapin_count;
	} else {
		total = &delays->blkio_delay;
		count = &delays->blkio_count;
	}

	delayacct_end(&delays->lock, &delays->blkio_start, total, count);
}
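/*
 * In this version swap-in waits share the blkio timestamp: the swap-in path
 * is expected to set DELAYACCT_PF_SWAPIN in delays->flags around the wait,
 * and the routing above decides whether the elapsed time lands in the swapin
 * or blkio accumulator. The newer version of this file below gives swap-in
 * its own swapin_start timestamp instead.
 */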

int __delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk)
{
	u64 utime, stime, stimescaled, utimescaled;
	unsigned long long t2, t3;
	unsigned long flags, t1;
	s64 tmp;

	task_cputime(tsk, &utime, &stime);
	tmp = (s64)d->cpu_run_real_total;
	tmp += utime + stime;
	d->cpu_run_real_total = (tmp < (s64)d->cpu_run_real_total) ? 0 : tmp;

	task_cputime_scaled(tsk, &utimescaled, &stimescaled);
	tmp = (s64)d->cpu_scaled_run_real_total;
	tmp += utimescaled + stimescaled;
	d->cpu_scaled_run_real_total =
		(tmp < (s64)d->cpu_scaled_run_real_total) ? 0 : tmp;

	/*
	 * No locking available for sched_info (and too expensive to add one)
	 * Mitigate by taking snapshot of values
	 */
	t1 = tsk->sched_info.pcount;
	t2 = tsk->sched_info.run_delay;
	t3 = tsk->se.sum_exec_runtime;

	d->cpu_count += t1;

	tmp = (s64)d->cpu_delay_total + t2;
	d->cpu_delay_total = (tmp < (s64)d->cpu_delay_total) ? 0 : tmp;

	tmp = (s64)d->cpu_run_virtual_total + t3;
	d->cpu_run_virtual_total =
		(tmp < (s64)d->cpu_run_virtual_total) ? 0 : tmp;

	/* zero XXX_total, non-zero XXX_count implies XXX stat overflowed */

	raw_spin_lock_irqsave(&tsk->delays->lock, flags);
	tmp = d->blkio_delay_total + tsk->delays->blkio_delay;
	d->blkio_delay_total = (tmp < d->blkio_delay_total) ? 0 : tmp;
	tmp = d->swapin_delay_total + tsk->delays->swapin_delay;
	d->swapin_delay_total = (tmp < d->swapin_delay_total) ? 0 : tmp;
	tmp = d->freepages_delay_total + tsk->delays->freepages_delay;
	d->freepages_delay_total = (tmp < d->freepages_delay_total) ? 0 : tmp;
	tmp = d->thrashing_delay_total + tsk->delays->thrashing_delay;
	d->thrashing_delay_total = (tmp < d->thrashing_delay_total) ? 0 : tmp;
	d->blkio_count += tsk->delays->blkio_count;
	d->swapin_count += tsk->delays->swapin_count;
	d->freepages_count += tsk->delays->freepages_count;
	d->thrashing_count += tsk->delays->thrashing_count;
	raw_spin_unlock_irqrestore(&tsk->delays->lock, flags);

	return 0;
}

__u64 __delayacct_blkio_ticks(struct task_struct *tsk)
{
	__u64 ret;
	unsigned long flags;

	raw_spin_lock_irqsave(&tsk->delays->lock, flags);
	ret = nsec_to_clock_t(tsk->delays->blkio_delay +
				tsk->delays->swapin_delay);
	raw_spin_unlock_irqrestore(&tsk->delays->lock, flags);
	return ret;
}

void __delayacct_freepages_start(void)
{
	current->delays->freepages_start = ktime_get_ns();
}

void __delayacct_freepages_end(void)
{
	delayacct_end(
		&current->delays->lock,
		&current->delays->freepages_start,
		&current->delays->freepages_delay,
		&current->delays->freepages_count);
}

void __delayacct_thrashing_start(void)
{
	current->delays->thrashing_start = ktime_get_ns();
}

void __delayacct_thrashing_end(void)
{
	delayacct_end(&current->delays->lock,
		      &current->delays->thrashing_start,
		      &current->delays->thrashing_delay,
		      &current->delays->thrashing_count);
}
// SPDX-License-Identifier: GPL-2.0-or-later
/* delayacct.c - per-task delay accounting
 *
 * Copyright (C) Shailabh Nagar, IBM Corp. 2006
 */

#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/sched/cputime.h>
#include <linux/sched/clock.h>
#include <linux/slab.h>
#include <linux/taskstats.h>
#include <linux/sysctl.h>
#include <linux/delayacct.h>
#include <linux/module.h>

DEFINE_STATIC_KEY_FALSE(delayacct_key);
int delayacct_on __read_mostly;	/* Delay accounting turned on/off */
struct kmem_cache *delayacct_cache;

static void set_delayacct(bool enabled)
{
	if (enabled) {
		static_branch_enable(&delayacct_key);
		delayacct_on = 1;
	} else {
		delayacct_on = 0;
		static_branch_disable(&delayacct_key);
	}
}

static int __init delayacct_setup_enable(char *str)
{
	delayacct_on = 1;
	return 1;
}
__setup("delayacct", delayacct_setup_enable);
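/*
 * Unlike the older version above, delay accounting now defaults to off:
 * delayacct_on starts at 0 and the "delayacct" boot parameter (handled
 * above) turns it on. The delayacct_key static branch keeps the disabled
 * case cheap on hot paths that would otherwise have to test delayacct_on.
 */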

void delayacct_init(void)
{
	delayacct_cache = KMEM_CACHE(task_delay_info, SLAB_PANIC|SLAB_ACCOUNT);
	delayacct_tsk_init(&init_task);
	set_delayacct(delayacct_on);
}

#ifdef CONFIG_PROC_SYSCTL
static int sysctl_delayacct(const struct ctl_table *table, int write, void *buffer,
			    size_t *lenp, loff_t *ppos)
{
	int state = delayacct_on;
	struct ctl_table t;
	int err;

	if (write && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	t = *table;
	t.data = &state;
	err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
	if (err < 0)
		return err;
	if (write)
		set_delayacct(state);
	return err;
}

static struct ctl_table kern_delayacct_table[] = {
	{
		.procname	= "task_delayacct",
		.data		= NULL,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= sysctl_delayacct,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
};

static __init int kernel_delayacct_sysctls_init(void)
{
	register_sysctl_init("kernel", kern_delayacct_table);
	return 0;
}
late_initcall(kernel_delayacct_sysctls_init);
#endif
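/*
 * With CONFIG_PROC_SYSCTL, the table above is registered under the "kernel"
 * sysctl directory, so the runtime toggle looks like this (sketch, assuming
 * procfs is mounted at /proc):
 *
 *	echo 1 > /proc/sys/kernel/task_delayacct	# enable
 *	echo 0 > /proc/sys/kernel/task_delayacct	# disable
 *
 * Writes require CAP_SYS_ADMIN, per the check in sysctl_delayacct().
 */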

void __delayacct_tsk_init(struct task_struct *tsk)
{
	tsk->delays = kmem_cache_zalloc(delayacct_cache, GFP_KERNEL);
	if (tsk->delays)
		raw_spin_lock_init(&tsk->delays->lock);
}

/*
 * Finish delay accounting for a statistic using its timestamps (@start),
 * accumulator (@total) and @count
 */
static void delayacct_end(raw_spinlock_t *lock, u64 *start, u64 *total, u32 *count)
{
	s64 ns = local_clock() - *start;
	unsigned long flags;

	if (ns > 0) {
		raw_spin_lock_irqsave(lock, flags);
		*total += ns;
		(*count)++;
		raw_spin_unlock_irqrestore(lock, flags);
	}
}

void __delayacct_blkio_start(void)
{
	current->delays->blkio_start = local_clock();
}

/*
 * We cannot rely on the `current` macro, as we haven't yet switched back to
 * the process being woken.
 */
void __delayacct_blkio_end(struct task_struct *p)
{
	delayacct_end(&p->delays->lock,
		      &p->delays->blkio_start,
		      &p->delays->blkio_delay,
		      &p->delays->blkio_count);
}

int delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk)
{
	u64 utime, stime, stimescaled, utimescaled;
	unsigned long long t2, t3;
	unsigned long flags, t1;
	s64 tmp;

	task_cputime(tsk, &utime, &stime);
	tmp = (s64)d->cpu_run_real_total;
	tmp += utime + stime;
	d->cpu_run_real_total = (tmp < (s64)d->cpu_run_real_total) ? 0 : tmp;

	task_cputime_scaled(tsk, &utimescaled, &stimescaled);
	tmp = (s64)d->cpu_scaled_run_real_total;
	tmp += utimescaled + stimescaled;
	d->cpu_scaled_run_real_total =
		(tmp < (s64)d->cpu_scaled_run_real_total) ? 0 : tmp;

	/*
	 * No locking available for sched_info (and too expensive to add one)
	 * Mitigate by taking snapshot of values
	 */
	t1 = tsk->sched_info.pcount;
	t2 = tsk->sched_info.run_delay;
	t3 = tsk->se.sum_exec_runtime;

	d->cpu_count += t1;

	tmp = (s64)d->cpu_delay_total + t2;
	d->cpu_delay_total = (tmp < (s64)d->cpu_delay_total) ? 0 : tmp;

	tmp = (s64)d->cpu_run_virtual_total + t3;
	d->cpu_run_virtual_total =
		(tmp < (s64)d->cpu_run_virtual_total) ? 0 : tmp;

	if (!tsk->delays)
		return 0;

	/* zero XXX_total, non-zero XXX_count implies XXX stat overflowed */

	raw_spin_lock_irqsave(&tsk->delays->lock, flags);
	tmp = d->blkio_delay_total + tsk->delays->blkio_delay;
	d->blkio_delay_total = (tmp < d->blkio_delay_total) ? 0 : tmp;
	tmp = d->swapin_delay_total + tsk->delays->swapin_delay;
	d->swapin_delay_total = (tmp < d->swapin_delay_total) ? 0 : tmp;
	tmp = d->freepages_delay_total + tsk->delays->freepages_delay;
	d->freepages_delay_total = (tmp < d->freepages_delay_total) ? 0 : tmp;
	tmp = d->thrashing_delay_total + tsk->delays->thrashing_delay;
	d->thrashing_delay_total = (tmp < d->thrashing_delay_total) ? 0 : tmp;
	tmp = d->compact_delay_total + tsk->delays->compact_delay;
	d->compact_delay_total = (tmp < d->compact_delay_total) ? 0 : tmp;
	tmp = d->wpcopy_delay_total + tsk->delays->wpcopy_delay;
	d->wpcopy_delay_total = (tmp < d->wpcopy_delay_total) ? 0 : tmp;
	tmp = d->irq_delay_total + tsk->delays->irq_delay;
	d->irq_delay_total = (tmp < d->irq_delay_total) ? 0 : tmp;
	d->blkio_count += tsk->delays->blkio_count;
	d->swapin_count += tsk->delays->swapin_count;
	d->freepages_count += tsk->delays->freepages_count;
	d->thrashing_count += tsk->delays->thrashing_count;
	d->compact_count += tsk->delays->compact_count;
	d->wpcopy_count += tsk->delays->wpcopy_count;
	d->irq_count += tsk->delays->irq_count;
	raw_spin_unlock_irqrestore(&tsk->delays->lock, flags);

	return 0;
}
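/*
 * The "(tmp < total) ? 0 : tmp" pattern above is a wraparound guard: if
 * adding a task's delay would overflow the accumulator in struct taskstats,
 * the total is reset to 0 while the corresponding count keeps growing, which
 * is how "zero XXX_total, non-zero XXX_count" signals an overflowed statistic
 * to userspace readers of taskstats.
 */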

__u64 __delayacct_blkio_ticks(struct task_struct *tsk)
{
	__u64 ret;
	unsigned long flags;

	raw_spin_lock_irqsave(&tsk->delays->lock, flags);
	ret = nsec_to_clock_t(tsk->delays->blkio_delay);
	raw_spin_unlock_irqrestore(&tsk->delays->lock, flags);
	return ret;
}
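/*
 * Only the block I/O delay is reported here (the older version above also
 * folded in swapin_delay). nsec_to_clock_t() converts the nanosecond total
 * to clock ticks; this is the value exposed as the delayacct_blkio_ticks
 * field of /proc/<pid>/stat.
 */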

void __delayacct_freepages_start(void)
{
	current->delays->freepages_start = local_clock();
}

void __delayacct_freepages_end(void)
{
	delayacct_end(&current->delays->lock,
		      &current->delays->freepages_start,
		      &current->delays->freepages_delay,
		      &current->delays->freepages_count);
}

void __delayacct_thrashing_start(bool *in_thrashing)
{
	*in_thrashing = !!current->in_thrashing;
	if (*in_thrashing)
		return;

	current->in_thrashing = 1;
	current->delays->thrashing_start = local_clock();
}

void __delayacct_thrashing_end(bool *in_thrashing)
{
	if (*in_thrashing)
		return;

	current->in_thrashing = 0;
	delayacct_end(&current->delays->lock,
		      &current->delays->thrashing_start,
		      &current->delays->thrashing_delay,
		      &current->delays->thrashing_count);
}
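/*
 * The per-task in_thrashing flag makes nested thrashing sections safe: only
 * the outermost __delayacct_thrashing_start() records a timestamp and sets
 * the flag, and only the matching outermost end call accounts the delay, so
 * overlapping ranges are not double-counted.
 */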

void __delayacct_swapin_start(void)
{
	current->delays->swapin_start = local_clock();
}

void __delayacct_swapin_end(void)
{
	delayacct_end(&current->delays->lock,
		      &current->delays->swapin_start,
		      &current->delays->swapin_delay,
		      &current->delays->swapin_count);
}

void __delayacct_compact_start(void)
{
	current->delays->compact_start = local_clock();
}

void __delayacct_compact_end(void)
{
	delayacct_end(&current->delays->lock,
		      &current->delays->compact_start,
		      &current->delays->compact_delay,
		      &current->delays->compact_count);
}

void __delayacct_wpcopy_start(void)
{
	current->delays->wpcopy_start = local_clock();
}

void __delayacct_wpcopy_end(void)
{
	delayacct_end(&current->delays->lock,
		      &current->delays->wpcopy_start,
		      &current->delays->wpcopy_delay,
		      &current->delays->wpcopy_count);
}

void __delayacct_irq(struct task_struct *task, u32 delta)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&task->delays->lock, flags);
	task->delays->irq_delay += delta;
	task->delays->irq_count++;
	raw_spin_unlock_irqrestore(&task->delays->lock, flags);
}
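/*
 * Unlike the other statistics, the IRQ/softirq delay has no start timestamp
 * here: the caller (presumably the scheduler's IRQ time accounting) is
 * expected to pass an already-computed delta, which is simply added to the
 * task's totals under delays->lock.
 */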