// SPDX-License-Identifier: GPL-2.0-only
/*
 * latencytop.c: Latency display infrastructure
 *
 * (C) Copyright 2008 Intel Corporation
 * Author: Arjan van de Ven <arjan@linux.intel.com>
 */

/*
 * CONFIG_LATENCYTOP enables a kernel latency tracking infrastructure that is
 * used by the "latencytop" userspace tool. The latency that is tracked is not
 * the 'traditional' interrupt latency (which is primarily caused by something
 * else consuming CPU), but instead, it is the latency an application
 * encounters because the kernel sleeps on its behalf for various reasons.
 *
 * This code tracks two levels of statistics:
 * 1) System level latency
 * 2) Per process latency
 *
 * The latency is stored in fixed-size data structures in an accumulated form;
 * if the "same" latency cause is hit twice, this will be tracked as one entry
 * in the data structure. The count, total accumulated latency and maximum
 * latency are all tracked in this data structure. When the fixed-size
 * structure is full, no new causes are tracked until the buffer is flushed by
 * writing to the /proc file; the userspace tool does this on a regular basis.
 *
 * A latency cause is identified by a stringified backtrace at the point that
 * the scheduler gets invoked. The userland tool will use this string to
 * identify the cause of the latency in human readable form.
 *
 * The information is exported via /proc/latency_stats and /proc/<pid>/latency.
 * These files look like this:
 *
 * Latency Top version : v0.1
 * 70 59433 4897 i915_irq_wait drm_ioctl vfs_ioctl do_vfs_ioctl sys_ioctl
 * |  |     |    |
 * |  |     |    +----> The stringified backtrace
 * |  |     +---------> The maximum latency for this entry in microseconds
 * |  +---------------> The accumulated latency for this entry (microseconds)
 * +------------------> The number of times this entry is hit
 *
 * (note: the average latency is the accumulated latency divided by the number
 * of times)
 */
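
/*
 * Example usage from userspace -- a quick sketch based only on the
 * interfaces defined in this file (the "latencytop" sysctl registered
 * below and the /proc/latency_stats file created in init_lstats_procfs):
 *
 *	echo 1 > /proc/sys/kernel/latencytop	# enable tracking
 *	cat /proc/latency_stats			# read accumulated stats
 *	echo > /proc/latency_stats		# flush the global buffer
 */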

#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/spinlock.h>
#include <linux/proc_fs.h>
#include <linux/latencytop.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/stat.h>
#include <linux/list.h>
#include <linux/stacktrace.h>
#include <linux/sysctl.h>

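/* Protects both the global latency_record[] table and the per-task records. */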
static DEFINE_RAW_SPINLOCK(latency_lock);

#define MAXLR 128
static struct latency_record latency_record[MAXLR];

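/* Toggled via the "latencytop" sysctl; enabling it also forces schedstats on. */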
int latencytop_enabled;

#ifdef CONFIG_SYSCTL
static int sysctl_latencytop(struct ctl_table *table, int write, void *buffer,
			     size_t *lenp, loff_t *ppos)
{
	int err;

	err = proc_dointvec(table, write, buffer, lenp, ppos);
	if (latencytop_enabled)
		force_schedstat_enabled();

	return err;
}

static struct ctl_table latencytop_sysctl[] = {
	{
		.procname	= "latencytop",
		.data		= &latencytop_enabled,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= sysctl_latencytop,
	},
	{}
};
#endif

void clear_tsk_latency_tracing(struct task_struct *p)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&latency_lock, flags);
	memset(&p->latency_record, 0, sizeof(p->latency_record));
	p->latency_record_count = 0;
	raw_spin_unlock_irqrestore(&latency_lock, flags);
}

static void clear_global_latency_tracing(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&latency_lock, flags);
	memset(&latency_record, 0, sizeof(latency_record));
	raw_spin_unlock_irqrestore(&latency_lock, flags);
}

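/*
 * Fold one latency record into the global table: if an entry with the same
 * backtrace already exists, accumulate into it; otherwise use the first free
 * slot. When the table is full, new causes are silently dropped until the
 * table is flushed via a write to /proc/latency_stats.
 */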
static void __sched
account_global_scheduler_latency(struct task_struct *tsk,
				 struct latency_record *lat)
{
	int firstnonnull = MAXLR;
	int i;

	/* skip kernel threads for now */
	if (!tsk->mm)
		return;

	for (i = 0; i < MAXLR; i++) {
		int q, same = 1;

		/* Nothing stored: */
		if (!latency_record[i].backtrace[0]) {
			if (firstnonnull > i)
				firstnonnull = i;
			continue;
		}
		for (q = 0; q < LT_BACKTRACEDEPTH; q++) {
			unsigned long record = lat->backtrace[q];

			if (latency_record[i].backtrace[q] != record) {
				same = 0;
				break;
			}

			/* 0 entry marks end of backtrace: */
			if (!record)
				break;
		}
		if (same) {
			latency_record[i].count++;
			latency_record[i].time += lat->time;
			if (lat->time > latency_record[i].max)
				latency_record[i].max = lat->time;
			return;
		}
	}

	i = firstnonnull;
	if (i >= MAXLR)
		return;

	/* Allocated a new one: */
	memcpy(&latency_record[i], lat, sizeof(struct latency_record));
}

/**
 * __account_scheduler_latency - record an occurred latency
 * @tsk: the task struct of the task hitting the latency
 * @usecs: the duration of the latency in microseconds
 * @inter: 1 if the sleep was interruptible, 0 if uninterruptible
 *
 * This function is the main entry point for recording latency entries
 * as called by the scheduler.
 *
 * This function has a few special cases to deal with normal 'non-latency'
 * sleeps: specifically, interruptible sleep longer than 5 msec is skipped
 * since this usually is caused by waiting for events via select() and co.
 *
 * Negative latencies (caused by time going backwards) are also explicitly
 * skipped.
 */
void __sched
__account_scheduler_latency(struct task_struct *tsk, int usecs, int inter)
{
	unsigned long flags;
	int i, q;
	struct latency_record lat;

	/* Long interruptible waits are generally user requested... */
	if (inter && usecs > 5000)
		return;

	/* Negative sleeps are time going backwards */
	/* Zero-time sleeps are non-interesting */
	if (usecs <= 0)
		return;

	memset(&lat, 0, sizeof(lat));
	lat.count = 1;
	lat.time = usecs;
	lat.max = usecs;

	stack_trace_save_tsk(tsk, lat.backtrace, LT_BACKTRACEDEPTH, 0);

	raw_spin_lock_irqsave(&latency_lock, flags);

	account_global_scheduler_latency(tsk, &lat);

	for (i = 0; i < tsk->latency_record_count; i++) {
		struct latency_record *mylat;
		int same = 1;

		mylat = &tsk->latency_record[i];
		for (q = 0; q < LT_BACKTRACEDEPTH; q++) {
			unsigned long record = lat.backtrace[q];

			if (mylat->backtrace[q] != record) {
				same = 0;
				break;
			}

			/* 0 entry is end of backtrace */
			if (!record)
				break;
		}
		if (same) {
			mylat->count++;
			mylat->time += lat.time;
			if (lat.time > mylat->max)
				mylat->max = lat.time;
			goto out_unlock;
		}
	}

	/*
	 * Short term hack: once we have LT_SAVECOUNT entries we stop adding
	 * new ones; in the future we should recycle old entries instead.
	 */
	if (tsk->latency_record_count >= LT_SAVECOUNT)
		goto out_unlock;

	/* Allocated a new one: */
	i = tsk->latency_record_count++;
	memcpy(&tsk->latency_record[i], &lat, sizeof(struct latency_record));

out_unlock:
	raw_spin_unlock_irqrestore(&latency_lock, flags);
}

static int lstats_show(struct seq_file *m, void *v)
{
	int i;

	seq_puts(m, "Latency Top version : v0.1\n");

	for (i = 0; i < MAXLR; i++) {
		struct latency_record *lr = &latency_record[i];

		if (lr->backtrace[0]) {
			int q;

			seq_printf(m, "%i %lu %lu",
				   lr->count, lr->time, lr->max);
			for (q = 0; q < LT_BACKTRACEDEPTH; q++) {
				unsigned long bt = lr->backtrace[q];

				if (!bt)
					break;

				seq_printf(m, " %ps", (void *)bt);
			}
			seq_puts(m, "\n");
		}
	}
	return 0;
}

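/*
 * Any write to /proc/latency_stats, regardless of content, flushes the
 * accumulated global statistics; the userspace tool does this periodically.
 */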
static ssize_t
lstats_write(struct file *file, const char __user *buf, size_t count,
	     loff_t *offs)
{
	clear_global_latency_tracing();

	return count;
}

static int lstats_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, lstats_show, NULL);
}

static const struct proc_ops lstats_proc_ops = {
	.proc_open	= lstats_open,
	.proc_read	= seq_read,
	.proc_write	= lstats_write,
	.proc_lseek	= seq_lseek,
	.proc_release	= single_release,
};

static int __init init_lstats_procfs(void)
{
	proc_create("latency_stats", 0644, NULL, &lstats_proc_ops);
#ifdef CONFIG_SYSCTL
	register_sysctl_init("kernel", latencytop_sysctl);
#endif
	return 0;
}
device_initcall(init_lstats_procfs);