// SPDX-License-Identifier: GPL-2.0-only
/*
 * latencytop.c: Latency display infrastructure
 *
 * (C) Copyright 2008 Intel Corporation
 * Author: Arjan van de Ven <arjan@linux.intel.com>
 */

/*
 * CONFIG_LATENCYTOP enables a kernel latency tracking infrastructure that is
 * used by the "latencytop" userspace tool. The latency that is tracked is not
 * the 'traditional' interrupt latency (which is primarily caused by something
 * else consuming CPU), but instead it is the latency an application encounters
 * because the kernel sleeps on its behalf for various reasons.
 *
 * This code tracks two levels of statistics:
 * 1) System level latency
 * 2) Per process latency
 *
 * The latency is stored in fixed-size data structures in an accumulated form;
 * if the "same" latency cause is hit twice, it is tracked as a single entry
 * in the data structure, for which the count, the total accumulated latency
 * and the maximum latency are all kept. When the fixed-size structure is
 * full, no new causes are tracked until the buffer is flushed by writing to
 * the /proc file; the userspace tool does this on a regular basis.
 *
 * A latency cause is identified by a stringified backtrace at the point that
 * the scheduler gets invoked. The userland tool uses this string to identify
 * the cause of the latency in human readable form.
 *
 * The information is exported via /proc/latency_stats and /proc/<pid>/latency.
 * These files look like this:
 *
 * Latency Top version : v0.1
 * 70 59433 4897 i915_irq_wait drm_ioctl vfs_ioctl do_vfs_ioctl sys_ioctl
 * |  |     |    |
 * |  |     |    +----> the stringified backtrace
 * |  |     +---------> the maximum latency for this entry in microseconds
 * |  +---------------> the accumulated latency for this entry (microseconds)
 * +------------------> the number of times this entry was hit
 *
 * (Note: the average latency is the accumulated latency divided by the
 * number of times the entry was hit.)
 */
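
/*
 * A minimal usage sketch (the paths are the ones created below; the
 * userspace tool automates these steps, so exact invocations may vary):
 *
 *	echo 1 > /proc/sys/kernel/latencytop	# enable collection
 *	cat /proc/latency_stats			# dump system-wide records
 *	echo clear > /proc/latency_stats	# any write flushes the table
 */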

#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/spinlock.h>
#include <linux/proc_fs.h>
#include <linux/latencytop.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/stat.h>
#include <linux/list.h>
#include <linux/stacktrace.h>
#include <linux/sysctl.h>

static DEFINE_RAW_SPINLOCK(latency_lock);

#define MAXLR 128
static struct latency_record latency_record[MAXLR];

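/* Toggled through the "kernel.latencytop" sysctl registered below. */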
int latencytop_enabled;

#ifdef CONFIG_SYSCTL
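/*
 * Handler for the "kernel.latencytop" sysctl: a plain integer toggle,
 * except that enabling it also forces schedstats on, since this latency
 * accounting depends on schedstats being active.
 */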
static int sysctl_latencytop(const struct ctl_table *table, int write,
			     void *buffer, size_t *lenp, loff_t *ppos)
{
	int err;

	err = proc_dointvec(table, write, buffer, lenp, ppos);
	if (latencytop_enabled)
		force_schedstat_enabled();

	return err;
}

static struct ctl_table latencytop_sysctl[] = {
	{
		.procname	= "latencytop",
		.data		= &latencytop_enabled,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= sysctl_latencytop,
	},
};
#endif

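/*
 * Reset one task's private latency table; the global table is cleared
 * separately via clear_global_latency_tracing().
 */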
void clear_tsk_latency_tracing(struct task_struct *p)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&latency_lock, flags);
	memset(&p->latency_record, 0, sizeof(p->latency_record));
	p->latency_record_count = 0;
	raw_spin_unlock_irqrestore(&latency_lock, flags);
}

static void clear_global_latency_tracing(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&latency_lock, flags);
	memset(&latency_record, 0, sizeof(latency_record));
	raw_spin_unlock_irqrestore(&latency_lock, flags);
}

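/*
 * Fold one latency record into the global fixed-size table: if an entry
 * with an identical backtrace already exists, accumulate count/time/max
 * into it; otherwise copy the record into the first free slot. Records
 * are silently dropped once the table is full.
 */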
static void __sched
account_global_scheduler_latency(struct task_struct *tsk,
				 struct latency_record *lat)
{
	int firstnonnull = MAXLR;
	int i;

	/* skip kernel threads for now */
	if (!tsk->mm)
		return;

	for (i = 0; i < MAXLR; i++) {
		int q, same = 1;

		/* Nothing stored: */
		if (!latency_record[i].backtrace[0]) {
			if (firstnonnull > i)
				firstnonnull = i;
			continue;
		}
		for (q = 0; q < LT_BACKTRACEDEPTH; q++) {
			unsigned long record = lat->backtrace[q];

			if (latency_record[i].backtrace[q] != record) {
				same = 0;
				break;
			}

			/* 0 entry marks end of backtrace: */
			if (!record)
				break;
		}
		if (same) {
			latency_record[i].count++;
			latency_record[i].time += lat->time;
			if (lat->time > latency_record[i].max)
				latency_record[i].max = lat->time;
			return;
		}
	}

	i = firstnonnull;
	if (i >= MAXLR)
		return;

	/* Allocated a new one: */
	memcpy(&latency_record[i], lat, sizeof(struct latency_record));
}

/**
 * __account_scheduler_latency - record an observed latency
 * @tsk: the task struct of the task hitting the latency
 * @usecs: the duration of the latency in microseconds
 * @inter: 1 if the sleep was interruptible, 0 if uninterruptible
 *
 * This function is the main entry point for recording latency entries
 * as called by the scheduler.
 *
 * This function has a few special cases to deal with normal 'non-latency'
 * sleeps: specifically, interruptible sleep longer than 5 msec is skipped
 * since this usually is caused by waiting for events via select() and co.
 *
 * Negative latencies (caused by time going backwards) are also explicitly
 * skipped.
 */
void __sched
__account_scheduler_latency(struct task_struct *tsk, int usecs, int inter)
{
	unsigned long flags;
	int i, q;
	struct latency_record lat;

	/* Long interruptible waits are generally user requested... */
	if (inter && usecs > 5000)
		return;

	/* Negative sleeps are time going backwards */
	/* Zero-time sleeps are non-interesting */
	if (usecs <= 0)
		return;

	memset(&lat, 0, sizeof(lat));
	lat.count = 1;
	lat.time = usecs;
	lat.max = usecs;

	stack_trace_save_tsk(tsk, lat.backtrace, LT_BACKTRACEDEPTH, 0);

	raw_spin_lock_irqsave(&latency_lock, flags);

	account_global_scheduler_latency(tsk, &lat);

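	/*
	 * Repeat the merge against the task's private table: fold into a
	 * matching entry if one exists, otherwise append a new one below.
	 */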
	for (i = 0; i < tsk->latency_record_count; i++) {
		struct latency_record *mylat;
		int same = 1;

		mylat = &tsk->latency_record[i];
		for (q = 0; q < LT_BACKTRACEDEPTH; q++) {
			unsigned long record = lat.backtrace[q];

			if (mylat->backtrace[q] != record) {
				same = 0;
				break;
			}

			/* 0 entry is end of backtrace */
			if (!record)
				break;
		}
		if (same) {
			mylat->count++;
			mylat->time += lat.time;
			if (lat.time > mylat->max)
				mylat->max = lat.time;
			goto out_unlock;
		}
	}

	/*
	 * Short term hack: once the per-task table is full (LT_SAVECOUNT
	 * entries) we stop recording; in the future we could recycle the
	 * entries instead:
	 */
	if (tsk->latency_record_count >= LT_SAVECOUNT)
		goto out_unlock;

	/* Allocated a new one: */
	i = tsk->latency_record_count++;
	memcpy(&tsk->latency_record[i], &lat, sizeof(struct latency_record));

out_unlock:
	raw_spin_unlock_irqrestore(&latency_lock, flags);
}

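/*
 * seq_file show handler for /proc/latency_stats; the output format is
 * the one documented in the header comment of this file.
 */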
static int lstats_show(struct seq_file *m, void *v)
{
	int i;

	seq_puts(m, "Latency Top version : v0.1\n");

	for (i = 0; i < MAXLR; i++) {
		struct latency_record *lr = &latency_record[i];

		if (lr->backtrace[0]) {
			int q;

			seq_printf(m, "%i %lu %lu",
				   lr->count, lr->time, lr->max);
			for (q = 0; q < LT_BACKTRACEDEPTH; q++) {
				unsigned long bt = lr->backtrace[q];

				if (!bt)
					break;

				seq_printf(m, " %ps", (void *)bt);
			}
			seq_puts(m, "\n");
		}
	}
	return 0;
}

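/*
 * Any write to /proc/latency_stats, regardless of its contents, flushes
 * the global table so userspace can start a fresh measurement interval.
 */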
static ssize_t
lstats_write(struct file *file, const char __user *buf, size_t count,
	     loff_t *offs)
{
	clear_global_latency_tracing();

	return count;
}

static int lstats_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, lstats_show, NULL);
}

static const struct proc_ops lstats_proc_ops = {
	.proc_open	= lstats_open,
	.proc_read	= seq_read,
	.proc_write	= lstats_write,
	.proc_lseek	= seq_lseek,
	.proc_release	= single_release,
};

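/*
 * Create /proc/latency_stats and, when sysctl support is built in,
 * register the "kernel.latencytop" toggle, at device initcall time.
 */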
static int __init init_lstats_procfs(void)
{
	proc_create("latency_stats", 0644, NULL, &lstats_proc_ops);
#ifdef CONFIG_SYSCTL
	register_sysctl_init("kernel", latencytop_sysctl);
#endif
	return 0;
}
device_initcall(init_lstats_procfs);