v4.17
 
/*
 * latencytop.c: Latency display infrastructure
 *
 * (C) Copyright 2008 Intel Corporation
 * Author: Arjan van de Ven <arjan@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */

/*
 * CONFIG_LATENCYTOP enables a kernel latency tracking infrastructure that is
 * used by the "latencytop" userspace tool. The latency that is tracked is not
 * the 'traditional' interrupt latency (which is primarily caused by something
 * else consuming CPU), but instead, it is the latency an application encounters
 * because the kernel sleeps on its behalf for various reasons.
 *
 * This code tracks 2 levels of statistics:
 * 1) System level latency
 * 2) Per process latency
 *
 * The latency is stored in fixed-size data structures in an accumulated form;
 * if the "same" latency cause is hit twice, this will be tracked as one entry
 * in the data structure. The count, total accumulated latency, and maximum
 * latency are all tracked in this data structure. When the fixed-size structure
 * is full, no new causes are tracked until the buffer is flushed by writing to
 * the /proc file; the userspace tool does this on a regular basis.
 *
 * A latency cause is identified by a stringified backtrace at the point that
 * the scheduler gets invoked. The userland tool will use this string to
 * identify the cause of the latency in human readable form.
 *
 * The information is exported via /proc/latency_stats and /proc/<pid>/latency.
 * These files look like this:
 *
 * Latency Top version : v0.1
 * 70 59433 4897 i915_irq_wait drm_ioctl vfs_ioctl do_vfs_ioctl sys_ioctl
 * |    |    |    |
 * |    |    |    +----> the stringified backtrace
 * |    |    +---------> The maximum latency for this entry in microseconds
 * |    +--------------> The accumulated latency for this entry (microseconds)
 * +-------------------> The number of times this entry is hit
 *
 * (note: the average latency is the accumulated latency divided by the number
 * of times)
 */
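
The record format above maps directly onto a small parser. As a hedged illustration (a userspace sketch, not part of latencytop.c, with the field order assumed from the diagram above), the following reads /proc/latency_stats and prints the derived average latency for each entry:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/latency_stats", "r");
	char line[512];
	unsigned long count, total, max;

	if (!f) {
		perror("fopen");
		return 1;
	}
	/* Skip the "Latency Top version : v0.1" banner line. */
	if (!fgets(line, sizeof(line), f)) {
		fclose(f);
		return 1;
	}
	while (fgets(line, sizeof(line), f)) {
		/* Fields per the comment above: count, total usecs, max usecs. */
		if (sscanf(line, "%lu %lu %lu", &count, &total, &max) == 3 && count)
			printf("avg %lu usec (max %lu): %s",
			       total / count, max, line);
	}
	fclose(f);
	return 0;
}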

#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/spinlock.h>
#include <linux/proc_fs.h>
#include <linux/latencytop.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/stat.h>
#include <linux/list.h>
#include <linux/stacktrace.h>

static DEFINE_RAW_SPINLOCK(latency_lock);

#define MAXLR 128
static struct latency_record latency_record[MAXLR];

int latencytop_enabled;

void clear_all_latency_tracing(struct task_struct *p)
{
	unsigned long flags;

	if (!latencytop_enabled)
		return;

	raw_spin_lock_irqsave(&latency_lock, flags);
	memset(&p->latency_record, 0, sizeof(p->latency_record));
	p->latency_record_count = 0;
	raw_spin_unlock_irqrestore(&latency_lock, flags);
}

static void clear_global_latency_tracing(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&latency_lock, flags);
	memset(&latency_record, 0, sizeof(latency_record));
	raw_spin_unlock_irqrestore(&latency_lock, flags);
}

static void __sched
account_global_scheduler_latency(struct task_struct *tsk,
				 struct latency_record *lat)
{
	int firstnonnull = MAXLR + 1;
	int i;

	if (!latencytop_enabled)
		return;

	/* skip kernel threads for now */
	if (!tsk->mm)
		return;

	for (i = 0; i < MAXLR; i++) {
		int q, same = 1;

		/* Nothing stored: */
		if (!latency_record[i].backtrace[0]) {
			if (firstnonnull > i)
				firstnonnull = i;
			continue;
		}
		for (q = 0; q < LT_BACKTRACEDEPTH; q++) {
			unsigned long record = lat->backtrace[q];

			if (latency_record[i].backtrace[q] != record) {
				same = 0;
				break;
			}

			/* 0 and ULONG_MAX entries mean end of backtrace: */
			if (record == 0 || record == ULONG_MAX)
				break;
		}
		if (same) {
			latency_record[i].count++;
			latency_record[i].time += lat->time;
			if (lat->time > latency_record[i].max)
				latency_record[i].max = lat->time;
			return;
		}
	}

	i = firstnonnull;
	if (i >= MAXLR - 1)
		return;

	/* Allocated a new one: */
	memcpy(&latency_record[i], lat, sizeof(struct latency_record));
}

/*
 * Iterator to store a backtrace into a latency record entry
 */
static inline void store_stacktrace(struct task_struct *tsk,
					struct latency_record *lat)
{
	struct stack_trace trace;

	memset(&trace, 0, sizeof(trace));
	trace.max_entries = LT_BACKTRACEDEPTH;
	trace.entries = &lat->backtrace[0];
	save_stack_trace_tsk(tsk, &trace);
}

/**
 * __account_scheduler_latency - record an occurred latency
 * @tsk - the task struct of the task hitting the latency
 * @usecs - the duration of the latency in microseconds
 * @inter - 1 if the sleep was interruptible, 0 if uninterruptible
 *
 * This function is the main entry point for recording latency entries
 * as called by the scheduler.
 *
 * This function has a few special cases to deal with normal 'non-latency'
 * sleeps: specifically, interruptible sleep longer than 5 msec is skipped
 * since this usually is caused by waiting for events via select() and co.
 *
 * Negative latencies (caused by time going backwards) are also explicitly
 * skipped.
 */
void __sched
__account_scheduler_latency(struct task_struct *tsk, int usecs, int inter)
{
	unsigned long flags;
	int i, q;
	struct latency_record lat;

	/* Long interruptible waits are generally user requested... */
	if (inter && usecs > 5000)
		return;

	/* Negative sleeps are time going backwards */
	/* Zero-time sleeps are non-interesting */
	if (usecs <= 0)
		return;

	memset(&lat, 0, sizeof(lat));
	lat.count = 1;
	lat.time = usecs;
	lat.max = usecs;
	store_stacktrace(tsk, &lat);

	raw_spin_lock_irqsave(&latency_lock, flags);

	account_global_scheduler_latency(tsk, &lat);

	for (i = 0; i < tsk->latency_record_count; i++) {
		struct latency_record *mylat;
		int same = 1;

		mylat = &tsk->latency_record[i];
		for (q = 0; q < LT_BACKTRACEDEPTH; q++) {
			unsigned long record = lat.backtrace[q];

			if (mylat->backtrace[q] != record) {
				same = 0;
				break;
			}

			/* 0 and ULONG_MAX entries mean end of backtrace: */
			if (record == 0 || record == ULONG_MAX)
				break;
		}
		if (same) {
			mylat->count++;
			mylat->time += lat.time;
			if (lat.time > mylat->max)
				mylat->max = lat.time;
			goto out_unlock;
		}
	}

	/*
	 * short term hack; if we're > 32 we stop; future we recycle:
	 */
	if (tsk->latency_record_count >= LT_SAVECOUNT)
		goto out_unlock;

	/* Allocated a new one: */
	i = tsk->latency_record_count++;
	memcpy(&tsk->latency_record[i], &lat, sizeof(struct latency_record));

out_unlock:
	raw_spin_unlock_irqrestore(&latency_lock, flags);
}

static int lstats_show(struct seq_file *m, void *v)
{
	int i;

	seq_puts(m, "Latency Top version : v0.1\n");

	for (i = 0; i < MAXLR; i++) {
		struct latency_record *lr = &latency_record[i];

		if (lr->backtrace[0]) {
			int q;
			seq_printf(m, "%i %lu %lu",
				   lr->count, lr->time, lr->max);
			for (q = 0; q < LT_BACKTRACEDEPTH; q++) {
				unsigned long bt = lr->backtrace[q];

				if (!bt)
					break;
				if (bt == ULONG_MAX)
					break;
				seq_printf(m, " %ps", (void *)bt);
			}
			seq_puts(m, "\n");
		}
	}
	return 0;
}

static ssize_t
lstats_write(struct file *file, const char __user *buf, size_t count,
	     loff_t *offs)
{
	clear_global_latency_tracing();

	return count;
}

static int lstats_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, lstats_show, NULL);
}

static const struct file_operations lstats_fops = {
	.open		= lstats_open,
	.read		= seq_read,
	.write		= lstats_write,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init init_lstats_procfs(void)
{
	proc_create("latency_stats", 0644, NULL, &lstats_fops);
	return 0;
}

int sysctl_latencytop(struct ctl_table *table, int write,
			void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int err;

	err = proc_dointvec(table, write, buffer, lenp, ppos);
	if (latencytop_enabled)
		force_schedstat_enabled();

	return err;
}
device_initcall(init_lstats_procfs);
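
Operationally, nothing is collected until the "kernel.latencytop" sysctl is set (sysctl_latencytop() above also forces schedstats on when it is enabled), and the global table is flushed by any write to /proc/latency_stats, since lstats_write() ignores the payload and calls clear_global_latency_tracing(). A minimal userspace sketch of that sequence, assuming root privileges:

#include <stdio.h>

int main(void)
{
	FILE *f;

	/* Enable collection; equivalent to `sysctl kernel.latencytop=1`. */
	f = fopen("/proc/sys/kernel/latencytop", "w");
	if (!f) {
		perror("latencytop sysctl");
		return 1;
	}
	fputs("1\n", f);
	fclose(f);

	/* Flush the accumulated global records; the written bytes are ignored. */
	f = fopen("/proc/latency_stats", "w");
	if (!f) {
		perror("latency_stats");
		return 1;
	}
	fputs("flush\n", f);
	fclose(f);
	return 0;
}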
v6.13.7
// SPDX-License-Identifier: GPL-2.0-only
/*
 * latencytop.c: Latency display infrastructure
 *
 * (C) Copyright 2008 Intel Corporation
 * Author: Arjan van de Ven <arjan@linux.intel.com>
 */

/*
 * CONFIG_LATENCYTOP enables a kernel latency tracking infrastructure that is
 * used by the "latencytop" userspace tool. The latency that is tracked is not
 * the 'traditional' interrupt latency (which is primarily caused by something
 * else consuming CPU), but instead, it is the latency an application encounters
 * because the kernel sleeps on its behalf for various reasons.
 *
 * This code tracks 2 levels of statistics:
 * 1) System level latency
 * 2) Per process latency
 *
 * The latency is stored in fixed-size data structures in an accumulated form;
 * if the "same" latency cause is hit twice, this will be tracked as one entry
 * in the data structure. The count, total accumulated latency, and maximum
 * latency are all tracked in this data structure. When the fixed-size structure
 * is full, no new causes are tracked until the buffer is flushed by writing to
 * the /proc file; the userspace tool does this on a regular basis.
 *
 * A latency cause is identified by a stringified backtrace at the point that
 * the scheduler gets invoked. The userland tool will use this string to
 * identify the cause of the latency in human readable form.
 *
 * The information is exported via /proc/latency_stats and /proc/<pid>/latency.
 * These files look like this:
 *
 * Latency Top version : v0.1
 * 70 59433 4897 i915_irq_wait drm_ioctl vfs_ioctl do_vfs_ioctl sys_ioctl
 * |    |    |    |
 * |    |    |    +----> the stringified backtrace
 * |    |    +---------> The maximum latency for this entry in microseconds
 * |    +--------------> The accumulated latency for this entry (microseconds)
 * +-------------------> The number of times this entry is hit
 *
 * (note: the average latency is the accumulated latency divided by the number
 * of times)
 */

#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/spinlock.h>
#include <linux/proc_fs.h>
#include <linux/latencytop.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/stat.h>
#include <linux/list.h>
#include <linux/stacktrace.h>
#include <linux/sysctl.h>

static DEFINE_RAW_SPINLOCK(latency_lock);

#define MAXLR 128
static struct latency_record latency_record[MAXLR];

int latencytop_enabled;

#ifdef CONFIG_SYSCTL
static int sysctl_latencytop(const struct ctl_table *table, int write, void *buffer,
		size_t *lenp, loff_t *ppos)
{
	int err;

	err = proc_dointvec(table, write, buffer, lenp, ppos);
	if (latencytop_enabled)
		force_schedstat_enabled();

	return err;
}

static struct ctl_table latencytop_sysctl[] = {
	{
		.procname   = "latencytop",
		.data       = &latencytop_enabled,
		.maxlen     = sizeof(int),
		.mode       = 0644,
		.proc_handler   = sysctl_latencytop,
	},
};
#endif

void clear_tsk_latency_tracing(struct task_struct *p)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&latency_lock, flags);
	memset(&p->latency_record, 0, sizeof(p->latency_record));
	p->latency_record_count = 0;
	raw_spin_unlock_irqrestore(&latency_lock, flags);
}

static void clear_global_latency_tracing(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&latency_lock, flags);
	memset(&latency_record, 0, sizeof(latency_record));
	raw_spin_unlock_irqrestore(&latency_lock, flags);
}

static void __sched
account_global_scheduler_latency(struct task_struct *tsk,
				 struct latency_record *lat)
{
	int firstnonnull = MAXLR;
	int i;

	/* skip kernel threads for now */
	if (!tsk->mm)
		return;

	for (i = 0; i < MAXLR; i++) {
		int q, same = 1;

		/* Nothing stored: */
		if (!latency_record[i].backtrace[0]) {
			if (firstnonnull > i)
				firstnonnull = i;
			continue;
		}
		for (q = 0; q < LT_BACKTRACEDEPTH; q++) {
			unsigned long record = lat->backtrace[q];

			if (latency_record[i].backtrace[q] != record) {
				same = 0;
				break;
			}

			/* 0 entry marks end of backtrace: */
			if (!record)
				break;
		}
		if (same) {
			latency_record[i].count++;
			latency_record[i].time += lat->time;
			if (lat->time > latency_record[i].max)
				latency_record[i].max = lat->time;
			return;
		}
	}

	i = firstnonnull;
	if (i >= MAXLR)
		return;

	/* Allocated a new one: */
	memcpy(&latency_record[i], lat, sizeof(struct latency_record));
}

/**
 * __account_scheduler_latency - record an occurred latency
 * @tsk - the task struct of the task hitting the latency
 * @usecs - the duration of the latency in microseconds
 * @inter - 1 if the sleep was interruptible, 0 if uninterruptible
 *
 * This function is the main entry point for recording latency entries
 * as called by the scheduler.
 *
 * This function has a few special cases to deal with normal 'non-latency'
 * sleeps: specifically, interruptible sleep longer than 5 msec is skipped
 * since this usually is caused by waiting for events via select() and co.
 *
 * Negative latencies (caused by time going backwards) are also explicitly
 * skipped.
 */
void __sched
__account_scheduler_latency(struct task_struct *tsk, int usecs, int inter)
{
	unsigned long flags;
	int i, q;
	struct latency_record lat;

	/* Long interruptible waits are generally user requested... */
	if (inter && usecs > 5000)
		return;

	/* Negative sleeps are time going backwards */
	/* Zero-time sleeps are non-interesting */
	if (usecs <= 0)
		return;

	memset(&lat, 0, sizeof(lat));
	lat.count = 1;
	lat.time = usecs;
	lat.max = usecs;

	stack_trace_save_tsk(tsk, lat.backtrace, LT_BACKTRACEDEPTH, 0);

	raw_spin_lock_irqsave(&latency_lock, flags);

	account_global_scheduler_latency(tsk, &lat);

	for (i = 0; i < tsk->latency_record_count; i++) {
		struct latency_record *mylat;
		int same = 1;

		mylat = &tsk->latency_record[i];
		for (q = 0; q < LT_BACKTRACEDEPTH; q++) {
			unsigned long record = lat.backtrace[q];

			if (mylat->backtrace[q] != record) {
				same = 0;
				break;
			}

			/* 0 entry is end of backtrace */
			if (!record)
				break;
		}
		if (same) {
			mylat->count++;
			mylat->time += lat.time;
			if (lat.time > mylat->max)
				mylat->max = lat.time;
			goto out_unlock;
		}
	}

	/*
	 * short term hack; if we're > 32 we stop; future we recycle:
	 */
	if (tsk->latency_record_count >= LT_SAVECOUNT)
		goto out_unlock;

	/* Allocated a new one: */
	i = tsk->latency_record_count++;
	memcpy(&tsk->latency_record[i], &lat, sizeof(struct latency_record));

out_unlock:
	raw_spin_unlock_irqrestore(&latency_lock, flags);
}

static int lstats_show(struct seq_file *m, void *v)
{
	int i;

	seq_puts(m, "Latency Top version : v0.1\n");

	for (i = 0; i < MAXLR; i++) {
		struct latency_record *lr = &latency_record[i];

		if (lr->backtrace[0]) {
			int q;
			seq_printf(m, "%i %lu %lu",
				   lr->count, lr->time, lr->max);
			for (q = 0; q < LT_BACKTRACEDEPTH; q++) {
				unsigned long bt = lr->backtrace[q];

				if (!bt)
					break;

				seq_printf(m, " %ps", (void *)bt);
			}
			seq_puts(m, "\n");
		}
	}
	return 0;
}

static ssize_t
lstats_write(struct file *file, const char __user *buf, size_t count,
	     loff_t *offs)
{
	clear_global_latency_tracing();

	return count;
}

static int lstats_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, lstats_show, NULL);
}

static const struct proc_ops lstats_proc_ops = {
	.proc_open	= lstats_open,
	.proc_read	= seq_read,
	.proc_write	= lstats_write,
	.proc_lseek	= seq_lseek,
	.proc_release	= single_release,
};

static int __init init_lstats_procfs(void)
{
	proc_create("latency_stats", 0644, NULL, &lstats_proc_ops);
#ifdef CONFIG_SYSCTL
	register_sysctl_init("kernel", latencytop_sysctl);
#endif
	return 0;
}
device_initcall(init_lstats_procfs);
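
The per-task records described in the header comment (tsk->latency_record, capped at LT_SAVECOUNT entries) surface through /proc/<pid>/latency. As a closing sketch that assumes only the path named in that comment, this dumps the per-process view for a PID passed on the command line:

#include <stdio.h>

int main(int argc, char **argv)
{
	char path[64], line[512];
	FILE *f;

	if (argc < 2) {
		fprintf(stderr, "usage: %s <pid>\n", argv[0]);
		return 1;
	}
	/* Same record layout as /proc/latency_stats, scoped to one task. */
	snprintf(path, sizeof(path), "/proc/%s/latency", argv[1]);
	f = fopen(path, "r");
	if (!f) {
		perror(path);
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}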