v6.2
// SPDX-License-Identifier: GPL-2.0-only
/*
 * latencytop.c: Latency display infrastructure
 *
 * (C) Copyright 2008 Intel Corporation
 * Author: Arjan van de Ven <arjan@linux.intel.com>
 */

/*
 * CONFIG_LATENCYTOP enables a kernel latency tracking infrastructure that is
 * used by the "latencytop" userspace tool. The latency that is tracked is not
 * the 'traditional' interrupt latency (which is primarily caused by something
 * else consuming CPU), but instead, it is the latency an application encounters
 * because the kernel sleeps on its behalf for various reasons.
 *
 * This code tracks 2 levels of statistics:
 * 1) System level latency
 * 2) Per process latency
 *
 * The latency is stored in fixed sized data structures in an accumulated form;
 * if the "same" latency cause is hit twice, this will be tracked as one entry
 * in the data structure. Both the count, total accumulated latency and maximum
 * latency are tracked in this data structure. When the fixed size structure is
 * full, no new causes are tracked until the buffer is flushed by writing to
 * the /proc file; the userspace tool does this on a regular basis.
 *
 * A latency cause is identified by a stringified backtrace at the point that
 * the scheduler gets invoked. The userland tool will use this string to
 * identify the cause of the latency in human readable form.
 *
 * The information is exported via /proc/latency_stats and /proc/<pid>/latency.
 * These files look like this:
 *
 * Latency Top version : v0.1
 * 70 59433 4897 i915_irq_wait drm_ioctl vfs_ioctl do_vfs_ioctl sys_ioctl
 * |    |    |    |
 * |    |    |    +----> the stringified backtrace
 * |    |    +---------> The maximum latency for this entry in microseconds
 * |    +--------------> The accumulated latency for this entry (microseconds)
 * +-------------------> The number of times this entry is hit
 *
 * (note: the average latency is the accumulated latency divided by the number
 * of times)
 */

#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/spinlock.h>
#include <linux/proc_fs.h>
#include <linux/latencytop.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/stat.h>
#include <linux/list.h>
#include <linux/stacktrace.h>
#include <linux/sysctl.h>

static DEFINE_RAW_SPINLOCK(latency_lock);

#define MAXLR 128
static struct latency_record latency_record[MAXLR];

int latencytop_enabled;

#ifdef CONFIG_SYSCTL
static int sysctl_latencytop(struct ctl_table *table, int write, void *buffer,
		size_t *lenp, loff_t *ppos)
{
	int err;

	err = proc_dointvec(table, write, buffer, lenp, ppos);
	if (latencytop_enabled)
		force_schedstat_enabled();

	return err;
}

static struct ctl_table latencytop_sysctl[] = {
	{
		.procname   = "latencytop",
		.data       = &latencytop_enabled,
		.maxlen     = sizeof(int),
		.mode       = 0644,
		.proc_handler   = sysctl_latencytop,
	},
	{}
};
#endif

void clear_tsk_latency_tracing(struct task_struct *p)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&latency_lock, flags);
	memset(&p->latency_record, 0, sizeof(p->latency_record));
	p->latency_record_count = 0;
	raw_spin_unlock_irqrestore(&latency_lock, flags);
}

static void clear_global_latency_tracing(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&latency_lock, flags);
	memset(&latency_record, 0, sizeof(latency_record));
	raw_spin_unlock_irqrestore(&latency_lock, flags);
}

static void __sched
account_global_scheduler_latency(struct task_struct *tsk,
				 struct latency_record *lat)
{
	int firstnonnull = MAXLR;
	int i;

	/* skip kernel threads for now */
	if (!tsk->mm)
		return;

	for (i = 0; i < MAXLR; i++) {
		int q, same = 1;

		/* Nothing stored: */
		if (!latency_record[i].backtrace[0]) {
			if (firstnonnull > i)
				firstnonnull = i;
			continue;
		}
		for (q = 0; q < LT_BACKTRACEDEPTH; q++) {
			unsigned long record = lat->backtrace[q];

			if (latency_record[i].backtrace[q] != record) {
				same = 0;
				break;
			}

			/* 0 entry marks end of backtrace: */
			if (!record)
				break;
		}
		if (same) {
			latency_record[i].count++;
			latency_record[i].time += lat->time;
			if (lat->time > latency_record[i].max)
				latency_record[i].max = lat->time;
			return;
		}
	}

	i = firstnonnull;
	if (i >= MAXLR)
		return;

	/* Allocated a new one: */
	memcpy(&latency_record[i], lat, sizeof(struct latency_record));
}

/**
 * __account_scheduler_latency - record an occurred latency
 * @tsk - the task struct of the task hitting the latency
 * @usecs - the duration of the latency in microseconds
 * @inter - 1 if the sleep was interruptible, 0 if uninterruptible
 *
 * This function is the main entry point for recording latency entries
 * as called by the scheduler.
 *
 * This function has a few special cases to deal with normal 'non-latency'
 * sleeps: specifically, interruptible sleep longer than 5 msec is skipped
 * since this usually is caused by waiting for events via select() and co.
 *
 * Negative latencies (caused by time going backwards) are also explicitly
 * skipped.
 */
void __sched
__account_scheduler_latency(struct task_struct *tsk, int usecs, int inter)
{
	unsigned long flags;
	int i, q;
	struct latency_record lat;

	/* Long interruptible waits are generally user requested... */
	if (inter && usecs > 5000)
		return;

	/* Negative sleeps are time going backwards */
	/* Zero-time sleeps are non-interesting */
	if (usecs <= 0)
		return;

	memset(&lat, 0, sizeof(lat));
	lat.count = 1;
	lat.time = usecs;
	lat.max = usecs;

	stack_trace_save_tsk(tsk, lat.backtrace, LT_BACKTRACEDEPTH, 0);

	raw_spin_lock_irqsave(&latency_lock, flags);

	account_global_scheduler_latency(tsk, &lat);

	for (i = 0; i < tsk->latency_record_count; i++) {
		struct latency_record *mylat;
		int same = 1;

		mylat = &tsk->latency_record[i];
		for (q = 0; q < LT_BACKTRACEDEPTH; q++) {
			unsigned long record = lat.backtrace[q];

			if (mylat->backtrace[q] != record) {
				same = 0;
				break;
			}

			/* 0 entry is end of backtrace */
			if (!record)
				break;
		}
		if (same) {
			mylat->count++;
			mylat->time += lat.time;
			if (lat.time > mylat->max)
				mylat->max = lat.time;
			goto out_unlock;
		}
	}

	/*
	 * short term hack; if we're > 32 we stop; future we recycle:
	 */
	if (tsk->latency_record_count >= LT_SAVECOUNT)
		goto out_unlock;

	/* Allocated a new one: */
	i = tsk->latency_record_count++;
	memcpy(&tsk->latency_record[i], &lat, sizeof(struct latency_record));

out_unlock:
	raw_spin_unlock_irqrestore(&latency_lock, flags);
}

static int lstats_show(struct seq_file *m, void *v)
{
	int i;

	seq_puts(m, "Latency Top version : v0.1\n");

	for (i = 0; i < MAXLR; i++) {
		struct latency_record *lr = &latency_record[i];

		if (lr->backtrace[0]) {
			int q;
			seq_printf(m, "%i %lu %lu",
				   lr->count, lr->time, lr->max);
			for (q = 0; q < LT_BACKTRACEDEPTH; q++) {
				unsigned long bt = lr->backtrace[q];

				if (!bt)
					break;

				seq_printf(m, " %ps", (void *)bt);
			}
			seq_puts(m, "\n");
		}
	}
	return 0;
}

static ssize_t
lstats_write(struct file *file, const char __user *buf, size_t count,
	     loff_t *offs)
{
	clear_global_latency_tracing();

	return count;
}

static int lstats_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, lstats_show, NULL);
}

static const struct proc_ops lstats_proc_ops = {
	.proc_open	= lstats_open,
	.proc_read	= seq_read,
	.proc_write	= lstats_write,
	.proc_lseek	= seq_lseek,
	.proc_release	= single_release,
};

static int __init init_lstats_procfs(void)
{
	proc_create("latency_stats", 0644, NULL, &lstats_proc_ops);
#ifdef CONFIG_SYSCTL
	register_sysctl_init("kernel", latencytop_sysctl);
#endif
	return 0;
}
device_initcall(init_lstats_procfs);
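
The v6.2 listing above exposes two userspace touch points: /proc/sys/kernel/latencytop (the sysctl table registered in init_lstats_procfs()) switches the accounting on and off, and /proc/latency_stats reports the accumulated records, with any write flushing the global table via lstats_write(). The program below is a minimal standalone reader along those lines; it is only an illustration of the file format described in the header comment, not the actual latencytop tool, and it assumes CONFIG_LATENCYTOP=y plus the privileges needed to write the sysctl.

#include <stdio.h>

int main(void)
{
	FILE *f;
	char line[4096];

	/*
	 * Switch latency accounting on via the sysctl registered above
	 * (usually needs root). One-shot setup for illustration; the
	 * real latencytop tool manages this itself.
	 */
	f = fopen("/proc/sys/kernel/latencytop", "w");
	if (f) {
		fputs("1\n", f);
		fclose(f);
	}

	f = fopen("/proc/latency_stats", "r");
	if (!f) {
		perror("/proc/latency_stats");
		return 1;
	}

	/* First line is the "Latency Top version : v0.1" header. */
	if (!fgets(line, sizeof(line), f)) {
		fclose(f);
		return 1;
	}

	/* Each record: <count> <total usecs> <max usecs> <backtrace> */
	while (fgets(line, sizeof(line), f)) {
		unsigned long count, total, max;
		char bt[4096];

		if (sscanf(line, "%lu %lu %lu %4095[^\n]",
			   &count, &total, &max, bt) == 4)
			printf("%6lu hits, avg %6lu us, max %6lu us  %s\n",
			       count, count ? total / count : 0, max, bt);
	}
	fclose(f);

	/* Any write to /proc/latency_stats flushes the global table. */
	f = fopen("/proc/latency_stats", "w");
	if (f) {
		fputs("flush\n", f);
		fclose(f);
	}
	return 0;
}

The average column is computed exactly as the header comment suggests: the accumulated latency divided by the hit count.
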
v3.1
 
/*
 * latencytop.c: Latency display infrastructure
 *
 * (C) Copyright 2008 Intel Corporation
 * Author: Arjan van de Ven <arjan@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */

/*
 * CONFIG_LATENCYTOP enables a kernel latency tracking infrastructure that is
 * used by the "latencytop" userspace tool. The latency that is tracked is not
 * the 'traditional' interrupt latency (which is primarily caused by something
 * else consuming CPU), but instead, it is the latency an application encounters
 * because the kernel sleeps on its behalf for various reasons.
 *
 * This code tracks 2 levels of statistics:
 * 1) System level latency
 * 2) Per process latency
 *
 * The latency is stored in fixed sized data structures in an accumulated form;
 * if the "same" latency cause is hit twice, this will be tracked as one entry
 * in the data structure. Both the count, total accumulated latency and maximum
 * latency are tracked in this data structure. When the fixed size structure is
 * full, no new causes are tracked until the buffer is flushed by writing to
 * the /proc file; the userspace tool does this on a regular basis.
 *
 * A latency cause is identified by a stringified backtrace at the point that
 * the scheduler gets invoked. The userland tool will use this string to
 * identify the cause of the latency in human readable form.
 *
 * The information is exported via /proc/latency_stats and /proc/<pid>/latency.
 * These files look like this:
 *
 * Latency Top version : v0.1
 * 70 59433 4897 i915_irq_wait drm_ioctl vfs_ioctl do_vfs_ioctl sys_ioctl
 * |    |    |    |
 * |    |    |    +----> the stringified backtrace
 * |    |    +---------> The maximum latency for this entry in microseconds
 * |    +--------------> The accumulated latency for this entry (microseconds)
 * +-------------------> The number of times this entry is hit
 *
 * (note: the average latency is the accumulated latency divided by the number
 * of times)
 */

#include <linux/latencytop.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/spinlock.h>
#include <linux/proc_fs.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/stacktrace.h>

static DEFINE_SPINLOCK(latency_lock);

#define MAXLR 128
static struct latency_record latency_record[MAXLR];

int latencytop_enabled;

void clear_all_latency_tracing(struct task_struct *p)
{
	unsigned long flags;

	if (!latencytop_enabled)
		return;

	spin_lock_irqsave(&latency_lock, flags);
	memset(&p->latency_record, 0, sizeof(p->latency_record));
	p->latency_record_count = 0;
	spin_unlock_irqrestore(&latency_lock, flags);
}

static void clear_global_latency_tracing(void)
{
	unsigned long flags;

	spin_lock_irqsave(&latency_lock, flags);
	memset(&latency_record, 0, sizeof(latency_record));
	spin_unlock_irqrestore(&latency_lock, flags);
}

static void __sched
account_global_scheduler_latency(struct task_struct *tsk, struct latency_record *lat)
{
	int firstnonnull = MAXLR + 1;
	int i;

	if (!latencytop_enabled)
		return;

	/* skip kernel threads for now */
	if (!tsk->mm)
		return;

	for (i = 0; i < MAXLR; i++) {
		int q, same = 1;

		/* Nothing stored: */
		if (!latency_record[i].backtrace[0]) {
			if (firstnonnull > i)
				firstnonnull = i;
			continue;
		}
		for (q = 0; q < LT_BACKTRACEDEPTH; q++) {
			unsigned long record = lat->backtrace[q];

			if (latency_record[i].backtrace[q] != record) {
				same = 0;
				break;
			}

			/* 0 and ULONG_MAX entries mean end of backtrace: */
			if (record == 0 || record == ULONG_MAX)
				break;
		}
		if (same) {
			latency_record[i].count++;
			latency_record[i].time += lat->time;
			if (lat->time > latency_record[i].max)
				latency_record[i].max = lat->time;
			return;
		}
	}

	i = firstnonnull;
	if (i >= MAXLR - 1)
		return;

	/* Allocated a new one: */
	memcpy(&latency_record[i], lat, sizeof(struct latency_record));
}

/*
 * Iterator to store a backtrace into a latency record entry
 */
static inline void store_stacktrace(struct task_struct *tsk,
					struct latency_record *lat)
{
	struct stack_trace trace;

	memset(&trace, 0, sizeof(trace));
	trace.max_entries = LT_BACKTRACEDEPTH;
	trace.entries = &lat->backtrace[0];
	save_stack_trace_tsk(tsk, &trace);
}

/**
 * __account_scheduler_latency - record an occurred latency
 * @tsk - the task struct of the task hitting the latency
 * @usecs - the duration of the latency in microseconds
 * @inter - 1 if the sleep was interruptible, 0 if uninterruptible
 *
 * This function is the main entry point for recording latency entries
 * as called by the scheduler.
 *
 * This function has a few special cases to deal with normal 'non-latency'
 * sleeps: specifically, interruptible sleep longer than 5 msec is skipped
 * since this usually is caused by waiting for events via select() and co.
 *
 * Negative latencies (caused by time going backwards) are also explicitly
 * skipped.
 */
void __sched
__account_scheduler_latency(struct task_struct *tsk, int usecs, int inter)
{
	unsigned long flags;
	int i, q;
	struct latency_record lat;

	/* Long interruptible waits are generally user requested... */
	if (inter && usecs > 5000)
		return;

	/* Negative sleeps are time going backwards */
	/* Zero-time sleeps are non-interesting */
	if (usecs <= 0)
		return;

	memset(&lat, 0, sizeof(lat));
	lat.count = 1;
	lat.time = usecs;
	lat.max = usecs;
	store_stacktrace(tsk, &lat);

	spin_lock_irqsave(&latency_lock, flags);

	account_global_scheduler_latency(tsk, &lat);

	for (i = 0; i < tsk->latency_record_count; i++) {
		struct latency_record *mylat;
		int same = 1;

		mylat = &tsk->latency_record[i];
		for (q = 0; q < LT_BACKTRACEDEPTH; q++) {
			unsigned long record = lat.backtrace[q];

			if (mylat->backtrace[q] != record) {
				same = 0;
				break;
			}

			/* 0 and ULONG_MAX entries mean end of backtrace: */
			if (record == 0 || record == ULONG_MAX)
				break;
		}
		if (same) {
			mylat->count++;
			mylat->time += lat.time;
			if (lat.time > mylat->max)
				mylat->max = lat.time;
			goto out_unlock;
		}
	}

	/*
	 * short term hack; if we're > 32 we stop; future we recycle:
	 */
	if (tsk->latency_record_count >= LT_SAVECOUNT)
		goto out_unlock;

	/* Allocated a new one: */
	i = tsk->latency_record_count++;
	memcpy(&tsk->latency_record[i], &lat, sizeof(struct latency_record));

out_unlock:
	spin_unlock_irqrestore(&latency_lock, flags);
}

static int lstats_show(struct seq_file *m, void *v)
{
	int i;

	seq_puts(m, "Latency Top version : v0.1\n");

	for (i = 0; i < MAXLR; i++) {
		struct latency_record *lr = &latency_record[i];

		if (lr->backtrace[0]) {
			int q;
			seq_printf(m, "%i %lu %lu",
				   lr->count, lr->time, lr->max);
			for (q = 0; q < LT_BACKTRACEDEPTH; q++) {
				unsigned long bt = lr->backtrace[q];
				if (!bt)
					break;
				if (bt == ULONG_MAX)
					break;
				seq_printf(m, " %ps", (void *)bt);
			}
			seq_printf(m, "\n");
		}
	}
	return 0;
}

static ssize_t
lstats_write(struct file *file, const char __user *buf, size_t count,
	     loff_t *offs)
{
	clear_global_latency_tracing();

	return count;
}

static int lstats_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, lstats_show, NULL);
}

static const struct file_operations lstats_fops = {
	.open		= lstats_open,
	.read		= seq_read,
	.write		= lstats_write,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init init_lstats_procfs(void)
{
	proc_create("latency_stats", 0644, NULL, &lstats_fops);
	return 0;
}
device_initcall(init_lstats_procfs);
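
Both versions share the same accumulation scheme: a fixed table of latency_record slots keyed by backtrace, where a repeated cause increments the count, adds to the accumulated time and updates the maximum, while a new cause claims the first free slot, and new causes are silently dropped once the table is full until it is flushed. The self-contained sketch below replays that logic outside the kernel to make it easy to follow. It uses the 0-terminator convention of the v6.2 code (v3.1 additionally treated ULONG_MAX as an end marker), struct rec, account(), DEPTH and NRECS are illustrative stand-ins rather than the kernel's own names, and the latency_lock serialization is omitted since the demo is single-threaded.

#include <stdio.h>

#define DEPTH 12	/* stands in for LT_BACKTRACEDEPTH */
#define NRECS 128	/* stands in for MAXLR */

struct rec {
	unsigned int count;		/* times this cause was hit */
	unsigned long time;		/* accumulated latency, usecs */
	unsigned long max;		/* worst single latency, usecs */
	unsigned long backtrace[DEPTH];	/* 0 marks end of trace */
};

static struct rec table[NRECS];

static void account(const struct rec *lat)
{
	int i, q, firstfree = NRECS;

	for (i = 0; i < NRECS; i++) {
		int same = 1;

		/* Nothing stored in this slot yet: */
		if (!table[i].backtrace[0]) {
			if (firstfree > i)
				firstfree = i;
			continue;
		}
		for (q = 0; q < DEPTH; q++) {
			unsigned long record = lat->backtrace[q];

			if (table[i].backtrace[q] != record) {
				same = 0;
				break;
			}
			if (!record)	/* 0 terminates both traces */
				break;
		}
		if (same) {		/* known cause: accumulate */
			table[i].count++;
			table[i].time += lat->time;
			if (lat->time > table[i].max)
				table[i].max = lat->time;
			return;
		}
	}
	if (firstfree < NRECS)		/* new cause: first free slot */
		table[firstfree] = *lat;
	/* else: table full, drop until it is flushed */
}

int main(void)
{
	struct rec a = { .count = 1, .time = 300, .max = 300,
			 .backtrace = { 0x1001, 0x1002, 0 } };
	struct rec b = { .count = 1, .time = 500, .max = 500,
			 .backtrace = { 0x1001, 0x1002, 0 } };

	account(&a);
	account(&b);	/* same backtrace: merges into slot 0 */
	printf("count=%u total=%lu max=%lu\n",
	       table[0].count, table[0].time, table[0].max);
	return 0;
}

Running it prints count=2 total=800 max=800: the second hit of the identical backtrace merges into the first slot instead of allocating a new one, which is the deduplication behaviour account_global_scheduler_latency() implements.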