v4.10.11
/*
 * trace_hwlatdetect.c - A simple Hardware Latency detector.
 *
 * Use this tracer to detect large system latencies induced by the behavior of
 * certain underlying system hardware or firmware, independent of Linux itself.
 * The code was developed originally to detect the presence of SMIs on Intel
 * and AMD systems, although there is no dependency upon x86 herein.
 *
 * The classical example usage of this tracer is in detecting the presence of
 * SMIs or System Management Interrupts on Intel and AMD systems. An SMI is a
 * somewhat special form of hardware interrupt spawned from earlier CPU debug
 * modes in which the (BIOS/EFI/etc.) firmware arranges for the South Bridge
 * LPC (or other device) to generate a special interrupt under certain
 * circumstances, for example, upon expiration of a special SMI timer device,
 * due to certain external thermal readings, on certain I/O address accesses,
 * and other situations. An SMI hits a special CPU pin, triggers a special
 * SMI mode (complete with special memory map), and the OS is unaware.
 *
 * Although certain hardware-induced latencies are necessary (for example,
 * a modern system often requires an SMI handler for correct thermal control
 * and remote management) they can wreak havoc upon any OS-level performance
 * guarantees toward low-latency, especially when the OS is not even made
 * aware of the presence of these interrupts. For this reason, we need a
 * somewhat brute force mechanism to detect these interrupts. In this case,
 * we do it by hogging all of the CPU(s) for configurable timer intervals,
 * sampling the built-in CPU timer, looking for discontiguous readings.
 *
 * WARNING: This implementation necessarily introduces latencies. Therefore,
 *          you should NEVER use this tracer while running in a production
 *          environment requiring any kind of low-latency performance
 *          guarantee(s).
 *
 * Copyright (C) 2008-2009 Jon Masters, Red Hat, Inc. <jcm@redhat.com>
 * Copyright (C) 2013-2016 Steven Rostedt, Red Hat, Inc. <srostedt@redhat.com>
 *
 * Includes useful feedback from Clark Williams <clark@redhat.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */
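
/*
 * Editorial usage sketch (not part of the original source): the tracer is
 * driven entirely through tracefs. The paths below assume tracefs is
 * mounted at /sys/kernel/debug/tracing; adjust for your system. The
 * tracing_thresh file takes the latency threshold in microseconds.
 *
 *	# echo hwlat > /sys/kernel/debug/tracing/current_tracer
 *	# echo 100 > /sys/kernel/debug/tracing/tracing_thresh
 *	# echo 500000 > /sys/kernel/debug/tracing/hwlat_detector/width
 *	# echo 1000000 > /sys/kernel/debug/tracing/hwlat_detector/window
 *	# cat /sys/kernel/debug/tracing/trace
 */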
#include <linux/kthread.h>
#include <linux/tracefs.h>
#include <linux/uaccess.h>
#include <linux/cpumask.h>
#include <linux/delay.h>
#include "trace.h"

static struct trace_array	*hwlat_trace;

#define U64STR_SIZE		22			/* 20 digits max */

#define BANNER			"hwlat_detector: "
#define DEFAULT_SAMPLE_WINDOW	1000000			/* 1s */
#define DEFAULT_SAMPLE_WIDTH	500000			/* 0.5s */
#define DEFAULT_LAT_THRESHOLD	10			/* 10us */

/* sampling thread */
static struct task_struct *hwlat_kthread;

static struct dentry *hwlat_sample_width;	/* sample width us */
static struct dentry *hwlat_sample_window;	/* sample window us */

/* Save the previous tracing_thresh value */
static unsigned long save_tracing_thresh;

/* NMI timestamp counters */
static u64 nmi_ts_start;
static u64 nmi_total_ts;
static int nmi_count;
static int nmi_cpu;

/* Tells NMIs to call back to the hwlat tracer to record timestamps */
bool trace_hwlat_callback_enabled;

/* If the user changed threshold, remember it */
static u64 last_tracing_thresh = DEFAULT_LAT_THRESHOLD * NSEC_PER_USEC;

/* Individual latency samples are stored here when detected. */
struct hwlat_sample {
	u64		seqnum;		/* unique sequence */
	u64		duration;	/* delta */
	u64		outer_duration;	/* delta (outer loop) */
	u64		nmi_total_ts;	/* Total time spent in NMIs */
	struct timespec	timestamp;	/* wall time */
	int		nmi_count;	/* # NMIs during this sample */
};

/* keep the global state somewhere. */
static struct hwlat_data {

	struct mutex lock;		/* protect changes */

	u64	count;			/* total since reset */

	u64	sample_window;		/* total sampling window (on+off) */
	u64	sample_width;		/* active sampling portion of window */

} hwlat_data = {
	.sample_window		= DEFAULT_SAMPLE_WINDOW,
	.sample_width		= DEFAULT_SAMPLE_WIDTH,
};

static void trace_hwlat_sample(struct hwlat_sample *sample)
{
	struct trace_array *tr = hwlat_trace;
	struct trace_event_call *call = &event_hwlat;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ring_buffer_event *event;
	struct hwlat_entry *entry;
	unsigned long flags;
	int pc;

	pc = preempt_count();
	local_save_flags(flags);

	event = trace_buffer_lock_reserve(buffer, TRACE_HWLAT, sizeof(*entry),
					  flags, pc);
	if (!event)
		return;
	entry	= ring_buffer_event_data(event);
	entry->seqnum			= sample->seqnum;
	entry->duration			= sample->duration;
	entry->outer_duration		= sample->outer_duration;
	entry->timestamp		= sample->timestamp;
	entry->nmi_total_ts		= sample->nmi_total_ts;
	entry->nmi_count		= sample->nmi_count;

	if (!call_filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit_nostack(buffer, event);
}

/* Macros to encapsulate the time capturing infrastructure */
#define time_type	u64
#define time_get()	trace_clock_local()
#define time_to_us(x)	div_u64(x, 1000)
#define time_sub(a, b)	((a) - (b))
#define init_time(a, b)	(a = b)
#define time_u64(a)	a

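/*
 * Editorial note (not in the original source): with the macros above, a
 * microsecond delta such as
 *
 *	diff = time_to_us(time_sub(t2, t1));
 *
 * expands to div_u64(t2 - t1, 1000); trace_clock_local() returns
 * nanoseconds, so the division by 1000 yields microseconds.
 */
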
void trace_hwlat_callback(bool enter)
{
	if (smp_processor_id() != nmi_cpu)
		return;

	/*
	 * Currently trace_clock_local() calls sched_clock() and the
	 * generic version is not NMI safe.
	 */
	if (!IS_ENABLED(CONFIG_GENERIC_SCHED_CLOCK)) {
		if (enter)
			nmi_ts_start = time_get();
		else
			nmi_total_ts = time_get() - nmi_ts_start;
	}

	if (enter)
		nmi_count++;
}

/**
 * get_sample - sample the CPU TSC and look for likely hardware latencies
 *
 * Used to repeatedly capture the CPU TSC (or similar), looking for potential
 * hardware-induced latency. Called with interrupts disabled and with
 * hwlat_data.lock held.
 */
static int get_sample(void)
{
	struct trace_array *tr = hwlat_trace;
	time_type start, t1, t2, last_t2;
	s64 diff, total, last_total = 0;
	u64 sample = 0;
	u64 thresh = tracing_thresh;
	u64 outer_sample = 0;
	int ret = -1;

	do_div(thresh, NSEC_PER_USEC); /* modifies interval value */

	nmi_cpu = smp_processor_id();
	nmi_total_ts = 0;
	nmi_count = 0;
	/* Make sure NMIs see this first */
	barrier();

	trace_hwlat_callback_enabled = true;

	init_time(last_t2, 0);
	start = time_get(); /* start timestamp */

	do {

		t1 = time_get();	/* we'll look for a discontinuity */
		t2 = time_get();

		if (time_u64(last_t2)) {
			/* Check the delta from outer loop (t2 to next t1) */
			diff = time_to_us(time_sub(t1, last_t2));
			/* This shouldn't happen */
			if (diff < 0) {
				pr_err(BANNER "time running backwards\n");
				goto out;
			}
			if (diff > outer_sample)
				outer_sample = diff;
		}
		last_t2 = t2;

		total = time_to_us(time_sub(t2, start)); /* sample width */

		/* Check for possible overflows */
		if (total < last_total) {
			pr_err("Time total overflowed\n");
			break;
		}
		last_total = total;

		/* This checks the inner loop (t1 to t2) */
		diff = time_to_us(time_sub(t2, t1));     /* current diff */

		/* This shouldn't happen */
		if (diff < 0) {
			pr_err(BANNER "time running backwards\n");
			goto out;
		}

		if (diff > sample)
			sample = diff; /* only want highest value */

	} while (total <= hwlat_data.sample_width);

	barrier(); /* finish the above in the view for NMIs */
	trace_hwlat_callback_enabled = false;
	barrier(); /* Make sure nmi_total_ts is no longer updated */

	ret = 0;

	/* If we exceed the threshold value, we have found a hardware latency */
	if (sample > thresh || outer_sample > thresh) {
		struct hwlat_sample s;

		ret = 1;

		/* We read in microseconds */
		if (nmi_total_ts)
			do_div(nmi_total_ts, NSEC_PER_USEC);

		hwlat_data.count++;
		s.seqnum = hwlat_data.count;
		s.duration = sample;
		s.outer_duration = outer_sample;
		s.timestamp = CURRENT_TIME;
		s.nmi_total_ts = nmi_total_ts;
		s.nmi_count = nmi_count;
		trace_hwlat_sample(&s);

		/* Keep a running maximum ever recorded hardware latency */
		if (sample > tr->max_latency)
			tr->max_latency = sample;
	}

out:
	return ret;
}

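/*
 * Editorial sketch (not in the original source) of what get_sample()
 * measures. Each pass takes two back-to-back readings; the "inner" delta
 * is t2 - t1 within a pass, and the "outer" delta is the gap from one
 * pass's t2 to the next pass's t1:
 *
 *	t1 [inner] t2 ........ t1' [inner] t2' ........
 *	           \__ outer __/
 *
 * An SMI stalling the CPU inside either interval shows up as a large
 * delta; if inner or outer exceeds the threshold, a sample is recorded.
 */
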
static struct cpumask save_cpumask;
static bool disable_migrate;

static void move_to_next_cpu(bool initmask)
{
	static struct cpumask *current_mask;
	int next_cpu;

	if (disable_migrate)
		return;

	/* Just pick the first CPU on first iteration */
	if (initmask) {
		current_mask = &save_cpumask;
		get_online_cpus();
		cpumask_and(current_mask, cpu_online_mask, tracing_buffer_mask);
		put_online_cpus();
		next_cpu = cpumask_first(current_mask);
		goto set_affinity;
	}

	/*
	 * If for some reason the user modifies the CPU affinity
	 * of this thread, then stop migrating for the duration
	 * of the current test.
	 */
	if (!cpumask_equal(current_mask, &current->cpus_allowed))
		goto disable;

	get_online_cpus();
	cpumask_and(current_mask, cpu_online_mask, tracing_buffer_mask);
	next_cpu = cpumask_next(smp_processor_id(), current_mask);
	put_online_cpus();

	if (next_cpu >= nr_cpu_ids)
		next_cpu = cpumask_first(current_mask);

 set_affinity:
	if (next_cpu >= nr_cpu_ids) /* Shouldn't happen! */
		goto disable;

	cpumask_clear(current_mask);
	cpumask_set_cpu(next_cpu, current_mask);

	sched_setaffinity(0, current_mask);
	return;

 disable:
	disable_migrate = true;
}

/*
 * kthread_fn - The CPU time sampling/hardware latency detection kernel thread
 *
 * Used to periodically sample the CPU TSC via a call to get_sample. We
 * disable interrupts, which does (intentionally) introduce latency since we
 * need to ensure nothing else might be running (and thus preempting).
 * Obviously this should never be used in production environments.
 *
 * Currently this runs on whichever CPU it was scheduled on; most
 * real-world hardware latency situations occur across several CPUs,
 * but we might later generalize this if we find there are any actual
 * systems with alternate SMI delivery or other hardware latencies.
 */
static int kthread_fn(void *data)
{
	u64 interval;
	bool initmask = true;

	while (!kthread_should_stop()) {

		move_to_next_cpu(initmask);
		initmask = false;

		local_irq_disable();
		get_sample();
		local_irq_enable();

		mutex_lock(&hwlat_data.lock);
		interval = hwlat_data.sample_window - hwlat_data.sample_width;
		mutex_unlock(&hwlat_data.lock);

		do_div(interval, USEC_PER_MSEC); /* modifies interval value */

		/* Always sleep for at least 1ms */
		if (interval < 1)
			interval = 1;

		if (msleep_interruptible(interval))
			break;
	}

	return 0;
}

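/*
 * Worked example (editorial, using the defaults above): with
 * sample_window = 1000000 us and sample_width = 500000 us, each pass
 * busy-samples for roughly 0.5 s, then computes
 * interval = (1000000 - 500000) / USEC_PER_MSEC = 500, i.e. a 500 ms
 * sleep before the next window begins.
 */
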
/**
 * start_kthread - Kick off the hardware latency sampling/detector kthread
 *
 * This starts the kernel thread that will sit and sample the CPU timestamp
 * counter (TSC or similar) and look for potential hardware latencies.
 */
static int start_kthread(struct trace_array *tr)
{
	struct task_struct *kthread;

	kthread = kthread_create(kthread_fn, NULL, "hwlatd");
	if (IS_ERR(kthread)) {
		pr_err(BANNER "could not start sampling thread\n");
		return -ENOMEM;
	}
	hwlat_kthread = kthread;
	wake_up_process(kthread);

	return 0;
}

/**
 * stop_kthread - Inform the hardware latency sampling/detector kthread to stop
 *
 * This kicks the running hardware latency sampling/detector kernel thread and
 * tells it to stop sampling now. Use this on unload and at system shutdown.
 */
static void stop_kthread(void)
{
	if (!hwlat_kthread)
		return;
	kthread_stop(hwlat_kthread);
	hwlat_kthread = NULL;
}

/*
 * hwlat_read - Wrapper read function for reading both window and width
 * @filp: The active open file structure
 * @ubuf: The userspace provided buffer to read value into
 * @cnt: The maximum number of bytes to read
 * @ppos: The current "file" position
 *
 * This function provides a generic read implementation for the global state
 * "hwlat_data" structure filesystem entries.
 */
static ssize_t hwlat_read(struct file *filp, char __user *ubuf,
			  size_t cnt, loff_t *ppos)
{
	char buf[U64STR_SIZE];
	u64 *entry = filp->private_data;
	u64 val;
	int len;

	if (!entry)
		return -EFAULT;

	if (cnt > sizeof(buf))
		cnt = sizeof(buf);

	val = *entry;

	len = snprintf(buf, sizeof(buf), "%llu\n", val);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
}

/**
 * hwlat_width_write - Write function for "width" entry
 * @filp: The active open file structure
 * @ubuf: The user buffer that contains the value to write
 * @cnt: The maximum number of bytes to write to "file"
 * @ppos: The current position in @file
 *
 * This function provides a write implementation for the "width" interface
 * to the hardware latency detector. It can be used to configure
 * how many us of the total window we will actively sample for any
 * hardware-induced latency periods. Obviously, it is not possible to
 * sample constantly and have the system respond to a sample reader, or,
 * worse, without having the system appear to have gone out to lunch. It
 * is enforced that width is less than the total window size.
 */
static ssize_t
hwlat_width_write(struct file *filp, const char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	u64 val;
	int err;

	err = kstrtoull_from_user(ubuf, cnt, 10, &val);
	if (err)
		return err;

	mutex_lock(&hwlat_data.lock);
	if (val < hwlat_data.sample_window)
		hwlat_data.sample_width = val;
	else
		err = -EINVAL;
	mutex_unlock(&hwlat_data.lock);

	if (err)
		return err;

	return cnt;
}

/**
 * hwlat_window_write - Write function for "window" entry
 * @filp: The active open file structure
 * @ubuf: The user buffer that contains the value to write
 * @cnt: The maximum number of bytes to write to "file"
 * @ppos: The current position in @file
 *
 * This function provides a write implementation for the "window" interface
 * to the hardware latency detector. The window is the total time
 * in us that will be considered one sample period. Conceptually, windows
 * occur back-to-back and contain a sample width period during which
 * actual sampling occurs. Can be used to write a new total window size. It
 * is enforced that any value written must be greater than the sample width
 * size, or an error results.
 */
static ssize_t
hwlat_window_write(struct file *filp, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	u64 val;
	int err;

	err = kstrtoull_from_user(ubuf, cnt, 10, &val);
	if (err)
		return err;

	mutex_lock(&hwlat_data.lock);
	if (hwlat_data.sample_width < val)
		hwlat_data.sample_window = val;
	else
		err = -EINVAL;
	mutex_unlock(&hwlat_data.lock);

	if (err)
		return err;

	return cnt;
}

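/*
 * Editorial sketch (not in the original source): width must stay strictly
 * below window, so grow the window before widening past it, e.g. with the
 * default 1000000 us window:
 *
 *	# echo 2000000 > hwlat_detector/width	fails (-EINVAL, >= window)
 *	# echo 4000000 > hwlat_detector/window	succeeds (width < 4000000)
 *	# echo 2000000 > hwlat_detector/width	now succeeds
 */
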
static const struct file_operations width_fops = {
	.open		= tracing_open_generic,
	.read		= hwlat_read,
	.write		= hwlat_width_write,
};

static const struct file_operations window_fops = {
	.open		= tracing_open_generic,
	.read		= hwlat_read,
	.write		= hwlat_window_write,
};

/**
 * init_tracefs - A function to initialize the tracefs interface files
 *
 * This function creates entries in tracefs for "hwlat_detector".
 * It creates the hwlat_detector directory in the tracing directory,
 * and within that directory are the width and window files used to
 * change and view those values.
 */
static int init_tracefs(void)
{
	struct dentry *d_tracer;
	struct dentry *top_dir;

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return -ENOMEM;

	top_dir = tracefs_create_dir("hwlat_detector", d_tracer);
	if (!top_dir)
		return -ENOMEM;

	hwlat_sample_window = tracefs_create_file("window", 0640,
						  top_dir,
						  &hwlat_data.sample_window,
						  &window_fops);
	if (!hwlat_sample_window)
		goto err;

	hwlat_sample_width = tracefs_create_file("width", 0644,
						 top_dir,
						 &hwlat_data.sample_width,
						 &width_fops);
	if (!hwlat_sample_width)
		goto err;

	return 0;

 err:
	tracefs_remove_recursive(top_dir);
	return -ENOMEM;
}

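/*
 * Editorial sketch of the resulting layout (tracefs mount point assumed):
 *
 *	<tracefs>/hwlat_detector/
 *	├── width	0644, active sampling time per window, in us
 *	└── window	0640, total window length, in us
 */
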
static void hwlat_tracer_start(struct trace_array *tr)
{
	int err;

	err = start_kthread(tr);
	if (err)
		pr_err(BANNER "Cannot start hwlat kthread\n");
}

static void hwlat_tracer_stop(struct trace_array *tr)
{
	stop_kthread();
}

static bool hwlat_busy;

static int hwlat_tracer_init(struct trace_array *tr)
{
	/* Only allow one instance to enable this */
	if (hwlat_busy)
		return -EBUSY;

	hwlat_trace = tr;

	disable_migrate = false;
	hwlat_data.count = 0;
	tr->max_latency = 0;
	save_tracing_thresh = tracing_thresh;

	/* tracing_thresh is in nsecs, we speak in usecs */
	if (!tracing_thresh)
		tracing_thresh = last_tracing_thresh;

	if (tracer_tracing_is_on(tr))
		hwlat_tracer_start(tr);

	hwlat_busy = true;

	return 0;
}

static void hwlat_tracer_reset(struct trace_array *tr)
{
	stop_kthread();

	/* the tracing threshold is static between runs */
	last_tracing_thresh = tracing_thresh;

	tracing_thresh = save_tracing_thresh;
	hwlat_busy = false;
}

static struct tracer hwlat_tracer __read_mostly =
{
	.name		= "hwlat",
	.init		= hwlat_tracer_init,
	.reset		= hwlat_tracer_reset,
	.start		= hwlat_tracer_start,
	.stop		= hwlat_tracer_stop,
	.allow_instances = true,
};

__init static int init_hwlat_tracer(void)
{
	int ret;

	mutex_init(&hwlat_data.lock);

	ret = register_tracer(&hwlat_tracer);
	if (ret)
		return ret;

	init_tracefs();

	return 0;
}
late_initcall(init_hwlat_tracer);
v4.17
/*
 * trace_hwlatdetect.c - A simple Hardware Latency detector.
 *
 * Use this tracer to detect large system latencies induced by the behavior of
 * certain underlying system hardware or firmware, independent of Linux itself.
 * The code was developed originally to detect the presence of SMIs on Intel
 * and AMD systems, although there is no dependency upon x86 herein.
 *
 * The classical example usage of this tracer is in detecting the presence of
 * SMIs or System Management Interrupts on Intel and AMD systems. An SMI is a
 * somewhat special form of hardware interrupt spawned from earlier CPU debug
 * modes in which the (BIOS/EFI/etc.) firmware arranges for the South Bridge
 * LPC (or other device) to generate a special interrupt under certain
 * circumstances, for example, upon expiration of a special SMI timer device,
 * due to certain external thermal readings, on certain I/O address accesses,
 * and other situations. An SMI hits a special CPU pin, triggers a special
 * SMI mode (complete with special memory map), and the OS is unaware.
 *
 * Although certain hardware-induced latencies are necessary (for example,
 * a modern system often requires an SMI handler for correct thermal control
 * and remote management) they can wreak havoc upon any OS-level performance
 * guarantees toward low-latency, especially when the OS is not even made
 * aware of the presence of these interrupts. For this reason, we need a
 * somewhat brute force mechanism to detect these interrupts. In this case,
 * we do it by hogging all of the CPU(s) for configurable timer intervals,
 * sampling the built-in CPU timer, looking for discontiguous readings.
 *
 * WARNING: This implementation necessarily introduces latencies. Therefore,
 *          you should NEVER use this tracer while running in a production
 *          environment requiring any kind of low-latency performance
 *          guarantee(s).
 *
 * Copyright (C) 2008-2009 Jon Masters, Red Hat, Inc. <jcm@redhat.com>
 * Copyright (C) 2013-2016 Steven Rostedt, Red Hat, Inc. <srostedt@redhat.com>
 *
 * Includes useful feedback from Clark Williams <clark@redhat.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */
#include <linux/kthread.h>
#include <linux/tracefs.h>
#include <linux/uaccess.h>
#include <linux/cpumask.h>
#include <linux/delay.h>
#include <linux/sched/clock.h>
#include "trace.h"

static struct trace_array	*hwlat_trace;

#define U64STR_SIZE		22			/* 20 digits max */

#define BANNER			"hwlat_detector: "
#define DEFAULT_SAMPLE_WINDOW	1000000			/* 1s */
#define DEFAULT_SAMPLE_WIDTH	500000			/* 0.5s */
#define DEFAULT_LAT_THRESHOLD	10			/* 10us */

/* sampling thread */
static struct task_struct *hwlat_kthread;

static struct dentry *hwlat_sample_width;	/* sample width us */
static struct dentry *hwlat_sample_window;	/* sample window us */

/* Save the previous tracing_thresh value */
static unsigned long save_tracing_thresh;

/* NMI timestamp counters */
static u64 nmi_ts_start;
static u64 nmi_total_ts;
static int nmi_count;
static int nmi_cpu;

/* Tells NMIs to call back to the hwlat tracer to record timestamps */
bool trace_hwlat_callback_enabled;

/* If the user changed threshold, remember it */
static u64 last_tracing_thresh = DEFAULT_LAT_THRESHOLD * NSEC_PER_USEC;

/* Individual latency samples are stored here when detected. */
struct hwlat_sample {
	u64			seqnum;		/* unique sequence */
	u64			duration;	/* delta */
	u64			outer_duration;	/* delta (outer loop) */
	u64			nmi_total_ts;	/* Total time spent in NMIs */
	struct timespec64	timestamp;	/* wall time */
	int			nmi_count;	/* # NMIs during this sample */
};

/* keep the global state somewhere. */
static struct hwlat_data {

	struct mutex lock;		/* protect changes */

	u64	count;			/* total since reset */

	u64	sample_window;		/* total sampling window (on+off) */
	u64	sample_width;		/* active sampling portion of window */

} hwlat_data = {
	.sample_window		= DEFAULT_SAMPLE_WINDOW,
	.sample_width		= DEFAULT_SAMPLE_WIDTH,
};

static void trace_hwlat_sample(struct hwlat_sample *sample)
{
	struct trace_array *tr = hwlat_trace;
	struct trace_event_call *call = &event_hwlat;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ring_buffer_event *event;
	struct hwlat_entry *entry;
	unsigned long flags;
	int pc;

	pc = preempt_count();
	local_save_flags(flags);

	event = trace_buffer_lock_reserve(buffer, TRACE_HWLAT, sizeof(*entry),
					  flags, pc);
	if (!event)
		return;
	entry	= ring_buffer_event_data(event);
	entry->seqnum			= sample->seqnum;
	entry->duration			= sample->duration;
	entry->outer_duration		= sample->outer_duration;
	entry->timestamp		= sample->timestamp;
	entry->nmi_total_ts		= sample->nmi_total_ts;
	entry->nmi_count		= sample->nmi_count;

	if (!call_filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit_nostack(buffer, event);
}

/* Macros to encapsulate the time capturing infrastructure */
#define time_type	u64
#define time_get()	trace_clock_local()
#define time_to_us(x)	div_u64(x, 1000)
#define time_sub(a, b)	((a) - (b))
#define init_time(a, b)	(a = b)
#define time_u64(a)	a

void trace_hwlat_callback(bool enter)
{
	if (smp_processor_id() != nmi_cpu)
		return;

	/*
	 * Currently trace_clock_local() calls sched_clock() and the
	 * generic version is not NMI safe.
	 */
	if (!IS_ENABLED(CONFIG_GENERIC_SCHED_CLOCK)) {
		if (enter)
			nmi_ts_start = time_get();
		else
			nmi_total_ts = time_get() - nmi_ts_start;
	}

	if (enter)
		nmi_count++;
}

/**
 * get_sample - sample the CPU TSC and look for likely hardware latencies
 *
 * Used to repeatedly capture the CPU TSC (or similar), looking for potential
 * hardware-induced latency. Called with interrupts disabled and with
 * hwlat_data.lock held.
 */
static int get_sample(void)
{
	struct trace_array *tr = hwlat_trace;
	time_type start, t1, t2, last_t2;
	s64 diff, total, last_total = 0;
	u64 sample = 0;
	u64 thresh = tracing_thresh;
	u64 outer_sample = 0;
	int ret = -1;

	do_div(thresh, NSEC_PER_USEC); /* modifies interval value */

	nmi_cpu = smp_processor_id();
	nmi_total_ts = 0;
	nmi_count = 0;
	/* Make sure NMIs see this first */
	barrier();

	trace_hwlat_callback_enabled = true;

	init_time(last_t2, 0);
	start = time_get(); /* start timestamp */

	do {

		t1 = time_get();	/* we'll look for a discontinuity */
		t2 = time_get();

		if (time_u64(last_t2)) {
			/* Check the delta from outer loop (t2 to next t1) */
			diff = time_to_us(time_sub(t1, last_t2));
			/* This shouldn't happen */
			if (diff < 0) {
				pr_err(BANNER "time running backwards\n");
				goto out;
			}
			if (diff > outer_sample)
				outer_sample = diff;
		}
		last_t2 = t2;

		total = time_to_us(time_sub(t2, start)); /* sample width */

		/* Check for possible overflows */
		if (total < last_total) {
			pr_err("Time total overflowed\n");
			break;
		}
		last_total = total;

		/* This checks the inner loop (t1 to t2) */
		diff = time_to_us(time_sub(t2, t1));     /* current diff */

		/* This shouldn't happen */
		if (diff < 0) {
			pr_err(BANNER "time running backwards\n");
			goto out;
		}

		if (diff > sample)
			sample = diff; /* only want highest value */

	} while (total <= hwlat_data.sample_width);

	barrier(); /* finish the above in the view for NMIs */
	trace_hwlat_callback_enabled = false;
	barrier(); /* Make sure nmi_total_ts is no longer updated */

	ret = 0;

	/* If we exceed the threshold value, we have found a hardware latency */
	if (sample > thresh || outer_sample > thresh) {
		struct hwlat_sample s;

		ret = 1;

		/* We read in microseconds */
		if (nmi_total_ts)
			do_div(nmi_total_ts, NSEC_PER_USEC);

		hwlat_data.count++;
		s.seqnum = hwlat_data.count;
		s.duration = sample;
		s.outer_duration = outer_sample;
		ktime_get_real_ts64(&s.timestamp);
		s.nmi_total_ts = nmi_total_ts;
		s.nmi_count = nmi_count;
		trace_hwlat_sample(&s);

		/* Keep a running maximum ever recorded hardware latency */
		if (sample > tr->max_latency)
			tr->max_latency = sample;
	}

out:
	return ret;
}

static struct cpumask save_cpumask;
static bool disable_migrate;

static void move_to_next_cpu(void)
{
	struct cpumask *current_mask = &save_cpumask;
	int next_cpu;

	if (disable_migrate)
		return;
	/*
	 * If for some reason the user modifies the CPU affinity
	 * of this thread, then stop migrating for the duration
	 * of the current test.
	 */
	if (!cpumask_equal(current_mask, &current->cpus_allowed))
		goto disable;

	get_online_cpus();
	cpumask_and(current_mask, cpu_online_mask, tracing_buffer_mask);
	next_cpu = cpumask_next(smp_processor_id(), current_mask);
	put_online_cpus();

	if (next_cpu >= nr_cpu_ids)
		next_cpu = cpumask_first(current_mask);

	if (next_cpu >= nr_cpu_ids) /* Shouldn't happen! */
		goto disable;

	cpumask_clear(current_mask);
	cpumask_set_cpu(next_cpu, current_mask);

	sched_setaffinity(0, current_mask);
	return;

 disable:
	disable_migrate = true;
}

/*
 * kthread_fn - The CPU time sampling/hardware latency detection kernel thread
 *
 * Used to periodically sample the CPU TSC via a call to get_sample. We
 * disable interrupts, which does (intentionally) introduce latency since we
 * need to ensure nothing else might be running (and thus preempting).
 * Obviously this should never be used in production environments.
 *
 * Executes one loop iteration on each CPU in the tracing_cpumask sysfs file.
 */
static int kthread_fn(void *data)
{
	u64 interval;

	while (!kthread_should_stop()) {

		move_to_next_cpu();

		local_irq_disable();
		get_sample();
		local_irq_enable();

		mutex_lock(&hwlat_data.lock);
		interval = hwlat_data.sample_window - hwlat_data.sample_width;
		mutex_unlock(&hwlat_data.lock);

		do_div(interval, USEC_PER_MSEC); /* modifies interval value */

		/* Always sleep for at least 1ms */
		if (interval < 1)
			interval = 1;

		if (msleep_interruptible(interval))
			break;
	}

	return 0;
}

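/*
 * Editorial sketch (not in the original source): with four CPUs in
 * tracing_cpumask, the single "hwlatd" thread rotates one window per CPU:
 *
 *	window 1: CPU0,  window 2: CPU1,  window 3: CPU2,
 *	window 4: CPU3,  window 5: CPU0,  ...
 *
 * move_to_next_cpu() advances with cpumask_next() and wraps back with
 * cpumask_first() when it falls off the end of the mask.
 */
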
/**
 * start_kthread - Kick off the hardware latency sampling/detector kthread
 *
 * This starts the kernel thread that will sit and sample the CPU timestamp
 * counter (TSC or similar) and look for potential hardware latencies.
 */
static int start_kthread(struct trace_array *tr)
{
	struct cpumask *current_mask = &save_cpumask;
	struct task_struct *kthread;
	int next_cpu;

	/* Just pick the first CPU on first iteration */
	current_mask = &save_cpumask;
	get_online_cpus();
	cpumask_and(current_mask, cpu_online_mask, tracing_buffer_mask);
	put_online_cpus();
	next_cpu = cpumask_first(current_mask);

	kthread = kthread_create(kthread_fn, NULL, "hwlatd");
	if (IS_ERR(kthread)) {
		pr_err(BANNER "could not start sampling thread\n");
		return -ENOMEM;
	}

	cpumask_clear(current_mask);
	cpumask_set_cpu(next_cpu, current_mask);
	sched_setaffinity(kthread->pid, current_mask);

	hwlat_kthread = kthread;
	wake_up_process(kthread);

	return 0;
}

/**
 * stop_kthread - Inform the hardware latency sampling/detector kthread to stop
 *
 * This kicks the running hardware latency sampling/detector kernel thread and
 * tells it to stop sampling now. Use this on unload and at system shutdown.
 */
static void stop_kthread(void)
{
	if (!hwlat_kthread)
		return;
	kthread_stop(hwlat_kthread);
	hwlat_kthread = NULL;
}

/*
 * hwlat_read - Wrapper read function for reading both window and width
 * @filp: The active open file structure
 * @ubuf: The userspace provided buffer to read value into
 * @cnt: The maximum number of bytes to read
 * @ppos: The current "file" position
 *
 * This function provides a generic read implementation for the global state
 * "hwlat_data" structure filesystem entries.
 */
static ssize_t hwlat_read(struct file *filp, char __user *ubuf,
			  size_t cnt, loff_t *ppos)
{
	char buf[U64STR_SIZE];
	u64 *entry = filp->private_data;
	u64 val;
	int len;

	if (!entry)
		return -EFAULT;

	if (cnt > sizeof(buf))
		cnt = sizeof(buf);

	val = *entry;

	len = snprintf(buf, sizeof(buf), "%llu\n", val);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
}

/**
 * hwlat_width_write - Write function for "width" entry
 * @filp: The active open file structure
 * @ubuf: The user buffer that contains the value to write
 * @cnt: The maximum number of bytes to write to "file"
 * @ppos: The current position in @file
 *
 * This function provides a write implementation for the "width" interface
 * to the hardware latency detector. It can be used to configure
 * how many us of the total window we will actively sample for any
 * hardware-induced latency periods. Obviously, it is not possible to
 * sample constantly and have the system respond to a sample reader, or,
 * worse, without having the system appear to have gone out to lunch. It
 * is enforced that width is less than the total window size.
 */
static ssize_t
hwlat_width_write(struct file *filp, const char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	u64 val;
	int err;

	err = kstrtoull_from_user(ubuf, cnt, 10, &val);
	if (err)
		return err;

	mutex_lock(&hwlat_data.lock);
	if (val < hwlat_data.sample_window)
		hwlat_data.sample_width = val;
	else
		err = -EINVAL;
	mutex_unlock(&hwlat_data.lock);

	if (err)
		return err;

	return cnt;
}

/**
 * hwlat_window_write - Write function for "window" entry
 * @filp: The active open file structure
 * @ubuf: The user buffer that contains the value to write
 * @cnt: The maximum number of bytes to write to "file"
 * @ppos: The current position in @file
 *
 * This function provides a write implementation for the "window" interface
 * to the hardware latency detector. The window is the total time
 * in us that will be considered one sample period. Conceptually, windows
 * occur back-to-back and contain a sample width period during which
 * actual sampling occurs. Can be used to write a new total window size. It
 * is enforced that any value written must be greater than the sample width
 * size, or an error results.
 */
static ssize_t
hwlat_window_write(struct file *filp, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	u64 val;
	int err;

	err = kstrtoull_from_user(ubuf, cnt, 10, &val);
	if (err)
		return err;

	mutex_lock(&hwlat_data.lock);
	if (hwlat_data.sample_width < val)
		hwlat_data.sample_window = val;
	else
		err = -EINVAL;
	mutex_unlock(&hwlat_data.lock);

	if (err)
		return err;

	return cnt;
}

static const struct file_operations width_fops = {
	.open		= tracing_open_generic,
	.read		= hwlat_read,
	.write		= hwlat_width_write,
};

static const struct file_operations window_fops = {
	.open		= tracing_open_generic,
	.read		= hwlat_read,
	.write		= hwlat_window_write,
};

/**
 * init_tracefs - A function to initialize the tracefs interface files
 *
 * This function creates entries in tracefs for "hwlat_detector".
 * It creates the hwlat_detector directory in the tracing directory,
 * and within that directory are the width and window files used to
 * change and view those values.
 */
static int init_tracefs(void)
{
	struct dentry *d_tracer;
	struct dentry *top_dir;

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return -ENOMEM;

	top_dir = tracefs_create_dir("hwlat_detector", d_tracer);
	if (!top_dir)
		return -ENOMEM;

	hwlat_sample_window = tracefs_create_file("window", 0640,
						  top_dir,
						  &hwlat_data.sample_window,
						  &window_fops);
	if (!hwlat_sample_window)
		goto err;

	hwlat_sample_width = tracefs_create_file("width", 0644,
						 top_dir,
						 &hwlat_data.sample_width,
						 &width_fops);
	if (!hwlat_sample_width)
		goto err;

	return 0;

 err:
	tracefs_remove_recursive(top_dir);
	return -ENOMEM;
}

static void hwlat_tracer_start(struct trace_array *tr)
{
	int err;

	err = start_kthread(tr);
	if (err)
		pr_err(BANNER "Cannot start hwlat kthread\n");
}

static void hwlat_tracer_stop(struct trace_array *tr)
{
	stop_kthread();
}

static bool hwlat_busy;

static int hwlat_tracer_init(struct trace_array *tr)
{
	/* Only allow one instance to enable this */
	if (hwlat_busy)
		return -EBUSY;

	hwlat_trace = tr;

	disable_migrate = false;
	hwlat_data.count = 0;
	tr->max_latency = 0;
	save_tracing_thresh = tracing_thresh;

	/* tracing_thresh is in nsecs, we speak in usecs */
	if (!tracing_thresh)
		tracing_thresh = last_tracing_thresh;

	if (tracer_tracing_is_on(tr))
		hwlat_tracer_start(tr);

	hwlat_busy = true;

	return 0;
}

static void hwlat_tracer_reset(struct trace_array *tr)
{
	stop_kthread();

	/* the tracing threshold is static between runs */
	last_tracing_thresh = tracing_thresh;

	tracing_thresh = save_tracing_thresh;
	hwlat_busy = false;
}

static struct tracer hwlat_tracer __read_mostly =
{
	.name		= "hwlat",
	.init		= hwlat_tracer_init,
	.reset		= hwlat_tracer_reset,
	.start		= hwlat_tracer_start,
	.stop		= hwlat_tracer_stop,
	.allow_instances = true,
};

__init static int init_hwlat_tracer(void)
{
	int ret;

	mutex_init(&hwlat_data.lock);

	ret = register_tracer(&hwlat_tracer);
	if (ret)
		return ret;

	init_tracefs();

	return 0;
}
late_initcall(init_hwlat_tracer);