v6.2 (kernel/locking/qspinlock_stat.h)
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *
 * Authors: Waiman Long <longman@redhat.com>
 */

#include "lock_events.h"

#ifdef CONFIG_LOCK_EVENT_COUNTS
#ifdef CONFIG_PARAVIRT_SPINLOCKS
/*
 * Collect pvqspinlock locking event counts
 */
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/fs.h>

#define EVENT_COUNT(ev)	lockevents[LOCKEVENT_ ## ev]

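/*
 * e.g. EVENT_COUNT(pv_kick_unlock) expands to
 * lockevents[LOCKEVENT_pv_kick_unlock], one slot of the per-cpu
 * event count array declared in lock_events.h.
 */
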
/*
 * PV specific per-cpu counter
 */
static DEFINE_PER_CPU(u64, pv_kick_time);

/*
 * Function to read and return the PV qspinlock counts.
 *
 * The following counters are handled specially:
 * 1. pv_latency_kick
 *    Average kick latency (ns) = pv_latency_kick/pv_kick_unlock
 * 2. pv_latency_wake
 *    Average wake latency (ns) = pv_latency_wake/pv_kick_wake
 * 3. pv_hash_hops
 *    Average hops/hash = pv_hash_hops/pv_kick_unlock
 */
ssize_t lockevent_read(struct file *file, char __user *user_buf,
		       size_t count, loff_t *ppos)
{
	char buf[64];
	int cpu, id, len;
	u64 sum = 0, kicks = 0;

	/*
	 * Get the counter ID stored in file->f_inode->i_private
	 */
	id = (long)file_inode(file)->i_private;

	if (id >= lockevent_num)
		return -EBADF;

	for_each_possible_cpu(cpu) {
		sum += per_cpu(lockevents[id], cpu);
		/*
		 * Need to sum additional counters for some of them
		 */
		switch (id) {

		case LOCKEVENT_pv_latency_kick:
		case LOCKEVENT_pv_hash_hops:
			kicks += per_cpu(EVENT_COUNT(pv_kick_unlock), cpu);
			break;

		case LOCKEVENT_pv_latency_wake:
			kicks += per_cpu(EVENT_COUNT(pv_kick_wake), cpu);
			break;
		}
	}

	if (id == LOCKEVENT_pv_hash_hops) {
		u64 frac = 0;

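		/*
		 * do_div(sum, kicks) divides sum in place and returns the
		 * remainder, so frac becomes the fractional part scaled to
		 * two decimal digits: e.g. sum = 250 hops over kicks = 100
		 * unlock kicks prints as "2.50".
		 */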
		if (kicks) {
			frac = 100ULL * do_div(sum, kicks);
			frac = DIV_ROUND_CLOSEST_ULL(frac, kicks);
		}

		/*
		 * Return a X.XX decimal number
		 */
		len = snprintf(buf, sizeof(buf) - 1, "%llu.%02llu\n",
			       sum, frac);
	} else {
		/*
		 * Round to the nearest ns
		 */
		if ((id == LOCKEVENT_pv_latency_kick) ||
		    (id == LOCKEVENT_pv_latency_wake)) {
			if (kicks)
				sum = DIV_ROUND_CLOSEST_ULL(sum, kicks);
		}
		len = snprintf(buf, sizeof(buf) - 1, "%llu\n", sum);
	}

	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}

/*
 * PV hash hop count
 */
static inline void lockevent_pv_hop(int hopcnt)
{
	this_cpu_add(EVENT_COUNT(pv_hash_hops), hopcnt);
}

/*
 * Replacement function for pv_kick()
 */
static inline void __pv_kick(int cpu)
{
	u64 start = sched_clock();

	per_cpu(pv_kick_time, cpu) = start;
	pv_kick(cpu);
	this_cpu_add(EVENT_COUNT(pv_latency_kick), sched_clock() - start);
}

/*
 * Replacement function for pv_wait()
 *
 * The kicking CPU stores its sched_clock() timestamp into the target
 * CPU's pv_kick_time (see __pv_kick() above). If that timestamp is
 * non-zero when pv_wait() returns, the wakeup came from a kick and
 * the elapsed time is credited to pv_latency_wake.
 */
static inline void __pv_wait(u8 *ptr, u8 val)
{
	u64 *pkick_time = this_cpu_ptr(&pv_kick_time);

	*pkick_time = 0;
	pv_wait(ptr, val);
	if (*pkick_time) {
		this_cpu_add(EVENT_COUNT(pv_latency_wake),
			     sched_clock() - *pkick_time);
		lockevent_inc(pv_kick_wake);
	}
}

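/*
 * Interpose on pv_kick()/pv_wait(): any calls compiled after this
 * point pick up the instrumented wrappers above, while the wrappers
 * themselves still call the real functions (their bodies were parsed
 * before these macros were defined).
 */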
#define pv_kick(c)	__pv_kick(c)
#define pv_wait(p, v)	__pv_wait(p, v)

#endif /* CONFIG_PARAVIRT_SPINLOCKS */

#else /* CONFIG_LOCK_EVENT_COUNTS */

static inline void lockevent_pv_hop(int hopcnt)	{ }

#endif /* CONFIG_LOCK_EVENT_COUNTS */
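
This version keeps only the PV-specific pieces here; the generic counter
machinery comes from lock_events.h, which is not shown on this page. A minimal
sketch of what this file relies on from that header, inferred from the
EVENT_COUNT() definition and the lockevents/lockevent_num/lockevent_inc() uses
above (the real header generates the enum from lock_events_list.h):

/* Sketch of the lock_events.h machinery assumed by the code above. */
enum lock_events {
	LOCKEVENT_pv_hash_hops,
	LOCKEVENT_pv_kick_unlock,
	LOCKEVENT_pv_kick_wake,
	LOCKEVENT_pv_latency_kick,
	LOCKEVENT_pv_latency_wake,
	/* ... one entry per lock event ... */
	lockevent_num,			/* total number of lock events */
};

DECLARE_PER_CPU(unsigned long, lockevents[lockevent_num]);

#define lockevent_inc(ev)	this_cpu_inc(lockevents[LOCKEVENT_ ## ev])
#define lockevent_add(ev, c)	this_cpu_add(lockevents[LOCKEVENT_ ## ev], c)
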
v4.17 (kernel/locking/qspinlock_stat.h)
 
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Authors: Waiman Long <waiman.long@hpe.com>
 */

/*
 * When queued spinlock statistical counters are enabled, the following
 * debugfs files will be created for reporting the counter values:
 *
 * <debugfs>/qlockstat/
 *   pv_hash_hops	- average # of hops per hashing operation
 *   pv_kick_unlock	- # of vCPU kicks issued at unlock time
 *   pv_kick_wake	- # of vCPU kicks used for computing pv_latency_wake
 *   pv_latency_kick	- average latency (ns) of vCPU kick operation
 *   pv_latency_wake	- average latency (ns) from vCPU kick to wakeup
 *   pv_lock_slowpath	- # of locking operations via the slowpath
 *   pv_lock_stealing	- # of lock stealing operations
 *   pv_spurious_wakeup	- # of spurious wakeups in non-head vCPUs
 *   pv_wait_again	- # of wait's after a queue head vCPU kick
 *   pv_wait_early	- # of early vCPU wait's
 *   pv_wait_head	- # of vCPU wait's at the queue head
 *   pv_wait_node	- # of vCPU wait's at a non-head queue node
 *
 * Writing to the "reset_counters" file will reset all the above counter
 * values. (A userspace usage sketch appears after this listing.)
 *
 * These statistical counters are implemented as per-cpu variables which are
 * summed and computed whenever the corresponding debugfs files are read. This
 * minimizes added overhead, making the counters usable even in a production
 * environment.
 *
 * There may be a slight difference between pv_kick_wake and pv_kick_unlock.
 */
enum qlock_stats {
	qstat_pv_hash_hops,
	qstat_pv_kick_unlock,
	qstat_pv_kick_wake,
	qstat_pv_latency_kick,
	qstat_pv_latency_wake,
	qstat_pv_lock_slowpath,
	qstat_pv_lock_stealing,
	qstat_pv_spurious_wakeup,
	qstat_pv_wait_again,
	qstat_pv_wait_early,
	qstat_pv_wait_head,
	qstat_pv_wait_node,
	qstat_num,	/* Total number of statistical counters */
	qstat_reset_cnts = qstat_num,
};

#ifdef CONFIG_QUEUED_LOCK_STAT
/*
 * Collect pvqspinlock statistics
 */
#include <linux/debugfs.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/fs.h>

static const char * const qstat_names[qstat_num + 1] = {
	[qstat_pv_hash_hops]	   = "pv_hash_hops",
	[qstat_pv_kick_unlock]     = "pv_kick_unlock",
	[qstat_pv_kick_wake]       = "pv_kick_wake",
	[qstat_pv_spurious_wakeup] = "pv_spurious_wakeup",
	[qstat_pv_latency_kick]	   = "pv_latency_kick",
	[qstat_pv_latency_wake]    = "pv_latency_wake",
	[qstat_pv_lock_slowpath]   = "pv_lock_slowpath",
	[qstat_pv_lock_stealing]   = "pv_lock_stealing",
	[qstat_pv_wait_again]      = "pv_wait_again",
	[qstat_pv_wait_early]      = "pv_wait_early",
	[qstat_pv_wait_head]       = "pv_wait_head",
	[qstat_pv_wait_node]       = "pv_wait_node",
	[qstat_reset_cnts]         = "reset_counters",
};

/*
 * Per-cpu counters
 */
static DEFINE_PER_CPU(unsigned long, qstats[qstat_num]);
static DEFINE_PER_CPU(u64, pv_kick_time);

/*
 * Function to read and return the qlock statistical counter values
 *
 * The following counters are handled specially:
 * 1. qstat_pv_latency_kick
 *    Average kick latency (ns) = pv_latency_kick/pv_kick_unlock
 * 2. qstat_pv_latency_wake
 *    Average wake latency (ns) = pv_latency_wake/pv_kick_wake
 * 3. qstat_pv_hash_hops
 *    Average hops/hash = pv_hash_hops/pv_kick_unlock
 */
static ssize_t qstat_read(struct file *file, char __user *user_buf,
			  size_t count, loff_t *ppos)
{
	char buf[64];
	int cpu, counter, len;
	u64 stat = 0, kicks = 0;

	/*
	 * Get the counter ID stored in file->f_inode->i_private
	 */
	counter = (long)file_inode(file)->i_private;

	if (counter >= qstat_num)
		return -EBADF;

	for_each_possible_cpu(cpu) {
		stat += per_cpu(qstats[counter], cpu);
		/*
		 * Need to sum additional counters for some of them
		 */
		switch (counter) {

		case qstat_pv_latency_kick:
		case qstat_pv_hash_hops:
			kicks += per_cpu(qstats[qstat_pv_kick_unlock], cpu);
			break;

		case qstat_pv_latency_wake:
			kicks += per_cpu(qstats[qstat_pv_kick_wake], cpu);
			break;
		}
	}

	if (counter == qstat_pv_hash_hops) {
		u64 frac = 0;

		if (kicks) {
			frac = 100ULL * do_div(stat, kicks);
			frac = DIV_ROUND_CLOSEST_ULL(frac, kicks);
		}

		/*
		 * Return a X.XX decimal number
		 */
		len = snprintf(buf, sizeof(buf) - 1, "%llu.%02llu\n", stat, frac);
	} else {
		/*
		 * Round to the nearest ns
		 */
		if ((counter == qstat_pv_latency_kick) ||
		    (counter == qstat_pv_latency_wake)) {
			if (kicks)
				stat = DIV_ROUND_CLOSEST_ULL(stat, kicks);
		}
		len = snprintf(buf, sizeof(buf) - 1, "%llu\n", stat);
	}

	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}

/*
 * Function to handle write request
 *
 * When counter = reset_cnts, reset all the counter values.
 * Since the counter updates aren't atomic, a reset can race with
 * concurrent updates, so the counters are very likely, but not
 * guaranteed, to all read zero afterwards.
 */
static ssize_t qstat_write(struct file *file, const char __user *user_buf,
			   size_t count, loff_t *ppos)
{
	int cpu;

	/*
	 * Get the counter ID stored in file->f_inode->i_private
	 */
	if ((long)file_inode(file)->i_private != qstat_reset_cnts)
		return count;

	for_each_possible_cpu(cpu) {
		int i;
		unsigned long *ptr = per_cpu_ptr(qstats, cpu);

		for (i = 0 ; i < qstat_num; i++)
			WRITE_ONCE(ptr[i], 0);
	}
	return count;
}

/*
 * Debugfs data structures
 */
static const struct file_operations fops_qstat = {
	.read = qstat_read,
	.write = qstat_write,
	.llseek = default_llseek,
};

/*
 * Initialize debugfs for the qspinlock statistical counters
 */
static int __init init_qspinlock_stat(void)
{
	struct dentry *d_qstat = debugfs_create_dir("qlockstat", NULL);
	int i;

	if (!d_qstat)
		goto out;

	/*
	 * Create the debugfs files
	 *
	 * As reading from and writing to the stat files can be slow, only
	 * root is allowed to do the read/write to limit impact to system
	 * performance.
	 */
	for (i = 0; i < qstat_num; i++)
		if (!debugfs_create_file(qstat_names[i], 0400, d_qstat,
					 (void *)(long)i, &fops_qstat))
			goto fail_undo;

	if (!debugfs_create_file(qstat_names[qstat_reset_cnts], 0200, d_qstat,
				 (void *)(long)qstat_reset_cnts, &fops_qstat))
		goto fail_undo;

	return 0;
fail_undo:
	debugfs_remove_recursive(d_qstat);
out:
	pr_warn("Could not create 'qlockstat' debugfs entries\n");
	return -ENOMEM;
}
fs_initcall(init_qspinlock_stat);

/*
 * Increment the PV qspinlock statistical counters
 *
 * this_cpu_inc()/this_cpu_add() are preemption- and interrupt-safe,
 * so no extra locking is needed around the counter updates.
 */
static inline void qstat_inc(enum qlock_stats stat, bool cond)
{
	if (cond)
		this_cpu_inc(qstats[stat]);
}

/*
 * PV hash hop count
 */
static inline void qstat_hop(int hopcnt)
{
	this_cpu_add(qstats[qstat_pv_hash_hops], hopcnt);
}

/*
 * Replacement function for pv_kick()
 */
static inline void __pv_kick(int cpu)
{
	u64 start = sched_clock();

	per_cpu(pv_kick_time, cpu) = start;
	pv_kick(cpu);
	this_cpu_add(qstats[qstat_pv_latency_kick], sched_clock() - start);
}

/*
 * Replacement function for pv_wait()
 */
static inline void __pv_wait(u8 *ptr, u8 val)
{
	u64 *pkick_time = this_cpu_ptr(&pv_kick_time);

	*pkick_time = 0;
	pv_wait(ptr, val);
	if (*pkick_time) {
		this_cpu_add(qstats[qstat_pv_latency_wake],
			     sched_clock() - *pkick_time);
		qstat_inc(qstat_pv_kick_wake, true);
	}
}

#define pv_kick(c)	__pv_kick(c)
#define pv_wait(p, v)	__pv_wait(p, v)

#else /* CONFIG_QUEUED_LOCK_STAT */

static inline void qstat_inc(enum qlock_stats stat, bool cond)	{ }
static inline void qstat_hop(int hopcnt)			{ }

#endif /* CONFIG_QUEUED_LOCK_STAT */
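
Usage sketch (not part of the kernel source): with CONFIG_QUEUED_LOCK_STAT
enabled and debugfs mounted at the usual /sys/kernel/debug, the counters above
read as plain text files (root only, per the 0400/0200 modes), and writing
anything to reset_counters clears them. A minimal userspace reader, assuming
that default mount point:

/* Read one qlockstat counter, then reset all counters. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[64];
	ssize_t n;
	int fd;

	/* Average vCPU kick latency = pv_latency_kick / pv_kick_unlock. */
	fd = open("/sys/kernel/debug/qlockstat/pv_latency_kick", O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	n = read(fd, buf, sizeof(buf) - 1);
	if (n > 0) {
		buf[n] = '\0';
		printf("avg kick latency (ns): %s", buf);
	}
	close(fd);

	/* Any write to reset_counters zeroes every counter (mode 0200). */
	fd = open("/sys/kernel/debug/qlockstat/reset_counters", O_WRONLY);
	if (fd >= 0) {
		if (write(fd, "1", 1) < 0)
			perror("write");
		close(fd);
	}
	return 0;
}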