/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Authors: Waiman Long <longman@redhat.com>
 */

#include "lock_events.h"

#ifdef CONFIG_LOCK_EVENT_COUNTS
#ifdef CONFIG_PARAVIRT_SPINLOCKS
/*
 * Collect pvqspinlock locking event counts
 */
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/fs.h>

#define EVENT_COUNT(ev)	lockevents[LOCKEVENT_ ## ev]
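/*
 * For illustration: EVENT_COUNT(pv_kick_unlock) expands to
 * lockevents[LOCKEVENT_pv_kick_unlock], indexing the per-cpu event
 * count array declared in lock_events.h.
 */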

/*
 * PV specific per-cpu counter
 */
static DEFINE_PER_CPU(u64, pv_kick_time);

/*
 * Function to read and return the PV qspinlock counts.
 *
 * The following counters are handled specially:
 * 1. pv_latency_kick
 *    Average kick latency (ns) = pv_latency_kick/pv_kick_unlock
 * 2. pv_latency_wake
 *    Average wake latency (ns) = pv_latency_wake/pv_kick_wake
 * 3. pv_hash_hops
 *    Average hops/hash = pv_hash_hops/pv_kick_unlock
 */
ssize_t lockevent_read(struct file *file, char __user *user_buf,
		       size_t count, loff_t *ppos)
{
	char buf[64];
	int cpu, id, len;
	u64 sum = 0, kicks = 0;

	/*
	 * Get the counter ID stored in file->f_inode->i_private
	 */
	id = (long)file_inode(file)->i_private;

	if (id >= lockevent_num)
		return -EBADF;

	for_each_possible_cpu(cpu) {
		sum += per_cpu(lockevents[id], cpu);
		/*
		 * Need to sum additional counters for some of them
		 */
		switch (id) {

		case LOCKEVENT_pv_latency_kick:
		case LOCKEVENT_pv_hash_hops:
			kicks += per_cpu(EVENT_COUNT(pv_kick_unlock), cpu);
			break;

		case LOCKEVENT_pv_latency_wake:
			kicks += per_cpu(EVENT_COUNT(pv_kick_wake), cpu);
			break;
		}
	}

	if (id == LOCKEVENT_pv_hash_hops) {
		u64 frac = 0;

		if (kicks) {
			frac = 100ULL * do_div(sum, kicks);
			frac = DIV_ROUND_CLOSEST_ULL(frac, kicks);
		}

		/*
		 * Return a X.XX decimal number
		 */
		len = snprintf(buf, sizeof(buf) - 1, "%llu.%02llu\n",
			       sum, frac);
	} else {
		/*
		 * Round to the nearest ns
		 */
		if ((id == LOCKEVENT_pv_latency_kick) ||
		    (id == LOCKEVENT_pv_latency_wake)) {
			if (kicks)
				sum = DIV_ROUND_CLOSEST_ULL(sum, kicks);
		}
		len = snprintf(buf, sizeof(buf) - 1, "%llu\n", sum);
	}

	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
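
/*
 * Worked example (illustrative numbers, not measured data): if the summed
 * pv_hash_hops count is 250 over kicks = 100 pv_kick_unlock events, then
 * do_div(sum, kicks) leaves sum = 2 and returns the remainder 50, so
 * frac = DIV_ROUND_CLOSEST_ULL(100 * 50, 100) = 50 and the file reports
 * "2.50" hops per hashing operation.
 */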

/*
 * PV hash hop count
 */
static inline void lockevent_pv_hop(int hopcnt)
{
	this_cpu_add(EVENT_COUNT(pv_hash_hops), hopcnt);
}

/*
 * Replacement function for pv_kick()
 */
static inline void __pv_kick(int cpu)
{
	u64 start = sched_clock();

	per_cpu(pv_kick_time, cpu) = start;
	pv_kick(cpu);
	this_cpu_add(EVENT_COUNT(pv_latency_kick), sched_clock() - start);
}

/*
 * Replacement function for pv_wait()
 */
static inline void __pv_wait(u8 *ptr, u8 val)
{
	u64 *pkick_time = this_cpu_ptr(&pv_kick_time);

	*pkick_time = 0;
	pv_wait(ptr, val);
	if (*pkick_time) {
		this_cpu_add(EVENT_COUNT(pv_latency_wake),
			     sched_clock() - *pkick_time);
		lockevent_inc(pv_kick_wake);
	}
}

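/*
 * Route subsequent pv_kick()/pv_wait() call sites to the instrumented
 * wrappers above. __pv_kick() stamps the target CPU's pv_kick_time before
 * kicking it; if __pv_wait() sees a non-zero stamp on return, the wakeup
 * came from a kick and the elapsed time is credited to pv_latency_wake.
 */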
#define pv_kick(c)	__pv_kick(c)
#define pv_wait(p, v)	__pv_wait(p, v)

#endif /* CONFIG_PARAVIRT_SPINLOCKS */

#else /* CONFIG_LOCK_EVENT_COUNTS */

static inline void lockevent_pv_hop(int hopcnt) { }

#endif /* CONFIG_LOCK_EVENT_COUNTS */

/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Authors: Waiman Long <waiman.long@hpe.com>
 */

/*
 * When queued spinlock statistical counters are enabled, the following
 * debugfs files will be created for reporting the counter values:
 *
 * <debugfs>/qlockstat/
 *   pv_hash_hops	- average # of hops per hashing operation
 *   pv_kick_unlock	- # of vCPU kicks issued at unlock time
 *   pv_kick_wake	- # of vCPU kicks used for computing pv_latency_wake
 *   pv_latency_kick	- average latency (ns) of vCPU kick operation
 *   pv_latency_wake	- average latency (ns) from vCPU kick to wakeup
 *   pv_lock_slowpath	- # of locking operations via the slowpath
 *   pv_lock_stealing	- # of lock stealing operations
 *   pv_spurious_wakeup	- # of spurious wakeups
 *   pv_wait_again	- # of vCPU waits that happened after a vCPU kick
 *   pv_wait_early	- # of early vCPU waits
 *   pv_wait_head	- # of vCPU waits at the queue head
 *   pv_wait_node	- # of vCPU waits at a non-head queue node
 *
 * Writing to the "reset_counters" file will reset all the above counter
 * values.
 *
 * These statistical counters are implemented as per-cpu variables which are
 * summed and computed whenever the corresponding debugfs files are read. This
 * minimizes added overhead, making the counters usable even in a production
 * environment.
 *
 * There may be a slight difference between pv_kick_wake and pv_kick_unlock.
 */
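/*
 * Example usage (illustrative; assumes debugfs is mounted at
 * /sys/kernel/debug):
 *
 *	# cat /sys/kernel/debug/qlockstat/pv_latency_kick
 *	# echo 1 > /sys/kernel/debug/qlockstat/reset_counters
 *
 * Any write to reset_counters clears all of the counters; the value
 * written is ignored.
 */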
enum qlock_stats {
	qstat_pv_hash_hops,
	qstat_pv_kick_unlock,
	qstat_pv_kick_wake,
	qstat_pv_latency_kick,
	qstat_pv_latency_wake,
	qstat_pv_lock_slowpath,
	qstat_pv_lock_stealing,
	qstat_pv_spurious_wakeup,
	qstat_pv_wait_again,
	qstat_pv_wait_early,
	qstat_pv_wait_head,
	qstat_pv_wait_node,
	qstat_num,	/* Total number of statistical counters */
	qstat_reset_cnts = qstat_num,
};

#ifdef CONFIG_QUEUED_LOCK_STAT
/*
 * Collect pvqspinlock statistics
 */
#include <linux/debugfs.h>
#include <linux/sched.h>
#include <linux/fs.h>

static const char * const qstat_names[qstat_num + 1] = {
	[qstat_pv_hash_hops]	   = "pv_hash_hops",
	[qstat_pv_kick_unlock]	   = "pv_kick_unlock",
	[qstat_pv_kick_wake]	   = "pv_kick_wake",
	[qstat_pv_spurious_wakeup] = "pv_spurious_wakeup",
	[qstat_pv_latency_kick]	   = "pv_latency_kick",
	[qstat_pv_latency_wake]	   = "pv_latency_wake",
	[qstat_pv_lock_slowpath]   = "pv_lock_slowpath",
	[qstat_pv_lock_stealing]   = "pv_lock_stealing",
	[qstat_pv_wait_again]	   = "pv_wait_again",
	[qstat_pv_wait_early]	   = "pv_wait_early",
	[qstat_pv_wait_head]	   = "pv_wait_head",
	[qstat_pv_wait_node]	   = "pv_wait_node",
	[qstat_reset_cnts]	   = "reset_counters",
};

/*
 * Per-cpu counters
 */
static DEFINE_PER_CPU(unsigned long, qstats[qstat_num]);
static DEFINE_PER_CPU(u64, pv_kick_time);

/*
 * Function to read and return the qlock statistical counter values
 *
 * The following counters are handled specially:
 * 1. qstat_pv_latency_kick
 *    Average kick latency (ns) = pv_latency_kick/pv_kick_unlock
 * 2. qstat_pv_latency_wake
 *    Average wake latency (ns) = pv_latency_wake/pv_kick_wake
 * 3. qstat_pv_hash_hops
 *    Average hops/hash = pv_hash_hops/pv_kick_unlock
 */
static ssize_t qstat_read(struct file *file, char __user *user_buf,
			  size_t count, loff_t *ppos)
{
	char buf[64];
	int cpu, counter, len;
	u64 stat = 0, kicks = 0;

	/*
	 * Get the counter ID stored in file->f_inode->i_private
	 */
	if (!file->f_inode) {
		WARN_ON_ONCE(1);
		return -EBADF;
	}
	counter = (long)(file->f_inode->i_private);

	if (counter >= qstat_num)
		return -EBADF;

	for_each_possible_cpu(cpu) {
		stat += per_cpu(qstats[counter], cpu);
		/*
		 * Need to sum additional counters for some of them
		 */
		switch (counter) {

		case qstat_pv_latency_kick:
		case qstat_pv_hash_hops:
			kicks += per_cpu(qstats[qstat_pv_kick_unlock], cpu);
			break;

		case qstat_pv_latency_wake:
			kicks += per_cpu(qstats[qstat_pv_kick_wake], cpu);
			break;
		}
	}

	if (counter == qstat_pv_hash_hops) {
		u64 frac = 0;

		if (kicks) {
			frac = 100ULL * do_div(stat, kicks);
			frac = DIV_ROUND_CLOSEST_ULL(frac, kicks);
		}

		/*
		 * Return a X.XX decimal number
		 */
		len = snprintf(buf, sizeof(buf) - 1, "%llu.%02llu\n",
			       stat, frac);
	} else {
		/*
		 * Round to the nearest ns
		 */
		if ((counter == qstat_pv_latency_kick) ||
		    (counter == qstat_pv_latency_wake)) {
			if (kicks)
				stat = DIV_ROUND_CLOSEST_ULL(stat, kicks);
		}
		len = snprintf(buf, sizeof(buf) - 1, "%llu\n", stat);
	}

	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}

/*
 * Function to handle write request
 *
 * When counter == qstat_reset_cnts, reset all the counter values.
 * Since the counter updates aren't atomic, the resetting is done twice
 * to make sure that the counters are very likely to be all cleared.
 */
static ssize_t qstat_write(struct file *file, const char __user *user_buf,
			   size_t count, loff_t *ppos)
{
	int cpu;

	/*
	 * Get the counter ID stored in file->f_inode->i_private
	 */
	if (!file->f_inode) {
		WARN_ON_ONCE(1);
		return -EBADF;
	}
	if ((long)(file->f_inode->i_private) != qstat_reset_cnts)
		return count;

	for_each_possible_cpu(cpu) {
		int i;
		unsigned long *ptr = per_cpu_ptr(qstats, cpu);

		for (i = 0; i < qstat_num; i++)
			WRITE_ONCE(ptr[i], 0);
		for (i = 0; i < qstat_num; i++)
			WRITE_ONCE(ptr[i], 0);
	}
	return count;
}

/*
 * Debugfs data structures
 */
static const struct file_operations fops_qstat = {
	.read = qstat_read,
	.write = qstat_write,
	.llseek = default_llseek,
};

/*
 * Initialize debugfs for the qspinlock statistical counters
 */
static int __init init_qspinlock_stat(void)
{
	struct dentry *d_qstat = debugfs_create_dir("qlockstat", NULL);
	int i;

	if (!d_qstat) {
		pr_warn("Could not create 'qlockstat' debugfs directory\n");
		return 0;
	}

	/*
	 * Create the debugfs files
	 *
	 * As reading from and writing to the stat files can be slow, only
	 * root is allowed to do the read/write to limit impact to system
	 * performance.
	 */
	for (i = 0; i < qstat_num; i++)
		debugfs_create_file(qstat_names[i], 0400, d_qstat,
				    (void *)(long)i, &fops_qstat);

	debugfs_create_file(qstat_names[qstat_reset_cnts], 0200, d_qstat,
			    (void *)(long)qstat_reset_cnts, &fops_qstat);
	return 0;
}
fs_initcall(init_qspinlock_stat);
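/*
 * Note: fs_initcall() runs after the core initcalls that register debugfs
 * itself, so the qlockstat directory can be created safely at this point.
 */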

/*
 * Increment the PV qspinlock statistical counters
 */
static inline void qstat_inc(enum qlock_stats stat, bool cond)
{
	if (cond)
		this_cpu_inc(qstats[stat]);
}

/*
 * PV hash hop count
 */
static inline void qstat_hop(int hopcnt)
{
	this_cpu_add(qstats[qstat_pv_hash_hops], hopcnt);
}

/*
 * Replacement function for pv_kick()
 */
static inline void __pv_kick(int cpu)
{
	u64 start = sched_clock();

	per_cpu(pv_kick_time, cpu) = start;
	pv_kick(cpu);
	this_cpu_add(qstats[qstat_pv_latency_kick], sched_clock() - start);
}

/*
 * Replacement function for pv_wait()
 */
static inline void __pv_wait(u8 *ptr, u8 val)
{
	u64 *pkick_time = this_cpu_ptr(&pv_kick_time);

	*pkick_time = 0;
	pv_wait(ptr, val);
	if (*pkick_time) {
		this_cpu_add(qstats[qstat_pv_latency_wake],
			     sched_clock() - *pkick_time);
		qstat_inc(qstat_pv_kick_wake, true);
	}
}

#define pv_kick(c)	__pv_kick(c)
#define pv_wait(p, v)	__pv_wait(p, v)

#else /* CONFIG_QUEUED_LOCK_STAT */

static inline void qstat_inc(enum qlock_stats stat, bool cond) { }
static inline void qstat_hop(int hopcnt) { }

#endif /* CONFIG_QUEUED_LOCK_STAT */