// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2021 Intel Corporation. All rights rsvd. */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/smp.h>
#include <uapi/linux/idxd.h>
#include <linux/idxd.h>
#include <linux/dmaengine.h>
#include "../../dma/idxd/idxd.h"
#include <linux/debugfs.h>
#include <crypto/internal/acompress.h>
#include "iaa_crypto.h"
#include "iaa_crypto_stats.h"

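/*
 * Module-global statistics, exposed through debugfs.  These are plain
 * u64 counters updated without locking, so values read while requests
 * are in flight are approximate.
 */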
static u64 total_comp_calls;
static u64 total_decomp_calls;
static u64 total_sw_decomp_calls;
static u64 max_comp_delay_ns;
static u64 max_decomp_delay_ns;
static u64 max_acomp_delay_ns;
static u64 max_adecomp_delay_ns;
static u64 total_comp_bytes_out;
static u64 total_decomp_bytes_in;
static u64 total_completion_einval_errors;
static u64 total_completion_timeout_errors;
static u64 total_completion_comp_buf_overflow_errors;

static struct dentry *iaa_crypto_debugfs_root;

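/*
 * Helpers used by the rest of the iaa_crypto driver (declared in
 * iaa_crypto_stats.h) to update the global counters above.
 */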
void update_total_comp_calls(void)
{
        total_comp_calls++;
}

void update_total_comp_bytes_out(int n)
{
        total_comp_bytes_out += n;
}

void update_total_decomp_calls(void)
{
        total_decomp_calls++;
}

void update_total_sw_decomp_calls(void)
{
        total_sw_decomp_calls++;
}

void update_total_decomp_bytes_in(int n)
{
        total_decomp_bytes_in += n;
}

void update_completion_einval_errs(void)
{
        total_completion_einval_errors++;
}

void update_completion_timeout_errs(void)
{
        total_completion_timeout_errors++;
}

void update_completion_comp_buf_overflow_errs(void)
{
        total_completion_comp_buf_overflow_errors++;
}

void update_max_comp_delay_ns(u64 start_time_ns)
{
        u64 time_diff;

        time_diff = ktime_get_ns() - start_time_ns;

        if (time_diff > max_comp_delay_ns)
                max_comp_delay_ns = time_diff;
}

void update_max_decomp_delay_ns(u64 start_time_ns)
{
        u64 time_diff;

        time_diff = ktime_get_ns() - start_time_ns;

        if (time_diff > max_decomp_delay_ns)
                max_decomp_delay_ns = time_diff;
}

void update_max_acomp_delay_ns(u64 start_time_ns)
{
        u64 time_diff;

        time_diff = ktime_get_ns() - start_time_ns;

        if (time_diff > max_acomp_delay_ns)
                max_acomp_delay_ns = time_diff;
}

void update_max_adecomp_delay_ns(u64 start_time_ns)
{
        u64 time_diff;

        time_diff = ktime_get_ns() - start_time_ns;

        if (time_diff > max_adecomp_delay_ns)
                max_adecomp_delay_ns = time_diff;
}

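/*
 * Per-workqueue and per-device counters, kept in struct iaa_wq and
 * struct iaa_device alongside the global totals above.
 */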
void update_wq_comp_calls(struct idxd_wq *idxd_wq)
{
        struct iaa_wq *wq = idxd_wq_get_private(idxd_wq);

        wq->comp_calls++;
        wq->iaa_device->comp_calls++;
}

void update_wq_comp_bytes(struct idxd_wq *idxd_wq, int n)
{
        struct iaa_wq *wq = idxd_wq_get_private(idxd_wq);

        wq->comp_bytes += n;
        wq->iaa_device->comp_bytes += n;
}

void update_wq_decomp_calls(struct idxd_wq *idxd_wq)
{
        struct iaa_wq *wq = idxd_wq_get_private(idxd_wq);

        wq->decomp_calls++;
        wq->iaa_device->decomp_calls++;
}

void update_wq_decomp_bytes(struct idxd_wq *idxd_wq, int n)
{
        struct iaa_wq *wq = idxd_wq_get_private(idxd_wq);

        wq->decomp_bytes += n;
        wq->iaa_device->decomp_bytes += n;
}

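/* Reset helpers backing the "stats_reset" debugfs file. */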
static void reset_iaa_crypto_stats(void)
{
        total_comp_calls = 0;
        total_decomp_calls = 0;
        total_sw_decomp_calls = 0;
        max_comp_delay_ns = 0;
        max_decomp_delay_ns = 0;
        max_acomp_delay_ns = 0;
        max_adecomp_delay_ns = 0;
        total_comp_bytes_out = 0;
        total_decomp_bytes_in = 0;
        total_completion_einval_errors = 0;
        total_completion_timeout_errors = 0;
        total_completion_comp_buf_overflow_errors = 0;
}

static void reset_wq_stats(struct iaa_wq *wq)
{
        wq->comp_calls = 0;
        wq->comp_bytes = 0;
        wq->decomp_calls = 0;
        wq->decomp_bytes = 0;
}

static void reset_device_stats(struct iaa_device *iaa_device)
{
        struct iaa_wq *iaa_wq;

        iaa_device->comp_calls = 0;
        iaa_device->comp_bytes = 0;
        iaa_device->decomp_calls = 0;
        iaa_device->decomp_bytes = 0;

        list_for_each_entry(iaa_wq, &iaa_device->wqs, list)
                reset_wq_stats(iaa_wq);
}

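/*
 * seq_file helpers for the "wq_stats" debugfs file: global totals
 * first, then one block per IAA device and its workqueues.
 */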
static void wq_show(struct seq_file *m, struct iaa_wq *iaa_wq)
{
        seq_printf(m, " name: %s\n", iaa_wq->wq->name);
        seq_printf(m, " comp_calls: %llu\n", iaa_wq->comp_calls);
        seq_printf(m, " comp_bytes: %llu\n", iaa_wq->comp_bytes);
        seq_printf(m, " decomp_calls: %llu\n", iaa_wq->decomp_calls);
        seq_printf(m, " decomp_bytes: %llu\n\n", iaa_wq->decomp_bytes);
}

static void device_stats_show(struct seq_file *m, struct iaa_device *iaa_device)
{
        struct iaa_wq *iaa_wq;

        seq_puts(m, "iaa device:\n");
        seq_printf(m, " id: %d\n", iaa_device->idxd->id);
        seq_printf(m, " n_wqs: %d\n", iaa_device->n_wq);
        seq_printf(m, " comp_calls: %llu\n", iaa_device->comp_calls);
        seq_printf(m, " comp_bytes: %llu\n", iaa_device->comp_bytes);
        seq_printf(m, " decomp_calls: %llu\n", iaa_device->decomp_calls);
        seq_printf(m, " decomp_bytes: %llu\n", iaa_device->decomp_bytes);
        seq_puts(m, " wqs:\n");

        list_for_each_entry(iaa_wq, &iaa_device->wqs, list)
                wq_show(m, iaa_wq);
}

static void global_stats_show(struct seq_file *m)
{
        seq_puts(m, "global stats:\n");
        seq_printf(m, " total_comp_calls: %llu\n", total_comp_calls);
        seq_printf(m, " total_decomp_calls: %llu\n", total_decomp_calls);
        seq_printf(m, " total_sw_decomp_calls: %llu\n", total_sw_decomp_calls);
        seq_printf(m, " total_comp_bytes_out: %llu\n", total_comp_bytes_out);
        seq_printf(m, " total_decomp_bytes_in: %llu\n", total_decomp_bytes_in);
        seq_printf(m, " total_completion_einval_errors: %llu\n",
                   total_completion_einval_errors);
        seq_printf(m, " total_completion_timeout_errors: %llu\n",
                   total_completion_timeout_errors);
        seq_printf(m, " total_completion_comp_buf_overflow_errors: %llu\n\n",
                   total_completion_comp_buf_overflow_errors);
}

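/*
 * Show callback for the "wq_stats" debugfs file.  iaa_devices_lock
 * protects the iaa_devices list while it is walked.
 */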
static int wq_stats_show(struct seq_file *m, void *v)
{
        struct iaa_device *iaa_device;

        mutex_lock(&iaa_devices_lock);

        global_stats_show(m);

        list_for_each_entry(iaa_device, &iaa_devices, list)
                device_stats_show(m, iaa_device);

        mutex_unlock(&iaa_devices_lock);

        return 0;
}

static int iaa_crypto_stats_reset(void *data, u64 value)
{
        struct iaa_device *iaa_device;

        reset_iaa_crypto_stats();

        mutex_lock(&iaa_devices_lock);

        list_for_each_entry(iaa_device, &iaa_devices, list)
                reset_device_stats(iaa_device);

        mutex_unlock(&iaa_devices_lock);

        return 0;
}

static int wq_stats_open(struct inode *inode, struct file *file)
{
        return single_open(file, wq_stats_show, file);
}

static const struct file_operations wq_stats_fops = {
        .open = wq_stats_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

DEFINE_DEBUGFS_ATTRIBUTE(wq_stats_reset_fops, NULL, iaa_crypto_stats_reset, "%llu\n");

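/*
 * Create the iaa_crypto debugfs tree.  With debugfs mounted in the
 * usual place the counters show up under /sys/kernel/debug/iaa_crypto/,
 * e.g.:
 *
 *   cat /sys/kernel/debug/iaa_crypto/wq_stats
 *   echo 1 > /sys/kernel/debug/iaa_crypto/stats_reset
 */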
int __init iaa_crypto_debugfs_init(void)
{
        if (!debugfs_initialized())
                return -ENODEV;

        iaa_crypto_debugfs_root = debugfs_create_dir("iaa_crypto", NULL);
        if (!iaa_crypto_debugfs_root)
                return -ENOMEM;

        debugfs_create_u64("max_comp_delay_ns", 0644,
                           iaa_crypto_debugfs_root, &max_comp_delay_ns);
        debugfs_create_u64("max_decomp_delay_ns", 0644,
                           iaa_crypto_debugfs_root, &max_decomp_delay_ns);
        debugfs_create_u64("max_acomp_delay_ns", 0644,
                           iaa_crypto_debugfs_root, &max_acomp_delay_ns);
        debugfs_create_u64("max_adecomp_delay_ns", 0644,
                           iaa_crypto_debugfs_root, &max_adecomp_delay_ns);
        debugfs_create_u64("total_comp_calls", 0644,
                           iaa_crypto_debugfs_root, &total_comp_calls);
        debugfs_create_u64("total_decomp_calls", 0644,
                           iaa_crypto_debugfs_root, &total_decomp_calls);
        debugfs_create_u64("total_sw_decomp_calls", 0644,
                           iaa_crypto_debugfs_root, &total_sw_decomp_calls);
        debugfs_create_u64("total_comp_bytes_out", 0644,
                           iaa_crypto_debugfs_root, &total_comp_bytes_out);
        debugfs_create_u64("total_decomp_bytes_in", 0644,
                           iaa_crypto_debugfs_root, &total_decomp_bytes_in);
        debugfs_create_file("wq_stats", 0644, iaa_crypto_debugfs_root, NULL,
                            &wq_stats_fops);
        debugfs_create_file("stats_reset", 0644, iaa_crypto_debugfs_root, NULL,
                            &wq_stats_reset_fops);

        return 0;
}

void __exit iaa_crypto_debugfs_cleanup(void)
{
        debugfs_remove_recursive(iaa_crypto_debugfs_root);
}

MODULE_LICENSE("GPL");
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2021 Intel Corporation. All rights rsvd. */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/smp.h>
#include <uapi/linux/idxd.h>
#include <linux/idxd.h>
#include <linux/dmaengine.h>
#include "../../dma/idxd/idxd.h"
#include <linux/debugfs.h>
#include <crypto/internal/acompress.h>
#include "iaa_crypto.h"
#include "iaa_crypto_stats.h"

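/*
 * Module-global statistics, exposed through debugfs.  The counters are
 * atomic64_t so they can be bumped concurrently from the request
 * submission and completion paths without locking.
 */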
static atomic64_t total_comp_calls;
static atomic64_t total_decomp_calls;
static atomic64_t total_sw_decomp_calls;
static atomic64_t total_comp_bytes_out;
static atomic64_t total_decomp_bytes_in;
static atomic64_t total_completion_einval_errors;
static atomic64_t total_completion_timeout_errors;
static atomic64_t total_completion_comp_buf_overflow_errors;

static struct dentry *iaa_crypto_debugfs_root;

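/*
 * Update helpers used by the rest of the iaa_crypto driver (declared
 * in iaa_crypto_stats.h) to maintain the global, per-device and
 * per-workqueue counters.
 */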
void update_total_comp_calls(void)
{
        atomic64_inc(&total_comp_calls);
}

void update_total_comp_bytes_out(int n)
{
        atomic64_add(n, &total_comp_bytes_out);
}

void update_total_decomp_calls(void)
{
        atomic64_inc(&total_decomp_calls);
}

void update_total_sw_decomp_calls(void)
{
        atomic64_inc(&total_sw_decomp_calls);
}

void update_total_decomp_bytes_in(int n)
{
        atomic64_add(n, &total_decomp_bytes_in);
}

void update_completion_einval_errs(void)
{
        atomic64_inc(&total_completion_einval_errors);
}

void update_completion_timeout_errs(void)
{
        atomic64_inc(&total_completion_timeout_errors);
}

void update_completion_comp_buf_overflow_errs(void)
{
        atomic64_inc(&total_completion_comp_buf_overflow_errors);
}

void update_wq_comp_calls(struct idxd_wq *idxd_wq)
{
        struct iaa_wq *wq = idxd_wq_get_private(idxd_wq);

        atomic64_inc(&wq->comp_calls);
        atomic64_inc(&wq->iaa_device->comp_calls);
}

void update_wq_comp_bytes(struct idxd_wq *idxd_wq, int n)
{
        struct iaa_wq *wq = idxd_wq_get_private(idxd_wq);

        atomic64_add(n, &wq->comp_bytes);
        atomic64_add(n, &wq->iaa_device->comp_bytes);
}

void update_wq_decomp_calls(struct idxd_wq *idxd_wq)
{
        struct iaa_wq *wq = idxd_wq_get_private(idxd_wq);

        atomic64_inc(&wq->decomp_calls);
        atomic64_inc(&wq->iaa_device->decomp_calls);
}

void update_wq_decomp_bytes(struct idxd_wq *idxd_wq, int n)
{
        struct iaa_wq *wq = idxd_wq_get_private(idxd_wq);

        atomic64_add(n, &wq->decomp_bytes);
        atomic64_add(n, &wq->iaa_device->decomp_bytes);
}

static void reset_iaa_crypto_stats(void)
{
        atomic64_set(&total_comp_calls, 0);
        atomic64_set(&total_decomp_calls, 0);
        atomic64_set(&total_sw_decomp_calls, 0);
        atomic64_set(&total_comp_bytes_out, 0);
        atomic64_set(&total_decomp_bytes_in, 0);
        atomic64_set(&total_completion_einval_errors, 0);
        atomic64_set(&total_completion_timeout_errors, 0);
        atomic64_set(&total_completion_comp_buf_overflow_errors, 0);
}

static void reset_wq_stats(struct iaa_wq *wq)
{
        atomic64_set(&wq->comp_calls, 0);
        atomic64_set(&wq->comp_bytes, 0);
        atomic64_set(&wq->decomp_calls, 0);
        atomic64_set(&wq->decomp_bytes, 0);
}

static void reset_device_stats(struct iaa_device *iaa_device)
{
        struct iaa_wq *iaa_wq;

        atomic64_set(&iaa_device->comp_calls, 0);
        atomic64_set(&iaa_device->comp_bytes, 0);
        atomic64_set(&iaa_device->decomp_calls, 0);
        atomic64_set(&iaa_device->decomp_bytes, 0);

        list_for_each_entry(iaa_wq, &iaa_device->wqs, list)
                reset_wq_stats(iaa_wq);
}

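/*
 * seq_file output: "global_stats" dumps the module-wide totals,
 * "wq_stats" dumps one block per IAA device and its workqueues.
 */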
static void wq_show(struct seq_file *m, struct iaa_wq *iaa_wq)
{
        seq_printf(m, " name: %s\n", iaa_wq->wq->name);
        seq_printf(m, " comp_calls: %llu\n",
                   atomic64_read(&iaa_wq->comp_calls));
        seq_printf(m, " comp_bytes: %llu\n",
                   atomic64_read(&iaa_wq->comp_bytes));
        seq_printf(m, " decomp_calls: %llu\n",
                   atomic64_read(&iaa_wq->decomp_calls));
        seq_printf(m, " decomp_bytes: %llu\n\n",
                   atomic64_read(&iaa_wq->decomp_bytes));
}

static void device_stats_show(struct seq_file *m, struct iaa_device *iaa_device)
{
        struct iaa_wq *iaa_wq;

        seq_puts(m, "iaa device:\n");
        seq_printf(m, " id: %d\n", iaa_device->idxd->id);
        seq_printf(m, " n_wqs: %d\n", iaa_device->n_wq);
        seq_printf(m, " comp_calls: %llu\n",
                   atomic64_read(&iaa_device->comp_calls));
        seq_printf(m, " comp_bytes: %llu\n",
                   atomic64_read(&iaa_device->comp_bytes));
        seq_printf(m, " decomp_calls: %llu\n",
                   atomic64_read(&iaa_device->decomp_calls));
        seq_printf(m, " decomp_bytes: %llu\n",
                   atomic64_read(&iaa_device->decomp_bytes));
        seq_puts(m, " wqs:\n");

        list_for_each_entry(iaa_wq, &iaa_device->wqs, list)
                wq_show(m, iaa_wq);
}

static int global_stats_show(struct seq_file *m, void *v)
{
        seq_puts(m, "global stats:\n");
        seq_printf(m, " total_comp_calls: %llu\n",
                   atomic64_read(&total_comp_calls));
        seq_printf(m, " total_decomp_calls: %llu\n",
                   atomic64_read(&total_decomp_calls));
        seq_printf(m, " total_sw_decomp_calls: %llu\n",
                   atomic64_read(&total_sw_decomp_calls));
        seq_printf(m, " total_comp_bytes_out: %llu\n",
                   atomic64_read(&total_comp_bytes_out));
        seq_printf(m, " total_decomp_bytes_in: %llu\n",
                   atomic64_read(&total_decomp_bytes_in));
        seq_printf(m, " total_completion_einval_errors: %llu\n",
                   atomic64_read(&total_completion_einval_errors));
        seq_printf(m, " total_completion_timeout_errors: %llu\n",
                   atomic64_read(&total_completion_timeout_errors));
        seq_printf(m, " total_completion_comp_buf_overflow_errors: %llu\n\n",
                   atomic64_read(&total_completion_comp_buf_overflow_errors));

        return 0;
}

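/*
 * Show callback for the "wq_stats" debugfs file; the iaa_devices list
 * is walked under iaa_devices_lock.
 */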
static int wq_stats_show(struct seq_file *m, void *v)
{
        struct iaa_device *iaa_device;

        mutex_lock(&iaa_devices_lock);

        list_for_each_entry(iaa_device, &iaa_devices, list)
                device_stats_show(m, iaa_device);

        mutex_unlock(&iaa_devices_lock);

        return 0;
}

static int iaa_crypto_stats_reset(void *data, u64 value)
{
        struct iaa_device *iaa_device;

        reset_iaa_crypto_stats();

        mutex_lock(&iaa_devices_lock);

        list_for_each_entry(iaa_device, &iaa_devices, list)
                reset_device_stats(iaa_device);

        mutex_unlock(&iaa_devices_lock);

        return 0;
}

static int wq_stats_open(struct inode *inode, struct file *file)
{
        return single_open(file, wq_stats_show, file);
}

static const struct file_operations wq_stats_fops = {
        .open = wq_stats_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static int global_stats_open(struct inode *inode, struct file *file)
{
        return single_open(file, global_stats_show, file);
}

static const struct file_operations global_stats_fops = {
        .open = global_stats_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

DEFINE_DEBUGFS_ATTRIBUTE(wq_stats_reset_fops, NULL, iaa_crypto_stats_reset, "%llu\n");

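/*
 * Create the iaa_crypto debugfs tree.  With debugfs mounted in the
 * usual place this gives:
 *
 *   /sys/kernel/debug/iaa_crypto/global_stats
 *   /sys/kernel/debug/iaa_crypto/wq_stats
 *   /sys/kernel/debug/iaa_crypto/stats_reset
 *
 * Writing any value to stats_reset zeroes all counters.
 */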
int __init iaa_crypto_debugfs_init(void)
{
        if (!debugfs_initialized())
                return -ENODEV;

        iaa_crypto_debugfs_root = debugfs_create_dir("iaa_crypto", NULL);

        debugfs_create_file("global_stats", 0644, iaa_crypto_debugfs_root, NULL,
                            &global_stats_fops);
        debugfs_create_file("wq_stats", 0644, iaa_crypto_debugfs_root, NULL,
                            &wq_stats_fops);
        debugfs_create_file("stats_reset", 0644, iaa_crypto_debugfs_root, NULL,
                            &wq_stats_reset_fops);

        return 0;
}

void __exit iaa_crypto_debugfs_cleanup(void)
{
        debugfs_remove_recursive(iaa_crypto_debugfs_root);
}

MODULE_LICENSE("GPL");