// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/net/sunrpc/stats.c
 *
 * procfs-based user access to generic RPC statistics. The stats files
 * reside in /proc/net/rpc.
 *
 * The read routines assume that the buffer passed in is just big enough.
 * If you implement an RPC service that has its own stats routine which
 * appends the generic RPC stats, make sure you don't exceed the PAGE_SIZE
 * limit.
 *
 * Copyright (C) 1995, 1996, 1997 Olaf Kirch <okir@monad.swb.de>
 */
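/*
 * A minimal sketch of such a service-side show routine (the names are
 * illustrative, not from a real in-tree service):
 *
 *	static int example_svc_proc_show(struct seq_file *seq, void *v)
 *	{
 *		svc_seq_show(seq, &example_svc_stats);
 *		seq_printf(seq, "example_extra %u\n", example_extra_count);
 *		return 0;
 *	}
 *
 * Everything written in one pass must fit within the PAGE_SIZE limit
 * mentioned above.
 */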

#include <linux/module.h>
#include <linux/slab.h>

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/metrics.h>
#include <linux/rcupdate.h>

#include <trace/events/sunrpc.h>

#include "netns.h"

#define RPCDBG_FACILITY	RPCDBG_MISC

/*
 * Get RPC client stats
 */
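/*
 * The client stats file written below looks like:
 *
 *	net <packets> <udp packets> <tcp packets> <tcp connections>
 *	rpc <calls> <retransmits> <auth refreshes>
 *	proc<version> <nr procs> <per-procedure call counts ...>
 */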
static int rpc_proc_show(struct seq_file *seq, void *v)
{
	const struct rpc_stat *statp = seq->private;
	const struct rpc_program *prog = statp->program;
	unsigned int i, j;

	seq_printf(seq,
		"net %u %u %u %u\n",
			statp->netcnt,
			statp->netudpcnt,
			statp->nettcpcnt,
			statp->nettcpconn);
	seq_printf(seq,
		"rpc %u %u %u\n",
			statp->rpccnt,
			statp->rpcretrans,
			statp->rpcauthrefresh);

	for (i = 0; i < prog->nrvers; i++) {
		const struct rpc_version *vers = prog->version[i];

		if (!vers)
			continue;
		seq_printf(seq, "proc%u %u",
			vers->number, vers->nrprocs);
		for (j = 0; j < vers->nrprocs; j++)
			seq_printf(seq, " %u", vers->counts[j]);
		seq_putc(seq, '\n');
	}
	return 0;
}

static int rpc_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, rpc_proc_show, pde_data(inode));
}

static const struct proc_ops rpc_proc_ops = {
	.proc_open	= rpc_proc_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= single_release,
};

/*
 * Get RPC server stats
 */
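/*
 * The server stats emitted below look like:
 *
 *	net <packets> <udp packets> <tcp packets> <tcp connections>
 *	rpc <calls> <bad total> <bad format> <bad auth> <bad client>
 *	proc<version> <nr procs> <per-procedure call counts ...>
 */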
void svc_seq_show(struct seq_file *seq, const struct svc_stat *statp)
{
	const struct svc_program *prog = statp->program;
	const struct svc_version *vers;
	unsigned int i, j;

	seq_printf(seq,
		"net %u %u %u %u\n",
			statp->netcnt,
			statp->netudpcnt,
			statp->nettcpcnt,
			statp->nettcpconn);
	seq_printf(seq,
		"rpc %u %u %u %u %u\n",
			statp->rpccnt,
			statp->rpcbadfmt+statp->rpcbadauth+statp->rpcbadclnt,
			statp->rpcbadfmt,
			statp->rpcbadauth,
			statp->rpcbadclnt);

	for (i = 0; i < prog->pg_nvers; i++) {
		vers = prog->pg_vers[i];
		if (!vers)
			continue;
		seq_printf(seq, "proc%d %u", i, vers->vs_nproc);
		for (j = 0; j < vers->vs_nproc; j++)
			seq_printf(seq, " %u", vers->vs_count[j]);
		seq_putc(seq, '\n');
	}
}
EXPORT_SYMBOL_GPL(svc_seq_show);

/**
 * rpc_alloc_iostats - allocate an rpc_iostats structure
 * @clnt: RPC program, version, and xprt
 *
 */
struct rpc_iostats *rpc_alloc_iostats(struct rpc_clnt *clnt)
{
	struct rpc_iostats *stats;
	int i;

	stats = kcalloc(clnt->cl_maxproc, sizeof(*stats), GFP_KERNEL);
	if (stats) {
		for (i = 0; i < clnt->cl_maxproc; i++)
			spin_lock_init(&stats[i].om_lock);
	}
	return stats;
}
EXPORT_SYMBOL_GPL(rpc_alloc_iostats);

/**
 * rpc_free_iostats - release an rpc_iostats structure
 * @stats: doomed rpc_iostats structure
 *
 */
void rpc_free_iostats(struct rpc_iostats *stats)
{
	kfree(stats);
}
EXPORT_SYMBOL_GPL(rpc_free_iostats);

/**
 * rpc_count_iostats_metrics - tally up per-task stats
 * @task: completed rpc_task
 * @op_metrics: stat structure for OP that will accumulate stats from @task
 */
void rpc_count_iostats_metrics(const struct rpc_task *task,
			       struct rpc_iostats *op_metrics)
{
	struct rpc_rqst *req = task->tk_rqstp;
	ktime_t backlog, execute, now;

	if (!op_metrics || !req)
		return;

	now = ktime_get();
	spin_lock(&op_metrics->om_lock);

	op_metrics->om_ops++;
	/* kernel API: om_ops must never become larger than om_ntrans */
	op_metrics->om_ntrans += max(req->rq_ntrans, 1);
	op_metrics->om_timeouts += task->tk_timeouts;

	op_metrics->om_bytes_sent += req->rq_xmit_bytes_sent;
	op_metrics->om_bytes_recv += req->rq_reply_bytes_recvd;

	backlog = 0;
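	/* Backlog (queue) time is only charged when the request was
	 * actually transmitted, i.e. rq_xtime is non-zero.
	 */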
	if (ktime_to_ns(req->rq_xtime)) {
		backlog = ktime_sub(req->rq_xtime, task->tk_start);
		op_metrics->om_queue = ktime_add(op_metrics->om_queue, backlog);
	}

	op_metrics->om_rtt = ktime_add(op_metrics->om_rtt, req->rq_rtt);

	execute = ktime_sub(now, task->tk_start);
	op_metrics->om_execute = ktime_add(op_metrics->om_execute, execute);
	if (task->tk_status < 0)
		op_metrics->om_error_status++;

	spin_unlock(&op_metrics->om_lock);

	trace_rpc_stats_latency(req->rq_task, backlog, req->rq_rtt, execute);
}
EXPORT_SYMBOL_GPL(rpc_count_iostats_metrics);

/**
 * rpc_count_iostats - tally up per-task stats
 * @task: completed rpc_task
 * @stats: array of stat structures
 *
 * Uses the statidx from @task
 */
void rpc_count_iostats(const struct rpc_task *task, struct rpc_iostats *stats)
{
	rpc_count_iostats_metrics(task,
			&stats[task->tk_msg.rpc_proc->p_statidx]);
}
EXPORT_SYMBOL_GPL(rpc_count_iostats);

static void _print_name(struct seq_file *seq, unsigned int op,
			const struct rpc_procinfo *procs)
{
	if (procs[op].p_name)
		seq_printf(seq, "\t%12s: ", procs[op].p_name);
	else if (op == 0)
		seq_printf(seq, "\t        NULL: ");
	else
		seq_printf(seq, "\t%12u: ", op);
}

static void _add_rpc_iostats(struct rpc_iostats *a, struct rpc_iostats *b)
{
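	/* Fold the per-op counters and accumulated timings from @b into @a. */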
	a->om_ops += b->om_ops;
	a->om_ntrans += b->om_ntrans;
	a->om_timeouts += b->om_timeouts;
	a->om_bytes_sent += b->om_bytes_sent;
	a->om_bytes_recv += b->om_bytes_recv;
	a->om_queue = ktime_add(a->om_queue, b->om_queue);
	a->om_rtt = ktime_add(a->om_rtt, b->om_rtt);
	a->om_execute = ktime_add(a->om_execute, b->om_execute);
	a->om_error_status += b->om_error_status;
}

static void _print_rpc_iostats(struct seq_file *seq, struct rpc_iostats *stats,
			       int op, const struct rpc_procinfo *procs)
{
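	/*
	 * One line per op: ops ntrans timeouts bytes_sent bytes_recv
	 * queue(ms) rtt(ms) execute(ms) errors.
	 */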
	_print_name(seq, op, procs);
	seq_printf(seq, "%lu %lu %lu %llu %llu %llu %llu %llu %lu\n",
		   stats->om_ops,
		   stats->om_ntrans,
		   stats->om_timeouts,
		   stats->om_bytes_sent,
		   stats->om_bytes_recv,
		   ktime_to_ms(stats->om_queue),
		   ktime_to_ms(stats->om_rtt),
		   ktime_to_ms(stats->om_execute),
		   stats->om_error_status);
}

static int do_print_stats(struct rpc_clnt *clnt, struct rpc_xprt *xprt, void *seqv)
{
	struct seq_file *seq = seqv;

	xprt->ops->print_stats(xprt, seq);
	return 0;
}

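/**
 * rpc_clnt_show_stats - write transport and per-op iostats for an RPC client
 * @seq: destination seq_file
 * @clnt: RPC client to report on
 *
 * Per-op counts are summed over @clnt and all of its parent clients.
 */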
void rpc_clnt_show_stats(struct seq_file *seq, struct rpc_clnt *clnt)
{
	unsigned int op, maxproc = clnt->cl_maxproc;

	if (!clnt->cl_metrics)
		return;

	seq_printf(seq, "\tRPC iostats version: %s ", RPC_IOSTATS_VERS);
	seq_printf(seq, "p/v: %u/%u (%s)\n",
			clnt->cl_prog, clnt->cl_vers, clnt->cl_program->name);

	rpc_clnt_iterate_for_each_xprt(clnt, do_print_stats, seq);

	seq_printf(seq, "\tper-op statistics\n");
	for (op = 0; op < maxproc; op++) {
		struct rpc_iostats stats = {};
		struct rpc_clnt *next = clnt;
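		/* Sum this op's metrics over the client and all of its parents. */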
		do {
			_add_rpc_iostats(&stats, &next->cl_metrics[op]);
			if (next == next->cl_parent)
				break;
			next = next->cl_parent;
		} while (next);
		_print_rpc_iostats(seq, &stats, op, clnt->cl_procinfo);
	}
}
EXPORT_SYMBOL_GPL(rpc_clnt_show_stats);

/*
 * Register/unregister RPC proc files
 */
static inline struct proc_dir_entry *
do_register(struct net *net, const char *name, void *data,
	    const struct proc_ops *proc_ops)
{
	struct sunrpc_net *sn;

	dprintk("RPC: registering /proc/net/rpc/%s\n", name);
	sn = net_generic(net, sunrpc_net_id);
	return proc_create_data(name, 0, sn->proc_net_rpc, proc_ops, data);
}

struct proc_dir_entry *
rpc_proc_register(struct net *net, struct rpc_stat *statp)
{
	return do_register(net, statp->program->name, statp, &rpc_proc_ops);
}
EXPORT_SYMBOL_GPL(rpc_proc_register);

void
rpc_proc_unregister(struct net *net, const char *name)
{
	struct sunrpc_net *sn;

	sn = net_generic(net, sunrpc_net_id);
	remove_proc_entry(name, sn->proc_net_rpc);
}
EXPORT_SYMBOL_GPL(rpc_proc_unregister);

struct proc_dir_entry *
svc_proc_register(struct net *net, struct svc_stat *statp, const struct proc_ops *proc_ops)
{
	return do_register(net, statp->program->pg_name, statp, proc_ops);
}
EXPORT_SYMBOL_GPL(svc_proc_register);

void
svc_proc_unregister(struct net *net, const char *name)
{
	struct sunrpc_net *sn;

	sn = net_generic(net, sunrpc_net_id);
	remove_proc_entry(name, sn->proc_net_rpc);
}
EXPORT_SYMBOL_GPL(svc_proc_unregister);

int rpc_proc_init(struct net *net)
{
	struct sunrpc_net *sn;

	dprintk("RPC: registering /proc/net/rpc\n");
	sn = net_generic(net, sunrpc_net_id);
	sn->proc_net_rpc = proc_mkdir("rpc", net->proc_net);
	if (sn->proc_net_rpc == NULL)
		return -ENOMEM;

	return 0;
}

void rpc_proc_exit(struct net *net)
{
	dprintk("RPC: unregistering /proc/net/rpc\n");
	remove_proc_entry("rpc", net->proc_net);
}