v6.2
// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/net/sunrpc/stats.c
 *
 * procfs-based user access to generic RPC statistics. The stats files
 * reside in /proc/net/rpc.
 *
 * The read routines assume that the buffer passed in is just big enough.
 * If you implement an RPC service that has its own stats routine which
 * appends the generic RPC stats, make sure you don't exceed the PAGE_SIZE
 * limit.
 *
 * Copyright (C) 1995, 1996, 1997 Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/module.h>
#include <linux/slab.h>

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/metrics.h>
#include <linux/rcupdate.h>

#include <trace/events/sunrpc.h>

#include "netns.h"

#define RPCDBG_FACILITY	RPCDBG_MISC

/*
 * Get RPC client stats
 */
static int rpc_proc_show(struct seq_file *seq, void *v)
{
	const struct rpc_stat	*statp = seq->private;
	const struct rpc_program *prog = statp->program;
	unsigned int i, j;

	seq_printf(seq,
		"net %u %u %u %u\n",
			statp->netcnt,
			statp->netudpcnt,
			statp->nettcpcnt,
			statp->nettcpconn);
	seq_printf(seq,
		"rpc %u %u %u\n",
			statp->rpccnt,
			statp->rpcretrans,
			statp->rpcauthrefresh);

	for (i = 0; i < prog->nrvers; i++) {
		const struct rpc_version *vers = prog->version[i];

		if (!vers)
			continue;
		seq_printf(seq, "proc%u %u",
					vers->number, vers->nrprocs);
		for (j = 0; j < vers->nrprocs; j++)
			seq_printf(seq, " %u", vers->counts[j]);
		seq_putc(seq, '\n');
	}
	return 0;
}

static int rpc_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, rpc_proc_show, pde_data(inode));
}

static const struct proc_ops rpc_proc_ops = {
	.proc_open	= rpc_proc_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= single_release,
};
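
/*
 * Illustrative note (not in the original file): the /proc/net/rpc/<program>
 * output produced by rpc_proc_show() above looks like this (values made up):
 *
 *	net 1234 0 1234 5
 *	rpc 1229 3 1
 *	proc3 22 0 80 21 ...
 *
 * "net" is packets seen, UDP packets, TCP packets and TCP connections;
 * "rpc" is calls, retransmissions and authentication refreshes; each
 * "proc<version>" line gives the procedure count followed by one call
 * counter per procedure.
 */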

/*
 * Get RPC server stats
 */
void svc_seq_show(struct seq_file *seq, const struct svc_stat *statp)
{
	const struct svc_program *prog = statp->program;
	const struct svc_version *vers;
	unsigned int i, j;

	seq_printf(seq,
		"net %u %u %u %u\n",
			statp->netcnt,
			statp->netudpcnt,
			statp->nettcpcnt,
			statp->nettcpconn);
	seq_printf(seq,
		"rpc %u %u %u %u %u\n",
			statp->rpccnt,
			statp->rpcbadfmt + statp->rpcbadauth + statp->rpcbadclnt,
			statp->rpcbadfmt,
			statp->rpcbadauth,
			statp->rpcbadclnt);

	for (i = 0; i < prog->pg_nvers; i++) {
		vers = prog->pg_vers[i];
		if (!vers)
			continue;
		seq_printf(seq, "proc%d %u", i, vers->vs_nproc);
		for (j = 0; j < vers->vs_nproc; j++)
			seq_printf(seq, " %u", vers->vs_count[j]);
		seq_putc(seq, '\n');
	}
}
EXPORT_SYMBOL_GPL(svc_seq_show);
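
/*
 * Hypothetical sketch (not part of stats.c): a service with its own
 * counters can emit the generic lines via svc_seq_show() and then append
 * extra lines, staying within the PAGE_SIZE limit noted in the header.
 * struct my_svc_stats, my_hits and my_svc_stats_show() are made-up names;
 * seq->private is assumed to be the svc_stat handed to svc_proc_register()
 * (see the registration sketch further below).
 */
struct my_svc_stats {
	struct svc_stat	svc_stat;	/* generic counters consumed by svc_seq_show() */
	unsigned int	my_hits;	/* illustrative service-specific counter */
};

static int my_svc_stats_show(struct seq_file *seq, void *v)
{
	struct svc_stat *statp = seq->private;
	struct my_svc_stats *st = container_of(statp, struct my_svc_stats,
					       svc_stat);

	svc_seq_show(seq, statp);			/* "net", "rpc", "procN" lines */
	seq_printf(seq, "hits %u\n", st->my_hits);	/* service-specific extra line */
	return 0;
}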

/**
 * rpc_alloc_iostats - allocate an rpc_iostats structure
 * @clnt: RPC program, version, and xprt
 */
struct rpc_iostats *rpc_alloc_iostats(struct rpc_clnt *clnt)
{
	struct rpc_iostats *stats;
	int i;

	stats = kcalloc(clnt->cl_maxproc, sizeof(*stats), GFP_KERNEL);
	if (stats) {
		for (i = 0; i < clnt->cl_maxproc; i++)
			spin_lock_init(&stats[i].om_lock);
	}
	return stats;
}
EXPORT_SYMBOL_GPL(rpc_alloc_iostats);

/**
 * rpc_free_iostats - release an rpc_iostats structure
 * @stats: doomed rpc_iostats structure
 */
void rpc_free_iostats(struct rpc_iostats *stats)
{
	kfree(stats);
}
EXPORT_SYMBOL_GPL(rpc_free_iostats);
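
/*
 * Hypothetical usage sketch (not part of stats.c): an rpc_clnt owner
 * allocates one rpc_iostats slot per procedure at setup time and frees
 * the array on teardown.  The two function names are made up; only
 * rpc_alloc_iostats() and rpc_free_iostats() come from this file.
 */
static int my_client_setup_metrics(struct rpc_clnt *clnt)
{
	clnt->cl_metrics = rpc_alloc_iostats(clnt);	/* one slot per procedure */
	if (!clnt->cl_metrics)
		return -ENOMEM;
	return 0;
}

static void my_client_teardown_metrics(struct rpc_clnt *clnt)
{
	rpc_free_iostats(clnt->cl_metrics);
	clnt->cl_metrics = NULL;
}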

/**
 * rpc_count_iostats_metrics - tally up per-task stats
 * @task: completed rpc_task
 * @op_metrics: stat structure for OP that will accumulate stats from @task
 */
void rpc_count_iostats_metrics(const struct rpc_task *task,
			       struct rpc_iostats *op_metrics)
{
	struct rpc_rqst *req = task->tk_rqstp;
	ktime_t backlog, execute, now;

	if (!op_metrics || !req)
		return;

	now = ktime_get();
	spin_lock(&op_metrics->om_lock);

	op_metrics->om_ops++;
	/* kernel API: om_ops must never become larger than om_ntrans */
	op_metrics->om_ntrans += max(req->rq_ntrans, 1);
	op_metrics->om_timeouts += task->tk_timeouts;

	op_metrics->om_bytes_sent += req->rq_xmit_bytes_sent;
	op_metrics->om_bytes_recv += req->rq_reply_bytes_recvd;

	backlog = 0;
	if (ktime_to_ns(req->rq_xtime)) {
		backlog = ktime_sub(req->rq_xtime, task->tk_start);
		op_metrics->om_queue = ktime_add(op_metrics->om_queue, backlog);
	}

	op_metrics->om_rtt = ktime_add(op_metrics->om_rtt, req->rq_rtt);

	execute = ktime_sub(now, task->tk_start);
	op_metrics->om_execute = ktime_add(op_metrics->om_execute, execute);
	if (task->tk_status < 0)
		op_metrics->om_error_status++;

	spin_unlock(&op_metrics->om_lock);

	trace_rpc_stats_latency(req->rq_task, backlog, req->rq_rtt, execute);
}
EXPORT_SYMBOL_GPL(rpc_count_iostats_metrics);

/**
 * rpc_count_iostats - tally up per-task stats
 * @task: completed rpc_task
 * @stats: array of stat structures
 *
 * Uses the statidx from @task
 */
void rpc_count_iostats(const struct rpc_task *task, struct rpc_iostats *stats)
{
	rpc_count_iostats_metrics(task,
				  &stats[task->tk_msg.rpc_proc->p_statidx]);
}
EXPORT_SYMBOL_GPL(rpc_count_iostats);
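
/*
 * Hypothetical completion-path sketch (not part of stats.c): when a task
 * finishes, its numbers can be folded into the owning client's
 * per-procedure table; the p_statidx-based indexing is done inside
 * rpc_count_iostats().  my_task_done() is a made-up name.
 */
static void my_task_done(struct rpc_task *task)
{
	if (task->tk_client && task->tk_client->cl_metrics)
		rpc_count_iostats(task, task->tk_client->cl_metrics);
}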

static void _print_name(struct seq_file *seq, unsigned int op,
			const struct rpc_procinfo *procs)
{
	if (procs[op].p_name)
		seq_printf(seq, "\t%12s: ", procs[op].p_name);
	else if (op == 0)
		seq_printf(seq, "\t        NULL: ");
	else
		seq_printf(seq, "\t%12u: ", op);
}

static void _add_rpc_iostats(struct rpc_iostats *a, struct rpc_iostats *b)
{
	a->om_ops += b->om_ops;
	a->om_ntrans += b->om_ntrans;
	a->om_timeouts += b->om_timeouts;
	a->om_bytes_sent += b->om_bytes_sent;
	a->om_bytes_recv += b->om_bytes_recv;
	a->om_queue = ktime_add(a->om_queue, b->om_queue);
	a->om_rtt = ktime_add(a->om_rtt, b->om_rtt);
	a->om_execute = ktime_add(a->om_execute, b->om_execute);
	a->om_error_status += b->om_error_status;
}

static void _print_rpc_iostats(struct seq_file *seq, struct rpc_iostats *stats,
			       int op, const struct rpc_procinfo *procs)
{
	_print_name(seq, op, procs);
	seq_printf(seq, "%lu %lu %lu %llu %llu %llu %llu %llu %lu\n",
		   stats->om_ops,
		   stats->om_ntrans,
		   stats->om_timeouts,
		   stats->om_bytes_sent,
		   stats->om_bytes_recv,
		   ktime_to_ms(stats->om_queue),
		   ktime_to_ms(stats->om_rtt),
		   ktime_to_ms(stats->om_execute),
		   stats->om_error_status);
}
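
/*
 * The per-op line printed above has a fixed column order, with the three
 * time columns reported in milliseconds:
 *
 *	<op name>: ops ntrans timeouts bytes_sent bytes_recv queue rtt execute errors
 */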

static int do_print_stats(struct rpc_clnt *clnt, struct rpc_xprt *xprt, void *seqv)
{
	struct seq_file *seq = seqv;

	xprt->ops->print_stats(xprt, seq);
	return 0;
}

void rpc_clnt_show_stats(struct seq_file *seq, struct rpc_clnt *clnt)
{
	unsigned int op, maxproc = clnt->cl_maxproc;

	if (!clnt->cl_metrics)
		return;

	seq_printf(seq, "\tRPC iostats version: %s  ", RPC_IOSTATS_VERS);
	seq_printf(seq, "p/v: %u/%u (%s)\n",
			clnt->cl_prog, clnt->cl_vers, clnt->cl_program->name);

	rpc_clnt_iterate_for_each_xprt(clnt, do_print_stats, seq);

	seq_printf(seq, "\tper-op statistics\n");
	for (op = 0; op < maxproc; op++) {
		struct rpc_iostats stats = {};
		struct rpc_clnt *next = clnt;

		do {
			_add_rpc_iostats(&stats, &next->cl_metrics[op]);
			if (next == next->cl_parent)
				break;
			next = next->cl_parent;
		} while (next);
		_print_rpc_iostats(seq, &stats, op, clnt->cl_procinfo);
	}
}
EXPORT_SYMBOL_GPL(rpc_clnt_show_stats);
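
/*
 * Hypothetical caller sketch (not part of stats.c): an RPC consumer with a
 * seq_file-based stats view can hand its rpc_clnt to rpc_clnt_show_stats()
 * to dump the header, per-transport stats and the per-op table.
 * my_show_mount_stats() is a made-up name.
 */
static void my_show_mount_stats(struct seq_file *seq, struct rpc_clnt *clnt)
{
	rpc_clnt_show_stats(seq, clnt);
}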

/*
 * Register/unregister RPC proc files
 */
static inline struct proc_dir_entry *
do_register(struct net *net, const char *name, void *data,
	    const struct proc_ops *proc_ops)
{
	struct sunrpc_net *sn;

	dprintk("RPC:       registering /proc/net/rpc/%s\n", name);
	sn = net_generic(net, sunrpc_net_id);
	return proc_create_data(name, 0, sn->proc_net_rpc, proc_ops, data);
}

struct proc_dir_entry *
rpc_proc_register(struct net *net, struct rpc_stat *statp)
{
	return do_register(net, statp->program->name, statp, &rpc_proc_ops);
}
EXPORT_SYMBOL_GPL(rpc_proc_register);

void
rpc_proc_unregister(struct net *net, const char *name)
{
	struct sunrpc_net *sn;

	sn = net_generic(net, sunrpc_net_id);
	remove_proc_entry(name, sn->proc_net_rpc);
}
EXPORT_SYMBOL_GPL(rpc_proc_unregister);

struct proc_dir_entry *
svc_proc_register(struct net *net, struct svc_stat *statp, const struct proc_ops *proc_ops)
{
	return do_register(net, statp->program->pg_name, statp, proc_ops);
}
EXPORT_SYMBOL_GPL(svc_proc_register);
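
/*
 * Hypothetical registration sketch (not part of stats.c): a server-side
 * user exposes its stats under /proc/net/rpc/<pg_name> by pairing
 * svc_proc_register() with a proc_ops built around its show routine,
 * mirroring the single_open() pattern of rpc_proc_open() above.
 * my_svc_proc_open/my_svc_proc_ops are made-up names; my_svc_stats_show()
 * refers to the earlier sketch.
 */
static int my_svc_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, my_svc_stats_show, pde_data(inode));
}

static const struct proc_ops my_svc_proc_ops = {
	.proc_open	= my_svc_proc_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= single_release,
};

/* then, with a struct svc_stat embedded in the wrapper:		*/
/*	svc_proc_register(net, &stats->svc_stat, &my_svc_proc_ops);	*/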

void
svc_proc_unregister(struct net *net, const char *name)
{
	struct sunrpc_net *sn;

	sn = net_generic(net, sunrpc_net_id);
	remove_proc_entry(name, sn->proc_net_rpc);
}
EXPORT_SYMBOL_GPL(svc_proc_unregister);

int rpc_proc_init(struct net *net)
{
	struct sunrpc_net *sn;

	dprintk("RPC:       registering /proc/net/rpc\n");
	sn = net_generic(net, sunrpc_net_id);
	sn->proc_net_rpc = proc_mkdir("rpc", net->proc_net);
	if (sn->proc_net_rpc == NULL)
		return -ENOMEM;

	return 0;
}

void rpc_proc_exit(struct net *net)
{
	dprintk("RPC:       unregistering /proc/net/rpc\n");
	remove_proc_entry("rpc", net->proc_net);
}
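
/*
 * Hypothetical wiring sketch (not part of stats.c): rpc_proc_init() and
 * rpc_proc_exit() are meant to run once per network namespace, e.g. from
 * pernet init/exit callbacks registered with register_pernet_subsys().
 * The names below are made up, and <net/net_namespace.h> is assumed to be
 * available for struct pernet_operations.
 */
static int my_sunrpc_net_init(struct net *net)
{
	return rpc_proc_init(net);	/* creates /proc/net/rpc for this netns */
}

static void my_sunrpc_net_exit(struct net *net)
{
	rpc_proc_exit(net);		/* removes /proc/net/rpc for this netns */
}

static struct pernet_operations my_sunrpc_net_ops = {
	.init = my_sunrpc_net_init,
	.exit = my_sunrpc_net_exit,
};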