/*
 * linux/net/sunrpc/svc_xprt.c
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */

#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <linux/sunrpc/addr.h>
#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/svc_xprt.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/xprt.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <trace/events/sunrpc.h>

#define RPCDBG_FACILITY	RPCDBG_SVCXPRT

static struct svc_deferred_req *svc_deferred_dequeue(struct svc_xprt *xprt);
static int svc_deferred_recv(struct svc_rqst *rqstp);
static struct cache_deferred_req *svc_defer(struct cache_req *req);
static void svc_age_temp_xprts(unsigned long closure);
static void svc_delete_xprt(struct svc_xprt *xprt);

/* apparently the "standard" is that clients close
 * idle connections after 5 minutes, servers after
 * 6 minutes
 *   http://www.connectathon.org/talks96/nfstcp.pdf
 */
static int svc_conn_age_period = 6*60;

/* List of registered transport classes */
static DEFINE_SPINLOCK(svc_xprt_class_lock);
static LIST_HEAD(svc_xprt_class_list);

/* SMP locking strategy:
 *
 * svc_pool->sp_lock protects most of the fields of that pool.
 * svc_serv->sv_lock protects sv_tempsocks, sv_permsocks, sv_tmpcnt.
 * When both need to be taken (rare), svc_serv->sv_lock is first.
 * The "service mutex" protects svc_serv->sv_nrthread.
 * svc_sock->sk_lock protects the svc_sock->sk_deferred list
 * and the ->sk_info_authunix cache.
 *
 * The XPT_BUSY bit in xprt->xpt_flags prevents a transport being
 * enqueued multiply. During normal transport processing this bit
 * is set by svc_xprt_enqueue and cleared by svc_xprt_received.
 * Providers should not manipulate this bit directly.
 *
 * Some flags can be set to certain values at any time
 * providing that certain rules are followed:
 *
 * XPT_CONN, XPT_DATA:
 *	- Can be set or cleared at any time.
 *	- After a set, svc_xprt_enqueue must be called to enqueue
 *	  the transport for processing.
 *	- After a clear, the transport must be read/accepted.
 *	  If this succeeds, it must be set again.
 * XPT_CLOSE:
 *	- Can be set at any time. It is never cleared.
 * XPT_DEAD:
 *	- Can only be set while XPT_BUSY is held, which ensures
 *	  that no other thread will be using the transport or will
 *	  try to set XPT_DEAD.
 */
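
/*
 * Illustrative sketch (not part of the original file): a transport
 * provider that has noticed inbound data follows the XPT_DATA rule
 * above by setting the bit and then enqueuing the transport:
 *
 *	set_bit(XPT_DATA, &xprt->xpt_flags);
 *	svc_xprt_enqueue(xprt);
 *
 * svc_xprt_enqueue() returns without queuing if XPT_BUSY is already
 * set; XPT_BUSY is cleared again by svc_xprt_received() once a thread
 * has finished a read/accept attempt on the transport.
 */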
int svc_reg_xprt_class(struct svc_xprt_class *xcl)
{
	struct svc_xprt_class *cl;
	int res = -EEXIST;

	dprintk("svc: Adding svc transport class '%s'\n", xcl->xcl_name);

	INIT_LIST_HEAD(&xcl->xcl_list);
	spin_lock(&svc_xprt_class_lock);
	/* Make sure there isn't already a class with the same name */
	list_for_each_entry(cl, &svc_xprt_class_list, xcl_list) {
		if (strcmp(xcl->xcl_name, cl->xcl_name) == 0)
			goto out;
	}
	list_add_tail(&xcl->xcl_list, &svc_xprt_class_list);
	res = 0;
out:
	spin_unlock(&svc_xprt_class_lock);
	return res;
}
EXPORT_SYMBOL_GPL(svc_reg_xprt_class);
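
/*
 * Usage sketch (hypothetical provider, for illustration only; the
 * names my_xprt_class/my_xprt_ops are invented): a transport module
 * typically registers its class on init and unregisters it on exit:
 *
 *	static struct svc_xprt_class my_xprt_class = {
 *		.xcl_name = "my-xprt",
 *		.xcl_owner = THIS_MODULE,
 *		.xcl_ops = &my_xprt_ops,
 *		.xcl_max_payload = RPCSVC_MAXPAYLOAD_TCP,
 *	};
 *
 *	static int __init my_xprt_init(void)
 *	{
 *		return svc_reg_xprt_class(&my_xprt_class);
 *	}
 *
 *	static void __exit my_xprt_exit(void)
 *	{
 *		svc_unreg_xprt_class(&my_xprt_class);
 *	}
 */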

void svc_unreg_xprt_class(struct svc_xprt_class *xcl)
{
	dprintk("svc: Removing svc transport class '%s'\n", xcl->xcl_name);
	spin_lock(&svc_xprt_class_lock);
	list_del_init(&xcl->xcl_list);
	spin_unlock(&svc_xprt_class_lock);
}
EXPORT_SYMBOL_GPL(svc_unreg_xprt_class);

/*
 * Format the transport list for printing
 */
int svc_print_xprts(char *buf, int maxlen)
{
	struct svc_xprt_class *xcl;
	char tmpstr[80];
	int len = 0;
	buf[0] = '\0';

	spin_lock(&svc_xprt_class_lock);
	list_for_each_entry(xcl, &svc_xprt_class_list, xcl_list) {
		int slen;

		sprintf(tmpstr, "%s %d\n", xcl->xcl_name, xcl->xcl_max_payload);
		slen = strlen(tmpstr);
		if (len + slen > maxlen)
			break;
		len += slen;
		strcat(buf, tmpstr);
	}
	spin_unlock(&svc_xprt_class_lock);

	return len;
}

static void svc_xprt_free(struct kref *kref)
{
	struct svc_xprt *xprt =
		container_of(kref, struct svc_xprt, xpt_ref);
	struct module *owner = xprt->xpt_class->xcl_owner;
	if (test_bit(XPT_CACHE_AUTH, &xprt->xpt_flags))
		svcauth_unix_info_release(xprt);
	put_net(xprt->xpt_net);
	/* See comment on corresponding get in xs_setup_bc_tcp(): */
	if (xprt->xpt_bc_xprt)
		xprt_put(xprt->xpt_bc_xprt);
	xprt->xpt_ops->xpo_free(xprt);
	module_put(owner);
}

void svc_xprt_put(struct svc_xprt *xprt)
{
	kref_put(&xprt->xpt_ref, svc_xprt_free);
}
EXPORT_SYMBOL_GPL(svc_xprt_put);

/*
 * Called by transport drivers to initialize the transport independent
 * portion of the transport instance.
 */
void svc_xprt_init(struct net *net, struct svc_xprt_class *xcl,
		   struct svc_xprt *xprt, struct svc_serv *serv)
{
	memset(xprt, 0, sizeof(*xprt));
	xprt->xpt_class = xcl;
	xprt->xpt_ops = xcl->xcl_ops;
	kref_init(&xprt->xpt_ref);
	xprt->xpt_server = serv;
	INIT_LIST_HEAD(&xprt->xpt_list);
	INIT_LIST_HEAD(&xprt->xpt_ready);
	INIT_LIST_HEAD(&xprt->xpt_deferred);
	INIT_LIST_HEAD(&xprt->xpt_users);
	mutex_init(&xprt->xpt_mutex);
	spin_lock_init(&xprt->xpt_lock);
	set_bit(XPT_BUSY, &xprt->xpt_flags);
	rpc_init_wait_queue(&xprt->xpt_bc_pending, "xpt_bc_pending");
	xprt->xpt_net = get_net(net);
}
EXPORT_SYMBOL_GPL(svc_xprt_init);
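
/*
 * Caller sketch (illustrative only; my_xprt is an invented provider
 * type): a provider's xpo_create method allocates its private
 * structure and initializes the embedded svc_xprt before doing any
 * transport-specific setup:
 *
 *	struct my_xprt *mx = kzalloc(sizeof(*mx), GFP_KERNEL);
 *
 *	if (!mx)
 *		return ERR_PTR(-ENOMEM);
 *	svc_xprt_init(net, &my_xprt_class, &mx->mx_xprt, serv);
 *
 * Note that the new xprt starts life with XPT_BUSY set; it cannot be
 * enqueued for processing until svc_xprt_received() clears the bit.
 */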

static struct svc_xprt *__svc_xpo_create(struct svc_xprt_class *xcl,
					 struct svc_serv *serv,
					 struct net *net,
					 const int family,
					 const unsigned short port,
					 int flags)
{
	struct sockaddr_in sin = {
		.sin_family = AF_INET,
		.sin_addr.s_addr = htonl(INADDR_ANY),
		.sin_port = htons(port),
	};
#if IS_ENABLED(CONFIG_IPV6)
	struct sockaddr_in6 sin6 = {
		.sin6_family = AF_INET6,
		.sin6_addr = IN6ADDR_ANY_INIT,
		.sin6_port = htons(port),
	};
#endif
	struct sockaddr *sap;
	size_t len;

	switch (family) {
	case PF_INET:
		sap = (struct sockaddr *)&sin;
		len = sizeof(sin);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case PF_INET6:
		sap = (struct sockaddr *)&sin6;
		len = sizeof(sin6);
		break;
#endif
	default:
		return ERR_PTR(-EAFNOSUPPORT);
	}

	return xcl->xcl_ops->xpo_create(serv, net, sap, len, flags);
}

/*
 * svc_xprt_received conditionally queues the transport for processing
 * by another thread. The caller must hold the XPT_BUSY bit and must
 * not thereafter touch transport data.
 *
 * Note: XPT_DATA only gets cleared when a read-attempt finds no (or
 * insufficient) data.
 */
static void svc_xprt_received(struct svc_xprt *xprt)
{
	if (!test_bit(XPT_BUSY, &xprt->xpt_flags)) {
		WARN_ONCE(1, "xprt=0x%p already busy!", xprt);
		return;
	}

	/* As soon as we clear busy, the xprt could be closed and
	 * 'put', so we need a reference to call svc_enqueue_xprt with:
	 */
	svc_xprt_get(xprt);
	smp_mb__before_atomic();
	clear_bit(XPT_BUSY, &xprt->xpt_flags);
	xprt->xpt_server->sv_ops->svo_enqueue_xprt(xprt);
	svc_xprt_put(xprt);
}

void svc_add_new_perm_xprt(struct svc_serv *serv, struct svc_xprt *new)
{
	clear_bit(XPT_TEMP, &new->xpt_flags);
	spin_lock_bh(&serv->sv_lock);
	list_add(&new->xpt_list, &serv->sv_permsocks);
	spin_unlock_bh(&serv->sv_lock);
	svc_xprt_received(new);
}

int svc_create_xprt(struct svc_serv *serv, const char *xprt_name,
		    struct net *net, const int family,
		    const unsigned short port, int flags)
{
	struct svc_xprt_class *xcl;

	dprintk("svc: creating transport %s[%d]\n", xprt_name, port);
	spin_lock(&svc_xprt_class_lock);
	list_for_each_entry(xcl, &svc_xprt_class_list, xcl_list) {
		struct svc_xprt *newxprt;
		unsigned short newport;

		if (strcmp(xprt_name, xcl->xcl_name))
			continue;

		if (!try_module_get(xcl->xcl_owner))
			goto err;

		spin_unlock(&svc_xprt_class_lock);
		newxprt = __svc_xpo_create(xcl, serv, net, family, port, flags);
		if (IS_ERR(newxprt)) {
			module_put(xcl->xcl_owner);
			return PTR_ERR(newxprt);
		}
		svc_add_new_perm_xprt(serv, newxprt);
		newport = svc_xprt_local_port(newxprt);
		return newport;
	}
 err:
	spin_unlock(&svc_xprt_class_lock);
	dprintk("svc: transport %s not found\n", xprt_name);

	/* This errno is exposed to user space. Provide a reasonable
	 * perror msg for a bad transport. */
	return -EPROTONOSUPPORT;
}
EXPORT_SYMBOL_GPL(svc_create_xprt);

/*
 * Copy the local and remote xprt addresses to the rqstp structure
 */
void svc_xprt_copy_addrs(struct svc_rqst *rqstp, struct svc_xprt *xprt)
{
	memcpy(&rqstp->rq_addr, &xprt->xpt_remote, xprt->xpt_remotelen);
	rqstp->rq_addrlen = xprt->xpt_remotelen;

	/*
	 * Destination address in request is needed for binding the
	 * source address in RPC replies/callbacks later.
	 */
	memcpy(&rqstp->rq_daddr, &xprt->xpt_local, xprt->xpt_locallen);
	rqstp->rq_daddrlen = xprt->xpt_locallen;
}
EXPORT_SYMBOL_GPL(svc_xprt_copy_addrs);

/**
 * svc_print_addr - Format rq_addr field for printing
 * @rqstp: svc_rqst struct containing address to print
 * @buf: target buffer for formatted address
 * @len: length of target buffer
 *
 */
char *svc_print_addr(struct svc_rqst *rqstp, char *buf, size_t len)
{
	return __svc_print_addr(svc_addr(rqstp), buf, len);
}
EXPORT_SYMBOL_GPL(svc_print_addr);

static bool svc_xprt_has_something_to_do(struct svc_xprt *xprt)
{
	if (xprt->xpt_flags & ((1<<XPT_CONN)|(1<<XPT_CLOSE)))
		return true;
	if (xprt->xpt_flags & ((1<<XPT_DATA)|(1<<XPT_DEFERRED)))
		return xprt->xpt_ops->xpo_has_wspace(xprt);
	return false;
}

void svc_xprt_do_enqueue(struct svc_xprt *xprt)
{
	struct svc_pool *pool;
	struct svc_rqst *rqstp = NULL;
	int cpu;
	bool queued = false;

	if (!svc_xprt_has_something_to_do(xprt))
		goto out;

	/* Mark transport as busy. It will remain in this state until
	 * the provider calls svc_xprt_received. We update XPT_BUSY
	 * atomically because it also guards against trying to enqueue
	 * the transport twice.
	 */
	if (test_and_set_bit(XPT_BUSY, &xprt->xpt_flags)) {
		/* Don't enqueue transport while already enqueued */
		dprintk("svc: transport %p busy, not enqueued\n", xprt);
		goto out;
	}

	cpu = get_cpu();
	pool = svc_pool_for_cpu(xprt->xpt_server, cpu);

	atomic_long_inc(&pool->sp_stats.packets);

redo_search:
	/* find a thread for this xprt */
	rcu_read_lock();
	list_for_each_entry_rcu(rqstp, &pool->sp_all_threads, rq_all) {
		/* Do a lockless check first */
		if (test_bit(RQ_BUSY, &rqstp->rq_flags))
			continue;

		/*
		 * Once the xprt has been queued, it can only be dequeued by
		 * the task that intends to service it. All we can do at that
		 * point is to try to wake this thread back up so that it can
		 * do so.
		 */
		if (!queued) {
			spin_lock_bh(&rqstp->rq_lock);
			if (test_and_set_bit(RQ_BUSY, &rqstp->rq_flags)) {
				/* already busy, move on... */
				spin_unlock_bh(&rqstp->rq_lock);
				continue;
			}

			/* this one will do */
			rqstp->rq_xprt = xprt;
			svc_xprt_get(xprt);
			spin_unlock_bh(&rqstp->rq_lock);
		}
		rcu_read_unlock();

		atomic_long_inc(&pool->sp_stats.threads_woken);
		wake_up_process(rqstp->rq_task);
		put_cpu();
		goto out;
	}
	rcu_read_unlock();

	/*
	 * We didn't find an idle thread to use, so we need to queue the xprt.
	 * Do so and then search again. If we find one, we can't hook this one
	 * up to it directly but we can wake the thread up in the hopes that it
	 * will pick it up once it searches for an xprt to service.
	 */
	if (!queued) {
		queued = true;
		dprintk("svc: transport %p put into queue\n", xprt);
		spin_lock_bh(&pool->sp_lock);
		list_add_tail(&xprt->xpt_ready, &pool->sp_sockets);
		pool->sp_stats.sockets_queued++;
		spin_unlock_bh(&pool->sp_lock);
		goto redo_search;
	}
	rqstp = NULL;
	put_cpu();
out:
	trace_svc_xprt_do_enqueue(xprt, rqstp);
}
EXPORT_SYMBOL_GPL(svc_xprt_do_enqueue);

/*
 * Queue up a transport with data pending. If there are idle nfsd
 * processes, wake 'em up.
 *
 */
void svc_xprt_enqueue(struct svc_xprt *xprt)
{
	if (test_bit(XPT_BUSY, &xprt->xpt_flags))
		return;
	xprt->xpt_server->sv_ops->svo_enqueue_xprt(xprt);
}
EXPORT_SYMBOL_GPL(svc_xprt_enqueue);

/*
 * Dequeue the first transport, if there is one.
 */
static struct svc_xprt *svc_xprt_dequeue(struct svc_pool *pool)
{
	struct svc_xprt *xprt = NULL;

	if (list_empty(&pool->sp_sockets))
		goto out;

	spin_lock_bh(&pool->sp_lock);
	if (likely(!list_empty(&pool->sp_sockets))) {
		xprt = list_first_entry(&pool->sp_sockets,
					struct svc_xprt, xpt_ready);
		list_del_init(&xprt->xpt_ready);
		svc_xprt_get(xprt);

		dprintk("svc: transport %p dequeued, inuse=%d\n",
			xprt, atomic_read(&xprt->xpt_ref.refcount));
	}
	spin_unlock_bh(&pool->sp_lock);
out:
	trace_svc_xprt_dequeue(xprt);
	return xprt;
}

/**
 * svc_reserve - change the space reserved for the reply to a request.
 * @rqstp: The request in question
 * @space: new max space to reserve
 *
 * Each request reserves some space on the output queue of the transport
 * to make sure the reply fits. This function reduces that reserved
 * space to be the amount of space used already, plus @space.
 *
 */
void svc_reserve(struct svc_rqst *rqstp, int space)
{
	space += rqstp->rq_res.head[0].iov_len;

	if (space < rqstp->rq_reserved) {
		struct svc_xprt *xprt = rqstp->rq_xprt;
		atomic_sub((rqstp->rq_reserved - space), &xprt->xpt_reserved);
		rqstp->rq_reserved = space;

		if (xprt->xpt_ops->xpo_adjust_wspace)
			xprt->xpt_ops->xpo_adjust_wspace(xprt);
		svc_xprt_enqueue(xprt);
	}
}
EXPORT_SYMBOL_GPL(svc_reserve);
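
/*
 * Example (illustrative): once a request handler knows an upper bound
 * on the size of its reply, it can shrink the reservation so that
 * write space is freed up for other requests on the same transport:
 *
 *	svc_reserve(rqstp, max_reply_size);
 *
 * Passing 0 releases everything beyond what the reply has already
 * consumed, which is how svc_xprt_release() below drops the
 * reservation when a request completes.
 */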

static void svc_xprt_release(struct svc_rqst *rqstp)
{
	struct svc_xprt *xprt = rqstp->rq_xprt;

	rqstp->rq_xprt->xpt_ops->xpo_release_rqst(rqstp);

	kfree(rqstp->rq_deferred);
	rqstp->rq_deferred = NULL;

	svc_free_res_pages(rqstp);
	rqstp->rq_res.page_len = 0;
	rqstp->rq_res.page_base = 0;

	/* Reset response buffer and release
	 * the reservation.
	 * But first, check that enough space was reserved
	 * for the reply, otherwise we have a bug!
	 */
	if ((rqstp->rq_res.len) > rqstp->rq_reserved)
		printk(KERN_ERR "RPC request reserved %d but used %d\n",
		       rqstp->rq_reserved,
		       rqstp->rq_res.len);

	rqstp->rq_res.head[0].iov_len = 0;
	svc_reserve(rqstp, 0);
	rqstp->rq_xprt = NULL;

	svc_xprt_put(xprt);
}

/*
 * Some svc_serv's will have occasional work to do, even when an xprt is not
 * waiting to be serviced. This function is there to "kick" a task in one of
 * those services so that it can wake up and do that work. Note that we only
 * bother with pool 0 as we don't need to wake up more than one thread for
 * this purpose.
 */
void svc_wake_up(struct svc_serv *serv)
{
	struct svc_rqst *rqstp;
	struct svc_pool *pool;

	pool = &serv->sv_pools[0];

	rcu_read_lock();
	list_for_each_entry_rcu(rqstp, &pool->sp_all_threads, rq_all) {
		/* skip any that aren't queued */
		if (test_bit(RQ_BUSY, &rqstp->rq_flags))
			continue;
		rcu_read_unlock();
		dprintk("svc: daemon %p woken up.\n", rqstp);
		wake_up_process(rqstp->rq_task);
		trace_svc_wake_up(rqstp->rq_task->pid);
		return;
	}
	rcu_read_unlock();

	/* No free entries available */
	set_bit(SP_TASK_PENDING, &pool->sp_flags);
	smp_wmb();
	trace_svc_wake_up(0);
}
EXPORT_SYMBOL_GPL(svc_wake_up);

int svc_port_is_privileged(struct sockaddr *sin)
{
	switch (sin->sa_family) {
	case AF_INET:
		return ntohs(((struct sockaddr_in *)sin)->sin_port)
			< PROT_SOCK;
	case AF_INET6:
		return ntohs(((struct sockaddr_in6 *)sin)->sin6_port)
			< PROT_SOCK;
	default:
		return 0;
	}
}

/*
 * Make sure that we don't have too many active connections. If we have,
 * something must be dropped. It's not clear what will happen if we allow
 * "too many" connections, but when dealing with network-facing software,
 * we have to code defensively. Here we do that by imposing hard limits.
 *
 * There's no point in trying to do random drop here for DoS
 * prevention. The NFS client does one reconnect in 15 seconds. An
 * attacker can easily beat that.
 *
 * The only somewhat efficient mechanism would be to drop old
 * connections from the same IP first. But right now we don't even
 * record the client IP in svc_sock.
 *
 * Single-threaded services that expect a lot of clients will probably
 * need to set sv_maxconn to override the default value, which is based
 * on the number of threads.
 */
static void svc_check_conn_limits(struct svc_serv *serv)
{
	unsigned int limit = serv->sv_maxconn ? serv->sv_maxconn :
				(serv->sv_nrthreads+3) * 20;

	if (serv->sv_tmpcnt > limit) {
		struct svc_xprt *xprt = NULL;
		spin_lock_bh(&serv->sv_lock);
		if (!list_empty(&serv->sv_tempsocks)) {
			/* Try to help the admin */
			net_notice_ratelimited("%s: too many open connections, consider increasing the %s\n",
					       serv->sv_name, serv->sv_maxconn ?
					       "max number of connections" :
					       "number of threads");
			/*
			 * Always select the oldest connection. It's not fair,
			 * but so is life
			 */
			xprt = list_entry(serv->sv_tempsocks.prev,
					  struct svc_xprt,
					  xpt_list);
			set_bit(XPT_CLOSE, &xprt->xpt_flags);
			svc_xprt_get(xprt);
		}
		spin_unlock_bh(&serv->sv_lock);

		if (xprt) {
			svc_xprt_enqueue(xprt);
			svc_xprt_put(xprt);
		}
	}
}

static int svc_alloc_arg(struct svc_rqst *rqstp)
{
	struct svc_serv *serv = rqstp->rq_server;
	struct xdr_buf *arg;
	int pages;
	int i;

	/* now allocate needed pages. If we get a failure, sleep briefly */
	pages = (serv->sv_max_mesg + PAGE_SIZE) / PAGE_SIZE;
	WARN_ON_ONCE(pages >= RPCSVC_MAXPAGES);
	if (pages >= RPCSVC_MAXPAGES)
		/* use as many pages as possible */
		pages = RPCSVC_MAXPAGES - 1;
	for (i = 0; i < pages ; i++)
		while (rqstp->rq_pages[i] == NULL) {
			struct page *p = alloc_page(GFP_KERNEL);
			if (!p) {
				set_current_state(TASK_INTERRUPTIBLE);
				if (signalled() || kthread_should_stop()) {
					set_current_state(TASK_RUNNING);
					return -EINTR;
				}
				schedule_timeout(msecs_to_jiffies(500));
			}
			rqstp->rq_pages[i] = p;
		}
	rqstp->rq_page_end = &rqstp->rq_pages[i];
	rqstp->rq_pages[i++] = NULL; /* this might be seen in nfs_read_actor */

	/* Make arg->head point to first page and arg->pages point to rest */
	arg = &rqstp->rq_arg;
	arg->head[0].iov_base = page_address(rqstp->rq_pages[0]);
	arg->head[0].iov_len = PAGE_SIZE;
	arg->pages = rqstp->rq_pages + 1;
	arg->page_base = 0;
	/* save at least one page for response */
	arg->page_len = (pages-2)*PAGE_SIZE;
	arg->len = (pages-1)*PAGE_SIZE;
	arg->tail[0].iov_len = 0;
	return 0;
}

static bool
rqst_should_sleep(struct svc_rqst *rqstp)
{
	struct svc_pool *pool = rqstp->rq_pool;

	/* did someone call svc_wake_up? */
	if (test_and_clear_bit(SP_TASK_PENDING, &pool->sp_flags))
		return false;

	/* was a socket queued? */
	if (!list_empty(&pool->sp_sockets))
		return false;

	/* are we shutting down? */
	if (signalled() || kthread_should_stop())
		return false;

	/* are we freezing? */
	if (freezing(current))
		return false;

	return true;
}

static struct svc_xprt *svc_get_next_xprt(struct svc_rqst *rqstp, long timeout)
{
	struct svc_xprt *xprt;
	struct svc_pool *pool = rqstp->rq_pool;
	long time_left = 0;

	/* rq_xprt should be clear on entry */
	WARN_ON_ONCE(rqstp->rq_xprt);

	/* Normally we will wait up to 5 seconds for any required
	 * cache information to be provided.
	 */
	rqstp->rq_chandle.thread_wait = 5*HZ;

	xprt = svc_xprt_dequeue(pool);
	if (xprt) {
		rqstp->rq_xprt = xprt;

		/* As there is a shortage of threads and this request
		 * had to be queued, don't allow the thread to wait so
		 * long for cache updates.
		 */
		rqstp->rq_chandle.thread_wait = 1*HZ;
		clear_bit(SP_TASK_PENDING, &pool->sp_flags);
		return xprt;
	}

	/*
	 * We have to be able to interrupt this wait
	 * to bring down the daemons ...
	 */
	set_current_state(TASK_INTERRUPTIBLE);
	clear_bit(RQ_BUSY, &rqstp->rq_flags);
	smp_mb();

	if (likely(rqst_should_sleep(rqstp)))
		time_left = schedule_timeout(timeout);
	else
		__set_current_state(TASK_RUNNING);

	try_to_freeze();

	spin_lock_bh(&rqstp->rq_lock);
	set_bit(RQ_BUSY, &rqstp->rq_flags);
	spin_unlock_bh(&rqstp->rq_lock);

	xprt = rqstp->rq_xprt;
	if (xprt != NULL)
		return xprt;

	if (!time_left)
		atomic_long_inc(&pool->sp_stats.threads_timedout);

	if (signalled() || kthread_should_stop())
		return ERR_PTR(-EINTR);
	return ERR_PTR(-EAGAIN);
}

static void svc_add_new_temp_xprt(struct svc_serv *serv, struct svc_xprt *newxpt)
{
	spin_lock_bh(&serv->sv_lock);
	set_bit(XPT_TEMP, &newxpt->xpt_flags);
	list_add(&newxpt->xpt_list, &serv->sv_tempsocks);
	serv->sv_tmpcnt++;
	if (serv->sv_temptimer.function == NULL) {
		/* setup timer to age temp transports */
		setup_timer(&serv->sv_temptimer, svc_age_temp_xprts,
			    (unsigned long)serv);
		mod_timer(&serv->sv_temptimer,
			  jiffies + svc_conn_age_period * HZ);
	}
	spin_unlock_bh(&serv->sv_lock);
	svc_xprt_received(newxpt);
}

static int svc_handle_xprt(struct svc_rqst *rqstp, struct svc_xprt *xprt)
{
	struct svc_serv *serv = rqstp->rq_server;
	int len = 0;

	if (test_bit(XPT_CLOSE, &xprt->xpt_flags)) {
		dprintk("svc_recv: found XPT_CLOSE\n");
		svc_delete_xprt(xprt);
		/* Leave XPT_BUSY set on the dead xprt: */
		goto out;
	}
	if (test_bit(XPT_LISTENER, &xprt->xpt_flags)) {
		struct svc_xprt *newxpt;
		/*
		 * We know this module_get will succeed because the
		 * listener holds a reference too
		 */
		__module_get(xprt->xpt_class->xcl_owner);
		svc_check_conn_limits(xprt->xpt_server);
		newxpt = xprt->xpt_ops->xpo_accept(xprt);
		if (newxpt)
			svc_add_new_temp_xprt(serv, newxpt);
		else
			module_put(xprt->xpt_class->xcl_owner);
	} else {
		/* XPT_DATA|XPT_DEFERRED case: */
		dprintk("svc: server %p, pool %u, transport %p, inuse=%d\n",
			rqstp, rqstp->rq_pool->sp_id, xprt,
			atomic_read(&xprt->xpt_ref.refcount));
		rqstp->rq_deferred = svc_deferred_dequeue(xprt);
		if (rqstp->rq_deferred)
			len = svc_deferred_recv(rqstp);
		else
			len = xprt->xpt_ops->xpo_recvfrom(rqstp);
		dprintk("svc: got len=%d\n", len);
		rqstp->rq_reserved = serv->sv_max_mesg;
		atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved);
	}
	/* clear XPT_BUSY: */
	svc_xprt_received(xprt);
out:
	trace_svc_handle_xprt(xprt, len);
	return len;
}

/*
 * Receive the next request on any transport. This code is carefully
 * organised not to touch any cachelines in the shared svc_serv
 * structure, only cachelines in the local svc_pool.
 */
int svc_recv(struct svc_rqst *rqstp, long timeout)
{
	struct svc_xprt *xprt = NULL;
	struct svc_serv *serv = rqstp->rq_server;
	int len, err;

	dprintk("svc: server %p waiting for data (to = %ld)\n",
		rqstp, timeout);

	if (rqstp->rq_xprt)
		printk(KERN_ERR
			"svc_recv: service %p, transport not NULL!\n",
			rqstp);

	err = svc_alloc_arg(rqstp);
	if (err)
		goto out;

	try_to_freeze();
	cond_resched();
	err = -EINTR;
	if (signalled() || kthread_should_stop())
		goto out;

	xprt = svc_get_next_xprt(rqstp, timeout);
	if (IS_ERR(xprt)) {
		err = PTR_ERR(xprt);
		goto out;
	}

	len = svc_handle_xprt(rqstp, xprt);

	/* No data, incomplete (TCP) read, or accept() */
	err = -EAGAIN;
	if (len <= 0)
		goto out_release;

	clear_bit(XPT_OLD, &xprt->xpt_flags);

	if (xprt->xpt_ops->xpo_secure_port(rqstp))
		set_bit(RQ_SECURE, &rqstp->rq_flags);
	else
		clear_bit(RQ_SECURE, &rqstp->rq_flags);
	rqstp->rq_chandle.defer = svc_defer;
	rqstp->rq_xid = svc_getu32(&rqstp->rq_arg.head[0]);

	if (serv->sv_stats)
		serv->sv_stats->netcnt++;
	trace_svc_recv(rqstp, len);
	return len;
out_release:
	rqstp->rq_res.len = 0;
	svc_xprt_release(rqstp);
out:
	trace_svc_recv(rqstp, err);
	return err;
}
EXPORT_SYMBOL_GPL(svc_recv);

/*
 * Drop request
 */
void svc_drop(struct svc_rqst *rqstp)
{
	dprintk("svc: xprt %p dropped request\n", rqstp->rq_xprt);
	svc_xprt_release(rqstp);
}
EXPORT_SYMBOL_GPL(svc_drop);

/*
 * Return reply to client.
 */
int svc_send(struct svc_rqst *rqstp)
{
	struct svc_xprt *xprt;
	int len = -EFAULT;
	struct xdr_buf *xb;

	xprt = rqstp->rq_xprt;
	if (!xprt)
		goto out;

	/* release the receive skb before sending the reply */
	rqstp->rq_xprt->xpt_ops->xpo_release_rqst(rqstp);

	/* calculate over-all length */
	xb = &rqstp->rq_res;
	xb->len = xb->head[0].iov_len +
		xb->page_len +
		xb->tail[0].iov_len;

	/* Grab mutex to serialize outgoing data. */
	mutex_lock(&xprt->xpt_mutex);
	if (test_bit(XPT_DEAD, &xprt->xpt_flags)
			|| test_bit(XPT_CLOSE, &xprt->xpt_flags))
		len = -ENOTCONN;
	else
		len = xprt->xpt_ops->xpo_sendto(rqstp);
	mutex_unlock(&xprt->xpt_mutex);
	rpc_wake_up(&xprt->xpt_bc_pending);
	svc_xprt_release(rqstp);

	if (len == -ECONNREFUSED || len == -ENOTCONN || len == -EAGAIN)
		len = 0;
out:
	trace_svc_send(rqstp, len);
	return len;
}

/*
 * Timer function to close old temporary transports, using
 * a mark-and-sweep algorithm.
 */
static void svc_age_temp_xprts(unsigned long closure)
{
	struct svc_serv *serv = (struct svc_serv *)closure;
	struct svc_xprt *xprt;
	struct list_head *le, *next;

	dprintk("svc_age_temp_xprts\n");

	if (!spin_trylock_bh(&serv->sv_lock)) {
		/* busy, try again 1 sec later */
		dprintk("svc_age_temp_xprts: busy\n");
		mod_timer(&serv->sv_temptimer, jiffies + HZ);
		return;
	}

	list_for_each_safe(le, next, &serv->sv_tempsocks) {
		xprt = list_entry(le, struct svc_xprt, xpt_list);

		/* First time through, just mark it OLD. Second time
		 * through, close it. */
		if (!test_and_set_bit(XPT_OLD, &xprt->xpt_flags))
			continue;
		if (atomic_read(&xprt->xpt_ref.refcount) > 1 ||
		    test_bit(XPT_BUSY, &xprt->xpt_flags))
			continue;
		list_del_init(le);
		set_bit(XPT_CLOSE, &xprt->xpt_flags);
		dprintk("queuing xprt %p for closing\n", xprt);

		/* a thread will dequeue and close it soon */
		svc_xprt_enqueue(xprt);
	}
	spin_unlock_bh(&serv->sv_lock);

	mod_timer(&serv->sv_temptimer, jiffies + svc_conn_age_period * HZ);
}

/* Close temporary transports whose xpt_local matches server_addr immediately
 * instead of waiting for them to be picked up by the timer.
 *
 * This is meant to be called from a notifier_block that runs when an ip
 * address is deleted.
 */
void svc_age_temp_xprts_now(struct svc_serv *serv, struct sockaddr *server_addr)
{
	struct svc_xprt *xprt;
	struct svc_sock *svsk;
	struct socket *sock;
	struct list_head *le, *next;
	LIST_HEAD(to_be_closed);
	struct linger no_linger = {
		.l_onoff = 1,
		.l_linger = 0,
	};

	spin_lock_bh(&serv->sv_lock);
	list_for_each_safe(le, next, &serv->sv_tempsocks) {
		xprt = list_entry(le, struct svc_xprt, xpt_list);
		if (rpc_cmp_addr(server_addr, (struct sockaddr *)
				 &xprt->xpt_local)) {
			dprintk("svc_age_temp_xprts_now: found %p\n", xprt);
			list_move(le, &to_be_closed);
		}
	}
	spin_unlock_bh(&serv->sv_lock);

	while (!list_empty(&to_be_closed)) {
		le = to_be_closed.next;
		list_del_init(le);
		xprt = list_entry(le, struct svc_xprt, xpt_list);
		dprintk("svc_age_temp_xprts_now: closing %p\n", xprt);
		svsk = container_of(xprt, struct svc_sock, sk_xprt);
		sock = svsk->sk_sock;
		kernel_setsockopt(sock, SOL_SOCKET, SO_LINGER,
				  (char *)&no_linger, sizeof(no_linger));
		svc_close_xprt(xprt);
	}
}
EXPORT_SYMBOL_GPL(svc_age_temp_xprts_now);

static void call_xpt_users(struct svc_xprt *xprt)
{
	struct svc_xpt_user *u;

	spin_lock(&xprt->xpt_lock);
	while (!list_empty(&xprt->xpt_users)) {
		u = list_first_entry(&xprt->xpt_users, struct svc_xpt_user, list);
		list_del(&u->list);
		u->callback(u);
	}
	spin_unlock(&xprt->xpt_lock);
}

/*
 * Remove a dead transport
 */
static void svc_delete_xprt(struct svc_xprt *xprt)
{
	struct svc_serv *serv = xprt->xpt_server;
	struct svc_deferred_req *dr;

	/* Only do this once */
	if (test_and_set_bit(XPT_DEAD, &xprt->xpt_flags))
		BUG();

	dprintk("svc: svc_delete_xprt(%p)\n", xprt);
	xprt->xpt_ops->xpo_detach(xprt);

	spin_lock_bh(&serv->sv_lock);
	list_del_init(&xprt->xpt_list);
	WARN_ON_ONCE(!list_empty(&xprt->xpt_ready));
	if (test_bit(XPT_TEMP, &xprt->xpt_flags))
		serv->sv_tmpcnt--;
	spin_unlock_bh(&serv->sv_lock);

	while ((dr = svc_deferred_dequeue(xprt)) != NULL)
		kfree(dr);

	call_xpt_users(xprt);
	svc_xprt_put(xprt);
}

void svc_close_xprt(struct svc_xprt *xprt)
{
	set_bit(XPT_CLOSE, &xprt->xpt_flags);
	if (test_and_set_bit(XPT_BUSY, &xprt->xpt_flags))
		/* someone else will have to effect the close */
		return;
	/*
	 * We expect svc_close_xprt() to work even when no threads are
	 * running (e.g., while configuring the server before starting
	 * any threads), so if the transport isn't busy, we delete
	 * it ourselves:
	 */
	svc_delete_xprt(xprt);
}
EXPORT_SYMBOL_GPL(svc_close_xprt);

static int svc_close_list(struct svc_serv *serv, struct list_head *xprt_list, struct net *net)
{
	struct svc_xprt *xprt;
	int ret = 0;

	spin_lock(&serv->sv_lock);
	list_for_each_entry(xprt, xprt_list, xpt_list) {
		if (xprt->xpt_net != net)
			continue;
		ret++;
		set_bit(XPT_CLOSE, &xprt->xpt_flags);
		svc_xprt_enqueue(xprt);
	}
	spin_unlock(&serv->sv_lock);
	return ret;
}

static struct svc_xprt *svc_dequeue_net(struct svc_serv *serv, struct net *net)
{
	struct svc_pool *pool;
	struct svc_xprt *xprt;
	struct svc_xprt *tmp;
	int i;

	for (i = 0; i < serv->sv_nrpools; i++) {
		pool = &serv->sv_pools[i];

		spin_lock_bh(&pool->sp_lock);
		list_for_each_entry_safe(xprt, tmp, &pool->sp_sockets, xpt_ready) {
			if (xprt->xpt_net != net)
				continue;
			list_del_init(&xprt->xpt_ready);
			spin_unlock_bh(&pool->sp_lock);
			return xprt;
		}
		spin_unlock_bh(&pool->sp_lock);
	}
	return NULL;
}

static void svc_clean_up_xprts(struct svc_serv *serv, struct net *net)
{
	struct svc_xprt *xprt;

	while ((xprt = svc_dequeue_net(serv, net))) {
		set_bit(XPT_CLOSE, &xprt->xpt_flags);
		svc_delete_xprt(xprt);
	}
}

/*
 * Server threads may still be running (especially in the case where the
 * service is still running in other network namespaces).
 *
 * So we shut down sockets the same way we would on a running server, by
 * setting XPT_CLOSE, enqueuing, and letting a thread pick it up to do
 * the close. In the case where there are no such other threads running,
 * svc_clean_up_xprts() does a simple version of a server's main event
 * loop, and in the case where there are other threads, we may need to
 * wait a little while and then check again to see if they're done.
 */
void svc_close_net(struct svc_serv *serv, struct net *net)
{
	int delay = 0;

	while (svc_close_list(serv, &serv->sv_permsocks, net) +
	       svc_close_list(serv, &serv->sv_tempsocks, net)) {

		svc_clean_up_xprts(serv, net);
		msleep(delay++);
	}
}

/*
 * Handle defer and revisit of requests
 */

static void svc_revisit(struct cache_deferred_req *dreq, int too_many)
{
	struct svc_deferred_req *dr =
		container_of(dreq, struct svc_deferred_req, handle);
	struct svc_xprt *xprt = dr->xprt;

	spin_lock(&xprt->xpt_lock);
	set_bit(XPT_DEFERRED, &xprt->xpt_flags);
	if (too_many || test_bit(XPT_DEAD, &xprt->xpt_flags)) {
		spin_unlock(&xprt->xpt_lock);
		dprintk("revisit canceled\n");
		svc_xprt_put(xprt);
		kfree(dr);
		return;
	}
	dprintk("revisit queued\n");
	dr->xprt = NULL;
	list_add(&dr->handle.recent, &xprt->xpt_deferred);
	spin_unlock(&xprt->xpt_lock);
	svc_xprt_enqueue(xprt);
	svc_xprt_put(xprt);
}

/*
 * Save the request off for later processing. The request buffer looks
 * like this:
 *
 * <xprt-header><rpc-header><rpc-pagelist><rpc-tail>
 *
 * This code can only handle requests that consist of an xprt-header
 * and rpc-header.
 */
static struct cache_deferred_req *svc_defer(struct cache_req *req)
{
	struct svc_rqst *rqstp = container_of(req, struct svc_rqst, rq_chandle);
	struct svc_deferred_req *dr;

	if (rqstp->rq_arg.page_len || !test_bit(RQ_USEDEFERRAL, &rqstp->rq_flags))
		return NULL; /* if more than a page, give up FIXME */
	if (rqstp->rq_deferred) {
		dr = rqstp->rq_deferred;
		rqstp->rq_deferred = NULL;
	} else {
		size_t skip;
		size_t size;
		/* FIXME maybe discard if size too large */
		size = sizeof(struct svc_deferred_req) + rqstp->rq_arg.len;
		dr = kmalloc(size, GFP_KERNEL);
		if (dr == NULL)
			return NULL;

		dr->handle.owner = rqstp->rq_server;
		dr->prot = rqstp->rq_prot;
		memcpy(&dr->addr, &rqstp->rq_addr, rqstp->rq_addrlen);
		dr->addrlen = rqstp->rq_addrlen;
		dr->daddr = rqstp->rq_daddr;
		dr->argslen = rqstp->rq_arg.len >> 2;
		dr->xprt_hlen = rqstp->rq_xprt_hlen;

		/* back up head to the start of the buffer and copy */
		skip = rqstp->rq_arg.len - rqstp->rq_arg.head[0].iov_len;
		memcpy(dr->args, rqstp->rq_arg.head[0].iov_base - skip,
		       dr->argslen << 2);
	}
	svc_xprt_get(rqstp->rq_xprt);
	dr->xprt = rqstp->rq_xprt;
	set_bit(RQ_DROPME, &rqstp->rq_flags);

	dr->handle.revisit = svc_revisit;
	return &dr->handle;
}
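
/*
 * Deferral flow, for orientation: a cache miss during request
 * processing calls rqstp->rq_chandle.defer (svc_defer above), which
 * snapshots the request and lets it be dropped. When the cache item
 * later becomes available, the cache layer calls dr->handle.revisit
 * (svc_revisit), which requeues the snapshot on the transport's
 * xpt_deferred list; the next svc_recv() on that transport replays it
 * via svc_deferred_dequeue() and svc_deferred_recv() below.
 */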

/*
 * recv data from a deferred request into an active one
 */
static int svc_deferred_recv(struct svc_rqst *rqstp)
{
	struct svc_deferred_req *dr = rqstp->rq_deferred;

	/* setup iov_base past transport header */
	rqstp->rq_arg.head[0].iov_base = dr->args + (dr->xprt_hlen>>2);
	/* The iov_len does not include the transport header bytes */
	rqstp->rq_arg.head[0].iov_len = (dr->argslen<<2) - dr->xprt_hlen;
	rqstp->rq_arg.page_len = 0;
	/* The rq_arg.len includes the transport header bytes */
	rqstp->rq_arg.len = dr->argslen<<2;
	rqstp->rq_prot = dr->prot;
	memcpy(&rqstp->rq_addr, &dr->addr, dr->addrlen);
	rqstp->rq_addrlen = dr->addrlen;
	/* Save off transport header len in case we get deferred again */
	rqstp->rq_xprt_hlen = dr->xprt_hlen;
	rqstp->rq_daddr = dr->daddr;
	rqstp->rq_respages = rqstp->rq_pages;
	return (dr->argslen<<2) - dr->xprt_hlen;
}

static struct svc_deferred_req *svc_deferred_dequeue(struct svc_xprt *xprt)
{
	struct svc_deferred_req *dr = NULL;

	if (!test_bit(XPT_DEFERRED, &xprt->xpt_flags))
		return NULL;
	spin_lock(&xprt->xpt_lock);
	if (!list_empty(&xprt->xpt_deferred)) {
		dr = list_entry(xprt->xpt_deferred.next,
				struct svc_deferred_req,
				handle.recent);
		list_del_init(&dr->handle.recent);
	} else
		clear_bit(XPT_DEFERRED, &xprt->xpt_flags);
	spin_unlock(&xprt->xpt_lock);
	return dr;
}

/**
 * svc_find_xprt - find an RPC transport instance
 * @serv: pointer to svc_serv to search
 * @xcl_name: C string containing transport's class name
 * @net: owner net pointer
 * @af: Address family of transport's local address
 * @port: transport's IP port number
 *
 * Return the transport instance pointer for the endpoint accepting
 * connections/peer traffic from the specified transport class,
 * address family and port.
 *
 * Specifying 0 for the address family or port is effectively a
 * wild-card, and will result in matching the first transport in the
 * service's list that has a matching class name.
 */
struct svc_xprt *svc_find_xprt(struct svc_serv *serv, const char *xcl_name,
			       struct net *net, const sa_family_t af,
			       const unsigned short port)
{
	struct svc_xprt *xprt;
	struct svc_xprt *found = NULL;

	/* Sanity check the args */
	if (serv == NULL || xcl_name == NULL)
		return found;

	spin_lock_bh(&serv->sv_lock);
	list_for_each_entry(xprt, &serv->sv_permsocks, xpt_list) {
		if (xprt->xpt_net != net)
			continue;
		if (strcmp(xprt->xpt_class->xcl_name, xcl_name))
			continue;
		if (af != AF_UNSPEC && af != xprt->xpt_local.ss_family)
			continue;
		if (port != 0 && port != svc_xprt_local_port(xprt))
			continue;
		found = xprt;
		svc_xprt_get(xprt);
		break;
	}
	spin_unlock_bh(&serv->sv_lock);
	return found;
}
EXPORT_SYMBOL_GPL(svc_find_xprt);
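
/*
 * Example (illustrative): look up a service's TCP listener on any
 * port, remembering that svc_find_xprt() takes a reference that the
 * caller must drop:
 *
 *	struct svc_xprt *xprt;
 *
 *	xprt = svc_find_xprt(serv, "tcp", net, AF_UNSPEC, 0);
 *	if (xprt) {
 *		... use xprt ...
 *		svc_xprt_put(xprt);
 *	}
 */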

static int svc_one_xprt_name(const struct svc_xprt *xprt,
			     char *pos, int remaining)
{
	int len;

	len = snprintf(pos, remaining, "%s %u\n",
			xprt->xpt_class->xcl_name,
			svc_xprt_local_port(xprt));
	if (len >= remaining)
		return -ENAMETOOLONG;
	return len;
}

/**
 * svc_xprt_names - format a buffer with a list of transport names
 * @serv: pointer to an RPC service
 * @buf: pointer to a buffer to be filled in
 * @buflen: length of buffer to be filled in
 *
 * Fills in @buf with a string containing a list of transport names,
 * each name terminated with '\n'.
 *
 * Returns positive length of the filled-in string on success; otherwise
 * a negative errno value is returned if an error occurs.
 */
int svc_xprt_names(struct svc_serv *serv, char *buf, const int buflen)
{
	struct svc_xprt *xprt;
	int len, totlen;
	char *pos;

	/* Sanity check args */
	if (!serv)
		return 0;

	spin_lock_bh(&serv->sv_lock);

	pos = buf;
	totlen = 0;
	list_for_each_entry(xprt, &serv->sv_permsocks, xpt_list) {
		len = svc_one_xprt_name(xprt, pos, buflen - totlen);
		if (len < 0) {
			*buf = '\0';
			totlen = len;
		}
		if (len <= 0)
			break;

		pos += len;
		totlen += len;
	}

	spin_unlock_bh(&serv->sv_lock);
	return totlen;
}
EXPORT_SYMBOL_GPL(svc_xprt_names);

/*----------------------------------------------------------------------------*/

static void *svc_pool_stats_start(struct seq_file *m, loff_t *pos)
{
	unsigned int pidx = (unsigned int)*pos;
	struct svc_serv *serv = m->private;

	dprintk("svc_pool_stats_start, *pidx=%u\n", pidx);

	if (!pidx)
		return SEQ_START_TOKEN;
	return (pidx > serv->sv_nrpools ? NULL : &serv->sv_pools[pidx-1]);
}

static void *svc_pool_stats_next(struct seq_file *m, void *p, loff_t *pos)
{
	struct svc_pool *pool = p;
	struct svc_serv *serv = m->private;

	dprintk("svc_pool_stats_next, *pos=%llu\n", *pos);

	if (p == SEQ_START_TOKEN) {
		pool = &serv->sv_pools[0];
	} else {
		unsigned int pidx = (pool - &serv->sv_pools[0]);
		if (pidx < serv->sv_nrpools-1)
			pool = &serv->sv_pools[pidx+1];
		else
			pool = NULL;
	}
	++*pos;
	return pool;
}

static void svc_pool_stats_stop(struct seq_file *m, void *p)
{
}

static int svc_pool_stats_show(struct seq_file *m, void *p)
{
	struct svc_pool *pool = p;

	if (p == SEQ_START_TOKEN) {
		seq_puts(m, "# pool packets-arrived sockets-enqueued threads-woken threads-timedout\n");
		return 0;
	}

	seq_printf(m, "%u %lu %lu %lu %lu\n",
		pool->sp_id,
		(unsigned long)atomic_long_read(&pool->sp_stats.packets),
		pool->sp_stats.sockets_queued,
		(unsigned long)atomic_long_read(&pool->sp_stats.threads_woken),
		(unsigned long)atomic_long_read(&pool->sp_stats.threads_timedout));

	return 0;
}

static const struct seq_operations svc_pool_stats_seq_ops = {
	.start	= svc_pool_stats_start,
	.next	= svc_pool_stats_next,
	.stop	= svc_pool_stats_stop,
	.show	= svc_pool_stats_show,
};

int svc_pool_stats_open(struct svc_serv *serv, struct file *file)
{
	int err;

	err = seq_open(file, &svc_pool_stats_seq_ops);
	if (!err)
		((struct seq_file *) file->private_data)->private = serv;
	return err;
}
EXPORT_SYMBOL(svc_pool_stats_open);

/*----------------------------------------------------------------------------*/
1/*
2 * linux/net/sunrpc/svc_xprt.c
3 *
4 * Author: Tom Tucker <tom@opengridcomputing.com>
5 */
6
7#include <linux/sched.h>
8#include <linux/errno.h>
9#include <linux/freezer.h>
10#include <linux/kthread.h>
11#include <linux/slab.h>
12#include <net/sock.h>
13#include <linux/sunrpc/stats.h>
14#include <linux/sunrpc/svc_xprt.h>
15#include <linux/sunrpc/svcsock.h>
16#include <linux/sunrpc/xprt.h>
17
18#define RPCDBG_FACILITY RPCDBG_SVCXPRT
19
20static struct svc_deferred_req *svc_deferred_dequeue(struct svc_xprt *xprt);
21static int svc_deferred_recv(struct svc_rqst *rqstp);
22static struct cache_deferred_req *svc_defer(struct cache_req *req);
23static void svc_age_temp_xprts(unsigned long closure);
24
25/* apparently the "standard" is that clients close
26 * idle connections after 5 minutes, servers after
27 * 6 minutes
28 * http://www.connectathon.org/talks96/nfstcp.pdf
29 */
30static int svc_conn_age_period = 6*60;
31
32/* List of registered transport classes */
33static DEFINE_SPINLOCK(svc_xprt_class_lock);
34static LIST_HEAD(svc_xprt_class_list);
35
36/* SMP locking strategy:
37 *
38 * svc_pool->sp_lock protects most of the fields of that pool.
39 * svc_serv->sv_lock protects sv_tempsocks, sv_permsocks, sv_tmpcnt.
40 * when both need to be taken (rare), svc_serv->sv_lock is first.
41 * BKL protects svc_serv->sv_nrthread.
42 * svc_sock->sk_lock protects the svc_sock->sk_deferred list
43 * and the ->sk_info_authunix cache.
44 *
45 * The XPT_BUSY bit in xprt->xpt_flags prevents a transport being
46 * enqueued multiply. During normal transport processing this bit
47 * is set by svc_xprt_enqueue and cleared by svc_xprt_received.
48 * Providers should not manipulate this bit directly.
49 *
50 * Some flags can be set to certain values at any time
51 * providing that certain rules are followed:
52 *
53 * XPT_CONN, XPT_DATA:
54 * - Can be set or cleared at any time.
55 * - After a set, svc_xprt_enqueue must be called to enqueue
56 * the transport for processing.
57 * - After a clear, the transport must be read/accepted.
58 * If this succeeds, it must be set again.
59 * XPT_CLOSE:
60 * - Can set at any time. It is never cleared.
61 * XPT_DEAD:
62 * - Can only be set while XPT_BUSY is held which ensures
63 * that no other thread will be using the transport or will
64 * try to set XPT_DEAD.
65 */
66
67int svc_reg_xprt_class(struct svc_xprt_class *xcl)
68{
69 struct svc_xprt_class *cl;
70 int res = -EEXIST;
71
72 dprintk("svc: Adding svc transport class '%s'\n", xcl->xcl_name);
73
74 INIT_LIST_HEAD(&xcl->xcl_list);
75 spin_lock(&svc_xprt_class_lock);
76 /* Make sure there isn't already a class with the same name */
77 list_for_each_entry(cl, &svc_xprt_class_list, xcl_list) {
78 if (strcmp(xcl->xcl_name, cl->xcl_name) == 0)
79 goto out;
80 }
81 list_add_tail(&xcl->xcl_list, &svc_xprt_class_list);
82 res = 0;
83out:
84 spin_unlock(&svc_xprt_class_lock);
85 return res;
86}
87EXPORT_SYMBOL_GPL(svc_reg_xprt_class);
88
89void svc_unreg_xprt_class(struct svc_xprt_class *xcl)
90{
91 dprintk("svc: Removing svc transport class '%s'\n", xcl->xcl_name);
92 spin_lock(&svc_xprt_class_lock);
93 list_del_init(&xcl->xcl_list);
94 spin_unlock(&svc_xprt_class_lock);
95}
96EXPORT_SYMBOL_GPL(svc_unreg_xprt_class);
97
98/*
99 * Format the transport list for printing
100 */
101int svc_print_xprts(char *buf, int maxlen)
102{
103 struct svc_xprt_class *xcl;
104 char tmpstr[80];
105 int len = 0;
106 buf[0] = '\0';
107
108 spin_lock(&svc_xprt_class_lock);
109 list_for_each_entry(xcl, &svc_xprt_class_list, xcl_list) {
110 int slen;
111
112 sprintf(tmpstr, "%s %d\n", xcl->xcl_name, xcl->xcl_max_payload);
113 slen = strlen(tmpstr);
114 if (len + slen > maxlen)
115 break;
116 len += slen;
117 strcat(buf, tmpstr);
118 }
119 spin_unlock(&svc_xprt_class_lock);
120
121 return len;
122}
123
124static void svc_xprt_free(struct kref *kref)
125{
126 struct svc_xprt *xprt =
127 container_of(kref, struct svc_xprt, xpt_ref);
128 struct module *owner = xprt->xpt_class->xcl_owner;
129 if (test_bit(XPT_CACHE_AUTH, &xprt->xpt_flags))
130 svcauth_unix_info_release(xprt);
131 put_net(xprt->xpt_net);
132 /* See comment on corresponding get in xs_setup_bc_tcp(): */
133 if (xprt->xpt_bc_xprt)
134 xprt_put(xprt->xpt_bc_xprt);
135 xprt->xpt_ops->xpo_free(xprt);
136 module_put(owner);
137}
138
139void svc_xprt_put(struct svc_xprt *xprt)
140{
141 kref_put(&xprt->xpt_ref, svc_xprt_free);
142}
143EXPORT_SYMBOL_GPL(svc_xprt_put);
144
145/*
146 * Called by transport drivers to initialize the transport independent
147 * portion of the transport instance.
148 */
149void svc_xprt_init(struct svc_xprt_class *xcl, struct svc_xprt *xprt,
150 struct svc_serv *serv)
151{
152 memset(xprt, 0, sizeof(*xprt));
153 xprt->xpt_class = xcl;
154 xprt->xpt_ops = xcl->xcl_ops;
155 kref_init(&xprt->xpt_ref);
156 xprt->xpt_server = serv;
157 INIT_LIST_HEAD(&xprt->xpt_list);
158 INIT_LIST_HEAD(&xprt->xpt_ready);
159 INIT_LIST_HEAD(&xprt->xpt_deferred);
160 INIT_LIST_HEAD(&xprt->xpt_users);
161 mutex_init(&xprt->xpt_mutex);
162 spin_lock_init(&xprt->xpt_lock);
163 set_bit(XPT_BUSY, &xprt->xpt_flags);
164 rpc_init_wait_queue(&xprt->xpt_bc_pending, "xpt_bc_pending");
165 xprt->xpt_net = get_net(&init_net);
166}
167EXPORT_SYMBOL_GPL(svc_xprt_init);
168
169static struct svc_xprt *__svc_xpo_create(struct svc_xprt_class *xcl,
170 struct svc_serv *serv,
171 struct net *net,
172 const int family,
173 const unsigned short port,
174 int flags)
175{
176 struct sockaddr_in sin = {
177 .sin_family = AF_INET,
178 .sin_addr.s_addr = htonl(INADDR_ANY),
179 .sin_port = htons(port),
180 };
181#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
182 struct sockaddr_in6 sin6 = {
183 .sin6_family = AF_INET6,
184 .sin6_addr = IN6ADDR_ANY_INIT,
185 .sin6_port = htons(port),
186 };
187#endif /* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */
188 struct sockaddr *sap;
189 size_t len;
190
191 switch (family) {
192 case PF_INET:
193 sap = (struct sockaddr *)&sin;
194 len = sizeof(sin);
195 break;
196#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
197 case PF_INET6:
198 sap = (struct sockaddr *)&sin6;
199 len = sizeof(sin6);
200 break;
201#endif /* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */
202 default:
203 return ERR_PTR(-EAFNOSUPPORT);
204 }
205
206 return xcl->xcl_ops->xpo_create(serv, net, sap, len, flags);
207}
208
209int svc_create_xprt(struct svc_serv *serv, const char *xprt_name,
210 struct net *net, const int family,
211 const unsigned short port, int flags)
212{
213 struct svc_xprt_class *xcl;
214
215 dprintk("svc: creating transport %s[%d]\n", xprt_name, port);
216 spin_lock(&svc_xprt_class_lock);
217 list_for_each_entry(xcl, &svc_xprt_class_list, xcl_list) {
218 struct svc_xprt *newxprt;
219 unsigned short newport;
220
221 if (strcmp(xprt_name, xcl->xcl_name))
222 continue;
223
224 if (!try_module_get(xcl->xcl_owner))
225 goto err;
226
227 spin_unlock(&svc_xprt_class_lock);
228 newxprt = __svc_xpo_create(xcl, serv, net, family, port, flags);
229 if (IS_ERR(newxprt)) {
230 module_put(xcl->xcl_owner);
231 return PTR_ERR(newxprt);
232 }
233
234 clear_bit(XPT_TEMP, &newxprt->xpt_flags);
235 spin_lock_bh(&serv->sv_lock);
236 list_add(&newxprt->xpt_list, &serv->sv_permsocks);
237 spin_unlock_bh(&serv->sv_lock);
238 newport = svc_xprt_local_port(newxprt);
239 clear_bit(XPT_BUSY, &newxprt->xpt_flags);
240 return newport;
241 }
242 err:
243 spin_unlock(&svc_xprt_class_lock);
244 dprintk("svc: transport %s not found\n", xprt_name);
245
246 /* This errno is exposed to user space. Provide a reasonable
247 * perror msg for a bad transport. */
248 return -EPROTONOSUPPORT;
249}
250EXPORT_SYMBOL_GPL(svc_create_xprt);
251
252/*
253 * Copy the local and remote xprt addresses to the rqstp structure
254 */
255void svc_xprt_copy_addrs(struct svc_rqst *rqstp, struct svc_xprt *xprt)
256{
257 struct sockaddr *sin;
258
259 memcpy(&rqstp->rq_addr, &xprt->xpt_remote, xprt->xpt_remotelen);
260 rqstp->rq_addrlen = xprt->xpt_remotelen;
261
262 /*
263 * Destination address in request is needed for binding the
264 * source address in RPC replies/callbacks later.
265 */
266 sin = (struct sockaddr *)&xprt->xpt_local;
267 switch (sin->sa_family) {
268 case AF_INET:
269 rqstp->rq_daddr.addr = ((struct sockaddr_in *)sin)->sin_addr;
270 break;
271 case AF_INET6:
272 rqstp->rq_daddr.addr6 = ((struct sockaddr_in6 *)sin)->sin6_addr;
273 break;
274 }
275}
276EXPORT_SYMBOL_GPL(svc_xprt_copy_addrs);
277
278/**
279 * svc_print_addr - Format rq_addr field for printing
280 * @rqstp: svc_rqst struct containing address to print
281 * @buf: target buffer for formatted address
282 * @len: length of target buffer
283 *
284 */
285char *svc_print_addr(struct svc_rqst *rqstp, char *buf, size_t len)
286{
287 return __svc_print_addr(svc_addr(rqstp), buf, len);
288}
289EXPORT_SYMBOL_GPL(svc_print_addr);
290
291/*
292 * Queue up an idle server thread. Must have pool->sp_lock held.
293 * Note: this is really a stack rather than a queue, so that we only
294 * use as many different threads as we need, and the rest don't pollute
295 * the cache.
296 */
297static void svc_thread_enqueue(struct svc_pool *pool, struct svc_rqst *rqstp)
298{
299 list_add(&rqstp->rq_list, &pool->sp_threads);
300}
301
302/*
303 * Dequeue an nfsd thread. Must have pool->sp_lock held.
304 */
305static void svc_thread_dequeue(struct svc_pool *pool, struct svc_rqst *rqstp)
306{
307 list_del(&rqstp->rq_list);
308}
309
310static bool svc_xprt_has_something_to_do(struct svc_xprt *xprt)
311{
312 if (xprt->xpt_flags & ((1<<XPT_CONN)|(1<<XPT_CLOSE)))
313 return true;
314 if (xprt->xpt_flags & ((1<<XPT_DATA)|(1<<XPT_DEFERRED)))
315 return xprt->xpt_ops->xpo_has_wspace(xprt);
316 return false;
317}
318
319/*
320 * Queue up a transport with data pending. If there are idle nfsd
321 * processes, wake 'em up.
322 *
323 */
324void svc_xprt_enqueue(struct svc_xprt *xprt)
325{
326 struct svc_serv *serv = xprt->xpt_server;
327 struct svc_pool *pool;
328 struct svc_rqst *rqstp;
329 int cpu;
330
331 if (!svc_xprt_has_something_to_do(xprt))
332 return;
333
334 cpu = get_cpu();
335 pool = svc_pool_for_cpu(xprt->xpt_server, cpu);
336 put_cpu();
337
338 spin_lock_bh(&pool->sp_lock);
339
340 if (!list_empty(&pool->sp_threads) &&
341 !list_empty(&pool->sp_sockets))
342 printk(KERN_ERR
343 "svc_xprt_enqueue: "
344 "threads and transports both waiting??\n");
345
346 pool->sp_stats.packets++;
347
348 /* Mark transport as busy. It will remain in this state until
349 * the provider calls svc_xprt_received. We update XPT_BUSY
350 * atomically because it also guards against trying to enqueue
351 * the transport twice.
352 */
353 if (test_and_set_bit(XPT_BUSY, &xprt->xpt_flags)) {
354 /* Don't enqueue transport while already enqueued */
355 dprintk("svc: transport %p busy, not enqueued\n", xprt);
356 goto out_unlock;
357 }
358
359 if (!list_empty(&pool->sp_threads)) {
360 rqstp = list_entry(pool->sp_threads.next,
361 struct svc_rqst,
362 rq_list);
363 dprintk("svc: transport %p served by daemon %p\n",
364 xprt, rqstp);
365 svc_thread_dequeue(pool, rqstp);
366 if (rqstp->rq_xprt)
367 printk(KERN_ERR
368 "svc_xprt_enqueue: server %p, rq_xprt=%p!\n",
369 rqstp, rqstp->rq_xprt);
370 rqstp->rq_xprt = xprt;
371 svc_xprt_get(xprt);
372 rqstp->rq_reserved = serv->sv_max_mesg;
373 atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved);
374 pool->sp_stats.threads_woken++;
375 wake_up(&rqstp->rq_wait);
376 } else {
377 dprintk("svc: transport %p put into queue\n", xprt);
378 list_add_tail(&xprt->xpt_ready, &pool->sp_sockets);
379 pool->sp_stats.sockets_queued++;
380 }
381
382out_unlock:
383 spin_unlock_bh(&pool->sp_lock);
384}
385EXPORT_SYMBOL_GPL(svc_xprt_enqueue);
386
387/*
388 * Dequeue the first transport. Must be called with the pool->sp_lock held.
389 */
390static struct svc_xprt *svc_xprt_dequeue(struct svc_pool *pool)
391{
392 struct svc_xprt *xprt;
393
394 if (list_empty(&pool->sp_sockets))
395 return NULL;
396
397 xprt = list_entry(pool->sp_sockets.next,
398 struct svc_xprt, xpt_ready);
399 list_del_init(&xprt->xpt_ready);
400
401 dprintk("svc: transport %p dequeued, inuse=%d\n",
402 xprt, atomic_read(&xprt->xpt_ref.refcount));
403
404 return xprt;
405}
406
407/*
408 * svc_xprt_received conditionally queues the transport for processing
409 * by another thread. The caller must hold the XPT_BUSY bit and must
410 * not thereafter touch transport data.
411 *
412 * Note: XPT_DATA only gets cleared when a read-attempt finds no (or
413 * insufficient) data.
414 */
415void svc_xprt_received(struct svc_xprt *xprt)
416{
417 BUG_ON(!test_bit(XPT_BUSY, &xprt->xpt_flags));
418 /* As soon as we clear busy, the xprt could be closed and
419 * 'put', so we need a reference to call svc_xprt_enqueue with:
420 */
421 svc_xprt_get(xprt);
422 clear_bit(XPT_BUSY, &xprt->xpt_flags);
423 svc_xprt_enqueue(xprt);
424 svc_xprt_put(xprt);
425}
426EXPORT_SYMBOL_GPL(svc_xprt_received);
427
428/**
429 * svc_reserve - change the space reserved for the reply to a request.
430 * @rqstp: The request in question
431 * @space: new max space to reserve
432 *
433 * Each request reserves some space on the output queue of the transport
434 * to make sure the reply fits. This function reduces that reserved
435 * space to be the amount of space used already, plus @space.
436 *
437 */
438void svc_reserve(struct svc_rqst *rqstp, int space)
439{
440 space += rqstp->rq_res.head[0].iov_len;
441
442 if (space < rqstp->rq_reserved) {
443 struct svc_xprt *xprt = rqstp->rq_xprt;
444 atomic_sub((rqstp->rq_reserved - space), &xprt->xpt_reserved);
445 rqstp->rq_reserved = space;
446
447 svc_xprt_enqueue(xprt);
448 }
449}
450EXPORT_SYMBOL_GPL(svc_reserve);
451
452static void svc_xprt_release(struct svc_rqst *rqstp)
453{
454 struct svc_xprt *xprt = rqstp->rq_xprt;
455
456 rqstp->rq_xprt->xpt_ops->xpo_release_rqst(rqstp);
457
458 kfree(rqstp->rq_deferred);
459 rqstp->rq_deferred = NULL;
460
461 svc_free_res_pages(rqstp);
462 rqstp->rq_res.page_len = 0;
463 rqstp->rq_res.page_base = 0;
464
465 /* Reset response buffer and release
466 * the reservation.
467 * But first, check that enough space was reserved
468 * for the reply, otherwise we have a bug!
469 */
470 if ((rqstp->rq_res.len) > rqstp->rq_reserved)
471 printk(KERN_ERR "RPC request reserved %d but used %d\n",
472 rqstp->rq_reserved,
473 rqstp->rq_res.len);
474
475 rqstp->rq_res.head[0].iov_len = 0;
476 svc_reserve(rqstp, 0);
477 rqstp->rq_xprt = NULL;
478
479 svc_xprt_put(xprt);
480}
481
482/*
483 * External function to wake up a server waiting for data
484 * This really only makes sense for services like lockd
485 * which have exactly one thread anyway.
486 */
487void svc_wake_up(struct svc_serv *serv)
488{
489 struct svc_rqst *rqstp;
490 unsigned int i;
491 struct svc_pool *pool;
492
493 for (i = 0; i < serv->sv_nrpools; i++) {
494 pool = &serv->sv_pools[i];
495
496 spin_lock_bh(&pool->sp_lock);
497 if (!list_empty(&pool->sp_threads)) {
498 rqstp = list_entry(pool->sp_threads.next,
499 struct svc_rqst,
500 rq_list);
501 dprintk("svc: daemon %p woken up.\n", rqstp);
502 /*
503 svc_thread_dequeue(pool, rqstp);
504 rqstp->rq_xprt = NULL;
505 */
506 wake_up(&rqstp->rq_wait);
507 }
508 spin_unlock_bh(&pool->sp_lock);
509 }
510}
511EXPORT_SYMBOL_GPL(svc_wake_up);
512
513int svc_port_is_privileged(struct sockaddr *sin)
514{
515 switch (sin->sa_family) {
516 case AF_INET:
517 return ntohs(((struct sockaddr_in *)sin)->sin_port)
518 < PROT_SOCK;
519 case AF_INET6:
520 return ntohs(((struct sockaddr_in6 *)sin)->sin6_port)
521 < PROT_SOCK;
522 default:
523 return 0;
524 }
525}
526
527/*
528 * Make sure that we don't have too many active connections. If we have,
529 * something must be dropped. It's not clear what will happen if we allow
530 * "too many" connections, but when dealing with network-facing software,
531 * we have to code defensively. Here we do that by imposing hard limits.
532 *
533 * There's no point in trying to do random drop here for DoS
534 * prevention. The NFS clients does 1 reconnect in 15 seconds. An
535 * attacker can easily beat that.
536 *
537 * The only somewhat efficient mechanism would be if drop old
538 * connections from the same IP first. But right now we don't even
539 * record the client IP in svc_sock.
540 *
541 * single-threaded services that expect a lot of clients will probably
542 * need to set sv_maxconn to override the default value which is based
543 * on the number of threads
544 */
545static void svc_check_conn_limits(struct svc_serv *serv)
546{
547 unsigned int limit = serv->sv_maxconn ? serv->sv_maxconn :
548 (serv->sv_nrthreads+3) * 20;
549
550 if (serv->sv_tmpcnt > limit) {
551 struct svc_xprt *xprt = NULL;
552 spin_lock_bh(&serv->sv_lock);
553 if (!list_empty(&serv->sv_tempsocks)) {
554 if (net_ratelimit()) {
555 /* Try to help the admin */
556 printk(KERN_NOTICE "%s: too many open "
557 "connections, consider increasing %s\n",
558 serv->sv_name, serv->sv_maxconn ?
559 "the max number of connections." :
560 "the number of threads.");
561 }
562			/*
563			 * Always select the oldest connection. It's not fair,
564			 * but neither is life.
565			 */
566 xprt = list_entry(serv->sv_tempsocks.prev,
567 struct svc_xprt,
568 xpt_list);
569 set_bit(XPT_CLOSE, &xprt->xpt_flags);
570 svc_xprt_get(xprt);
571 }
572 spin_unlock_bh(&serv->sv_lock);
573
574 if (xprt) {
575 svc_xprt_enqueue(xprt);
576 svc_xprt_put(xprt);
577 }
578 }
579}
580
581/*
582 * Receive the next request on any transport. This code is carefully
583 * organised not to touch any cachelines in the shared svc_serv
584 * structure, only cachelines in the local svc_pool.
585 */
586int svc_recv(struct svc_rqst *rqstp, long timeout)
587{
588 struct svc_xprt *xprt = NULL;
589 struct svc_serv *serv = rqstp->rq_server;
590 struct svc_pool *pool = rqstp->rq_pool;
591 int len, i;
592 int pages;
593 struct xdr_buf *arg;
594 DECLARE_WAITQUEUE(wait, current);
595 long time_left;
596
597 dprintk("svc: server %p waiting for data (to = %ld)\n",
598 rqstp, timeout);
599
600 if (rqstp->rq_xprt)
601 printk(KERN_ERR
602 "svc_recv: service %p, transport not NULL!\n",
603 rqstp);
604 if (waitqueue_active(&rqstp->rq_wait))
605 printk(KERN_ERR
606 "svc_recv: service %p, wait queue active!\n",
607 rqstp);
608
609	/* Now allocate the needed pages. If an allocation fails, sleep briefly. */
610 pages = (serv->sv_max_mesg + PAGE_SIZE) / PAGE_SIZE;
611 for (i = 0; i < pages ; i++)
612 while (rqstp->rq_pages[i] == NULL) {
613 struct page *p = alloc_page(GFP_KERNEL);
614 if (!p) {
615 set_current_state(TASK_INTERRUPTIBLE);
616 if (signalled() || kthread_should_stop()) {
617 set_current_state(TASK_RUNNING);
618 return -EINTR;
619 }
620 schedule_timeout(msecs_to_jiffies(500));
621 }
622 rqstp->rq_pages[i] = p;
623 }
624 rqstp->rq_pages[i++] = NULL; /* this might be seen in nfs_read_actor */
625 BUG_ON(pages >= RPCSVC_MAXPAGES);
626
627 /* Make arg->head point to first page and arg->pages point to rest */
628 arg = &rqstp->rq_arg;
629 arg->head[0].iov_base = page_address(rqstp->rq_pages[0]);
630 arg->head[0].iov_len = PAGE_SIZE;
631 arg->pages = rqstp->rq_pages + 1;
632 arg->page_base = 0;
633 /* save at least one page for response */
634 arg->page_len = (pages-2)*PAGE_SIZE;
635 arg->len = (pages-1)*PAGE_SIZE;
636 arg->tail[0].iov_len = 0;
637
638 try_to_freeze();
639 cond_resched();
640 if (signalled() || kthread_should_stop())
641 return -EINTR;
642
643 /* Normally we will wait up to 5 seconds for any required
644 * cache information to be provided.
645 */
646 rqstp->rq_chandle.thread_wait = 5*HZ;
647
648 spin_lock_bh(&pool->sp_lock);
649 xprt = svc_xprt_dequeue(pool);
650 if (xprt) {
651 rqstp->rq_xprt = xprt;
652 svc_xprt_get(xprt);
653 rqstp->rq_reserved = serv->sv_max_mesg;
654 atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved);
655
656 /* As there is a shortage of threads and this request
657 * had to be queued, don't allow the thread to wait so
658 * long for cache updates.
659 */
660 rqstp->rq_chandle.thread_wait = 1*HZ;
661 } else {
662 /* No data pending. Go to sleep */
663 svc_thread_enqueue(pool, rqstp);
664
665 /*
666 * We have to be able to interrupt this wait
667 * to bring down the daemons ...
668 */
669 set_current_state(TASK_INTERRUPTIBLE);
670
671			/*
672			 * Checking kthread_should_stop() here allows us to avoid
673			 * locking and signalling when stopping kthreads that call
674			 * svc_recv. If the thread has already been woken up, then
675			 * we can exit here without sleeping. If not, then it'll
676			 * be woken up quickly during the schedule_timeout.
677			 */
678 if (kthread_should_stop()) {
679 set_current_state(TASK_RUNNING);
680 spin_unlock_bh(&pool->sp_lock);
681 return -EINTR;
682 }
683
684 add_wait_queue(&rqstp->rq_wait, &wait);
685 spin_unlock_bh(&pool->sp_lock);
686
687 time_left = schedule_timeout(timeout);
688
689 try_to_freeze();
690
691 spin_lock_bh(&pool->sp_lock);
692 remove_wait_queue(&rqstp->rq_wait, &wait);
693 if (!time_left)
694 pool->sp_stats.threads_timedout++;
695
696 xprt = rqstp->rq_xprt;
697 if (!xprt) {
698 svc_thread_dequeue(pool, rqstp);
699 spin_unlock_bh(&pool->sp_lock);
700 dprintk("svc: server %p, no data yet\n", rqstp);
701 if (signalled() || kthread_should_stop())
702 return -EINTR;
703 else
704 return -EAGAIN;
705 }
706 }
707 spin_unlock_bh(&pool->sp_lock);
708
709 len = 0;
710 if (test_bit(XPT_CLOSE, &xprt->xpt_flags)) {
711 dprintk("svc_recv: found XPT_CLOSE\n");
712 svc_delete_xprt(xprt);
713 /* Leave XPT_BUSY set on the dead xprt: */
714 goto out;
715 }
716 if (test_bit(XPT_LISTENER, &xprt->xpt_flags)) {
717 struct svc_xprt *newxpt;
718 newxpt = xprt->xpt_ops->xpo_accept(xprt);
719 if (newxpt) {
720 /*
721 * We know this module_get will succeed because the
722 * listener holds a reference too
723 */
724 __module_get(newxpt->xpt_class->xcl_owner);
725 svc_check_conn_limits(xprt->xpt_server);
726 spin_lock_bh(&serv->sv_lock);
727 set_bit(XPT_TEMP, &newxpt->xpt_flags);
728 list_add(&newxpt->xpt_list, &serv->sv_tempsocks);
729 serv->sv_tmpcnt++;
730 if (serv->sv_temptimer.function == NULL) {
731 /* setup timer to age temp transports */
732 setup_timer(&serv->sv_temptimer,
733 svc_age_temp_xprts,
734 (unsigned long)serv);
735 mod_timer(&serv->sv_temptimer,
736 jiffies + svc_conn_age_period * HZ);
737 }
738 spin_unlock_bh(&serv->sv_lock);
739 svc_xprt_received(newxpt);
740 }
741 } else if (xprt->xpt_ops->xpo_has_wspace(xprt)) {
742 dprintk("svc: server %p, pool %u, transport %p, inuse=%d\n",
743 rqstp, pool->sp_id, xprt,
744 atomic_read(&xprt->xpt_ref.refcount));
745 rqstp->rq_deferred = svc_deferred_dequeue(xprt);
746 if (rqstp->rq_deferred)
747 len = svc_deferred_recv(rqstp);
748 else
749 len = xprt->xpt_ops->xpo_recvfrom(rqstp);
750 dprintk("svc: got len=%d\n", len);
751 }
752 svc_xprt_received(xprt);
753
754 /* No data, incomplete (TCP) read, or accept() */
755 if (len == 0 || len == -EAGAIN)
756 goto out;
757
758 clear_bit(XPT_OLD, &xprt->xpt_flags);
759
760 rqstp->rq_secure = svc_port_is_privileged(svc_addr(rqstp));
761 rqstp->rq_chandle.defer = svc_defer;
762
763 if (serv->sv_stats)
764 serv->sv_stats->netcnt++;
765 return len;
766out:
767 rqstp->rq_res.len = 0;
768 svc_xprt_release(rqstp);
769 return -EAGAIN;
770}
771EXPORT_SYMBOL_GPL(svc_recv);
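
/*
 * Usage sketch (hypothetical service thread, modelled on callers such
 * as nfsd): a server thread drives svc_recv() in a loop and hands each
 * complete request to svc_process(), which builds and sends the reply.
 * The function name and loop details below are illustrative only:
 *
 *	static int example_svc_thread(void *data)
 *	{
 *		struct svc_rqst *rqstp = data;
 *		int err;
 *
 *		while (!kthread_should_stop()) {
 *			err = svc_recv(rqstp, 60 * 60 * HZ);
 *			if (err == -EINTR)
 *				break;
 *			if (err < 0)
 *				continue;
 *			svc_process(rqstp);
 *		}
 *		return 0;
 *	}
 */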
772
773/*
774 * Drop request
775 */
776void svc_drop(struct svc_rqst *rqstp)
777{
778 dprintk("svc: xprt %p dropped request\n", rqstp->rq_xprt);
779 svc_xprt_release(rqstp);
780}
781EXPORT_SYMBOL_GPL(svc_drop);
782
783/*
784 * Return reply to client.
785 */
786int svc_send(struct svc_rqst *rqstp)
787{
788 struct svc_xprt *xprt;
789 int len;
790 struct xdr_buf *xb;
791
792 xprt = rqstp->rq_xprt;
793 if (!xprt)
794 return -EFAULT;
795
796 /* release the receive skb before sending the reply */
797 rqstp->rq_xprt->xpt_ops->xpo_release_rqst(rqstp);
798
799 /* calculate over-all length */
800 xb = &rqstp->rq_res;
801 xb->len = xb->head[0].iov_len +
802 xb->page_len +
803 xb->tail[0].iov_len;
804
805 /* Grab mutex to serialize outgoing data. */
806 mutex_lock(&xprt->xpt_mutex);
807 if (test_bit(XPT_DEAD, &xprt->xpt_flags))
808 len = -ENOTCONN;
809 else
810 len = xprt->xpt_ops->xpo_sendto(rqstp);
811 mutex_unlock(&xprt->xpt_mutex);
812 rpc_wake_up(&xprt->xpt_bc_pending);
813 svc_xprt_release(rqstp);
814
815 if (len == -ECONNREFUSED || len == -ENOTCONN || len == -EAGAIN)
816 return 0;
817 return len;
818}
819
820/*
821 * Timer function to close old temporary transports, using
822 * a mark-and-sweep algorithm.
823 */
824static void svc_age_temp_xprts(unsigned long closure)
825{
826 struct svc_serv *serv = (struct svc_serv *)closure;
827 struct svc_xprt *xprt;
828 struct list_head *le, *next;
829 LIST_HEAD(to_be_aged);
830
831 dprintk("svc_age_temp_xprts\n");
832
833 if (!spin_trylock_bh(&serv->sv_lock)) {
834 /* busy, try again 1 sec later */
835 dprintk("svc_age_temp_xprts: busy\n");
836 mod_timer(&serv->sv_temptimer, jiffies + HZ);
837 return;
838 }
839
840 list_for_each_safe(le, next, &serv->sv_tempsocks) {
841 xprt = list_entry(le, struct svc_xprt, xpt_list);
842
843 /* First time through, just mark it OLD. Second time
844 * through, close it. */
845 if (!test_and_set_bit(XPT_OLD, &xprt->xpt_flags))
846 continue;
847 if (atomic_read(&xprt->xpt_ref.refcount) > 1 ||
848 test_bit(XPT_BUSY, &xprt->xpt_flags))
849 continue;
850 svc_xprt_get(xprt);
851 list_move(le, &to_be_aged);
852 set_bit(XPT_CLOSE, &xprt->xpt_flags);
853 set_bit(XPT_DETACHED, &xprt->xpt_flags);
854 }
855 spin_unlock_bh(&serv->sv_lock);
856
857 while (!list_empty(&to_be_aged)) {
858 le = to_be_aged.next;
859		/* fiddling the xpt_list node is safe because we're XPT_DETACHED */
860 list_del_init(le);
861 xprt = list_entry(le, struct svc_xprt, xpt_list);
862
863 dprintk("queuing xprt %p for closing\n", xprt);
864
865 /* a thread will dequeue and close it soon */
866 svc_xprt_enqueue(xprt);
867 svc_xprt_put(xprt);
868 }
869
870 mod_timer(&serv->sv_temptimer, jiffies + svc_conn_age_period * HZ);
871}
872
873static void call_xpt_users(struct svc_xprt *xprt)
874{
875 struct svc_xpt_user *u;
876
877 spin_lock(&xprt->xpt_lock);
878 while (!list_empty(&xprt->xpt_users)) {
879 u = list_first_entry(&xprt->xpt_users, struct svc_xpt_user, list);
880 list_del(&u->list);
881 u->callback(u);
882 }
883 spin_unlock(&xprt->xpt_lock);
884}
885
886/*
887 * Remove a dead transport
888 */
889void svc_delete_xprt(struct svc_xprt *xprt)
890{
891 struct svc_serv *serv = xprt->xpt_server;
892 struct svc_deferred_req *dr;
893
894 /* Only do this once */
895 if (test_and_set_bit(XPT_DEAD, &xprt->xpt_flags))
896 BUG();
897
898 dprintk("svc: svc_delete_xprt(%p)\n", xprt);
899 xprt->xpt_ops->xpo_detach(xprt);
900
901 spin_lock_bh(&serv->sv_lock);
902 if (!test_and_set_bit(XPT_DETACHED, &xprt->xpt_flags))
903 list_del_init(&xprt->xpt_list);
904 /*
905 * The only time we're called while xpt_ready is still on a list
906 * is while the list itself is about to be destroyed (in
907 * svc_destroy). BUT svc_xprt_enqueue could still be attempting
908 * to add new entries to the sp_sockets list, so we can't leave
909 * a freed xprt on it.
910 */
911 list_del_init(&xprt->xpt_ready);
912 if (test_bit(XPT_TEMP, &xprt->xpt_flags))
913 serv->sv_tmpcnt--;
914 spin_unlock_bh(&serv->sv_lock);
915
916 while ((dr = svc_deferred_dequeue(xprt)) != NULL)
917 kfree(dr);
918
919 call_xpt_users(xprt);
920 svc_xprt_put(xprt);
921}
922
923void svc_close_xprt(struct svc_xprt *xprt)
924{
925 set_bit(XPT_CLOSE, &xprt->xpt_flags);
926 if (test_and_set_bit(XPT_BUSY, &xprt->xpt_flags))
927 /* someone else will have to effect the close */
928 return;
929 /*
930 * We expect svc_close_xprt() to work even when no threads are
931 * running (e.g., while configuring the server before starting
932 * any threads), so if the transport isn't busy, we delete
933	 * it ourselves:
934 */
935 svc_delete_xprt(xprt);
936}
937EXPORT_SYMBOL_GPL(svc_close_xprt);
938
939void svc_close_all(struct list_head *xprt_list)
940{
941 struct svc_xprt *xprt;
942 struct svc_xprt *tmp;
943
944 /*
945 * The server is shutting down, and no more threads are running.
946 * svc_xprt_enqueue() might still be running, but at worst it
947 * will re-add the xprt to sp_sockets, which will soon get
948 * freed. So we don't bother with any more locking, and don't
949 * leave the close to the (nonexistent) server threads:
950 */
951 list_for_each_entry_safe(xprt, tmp, xprt_list, xpt_list) {
952 set_bit(XPT_CLOSE, &xprt->xpt_flags);
953 svc_delete_xprt(xprt);
954 }
955}
956
957/*
958 * Handle defer and revisit of requests
959 */
960
961static void svc_revisit(struct cache_deferred_req *dreq, int too_many)
962{
963 struct svc_deferred_req *dr =
964 container_of(dreq, struct svc_deferred_req, handle);
965 struct svc_xprt *xprt = dr->xprt;
966
967 spin_lock(&xprt->xpt_lock);
968 set_bit(XPT_DEFERRED, &xprt->xpt_flags);
969 if (too_many || test_bit(XPT_DEAD, &xprt->xpt_flags)) {
970 spin_unlock(&xprt->xpt_lock);
971 dprintk("revisit canceled\n");
972 svc_xprt_put(xprt);
973 kfree(dr);
974 return;
975 }
976 dprintk("revisit queued\n");
977 dr->xprt = NULL;
978 list_add(&dr->handle.recent, &xprt->xpt_deferred);
979 spin_unlock(&xprt->xpt_lock);
980 svc_xprt_enqueue(xprt);
981 svc_xprt_put(xprt);
982}
983
984/*
985 * Save the request off for later processing. The request buffer looks
986 * like this:
987 *
988 * <xprt-header><rpc-header><rpc-pagelist><rpc-tail>
989 *
990 * This code can only handle requests that consist of an xprt-header
991 * and rpc-header.
992 */
993static struct cache_deferred_req *svc_defer(struct cache_req *req)
994{
995 struct svc_rqst *rqstp = container_of(req, struct svc_rqst, rq_chandle);
996 struct svc_deferred_req *dr;
997
998 if (rqstp->rq_arg.page_len || !rqstp->rq_usedeferral)
999 return NULL; /* if more than a page, give up FIXME */
1000 if (rqstp->rq_deferred) {
1001 dr = rqstp->rq_deferred;
1002 rqstp->rq_deferred = NULL;
1003 } else {
1004 size_t skip;
1005 size_t size;
1006 /* FIXME maybe discard if size too large */
1007 size = sizeof(struct svc_deferred_req) + rqstp->rq_arg.len;
1008 dr = kmalloc(size, GFP_KERNEL);
1009 if (dr == NULL)
1010 return NULL;
1011
1012 dr->handle.owner = rqstp->rq_server;
1013 dr->prot = rqstp->rq_prot;
1014 memcpy(&dr->addr, &rqstp->rq_addr, rqstp->rq_addrlen);
1015 dr->addrlen = rqstp->rq_addrlen;
1016 dr->daddr = rqstp->rq_daddr;
1017 dr->argslen = rqstp->rq_arg.len >> 2;
1018 dr->xprt_hlen = rqstp->rq_xprt_hlen;
1019
1020 /* back up head to the start of the buffer and copy */
1021 skip = rqstp->rq_arg.len - rqstp->rq_arg.head[0].iov_len;
1022 memcpy(dr->args, rqstp->rq_arg.head[0].iov_base - skip,
1023 dr->argslen << 2);
1024 }
1025 svc_xprt_get(rqstp->rq_xprt);
1026 dr->xprt = rqstp->rq_xprt;
1027 rqstp->rq_dropme = true;
1028
1029 dr->handle.revisit = svc_revisit;
1030 return &dr->handle;
1031}
1032
1033/*
1034 * recv data from a deferred request into an active one
1035 */
1036static int svc_deferred_recv(struct svc_rqst *rqstp)
1037{
1038 struct svc_deferred_req *dr = rqstp->rq_deferred;
1039
1040 /* setup iov_base past transport header */
1041 rqstp->rq_arg.head[0].iov_base = dr->args + (dr->xprt_hlen>>2);
1042 /* The iov_len does not include the transport header bytes */
1043 rqstp->rq_arg.head[0].iov_len = (dr->argslen<<2) - dr->xprt_hlen;
1044 rqstp->rq_arg.page_len = 0;
1045 /* The rq_arg.len includes the transport header bytes */
1046 rqstp->rq_arg.len = dr->argslen<<2;
1047 rqstp->rq_prot = dr->prot;
1048 memcpy(&rqstp->rq_addr, &dr->addr, dr->addrlen);
1049 rqstp->rq_addrlen = dr->addrlen;
1050 /* Save off transport header len in case we get deferred again */
1051 rqstp->rq_xprt_hlen = dr->xprt_hlen;
1052 rqstp->rq_daddr = dr->daddr;
1053 rqstp->rq_respages = rqstp->rq_pages;
1054 return (dr->argslen<<2) - dr->xprt_hlen;
1055}
1056
1057
1058static struct svc_deferred_req *svc_deferred_dequeue(struct svc_xprt *xprt)
1059{
1060 struct svc_deferred_req *dr = NULL;
1061
1062 if (!test_bit(XPT_DEFERRED, &xprt->xpt_flags))
1063 return NULL;
1064 spin_lock(&xprt->xpt_lock);
1065 if (!list_empty(&xprt->xpt_deferred)) {
1066 dr = list_entry(xprt->xpt_deferred.next,
1067 struct svc_deferred_req,
1068 handle.recent);
1069 list_del_init(&dr->handle.recent);
1070 } else
1071 clear_bit(XPT_DEFERRED, &xprt->xpt_flags);
1072 spin_unlock(&xprt->xpt_lock);
1073 return dr;
1074}
1075
1076/**
1077 * svc_find_xprt - find an RPC transport instance
1078 * @serv: pointer to svc_serv to search
1079 * @xcl_name: C string containing transport's class name
1080 * @af: Address family of transport's local address
1081 * @port: transport's IP port number
1082 *
1083 * Return the transport instance pointer for the endpoint accepting
1084 * connections/peer traffic from the specified transport class,
1085 * address family and port.
1086 *
1087 * Specifying 0 for the address family or port is effectively a
1088 * wild-card, and will result in matching the first transport in the
1089 * service's list that has a matching class name.
1090 */
1091struct svc_xprt *svc_find_xprt(struct svc_serv *serv, const char *xcl_name,
1092 const sa_family_t af, const unsigned short port)
1093{
1094 struct svc_xprt *xprt;
1095 struct svc_xprt *found = NULL;
1096
1097 /* Sanity check the args */
1098 if (serv == NULL || xcl_name == NULL)
1099 return found;
1100
1101 spin_lock_bh(&serv->sv_lock);
1102 list_for_each_entry(xprt, &serv->sv_permsocks, xpt_list) {
1103 if (strcmp(xprt->xpt_class->xcl_name, xcl_name))
1104 continue;
1105 if (af != AF_UNSPEC && af != xprt->xpt_local.ss_family)
1106 continue;
1107 if (port != 0 && port != svc_xprt_local_port(xprt))
1108 continue;
1109 found = xprt;
1110 svc_xprt_get(xprt);
1111 break;
1112 }
1113 spin_unlock_bh(&serv->sv_lock);
1114 return found;
1115}
1116EXPORT_SYMBOL_GPL(svc_find_xprt);
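
/*
 * Usage sketch (hypothetical caller): look up the TCP listener on any
 * port, and drop the reference the lookup takes when done:
 *
 *	struct svc_xprt *xprt;
 *
 *	xprt = svc_find_xprt(serv, "tcp", AF_INET, 0);
 *	if (xprt) {
 *		...
 *		svc_xprt_put(xprt);
 *	}
 *
 * A successful lookup returns with an extra reference held (taken by
 * svc_xprt_get() above), which the caller owns.
 */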
1117
1118static int svc_one_xprt_name(const struct svc_xprt *xprt,
1119 char *pos, int remaining)
1120{
1121 int len;
1122
1123 len = snprintf(pos, remaining, "%s %u\n",
1124 xprt->xpt_class->xcl_name,
1125 svc_xprt_local_port(xprt));
1126 if (len >= remaining)
1127 return -ENAMETOOLONG;
1128 return len;
1129}
1130
1131/**
1132 * svc_xprt_names - format a buffer with a list of transport names
1133 * @serv: pointer to an RPC service
1134 * @buf: pointer to a buffer to be filled in
1135 * @buflen: length of buffer to be filled in
1136 *
1137 * Fills in @buf with a string containing a list of transport names,
1138 * each name terminated with '\n'.
1139 *
1140 * Returns positive length of the filled-in string on success; otherwise
1141 * a negative errno value is returned if an error occurs.
1142 */
1143int svc_xprt_names(struct svc_serv *serv, char *buf, const int buflen)
1144{
1145 struct svc_xprt *xprt;
1146 int len, totlen;
1147 char *pos;
1148
1149 /* Sanity check args */
1150 if (!serv)
1151 return 0;
1152
1153 spin_lock_bh(&serv->sv_lock);
1154
1155 pos = buf;
1156 totlen = 0;
1157 list_for_each_entry(xprt, &serv->sv_permsocks, xpt_list) {
1158 len = svc_one_xprt_name(xprt, pos, buflen - totlen);
1159 if (len < 0) {
1160 *buf = '\0';
1161 totlen = len;
1162 }
1163 if (len <= 0)
1164 break;
1165
1166 pos += len;
1167 totlen += len;
1168 }
1169
1170 spin_unlock_bh(&serv->sv_lock);
1171 return totlen;
1172}
1173EXPORT_SYMBOL_GPL(svc_xprt_names);
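
/*
 * Example output (illustrative): for a service with TCP and UDP
 * listeners on port 2049, @buf would be filled with:
 *
 *	tcp 2049
 *	udp 2049
 *
 * matching the "%s %u\n" format used by svc_one_xprt_name() above.
 */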
1174
1175
1176/*----------------------------------------------------------------------------*/
1177
1178static void *svc_pool_stats_start(struct seq_file *m, loff_t *pos)
1179{
1180 unsigned int pidx = (unsigned int)*pos;
1181 struct svc_serv *serv = m->private;
1182
1183 dprintk("svc_pool_stats_start, *pidx=%u\n", pidx);
1184
1185 if (!pidx)
1186 return SEQ_START_TOKEN;
1187 return (pidx > serv->sv_nrpools ? NULL : &serv->sv_pools[pidx-1]);
1188}
1189
1190static void *svc_pool_stats_next(struct seq_file *m, void *p, loff_t *pos)
1191{
1192 struct svc_pool *pool = p;
1193 struct svc_serv *serv = m->private;
1194
1195 dprintk("svc_pool_stats_next, *pos=%llu\n", *pos);
1196
1197 if (p == SEQ_START_TOKEN) {
1198 pool = &serv->sv_pools[0];
1199 } else {
1200 unsigned int pidx = (pool - &serv->sv_pools[0]);
1201 if (pidx < serv->sv_nrpools-1)
1202 pool = &serv->sv_pools[pidx+1];
1203 else
1204 pool = NULL;
1205 }
1206 ++*pos;
1207 return pool;
1208}
1209
1210static void svc_pool_stats_stop(struct seq_file *m, void *p)
1211{
1212}
1213
1214static int svc_pool_stats_show(struct seq_file *m, void *p)
1215{
1216 struct svc_pool *pool = p;
1217
1218 if (p == SEQ_START_TOKEN) {
1219 seq_puts(m, "# pool packets-arrived sockets-enqueued threads-woken threads-timedout\n");
1220 return 0;
1221 }
1222
1223 seq_printf(m, "%u %lu %lu %lu %lu\n",
1224 pool->sp_id,
1225 pool->sp_stats.packets,
1226 pool->sp_stats.sockets_queued,
1227 pool->sp_stats.threads_woken,
1228 pool->sp_stats.threads_timedout);
1229
1230 return 0;
1231}
1232
1233static const struct seq_operations svc_pool_stats_seq_ops = {
1234 .start = svc_pool_stats_start,
1235 .next = svc_pool_stats_next,
1236 .stop = svc_pool_stats_stop,
1237 .show = svc_pool_stats_show,
1238};
1239
1240int svc_pool_stats_open(struct svc_serv *serv, struct file *file)
1241{
1242 int err;
1243
1244 err = seq_open(file, &svc_pool_stats_seq_ops);
1245 if (!err)
1246 ((struct seq_file *) file->private_data)->private = serv;
1247 return err;
1248}
1249EXPORT_SYMBOL(svc_pool_stats_open);
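
/*
 * Wiring sketch (hypothetical; the example_* names are illustrative): a
 * service can expose these statistics through a file by calling
 * svc_pool_stats_open() from its own open method:
 *
 *	static int example_pool_stats_open(struct inode *inode,
 *					   struct file *file)
 *	{
 *		return svc_pool_stats_open(example_serv, file);
 *	}
 *
 *	static const struct file_operations example_pool_stats_fops = {
 *		.owner   = THIS_MODULE,
 *		.open    = example_pool_stats_open,
 *		.read    = seq_read,
 *		.llseek  = seq_lseek,
 *		.release = seq_release,
 *	};
 */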
1250
1251/*----------------------------------------------------------------------------*/