v4.17
 
/* incoming call handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/errqueue.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/icmp.h>
#include <linux/gfp.h>
#include <linux/circ_buf.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include <net/ip.h>
#include "ar-internal.h"

/*
 * Preallocate a single service call, connection and peer and, if possible,
 * give them a user ID and attach the user's side of the ID to them.
 */
static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx,
				      struct rxrpc_backlog *b,
				      rxrpc_notify_rx_t notify_rx,
				      rxrpc_user_attach_call_t user_attach_call,
				      unsigned long user_call_ID, gfp_t gfp,
				      unsigned int debug_id)
{
	const void *here = __builtin_return_address(0);
	struct rxrpc_call *call;
	struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));
	int max, tmp;
	unsigned int size = RXRPC_BACKLOG_MAX;
	unsigned int head, tail, call_head, call_tail;

	max = rx->sk.sk_max_ack_backlog;
	tmp = rx->sk.sk_ack_backlog;
	if (tmp >= max) {
		_leave(" = -ENOBUFS [full %u]", max);
		return -ENOBUFS;
	}
	max -= tmp;

	/* We don't need more conns and peers than we have calls, but on the
	 * other hand, we shouldn't ever use more peers than conns or conns
	 * than calls.
	 */
	call_head = b->call_backlog_head;
	call_tail = READ_ONCE(b->call_backlog_tail);
	tmp = CIRC_CNT(call_head, call_tail, size);
	if (tmp >= max) {
		_leave(" = -ENOBUFS [enough %u]", tmp);
		return -ENOBUFS;
	}
	max = tmp + 1;

	head = b->peer_backlog_head;
	tail = READ_ONCE(b->peer_backlog_tail);
	if (CIRC_CNT(head, tail, size) < max) {
		struct rxrpc_peer *peer = rxrpc_alloc_peer(rx->local, gfp);
		if (!peer)
			return -ENOMEM;
		b->peer_backlog[head] = peer;
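		/* The store-release below pairs with the smp_load_acquire()
		 * of peer_backlog_head in rxrpc_alloc_incoming_call(): a
		 * consumer that observes the new head is guaranteed to see
		 * the peer pointer stored above.
		 */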
		smp_store_release(&b->peer_backlog_head,
				  (head + 1) & (size - 1));
	}

	head = b->conn_backlog_head;
	tail = READ_ONCE(b->conn_backlog_tail);
	if (CIRC_CNT(head, tail, size) < max) {
		struct rxrpc_connection *conn;

		conn = rxrpc_prealloc_service_connection(rxnet, gfp);
		if (!conn)
			return -ENOMEM;
		b->conn_backlog[head] = conn;
		smp_store_release(&b->conn_backlog_head,
				  (head + 1) & (size - 1));

		trace_rxrpc_conn(conn, rxrpc_conn_new_service,
				 atomic_read(&conn->usage), here);
	}

	/* Now it gets complicated, because calls get registered with the
	 * socket here, particularly if a user ID is preassigned by the user.
	 */
	call = rxrpc_alloc_call(rx, gfp, debug_id);
	if (!call)
		return -ENOMEM;
	call->flags |= (1 << RXRPC_CALL_IS_SERVICE);
	call->state = RXRPC_CALL_SERVER_PREALLOC;

	trace_rxrpc_call(call, rxrpc_call_new_service,
			 atomic_read(&call->usage),
			 here, (const void *)user_call_ID);

	write_lock(&rx->call_lock);
	if (user_attach_call) {
		struct rxrpc_call *xcall;
		struct rb_node *parent, **pp;

		/* Check the user ID isn't already in use */
		pp = &rx->calls.rb_node;
		parent = NULL;
		while (*pp) {
			parent = *pp;
			xcall = rb_entry(parent, struct rxrpc_call, sock_node);
			if (user_call_ID < xcall->user_call_ID)
				pp = &(*pp)->rb_left;
			else if (user_call_ID > xcall->user_call_ID)
				pp = &(*pp)->rb_right;
			else
				goto id_in_use;
		}

		call->user_call_ID = user_call_ID;
		call->notify_rx = notify_rx;
		rxrpc_get_call(call, rxrpc_call_got_kernel);
		user_attach_call(call, user_call_ID);
		rxrpc_get_call(call, rxrpc_call_got_userid);
		rb_link_node(&call->sock_node, parent, pp);
		rb_insert_color(&call->sock_node, &rx->calls);
		set_bit(RXRPC_CALL_HAS_USERID, &call->flags);
	}

	list_add(&call->sock_link, &rx->sock_calls);

	write_unlock(&rx->call_lock);

	rxnet = call->rxnet;
	write_lock(&rxnet->call_lock);
	list_add_tail(&call->link, &rxnet->calls);
	write_unlock(&rxnet->call_lock);

	b->call_backlog[call_head] = call;
	smp_store_release(&b->call_backlog_head, (call_head + 1) & (size - 1));
	_leave(" = 0 [%d -> %lx]", call->debug_id, user_call_ID);
	return 0;

id_in_use:
	write_unlock(&rx->call_lock);
	rxrpc_cleanup_call(call);
	_leave(" = -EBADSLT");
	return -EBADSLT;
}
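
/* The backlog rings above follow the standard <linux/circ_buf.h> pattern.
 * Below is a minimal user-space analogue (a sketch, not kernel code): the
 * size must be a power of two, indices are kept pre-masked, the producer
 * publishes a slot with a store-release on the head index, and the consumer
 * pairs that with a load-acquire, mirroring smp_store_release() and
 * smp_load_acquire() in this file.  CIRC_CNT(head, tail, size) is just
 * (head - tail) & (size - 1).
 */
#include <stdatomic.h>
#include <stddef.h>

#define RING_SIZE 32			/* must be a power of two */
#define RING_CNT(head, tail) (((head) - (tail)) & (RING_SIZE - 1))

struct ring {
	_Atomic unsigned int head;	/* only the producer advances this */
	_Atomic unsigned int tail;	/* only the consumer advances this */
	void *slot[RING_SIZE];
};

static int ring_produce(struct ring *r, void *p)
{
	unsigned int head = atomic_load_explicit(&r->head, memory_order_relaxed);
	unsigned int tail = atomic_load_explicit(&r->tail, memory_order_acquire);

	if (RING_CNT(head, tail) >= RING_SIZE - 1)
		return -1;		/* ring full */
	r->slot[head] = p;
	/* Publish the filled slot: pairs with the consumer's load-acquire. */
	atomic_store_explicit(&r->head, (head + 1) & (RING_SIZE - 1),
			      memory_order_release);
	return 0;
}

static void *ring_consume(struct ring *r)
{
	unsigned int head = atomic_load_explicit(&r->head, memory_order_acquire);
	unsigned int tail = atomic_load_explicit(&r->tail, memory_order_relaxed);
	void *p;

	if (RING_CNT(head, tail) == 0)
		return NULL;		/* ring empty */
	p = r->slot[tail];
	atomic_store_explicit(&r->tail, (tail + 1) & (RING_SIZE - 1),
			      memory_order_release);
	return p;
}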

/*
 * Preallocate sufficient service connections, calls and peers to cover the
 * entire backlog of a socket.  When a new call comes in, if we don't have
 * enough of each available, the call gets rejected as busy or ignored.
 *
 * The backlog is charged when the socket begins listening and replenished
 * when a call is accepted or rejected.
 */
int rxrpc_service_prealloc(struct rxrpc_sock *rx, gfp_t gfp)
{
	struct rxrpc_backlog *b = rx->backlog;

	if (!b) {
		b = kzalloc(sizeof(struct rxrpc_backlog), gfp);
		if (!b)
			return -ENOMEM;
		rx->backlog = b;
	}

	if (rx->discard_new_call)
		return 0;

	while (rxrpc_service_prealloc_one(rx, b, NULL, NULL, 0, gfp,
					  atomic_inc_return(&rxrpc_debug_id)) == 0)
		;

	return 0;
}

/*
 * Discard the preallocation on a service.
 */
void rxrpc_discard_prealloc(struct rxrpc_sock *rx)
{
	struct rxrpc_backlog *b = rx->backlog;
	struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));
	unsigned int size = RXRPC_BACKLOG_MAX, head, tail;

	if (!b)
		return;
	rx->backlog = NULL;

	/* Make sure that there aren't any incoming calls in progress before we
	 * clear the preallocation buffers.
	 */
	spin_lock_bh(&rx->incoming_lock);
	spin_unlock_bh(&rx->incoming_lock);
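	/* The empty lock/unlock pair above acts as a barrier: any CPU still
	 * inside the incoming_lock section of rxrpc_new_incoming_call() has
	 * left it by the time we proceed, so the backlog is no longer being
	 * consumed.
	 */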

	head = b->peer_backlog_head;
	tail = b->peer_backlog_tail;
	while (CIRC_CNT(head, tail, size) > 0) {
		struct rxrpc_peer *peer = b->peer_backlog[tail];
		kfree(peer);
		tail = (tail + 1) & (size - 1);
	}

	head = b->conn_backlog_head;
	tail = b->conn_backlog_tail;
	while (CIRC_CNT(head, tail, size) > 0) {
		struct rxrpc_connection *conn = b->conn_backlog[tail];
		write_lock(&rxnet->conn_lock);
		list_del(&conn->link);
		list_del(&conn->proc_link);
		write_unlock(&rxnet->conn_lock);
		kfree(conn);
		if (atomic_dec_and_test(&rxnet->nr_conns))
			wake_up_var(&rxnet->nr_conns);
		tail = (tail + 1) & (size - 1);
	}

	head = b->call_backlog_head;
	tail = b->call_backlog_tail;
	while (CIRC_CNT(head, tail, size) > 0) {
		struct rxrpc_call *call = b->call_backlog[tail];
		rcu_assign_pointer(call->socket, rx);
		if (rx->discard_new_call) {
			_debug("discard %lx", call->user_call_ID);
			rx->discard_new_call(call, call->user_call_ID);
			rxrpc_put_call(call, rxrpc_call_put_kernel);
		}
		rxrpc_call_completed(call);
		rxrpc_release_call(rx, call);
		rxrpc_put_call(call, rxrpc_call_put);
		tail = (tail + 1) & (size - 1);
	}

	kfree(b);
}

/*
 * Allocate a new incoming call from the prealloc pool, along with a connection
 * and a peer as necessary.
 */
static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
						    struct rxrpc_local *local,
						    struct rxrpc_connection *conn,
						    struct sk_buff *skb)
{
	struct rxrpc_backlog *b = rx->backlog;
	struct rxrpc_peer *peer, *xpeer;
	struct rxrpc_call *call;
	unsigned short call_head, conn_head, peer_head;
	unsigned short call_tail, conn_tail, peer_tail;
	unsigned short call_count, conn_count;

	/* #calls >= #conns >= #peers must hold true. */
	call_head = smp_load_acquire(&b->call_backlog_head);
	call_tail = b->call_backlog_tail;
	call_count = CIRC_CNT(call_head, call_tail, RXRPC_BACKLOG_MAX);
	conn_head = smp_load_acquire(&b->conn_backlog_head);
	conn_tail = b->conn_backlog_tail;
	conn_count = CIRC_CNT(conn_head, conn_tail, RXRPC_BACKLOG_MAX);
	ASSERTCMP(conn_count, >=, call_count);
	peer_head = smp_load_acquire(&b->peer_backlog_head);
	peer_tail = b->peer_backlog_tail;
	ASSERTCMP(CIRC_CNT(peer_head, peer_tail, RXRPC_BACKLOG_MAX), >=,
		  conn_count);

	if (call_count == 0)
		return NULL;

	if (!conn) {
		/* No connection.  We're going to need a peer to start off
		 * with.  If one doesn't yet exist, use a spare from the
		 * preallocation set.  We dump the address into the spare in
		 * anticipation - and to save on stack space.
		 */
		xpeer = b->peer_backlog[peer_tail];
		if (rxrpc_extract_addr_from_skb(local, &xpeer->srx, skb) < 0)
			return NULL;

		peer = rxrpc_lookup_incoming_peer(local, xpeer);
		if (peer == xpeer) {
			b->peer_backlog[peer_tail] = NULL;
			smp_store_release(&b->peer_backlog_tail,
					  (peer_tail + 1) &
					  (RXRPC_BACKLOG_MAX - 1));
		}

		/* Now allocate and set up the connection */
		conn = b->conn_backlog[conn_tail];
		b->conn_backlog[conn_tail] = NULL;
		smp_store_release(&b->conn_backlog_tail,
				  (conn_tail + 1) & (RXRPC_BACKLOG_MAX - 1));
		conn->params.local = rxrpc_get_local(local);
		conn->params.peer = peer;
		rxrpc_see_connection(conn);
		rxrpc_new_incoming_connection(rx, conn, skb);
	} else {
		rxrpc_get_connection(conn);
	}

	/* And now we can allocate and set up a new call */
	call = b->call_backlog[call_tail];
	b->call_backlog[call_tail] = NULL;
	smp_store_release(&b->call_backlog_tail,
			  (call_tail + 1) & (RXRPC_BACKLOG_MAX - 1));

	rxrpc_see_call(call);
	call->conn = conn;
	call->peer = rxrpc_get_peer(conn->params.peer);
	call->cong_cwnd = call->peer->cong_cwnd;
	return call;
}

/*
 * Set up a new incoming call.  Called in BH context with the RCU read lock
 * held.
 *
 * If this is for a kernel service, when we allocate the call, it will have
 * three refs on it: (1) the kernel service, (2) the user_call_ID tree, (3) the
 * retainer ref obtained from the backlog buffer.  Prealloc calls for userspace
 * services only have the ref from the backlog buffer.  We want to pass this
 * ref to non-BH context to dispose of.
 *
 * If we want to report an error, we mark the skb with the packet type and
 * abort code and return NULL.
 *
 * The call is returned with the user access mutex held.
 */
struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
					   struct rxrpc_connection *conn,
					   struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	struct rxrpc_sock *rx;
	struct rxrpc_call *call;
	u16 service_id = sp->hdr.serviceId;

	_enter("");

	/* Get the socket providing the service */
	rx = rcu_dereference(local->service);
	if (rx && (service_id == rx->srx.srx_service ||
		   service_id == rx->second_service))
		goto found_service;

	trace_rxrpc_abort(0, "INV", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
			  RX_INVALID_OPERATION, EOPNOTSUPP);
	skb->mark = RXRPC_SKB_MARK_LOCAL_ABORT;
	skb->priority = RX_INVALID_OPERATION;
	_leave(" = NULL [service]");
	return NULL;

found_service:
	spin_lock(&rx->incoming_lock);
	if (rx->sk.sk_state == RXRPC_SERVER_LISTEN_DISABLED ||
	    rx->sk.sk_state == RXRPC_CLOSE) {
		trace_rxrpc_abort(0, "CLS", sp->hdr.cid, sp->hdr.callNumber,
				  sp->hdr.seq, RX_INVALID_OPERATION, ESHUTDOWN);
		skb->mark = RXRPC_SKB_MARK_LOCAL_ABORT;
		skb->priority = RX_INVALID_OPERATION;
		_leave(" = NULL [close]");
		call = NULL;
		goto out;
	}

	call = rxrpc_alloc_incoming_call(rx, local, conn, skb);
	if (!call) {
		skb->mark = RXRPC_SKB_MARK_BUSY;
		_leave(" = NULL [busy]");
		call = NULL;
		goto out;
	}

	trace_rxrpc_receive(call, rxrpc_receive_incoming,
			    sp->hdr.serial, sp->hdr.seq);

	/* Lock the call to prevent rxrpc_kernel_send/recv_data() and
	 * sendmsg()/recvmsg() inconveniently stealing the mutex once the
	 * notification is generated.
	 *
	 * The BUG should never happen because the kernel should be well
	 * behaved enough not to access the call before the first notification
	 * event and userspace is prevented from doing so until the state is
	 * appropriate.
	 */
	if (!mutex_trylock(&call->user_mutex))
		BUG();

	/* Make the call live. */
	rxrpc_incoming_call(rx, call, skb);
	conn = call->conn;

	if (rx->notify_new_call)
		rx->notify_new_call(&rx->sk, call, call->user_call_ID);
	else
		sk_acceptq_added(&rx->sk);

	spin_lock(&conn->state_lock);
	switch (conn->state) {
	case RXRPC_CONN_SERVICE_UNSECURED:
		conn->state = RXRPC_CONN_SERVICE_CHALLENGING;
		set_bit(RXRPC_CONN_EV_CHALLENGE, &call->conn->events);
		rxrpc_queue_conn(call->conn);
		break;

	case RXRPC_CONN_SERVICE:
		write_lock(&call->state_lock);
		if (rx->discard_new_call)
			call->state = RXRPC_CALL_SERVER_RECV_REQUEST;
		else
			call->state = RXRPC_CALL_SERVER_ACCEPTING;
		write_unlock(&call->state_lock);
		break;

	case RXRPC_CONN_REMOTELY_ABORTED:
		rxrpc_set_call_completion(call, RXRPC_CALL_REMOTELY_ABORTED,
					  conn->remote_abort, -ECONNABORTED);
		break;
	case RXRPC_CONN_LOCALLY_ABORTED:
		rxrpc_abort_call("CON", call, sp->hdr.seq,
				 conn->local_abort, -ECONNABORTED);
		break;
	default:
		BUG();
	}
	spin_unlock(&conn->state_lock);

	if (call->state == RXRPC_CALL_SERVER_ACCEPTING)
		rxrpc_notify_socket(call);

	/* We have to discard the prealloc queue's ref here and rely on a
	 * combination of the RCU read lock and refs held either by the socket
	 * (recvmsg queue, to-be-accepted queue or user ID tree) or the kernel
	 * service to prevent the call from being deallocated too early.
	 */
	rxrpc_put_call(call, rxrpc_call_put);

	_leave(" = %p{%d}", call, call->debug_id);
out:
	spin_unlock(&rx->incoming_lock);
	return call;
}

/*
 * Handle acceptance of a call by userspace
 * - assign the user call ID to the call at the front of the queue
 * - called with the socket locked.
 */
struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *rx,
				     unsigned long user_call_ID,
				     rxrpc_notify_rx_t notify_rx)
	__releases(&rx->sk.sk_lock.slock)
	__acquires(call->user_mutex)
{
	struct rxrpc_call *call;
	struct rb_node *parent, **pp;
	int ret;

	_enter(",%lx", user_call_ID);

	ASSERT(!irqs_disabled());

	write_lock(&rx->call_lock);

	if (list_empty(&rx->to_be_accepted)) {
		write_unlock(&rx->call_lock);
		release_sock(&rx->sk);
		_leave(" = -ENODATA [empty]");
		return ERR_PTR(-ENODATA);
	}

	/* Check the user ID isn't already in use */
	pp = &rx->calls.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		call = rb_entry(parent, struct rxrpc_call, sock_node);

		if (user_call_ID < call->user_call_ID)
			pp = &(*pp)->rb_left;
		else if (user_call_ID > call->user_call_ID)
			pp = &(*pp)->rb_right;
		else
			goto id_in_use;
	}

	/* Dequeue the first call and check it's still valid.  We gain
	 * responsibility for the queue's reference.
	 */
	call = list_entry(rx->to_be_accepted.next,
			  struct rxrpc_call, accept_link);
	write_unlock(&rx->call_lock);

	/* We need to gain the mutex from the interrupt handler without
	 * upsetting lockdep, so we have to release it there and take it here.
	 * We are, however, still holding the socket lock, so other accepts
	 * must wait for us and no one can add the user ID behind our backs.
	 */
	if (mutex_lock_interruptible(&call->user_mutex) < 0) {
		release_sock(&rx->sk);
		_leave(" = -ERESTARTSYS");
		return ERR_PTR(-ERESTARTSYS);
	}

	write_lock(&rx->call_lock);
	list_del_init(&call->accept_link);
	sk_acceptq_removed(&rx->sk);
	rxrpc_see_call(call);

	/* Find the user ID insertion point. */
	pp = &rx->calls.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		call = rb_entry(parent, struct rxrpc_call, sock_node);

		if (user_call_ID < call->user_call_ID)
			pp = &(*pp)->rb_left;
		else if (user_call_ID > call->user_call_ID)
			pp = &(*pp)->rb_right;
		else
			BUG();
	}

	write_lock_bh(&call->state_lock);
	switch (call->state) {
	case RXRPC_CALL_SERVER_ACCEPTING:
		call->state = RXRPC_CALL_SERVER_RECV_REQUEST;
		break;
	case RXRPC_CALL_COMPLETE:
		ret = call->error;
		goto out_release;
	default:
		BUG();
	}

	/* formalise the acceptance */
	call->notify_rx = notify_rx;
	call->user_call_ID = user_call_ID;
	rxrpc_get_call(call, rxrpc_call_got_userid);
	rb_link_node(&call->sock_node, parent, pp);
	rb_insert_color(&call->sock_node, &rx->calls);
	if (test_and_set_bit(RXRPC_CALL_HAS_USERID, &call->flags))
		BUG();

	write_unlock_bh(&call->state_lock);
	write_unlock(&rx->call_lock);
	rxrpc_notify_socket(call);
	rxrpc_service_prealloc(rx, GFP_KERNEL);
	release_sock(&rx->sk);
	_leave(" = %p{%d}", call, call->debug_id);
	return call;

out_release:
	_debug("release %p", call);
	write_unlock_bh(&call->state_lock);
	write_unlock(&rx->call_lock);
	rxrpc_release_call(rx, call);
	rxrpc_put_call(call, rxrpc_call_put);
	goto out;

id_in_use:
	ret = -EBADSLT;
	write_unlock(&rx->call_lock);
out:
	rxrpc_service_prealloc(rx, GFP_KERNEL);
	release_sock(&rx->sk);
	_leave(" = %d", ret);
	return ERR_PTR(ret);
}
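
/* Sketch of the userspace side of the accept path above, assuming the
 * AF_RXRPC definitions (SOL_RXRPC, RXRPC_ACCEPT, RXRPC_USER_CALL_ID) from
 * <linux/rxrpc.h>: a zero-length RXRPC_ACCEPT control message plus an
 * RXRPC_USER_CALL_ID cmsg accepts the call at the front of the queue and
 * tags it with the given ID.  Error handling is omitted.
 */
#include <string.h>
#include <sys/socket.h>
#include <linux/rxrpc.h>

static void accept_front_call(int fd, unsigned long user_call_ID)
{
	union {
		char buf[CMSG_SPACE(sizeof(unsigned long)) + CMSG_SPACE(0)];
		struct cmsghdr align;
	} u;
	struct msghdr msg;
	struct cmsghdr *cmsg;

	memset(&u, 0, sizeof(u));
	memset(&msg, 0, sizeof(msg));
	msg.msg_control = u.buf;
	msg.msg_controllen = sizeof(u.buf);

	/* The user call ID to attach to the accepted call. */
	cmsg = CMSG_FIRSTHDR(&msg);
	cmsg->cmsg_level = SOL_RXRPC;
	cmsg->cmsg_type = RXRPC_USER_CALL_ID;
	cmsg->cmsg_len = CMSG_LEN(sizeof(user_call_ID));
	memcpy(CMSG_DATA(cmsg), &user_call_ID, sizeof(user_call_ID));

	/* A zero-length RXRPC_ACCEPT cmsg turns this sendmsg() into an
	 * accept operation.
	 */
	cmsg = CMSG_NXTHDR(&msg, cmsg);
	cmsg->cmsg_level = SOL_RXRPC;
	cmsg->cmsg_type = RXRPC_ACCEPT;
	cmsg->cmsg_len = CMSG_LEN(0);

	sendmsg(fd, &msg, 0);
}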

/*
 * Handle rejection of a call by userspace
 * - reject the call at the front of the queue
 */
int rxrpc_reject_call(struct rxrpc_sock *rx)
{
	struct rxrpc_call *call;
	bool abort = false;
	int ret;

	_enter("");

	ASSERT(!irqs_disabled());

	write_lock(&rx->call_lock);

	if (list_empty(&rx->to_be_accepted)) {
		write_unlock(&rx->call_lock);
		return -ENODATA;
	}

	/* Dequeue the first call and check it's still valid.  We gain
	 * responsibility for the queue's reference.
	 */
	call = list_entry(rx->to_be_accepted.next,
			  struct rxrpc_call, accept_link);
	list_del_init(&call->accept_link);
	sk_acceptq_removed(&rx->sk);
	rxrpc_see_call(call);

	write_lock_bh(&call->state_lock);
	switch (call->state) {
	case RXRPC_CALL_SERVER_ACCEPTING:
		__rxrpc_abort_call("REJ", call, 1, RX_USER_ABORT, -ECONNABORTED);
		abort = true;
		/* fall through */
	case RXRPC_CALL_COMPLETE:
		ret = call->error;
		goto out_discard;
	default:
		BUG();
	}

out_discard:
	write_unlock_bh(&call->state_lock);
	write_unlock(&rx->call_lock);
	if (abort) {
		rxrpc_send_abort_packet(call);
		rxrpc_release_call(rx, call);
		rxrpc_put_call(call, rxrpc_call_put);
	}
	rxrpc_service_prealloc(rx, GFP_KERNEL);
	_leave(" = %d", ret);
	return ret;
}

/*
 * rxrpc_kernel_charge_accept - Charge up socket with preallocated calls
 * @sock: The socket on which to preallocate
 * @notify_rx: Event notification function for the call
 * @user_attach_call: Func to attach call to user_call_ID
 * @user_call_ID: The tag to attach to the preallocated call
 * @gfp: The allocation conditions.
 * @debug_id: The tracing debug ID.
 *
 * Charge up the socket with preallocated calls, each with a user ID.  A
 * function should be provided to effect the attachment from the user's side.
 * The user is given a ref to hold on the call.
 *
 * Note that the call may become connected before this function returns.
 */
int rxrpc_kernel_charge_accept(struct socket *sock,
			       rxrpc_notify_rx_t notify_rx,
			       rxrpc_user_attach_call_t user_attach_call,
			       unsigned long user_call_ID, gfp_t gfp,
			       unsigned int debug_id)
{
	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
	struct rxrpc_backlog *b = rx->backlog;

	if (sock->sk->sk_state == RXRPC_CLOSE)
		return -ESHUTDOWN;

	return rxrpc_service_prealloc_one(rx, b, notify_rx,
					  user_attach_call, user_call_ID,
					  gfp, debug_id);
}
EXPORT_SYMBOL(rxrpc_kernel_charge_accept);
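
/* A sketch of how a kernel service might use rxrpc_kernel_charge_accept()
 * (the my_* names are hypothetical, not part of this API): the service tags
 * each preallocated call with an ID it can map back to its own state when
 * the new-call notification fires.
 */
static atomic_t my_debug_ids;

static void my_notify_rx(struct sock *sk, struct rxrpc_call *call,
			 unsigned long user_call_ID)
{
	/* Queue work to recvmsg the call's data in process context. */
}

static void my_attach_call(struct rxrpc_call *call, unsigned long user_call_ID)
{
	/* Record the call in service-private state; the service now owns
	 * the ref that was taken for it.
	 */
}

static int my_charge_backlog(struct socket *sock, unsigned long slot_id)
{
	return rxrpc_kernel_charge_accept(sock, my_notify_rx, my_attach_call,
					  slot_id, GFP_KERNEL,
					  atomic_inc_return(&my_debug_ids));
}
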
v5.9
// SPDX-License-Identifier: GPL-2.0-or-later
/* incoming call handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/errqueue.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/icmp.h>
#include <linux/gfp.h>
#include <linux/circ_buf.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include <net/ip.h>
#include "ar-internal.h"

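/* No-op notification handler: substituted for a kernel service's notify_rx
 * when its preallocated calls are discarded, so that a notification racing
 * with the teardown goes nowhere.
 */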
static void rxrpc_dummy_notify(struct sock *sk, struct rxrpc_call *call,
			       unsigned long user_call_ID)
{
}

/*
 * Preallocate a single service call, connection and peer and, if possible,
 * give them a user ID and attach the user's side of the ID to them.
 */
static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx,
				      struct rxrpc_backlog *b,
				      rxrpc_notify_rx_t notify_rx,
				      rxrpc_user_attach_call_t user_attach_call,
				      unsigned long user_call_ID, gfp_t gfp,
				      unsigned int debug_id)
{
	const void *here = __builtin_return_address(0);
	struct rxrpc_call *call, *xcall;
	struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));
	struct rb_node *parent, **pp;
	int max, tmp;
	unsigned int size = RXRPC_BACKLOG_MAX;
	unsigned int head, tail, call_head, call_tail;

	max = rx->sk.sk_max_ack_backlog;
	tmp = rx->sk.sk_ack_backlog;
	if (tmp >= max) {
		_leave(" = -ENOBUFS [full %u]", max);
		return -ENOBUFS;
	}
	max -= tmp;

	/* We don't need more conns and peers than we have calls, but on the
	 * other hand, we shouldn't ever use more peers than conns or conns
	 * than calls.
	 */
	call_head = b->call_backlog_head;
	call_tail = READ_ONCE(b->call_backlog_tail);
	tmp = CIRC_CNT(call_head, call_tail, size);
	if (tmp >= max) {
		_leave(" = -ENOBUFS [enough %u]", tmp);
		return -ENOBUFS;
	}
	max = tmp + 1;

	head = b->peer_backlog_head;
	tail = READ_ONCE(b->peer_backlog_tail);
	if (CIRC_CNT(head, tail, size) < max) {
		struct rxrpc_peer *peer = rxrpc_alloc_peer(rx->local, gfp);
		if (!peer)
			return -ENOMEM;
		b->peer_backlog[head] = peer;
		smp_store_release(&b->peer_backlog_head,
				  (head + 1) & (size - 1));
	}

	head = b->conn_backlog_head;
	tail = READ_ONCE(b->conn_backlog_tail);
	if (CIRC_CNT(head, tail, size) < max) {
		struct rxrpc_connection *conn;

		conn = rxrpc_prealloc_service_connection(rxnet, gfp);
		if (!conn)
			return -ENOMEM;
		b->conn_backlog[head] = conn;
		smp_store_release(&b->conn_backlog_head,
				  (head + 1) & (size - 1));

		trace_rxrpc_conn(conn->debug_id, rxrpc_conn_new_service,
				 atomic_read(&conn->usage), here);
	}

	/* Now it gets complicated, because calls get registered with the
	 * socket here, with a user ID preassigned by the user.
	 */
	call = rxrpc_alloc_call(rx, gfp, debug_id);
	if (!call)
		return -ENOMEM;
	call->flags |= (1 << RXRPC_CALL_IS_SERVICE);
	call->state = RXRPC_CALL_SERVER_PREALLOC;

	trace_rxrpc_call(call->debug_id, rxrpc_call_new_service,
			 atomic_read(&call->usage),
			 here, (const void *)user_call_ID);

	write_lock(&rx->call_lock);

	/* Check the user ID isn't already in use */
	pp = &rx->calls.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		xcall = rb_entry(parent, struct rxrpc_call, sock_node);
		if (user_call_ID < xcall->user_call_ID)
			pp = &(*pp)->rb_left;
		else if (user_call_ID > xcall->user_call_ID)
			pp = &(*pp)->rb_right;
		else
			goto id_in_use;
	}

	call->user_call_ID = user_call_ID;
	call->notify_rx = notify_rx;
	if (user_attach_call) {
		rxrpc_get_call(call, rxrpc_call_got_kernel);
		user_attach_call(call, user_call_ID);
	}

	rxrpc_get_call(call, rxrpc_call_got_userid);
	rb_link_node(&call->sock_node, parent, pp);
	rb_insert_color(&call->sock_node, &rx->calls);
	set_bit(RXRPC_CALL_HAS_USERID, &call->flags);

	list_add(&call->sock_link, &rx->sock_calls);

	write_unlock(&rx->call_lock);

	rxnet = call->rxnet;
	write_lock(&rxnet->call_lock);
	list_add_tail(&call->link, &rxnet->calls);
	write_unlock(&rxnet->call_lock);

	b->call_backlog[call_head] = call;
	smp_store_release(&b->call_backlog_head, (call_head + 1) & (size - 1));
	_leave(" = 0 [%d -> %lx]", call->debug_id, user_call_ID);
	return 0;

id_in_use:
	write_unlock(&rx->call_lock);
	rxrpc_cleanup_call(call);
	_leave(" = -EBADSLT");
	return -EBADSLT;
}

/*
 * Allocate the preallocation buffers for incoming service calls.  These must
 * be charged manually.
 */
int rxrpc_service_prealloc(struct rxrpc_sock *rx, gfp_t gfp)
{
	struct rxrpc_backlog *b = rx->backlog;

	if (!b) {
		b = kzalloc(sizeof(struct rxrpc_backlog), gfp);
		if (!b)
			return -ENOMEM;
		rx->backlog = b;
	}

	return 0;
}

/*
 * Discard the preallocation on a service.
 */
void rxrpc_discard_prealloc(struct rxrpc_sock *rx)
{
	struct rxrpc_backlog *b = rx->backlog;
	struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));
	unsigned int size = RXRPC_BACKLOG_MAX, head, tail;

	if (!b)
		return;
	rx->backlog = NULL;

	/* Make sure that there aren't any incoming calls in progress before we
	 * clear the preallocation buffers.
	 */
	spin_lock_bh(&rx->incoming_lock);
	spin_unlock_bh(&rx->incoming_lock);

	head = b->peer_backlog_head;
	tail = b->peer_backlog_tail;
	while (CIRC_CNT(head, tail, size) > 0) {
		struct rxrpc_peer *peer = b->peer_backlog[tail];
		kfree(peer);
		tail = (tail + 1) & (size - 1);
	}

	head = b->conn_backlog_head;
	tail = b->conn_backlog_tail;
	while (CIRC_CNT(head, tail, size) > 0) {
		struct rxrpc_connection *conn = b->conn_backlog[tail];
		write_lock(&rxnet->conn_lock);
		list_del(&conn->link);
		list_del(&conn->proc_link);
		write_unlock(&rxnet->conn_lock);
		kfree(conn);
		if (atomic_dec_and_test(&rxnet->nr_conns))
			wake_up_var(&rxnet->nr_conns);
		tail = (tail + 1) & (size - 1);
	}

	head = b->call_backlog_head;
	tail = b->call_backlog_tail;
	while (CIRC_CNT(head, tail, size) > 0) {
		struct rxrpc_call *call = b->call_backlog[tail];
		rcu_assign_pointer(call->socket, rx);
		if (rx->discard_new_call) {
			_debug("discard %lx", call->user_call_ID);
			rx->discard_new_call(call, call->user_call_ID);
			if (call->notify_rx)
				call->notify_rx = rxrpc_dummy_notify;
			rxrpc_put_call(call, rxrpc_call_put_kernel);
		}
		rxrpc_call_completed(call);
		rxrpc_release_call(rx, call);
		rxrpc_put_call(call, rxrpc_call_put);
		tail = (tail + 1) & (size - 1);
	}

	kfree(b);
}

/*
 * Ping the other end to fill our RTT cache and to retrieve the rwind
 * and MTU parameters.
 */
static void rxrpc_send_ping(struct rxrpc_call *call, struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	ktime_t now = skb->tstamp;

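	/* Only probe if we have fewer than three RTT samples or the last
	 * probe was sent more than a second before this packet arrived.
	 */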
	if (call->peer->rtt_count < 3 ||
	    ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000), now))
		rxrpc_propose_ACK(call, RXRPC_ACK_PING, sp->hdr.serial,
				  true, true,
				  rxrpc_propose_ack_ping_for_params);
}

/*
 * Allocate a new incoming call from the prealloc pool, along with a connection
 * and a peer as necessary.
 */
static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
						    struct rxrpc_local *local,
						    struct rxrpc_peer *peer,
						    struct rxrpc_connection *conn,
						    const struct rxrpc_security *sec,
						    struct key *key,
						    struct sk_buff *skb)
{
	struct rxrpc_backlog *b = rx->backlog;
	struct rxrpc_call *call;
	unsigned short call_head, conn_head, peer_head;
	unsigned short call_tail, conn_tail, peer_tail;
	unsigned short call_count, conn_count;

	/* #calls >= #conns >= #peers must hold true. */
	call_head = smp_load_acquire(&b->call_backlog_head);
	call_tail = b->call_backlog_tail;
	call_count = CIRC_CNT(call_head, call_tail, RXRPC_BACKLOG_MAX);
	conn_head = smp_load_acquire(&b->conn_backlog_head);
	conn_tail = b->conn_backlog_tail;
	conn_count = CIRC_CNT(conn_head, conn_tail, RXRPC_BACKLOG_MAX);
	ASSERTCMP(conn_count, >=, call_count);
	peer_head = smp_load_acquire(&b->peer_backlog_head);
	peer_tail = b->peer_backlog_tail;
	ASSERTCMP(CIRC_CNT(peer_head, peer_tail, RXRPC_BACKLOG_MAX), >=,
		  conn_count);
286	if (call_count == 0)
287		return NULL;
288
289	if (!conn) {
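		/* If the caller's routing recheck found a peer, try to take a
		 * ref on it; a failed try-get means the peer is already being
		 * torn down, so fall back to a preallocated one.
		 */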
		if (peer && !rxrpc_get_peer_maybe(peer))
			peer = NULL;
		if (!peer) {
			peer = b->peer_backlog[peer_tail];
			if (rxrpc_extract_addr_from_skb(&peer->srx, skb) < 0)
				return NULL;
			b->peer_backlog[peer_tail] = NULL;
			smp_store_release(&b->peer_backlog_tail,
					  (peer_tail + 1) &
					  (RXRPC_BACKLOG_MAX - 1));

			rxrpc_new_incoming_peer(rx, local, peer);
		}

		/* Now allocate and set up the connection */
		conn = b->conn_backlog[conn_tail];
		b->conn_backlog[conn_tail] = NULL;
		smp_store_release(&b->conn_backlog_tail,
				  (conn_tail + 1) & (RXRPC_BACKLOG_MAX - 1));
		conn->params.local = rxrpc_get_local(local);
		conn->params.peer = peer;
		rxrpc_see_connection(conn);
		rxrpc_new_incoming_connection(rx, conn, sec, key, skb);
	} else {
		rxrpc_get_connection(conn);
	}

	/* And now we can allocate and set up a new call */
	call = b->call_backlog[call_tail];
	b->call_backlog[call_tail] = NULL;
	smp_store_release(&b->call_backlog_tail,
			  (call_tail + 1) & (RXRPC_BACKLOG_MAX - 1));

	rxrpc_see_call(call);
	call->conn = conn;
	call->security = conn->security;
	call->security_ix = conn->security_ix;
	call->peer = rxrpc_get_peer(conn->params.peer);
	call->cong_cwnd = call->peer->cong_cwnd;
	return call;
}

/*
 * Set up a new incoming call.  Called in BH context with the RCU read lock
 * held.
 *
 * If this is for a kernel service, when we allocate the call, it will have
 * three refs on it: (1) the kernel service, (2) the user_call_ID tree, (3) the
 * retainer ref obtained from the backlog buffer.  Prealloc calls for userspace
 * services only have the ref from the backlog buffer.  We want to pass this
 * ref to non-BH context to dispose of.
 *
 * If we want to report an error, we mark the skb with the packet type and
 * abort code and return NULL.
 *
 * The call is returned with the user access mutex held.
 */
struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
					   struct rxrpc_sock *rx,
					   struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	const struct rxrpc_security *sec = NULL;
	struct rxrpc_connection *conn;
	struct rxrpc_peer *peer = NULL;
	struct rxrpc_call *call = NULL;
	struct key *key = NULL;

	_enter("");

	spin_lock(&rx->incoming_lock);
	if (rx->sk.sk_state == RXRPC_SERVER_LISTEN_DISABLED ||
	    rx->sk.sk_state == RXRPC_CLOSE) {
		trace_rxrpc_abort(0, "CLS", sp->hdr.cid, sp->hdr.callNumber,
				  sp->hdr.seq, RX_INVALID_OPERATION, ESHUTDOWN);
		skb->mark = RXRPC_SKB_MARK_REJECT_ABORT;
		skb->priority = RX_INVALID_OPERATION;
		goto no_call;
	}

	/* The peer, connection and call may all have sprung into existence due
	 * to a duplicate packet being handled on another CPU in parallel, so
	 * we have to recheck the routing.  However, we're now holding
	 * rx->incoming_lock, so the values should remain stable.
	 */
	conn = rxrpc_find_connection_rcu(local, skb, &peer);

	if (!conn && !rxrpc_look_up_server_security(local, rx, &sec, &key, skb))
		goto no_call;

	call = rxrpc_alloc_incoming_call(rx, local, peer, conn, sec, key, skb);
	key_put(key);
	if (!call) {
		skb->mark = RXRPC_SKB_MARK_REJECT_BUSY;
		goto no_call;
	}

	trace_rxrpc_receive(call, rxrpc_receive_incoming,
			    sp->hdr.serial, sp->hdr.seq);

	/* Make the call live. */
	rxrpc_incoming_call(rx, call, skb);
	conn = call->conn;

	if (rx->notify_new_call)
		rx->notify_new_call(&rx->sk, call, call->user_call_ID);

	spin_lock(&conn->state_lock);
	switch (conn->state) {
	case RXRPC_CONN_SERVICE_UNSECURED:
		conn->state = RXRPC_CONN_SERVICE_CHALLENGING;
		set_bit(RXRPC_CONN_EV_CHALLENGE, &call->conn->events);
		rxrpc_queue_conn(call->conn);
		break;

	case RXRPC_CONN_SERVICE:
		write_lock(&call->state_lock);
		if (call->state < RXRPC_CALL_COMPLETE)
			call->state = RXRPC_CALL_SERVER_RECV_REQUEST;
		write_unlock(&call->state_lock);
		break;

	case RXRPC_CONN_REMOTELY_ABORTED:
		rxrpc_set_call_completion(call, RXRPC_CALL_REMOTELY_ABORTED,
					  conn->abort_code, conn->error);
		break;
	case RXRPC_CONN_LOCALLY_ABORTED:
		rxrpc_abort_call("CON", call, sp->hdr.seq,
				 conn->abort_code, conn->error);
		break;
	default:
		BUG();
	}
	spin_unlock(&conn->state_lock);
	spin_unlock(&rx->incoming_lock);

	rxrpc_send_ping(call, skb);

	/* We have to discard the prealloc queue's ref here and rely on a
	 * combination of the RCU read lock and refs held either by the socket
	 * (recvmsg queue, to-be-accepted queue or user ID tree) or the kernel
	 * service to prevent the call from being deallocated too early.
	 */
	rxrpc_put_call(call, rxrpc_call_put);

	_leave(" = %p{%d}", call, call->debug_id);
	return call;

no_call:
	spin_unlock(&rx->incoming_lock);
	_leave(" = NULL [%u]", skb->mark);
	return NULL;
}

/*
 * Charge up socket with preallocated calls, attaching user call IDs.
 */
int rxrpc_user_charge_accept(struct rxrpc_sock *rx, unsigned long user_call_ID)
{
	struct rxrpc_backlog *b = rx->backlog;

	if (rx->sk.sk_state == RXRPC_CLOSE)
		return -ESHUTDOWN;

	return rxrpc_service_prealloc_one(rx, b, NULL, NULL, user_call_ID,
					  GFP_KERNEL,
					  atomic_inc_return(&rxrpc_debug_id));
}
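
/* This is the sendmsg()-driven replacement for the old accept path: a
 * zero-length RXRPC_CHARGE_ACCEPT control message, together with an
 * RXRPC_USER_CALL_ID cmsg carrying the ID to attach, presumably lands here.
 */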

/*
 * rxrpc_kernel_charge_accept - Charge up socket with preallocated calls
 * @sock: The socket on which to preallocate
 * @notify_rx: Event notification function for the call
 * @user_attach_call: Func to attach call to user_call_ID
 * @user_call_ID: The tag to attach to the preallocated call
 * @gfp: The allocation conditions.
 * @debug_id: The tracing debug ID.
 *
 * Charge up the socket with preallocated calls, each with a user ID.  A
 * function should be provided to effect the attachment from the user's side.
 * The user is given a ref to hold on the call.
 *
 * Note that the call may become connected before this function returns.
 */
int rxrpc_kernel_charge_accept(struct socket *sock,
			       rxrpc_notify_rx_t notify_rx,
			       rxrpc_user_attach_call_t user_attach_call,
			       unsigned long user_call_ID, gfp_t gfp,
			       unsigned int debug_id)
{
	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
	struct rxrpc_backlog *b = rx->backlog;

	if (sock->sk->sk_state == RXRPC_CLOSE)
		return -ESHUTDOWN;

	return rxrpc_service_prealloc_one(rx, b, notify_rx,
					  user_attach_call, user_call_ID,
					  gfp, debug_id);
}
EXPORT_SYMBOL(rxrpc_kernel_charge_accept);