net/rxrpc/call_accept.c (v6.2)
  1// SPDX-License-Identifier: GPL-2.0-or-later
  2/* incoming call handling
  3 *
  4 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
  5 * Written by David Howells (dhowells@redhat.com)
  6 */
  7
  8#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  9
 10#include <linux/module.h>
 11#include <linux/net.h>
 12#include <linux/skbuff.h>
 13#include <linux/errqueue.h>
 14#include <linux/udp.h>
 15#include <linux/in.h>
 16#include <linux/in6.h>
 17#include <linux/icmp.h>
 18#include <linux/gfp.h>
 19#include <linux/circ_buf.h>
 20#include <net/sock.h>
 21#include <net/af_rxrpc.h>
 22#include <net/ip.h>
 23#include "ar-internal.h"
 24
 25static void rxrpc_dummy_notify(struct sock *sk, struct rxrpc_call *call,
 26			       unsigned long user_call_ID)
 27{
 28}
 29
 30/*
 31 * Preallocate a single service call, connection and peer and, if possible,
 32 * give them a user ID and attach the user's side of the ID to them.
 33 */
 34static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx,
 35				      struct rxrpc_backlog *b,
 36				      rxrpc_notify_rx_t notify_rx,
 37				      rxrpc_user_attach_call_t user_attach_call,
 38				      unsigned long user_call_ID, gfp_t gfp,
 39				      unsigned int debug_id)
 40{
 41	struct rxrpc_call *call, *xcall;
 42	struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));
 43	struct rb_node *parent, **pp;
 44	int max, tmp;
 45	unsigned int size = RXRPC_BACKLOG_MAX;
 46	unsigned int head, tail, call_head, call_tail;
 47
 48	max = rx->sk.sk_max_ack_backlog;
 49	tmp = rx->sk.sk_ack_backlog;
 50	if (tmp >= max) {
 51		_leave(" = -ENOBUFS [full %u]", max);
 52		return -ENOBUFS;
 53	}
 54	max -= tmp;
 55
 56	/* We don't need more conns and peers than we have calls, but on the
 57	 * other hand, we shouldn't ever use more peers than conns or conns
 58	 * than calls.
 59	 */
 60	call_head = b->call_backlog_head;
 61	call_tail = READ_ONCE(b->call_backlog_tail);
 62	tmp = CIRC_CNT(call_head, call_tail, size);
 63	if (tmp >= max) {
 64		_leave(" = -ENOBUFS [enough %u]", tmp);
 65		return -ENOBUFS;
 66	}
 67	max = tmp + 1;
 68
 69	head = b->peer_backlog_head;
 70	tail = READ_ONCE(b->peer_backlog_tail);
 71	if (CIRC_CNT(head, tail, size) < max) {
 72		struct rxrpc_peer *peer;
 73
 74		peer = rxrpc_alloc_peer(rx->local, gfp, rxrpc_peer_new_prealloc);
 75		if (!peer)
 76			return -ENOMEM;
 77		b->peer_backlog[head] = peer;
 78		smp_store_release(&b->peer_backlog_head,
 79				  (head + 1) & (size - 1));
 80	}
 81
 82	head = b->conn_backlog_head;
 83	tail = READ_ONCE(b->conn_backlog_tail);
 84	if (CIRC_CNT(head, tail, size) < max) {
 85		struct rxrpc_connection *conn;
 86
 87		conn = rxrpc_prealloc_service_connection(rxnet, gfp);
 88		if (!conn)
 89			return -ENOMEM;
 90		b->conn_backlog[head] = conn;
 91		smp_store_release(&b->conn_backlog_head,
 92				  (head + 1) & (size - 1));
 93	}
 94
 95	/* Now it gets complicated, because calls get registered with the
 96	 * socket here, with a user ID preassigned by the user.
 97	 */
 98	call = rxrpc_alloc_call(rx, gfp, debug_id);
 99	if (!call)
100		return -ENOMEM;
101	call->flags |= (1 << RXRPC_CALL_IS_SERVICE);
102	rxrpc_set_call_state(call, RXRPC_CALL_SERVER_PREALLOC);
103	__set_bit(RXRPC_CALL_EV_INITIAL_PING, &call->events);
104
105	trace_rxrpc_call(call->debug_id, refcount_read(&call->ref),
106			 user_call_ID, rxrpc_call_new_prealloc_service);
107
108	write_lock(&rx->call_lock);
109
110	/* Check the user ID isn't already in use */
111	pp = &rx->calls.rb_node;
112	parent = NULL;
113	while (*pp) {
114		parent = *pp;
115		xcall = rb_entry(parent, struct rxrpc_call, sock_node);
116		if (user_call_ID < xcall->user_call_ID)
117			pp = &(*pp)->rb_left;
118		else if (user_call_ID > xcall->user_call_ID)
119			pp = &(*pp)->rb_right;
120		else
121			goto id_in_use;
122	}
123
124	call->user_call_ID = user_call_ID;
125	call->notify_rx = notify_rx;
126	if (user_attach_call) {
127		rxrpc_get_call(call, rxrpc_call_get_kernel_service);
128		user_attach_call(call, user_call_ID);
129	}
130
131	rxrpc_get_call(call, rxrpc_call_get_userid);
132	rb_link_node(&call->sock_node, parent, pp);
133	rb_insert_color(&call->sock_node, &rx->calls);
134	set_bit(RXRPC_CALL_HAS_USERID, &call->flags);
135
136	list_add(&call->sock_link, &rx->sock_calls);
137
138	write_unlock(&rx->call_lock);
139
140	rxnet = call->rxnet;
141	spin_lock(&rxnet->call_lock);
142	list_add_tail_rcu(&call->link, &rxnet->calls);
143	spin_unlock(&rxnet->call_lock);
144
145	b->call_backlog[call_head] = call;
146	smp_store_release(&b->call_backlog_head, (call_head + 1) & (size - 1));
147	_leave(" = 0 [%d -> %lx]", call->debug_id, user_call_ID);
148	return 0;
149
150id_in_use:
151	write_unlock(&rx->call_lock);
152	rxrpc_cleanup_call(call);
153	_leave(" = -EBADSLT");
154	return -EBADSLT;
155}
156
157/*
158 * Allocate the preallocation buffers for incoming service calls.  These must
159 * be charged manually.
160 */
161int rxrpc_service_prealloc(struct rxrpc_sock *rx, gfp_t gfp)
162{
163	struct rxrpc_backlog *b = rx->backlog;
164
165	if (!b) {
166		b = kzalloc(sizeof(struct rxrpc_backlog), gfp);
167		if (!b)
168			return -ENOMEM;
169		rx->backlog = b;
170	}
171
172	return 0;
173}
174
175/*
176 * Discard the preallocation on a service.
177 */
178void rxrpc_discard_prealloc(struct rxrpc_sock *rx)
179{
180	struct rxrpc_backlog *b = rx->backlog;
181	struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));
182	unsigned int size = RXRPC_BACKLOG_MAX, head, tail;
183
184	if (!b)
185		return;
186	rx->backlog = NULL;
187
188	/* Make sure that there aren't any incoming calls in progress before we
189	 * clear the preallocation buffers.
190	 */
191	spin_lock(&rx->incoming_lock);
192	spin_unlock(&rx->incoming_lock);
193
194	head = b->peer_backlog_head;
195	tail = b->peer_backlog_tail;
196	while (CIRC_CNT(head, tail, size) > 0) {
197		struct rxrpc_peer *peer = b->peer_backlog[tail];
198		rxrpc_put_local(peer->local, rxrpc_local_put_prealloc_conn);
199		kfree(peer);
200		tail = (tail + 1) & (size - 1);
201	}
202
203	head = b->conn_backlog_head;
204	tail = b->conn_backlog_tail;
205	while (CIRC_CNT(head, tail, size) > 0) {
206		struct rxrpc_connection *conn = b->conn_backlog[tail];
207		write_lock(&rxnet->conn_lock);
208		list_del(&conn->link);
209		list_del(&conn->proc_link);
210		write_unlock(&rxnet->conn_lock);
211		kfree(conn);
212		if (atomic_dec_and_test(&rxnet->nr_conns))
213			wake_up_var(&rxnet->nr_conns);
214		tail = (tail + 1) & (size - 1);
215	}
216
217	head = b->call_backlog_head;
218	tail = b->call_backlog_tail;
219	while (CIRC_CNT(head, tail, size) > 0) {
220		struct rxrpc_call *call = b->call_backlog[tail];
221		rcu_assign_pointer(call->socket, rx);
222		if (rx->discard_new_call) {
223			_debug("discard %lx", call->user_call_ID);
224			rx->discard_new_call(call, call->user_call_ID);
225			if (call->notify_rx)
226				call->notify_rx = rxrpc_dummy_notify;
227			rxrpc_put_call(call, rxrpc_call_put_kernel);
228		}
229		rxrpc_call_completed(call);
230		rxrpc_release_call(rx, call);
231		rxrpc_put_call(call, rxrpc_call_put_discard_prealloc);
232		tail = (tail + 1) & (size - 1);
233	}
234
235	kfree(b);
236}
237
238/*
239 * Allocate a new incoming call from the prealloc pool, along with a connection
240 * and a peer as necessary.
241 */
242static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
243						    struct rxrpc_local *local,
244						    struct rxrpc_peer *peer,
245						    struct rxrpc_connection *conn,
246						    const struct rxrpc_security *sec,
247						    struct sockaddr_rxrpc *peer_srx,
248						    struct sk_buff *skb)
249{
250	struct rxrpc_backlog *b = rx->backlog;
251	struct rxrpc_call *call;
252	unsigned short call_head, conn_head, peer_head;
253	unsigned short call_tail, conn_tail, peer_tail;
254	unsigned short call_count, conn_count;
255
256	/* #calls >= #conns >= #peers must hold true. */
257	call_head = smp_load_acquire(&b->call_backlog_head);
258	call_tail = b->call_backlog_tail;
259	call_count = CIRC_CNT(call_head, call_tail, RXRPC_BACKLOG_MAX);
260	conn_head = smp_load_acquire(&b->conn_backlog_head);
261	conn_tail = b->conn_backlog_tail;
262	conn_count = CIRC_CNT(conn_head, conn_tail, RXRPC_BACKLOG_MAX);
263	ASSERTCMP(conn_count, >=, call_count);
264	peer_head = smp_load_acquire(&b->peer_backlog_head);
265	peer_tail = b->peer_backlog_tail;
266	ASSERTCMP(CIRC_CNT(peer_head, peer_tail, RXRPC_BACKLOG_MAX), >=,
267		  conn_count);
268
269	if (call_count == 0)
270		return NULL;
271
272	if (!conn) {
273		if (peer && !rxrpc_get_peer_maybe(peer, rxrpc_peer_get_service_conn))
274			peer = NULL;
275		if (!peer) {
276			peer = b->peer_backlog[peer_tail];
277			peer->srx = *peer_srx;
278			b->peer_backlog[peer_tail] = NULL;
279			smp_store_release(&b->peer_backlog_tail,
280					  (peer_tail + 1) &
281					  (RXRPC_BACKLOG_MAX - 1));
282
283			rxrpc_new_incoming_peer(local, peer);
284		}
285
286		/* Now allocate and set up the connection */
287		conn = b->conn_backlog[conn_tail];
288		b->conn_backlog[conn_tail] = NULL;
289		smp_store_release(&b->conn_backlog_tail,
290				  (conn_tail + 1) & (RXRPC_BACKLOG_MAX - 1));
291		conn->local = rxrpc_get_local(local, rxrpc_local_get_prealloc_conn);
292		conn->peer = peer;
293		rxrpc_see_connection(conn, rxrpc_conn_see_new_service_conn);
294		rxrpc_new_incoming_connection(rx, conn, sec, skb);
295	} else {
296		rxrpc_get_connection(conn, rxrpc_conn_get_service_conn);
297		atomic_inc(&conn->active);
298	}
299
300	/* And now we can allocate and set up a new call */
301	call = b->call_backlog[call_tail];
302	b->call_backlog[call_tail] = NULL;
303	smp_store_release(&b->call_backlog_tail,
304			  (call_tail + 1) & (RXRPC_BACKLOG_MAX - 1));
305
306	rxrpc_see_call(call, rxrpc_call_see_accept);
307	call->local = rxrpc_get_local(conn->local, rxrpc_local_get_call);
308	call->conn = conn;
309	call->security = conn->security;
310	call->security_ix = conn->security_ix;
311	call->peer = rxrpc_get_peer(conn->peer, rxrpc_peer_get_accept);
312	call->dest_srx = peer->srx;
313	call->cong_ssthresh = call->peer->cong_ssthresh;
314	call->tx_last_sent = ktime_get_real();
315	return call;
316}
317
318/*
319 * Set up a new incoming call.  Called from the I/O thread.
320 *
321 * If this is for a kernel service, when we allocate the call, it will have
322 * three refs on it: (1) the kernel service, (2) the user_call_ID tree, (3) the
323 * retainer ref obtained from the backlog buffer.  Prealloc calls for userspace
324 * services only have the ref from the backlog buffer.
325 *
326 * If we want to report an error, we mark the skb with the packet type and
327 * abort code and return false.
328 */
329bool rxrpc_new_incoming_call(struct rxrpc_local *local,
330			     struct rxrpc_peer *peer,
331			     struct rxrpc_connection *conn,
332			     struct sockaddr_rxrpc *peer_srx,
333			     struct sk_buff *skb)
334{
335	const struct rxrpc_security *sec = NULL;
336	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
337	struct rxrpc_call *call = NULL;
338	struct rxrpc_sock *rx;
339
340	_enter("");
341
342	/* Don't set up a call for anything other than a DATA packet. */
343	if (sp->hdr.type != RXRPC_PACKET_TYPE_DATA)
344		return rxrpc_protocol_error(skb, rxrpc_eproto_no_service_call);
345
346	read_lock(&local->services_lock);
347
348	/* Weed out packets to services we're not offering.  Packets that would
349	 * begin a call are explicitly rejected and the rest are just
350	 * discarded.
351	 */
352	rx = local->service;
353	if (!rx || (sp->hdr.serviceId != rx->srx.srx_service &&
354		    sp->hdr.serviceId != rx->second_service)
355	    ) {
356		if (sp->hdr.type == RXRPC_PACKET_TYPE_DATA &&
357		    sp->hdr.seq == 1)
358			goto unsupported_service;
359		goto discard;
360	}
361
362	if (!conn) {
363		sec = rxrpc_get_incoming_security(rx, skb);
364		if (!sec)
365			goto unsupported_security;
366	}
367
368	spin_lock(&rx->incoming_lock);
369	if (rx->sk.sk_state == RXRPC_SERVER_LISTEN_DISABLED ||
370	    rx->sk.sk_state == RXRPC_CLOSE) {
371		rxrpc_direct_abort(skb, rxrpc_abort_shut_down,
372				   RX_INVALID_OPERATION, -ESHUTDOWN);
373		goto no_call;
374	}
375
376	call = rxrpc_alloc_incoming_call(rx, local, peer, conn, sec, peer_srx,
377					 skb);
378	if (!call) {
379		skb->mark = RXRPC_SKB_MARK_REJECT_BUSY;
380		goto no_call;
381	}
382
383	trace_rxrpc_receive(call, rxrpc_receive_incoming,
384			    sp->hdr.serial, sp->hdr.seq);
385
386	/* Make the call live. */
387	rxrpc_incoming_call(rx, call, skb);
388	conn = call->conn;
389
390	if (rx->notify_new_call)
391		rx->notify_new_call(&rx->sk, call, call->user_call_ID);
392
393	spin_lock(&conn->state_lock);
394	if (conn->state == RXRPC_CONN_SERVICE_UNSECURED) {
395		conn->state = RXRPC_CONN_SERVICE_CHALLENGING;
396		set_bit(RXRPC_CONN_EV_CHALLENGE, &call->conn->events);
397		rxrpc_queue_conn(call->conn, rxrpc_conn_queue_challenge);
398	}
399	spin_unlock(&conn->state_lock);
400
401	spin_unlock(&rx->incoming_lock);
402	read_unlock(&local->services_lock);
403
404	if (hlist_unhashed(&call->error_link)) {
405		spin_lock(&call->peer->lock);
406		hlist_add_head(&call->error_link, &call->peer->error_targets);
407		spin_unlock(&call->peer->lock);
408	}
409
410	_leave(" = %p{%d}", call, call->debug_id);
411	rxrpc_input_call_event(call, skb);
412	rxrpc_put_call(call, rxrpc_call_put_input);
413	return true;
414
415unsupported_service:
416	read_unlock(&local->services_lock);
417	return rxrpc_direct_abort(skb, rxrpc_abort_service_not_offered,
418				  RX_INVALID_OPERATION, -EOPNOTSUPP);
419unsupported_security:
420	read_unlock(&local->services_lock);
421	return rxrpc_direct_abort(skb, rxrpc_abort_service_not_offered,
422				  RX_INVALID_OPERATION, -EKEYREJECTED);
423no_call:
424	spin_unlock(&rx->incoming_lock);
425	read_unlock(&local->services_lock);
426	_leave(" = f [%u]", skb->mark);
427	return false;
428discard:
429	read_unlock(&local->services_lock);
430	return true;
431}
432
433/*
434 * Charge up socket with preallocated calls, attaching user call IDs.
435 */
436int rxrpc_user_charge_accept(struct rxrpc_sock *rx, unsigned long user_call_ID)
437{
438	struct rxrpc_backlog *b = rx->backlog;
439
440	if (rx->sk.sk_state == RXRPC_CLOSE)
441		return -ESHUTDOWN;
442
443	return rxrpc_service_prealloc_one(rx, b, NULL, NULL, user_call_ID,
444					  GFP_KERNEL,
445					  atomic_inc_return(&rxrpc_debug_id));
446}
447
448/*
449 * rxrpc_kernel_charge_accept - Charge up socket with preallocated calls
450 * @sock: The socket on which to preallocate
451 * @notify_rx: Event notification function for the call
452 * @user_attach_call: Func to attach call to user_call_ID
453 * @user_call_ID: The tag to attach to the preallocated call
454 * @gfp: The allocation conditions.
455 * @debug_id: The tracing debug ID.
456 *
457 * Charge up the socket with preallocated calls, each with a user ID.  A
458 * function should be provided to effect the attachment from the user's side.
459 * The user is given a ref to hold on the call.
460 *
461 * Note that the call may become connected before this function returns.
462 */
463int rxrpc_kernel_charge_accept(struct socket *sock,
464			       rxrpc_notify_rx_t notify_rx,
465			       rxrpc_user_attach_call_t user_attach_call,
466			       unsigned long user_call_ID, gfp_t gfp,
467			       unsigned int debug_id)
468{
469	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
470	struct rxrpc_backlog *b = rx->backlog;
471
472	if (sock->sk->sk_state == RXRPC_CLOSE)
473		return -ESHUTDOWN;
474
475	return rxrpc_service_prealloc_one(rx, b, notify_rx,
476					  user_attach_call, user_call_ID,
477					  gfp, debug_id);
478}
479EXPORT_SYMBOL(rxrpc_kernel_charge_accept);
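
The exported helper above is how an in-kernel service keeps the accept backlog topped up.  The sketch below shows one plausible way to use it, assuming a service socket already set up by the caller; the my_srv_* names, the context struct and the loop are illustrative only (not taken from any in-tree user), and only rxrpc_kernel_charge_accept() and the two callback signatures come from the API shown in this file.

#include <linux/slab.h>
#include <net/af_rxrpc.h>

/* Hypothetical per-call context the service ties to each preallocated
 * call through the user_call_ID tag.
 */
struct my_srv_call {
	struct rxrpc_call	*rxcall;
	/* ... service-specific state ... */
};

static atomic_t my_srv_debug_id;

/* notify_rx: data or events are available on the call.  A real service
 * would typically queue a work item that calls rxrpc_kernel_recv_data().
 */
static void my_srv_notify_rx(struct sock *sk, struct rxrpc_call *rxcall,
			     unsigned long user_call_ID)
{
}

/* user_attach_call: invoked under rx->call_lock when the preallocated
 * call is charged; the backlog takes a ref on rxcall for the service.
 */
static void my_srv_attach_call(struct rxrpc_call *rxcall,
			       unsigned long user_call_ID)
{
	struct my_srv_call *call = (struct my_srv_call *)user_call_ID;

	call->rxcall = rxcall;
}

/* Top up the preallocation ring until rxrpc reports it full (-ENOBUFS). */
static void my_srv_charge_preallocation(struct socket *rxrpc_sock)
{
	struct my_srv_call *call;

	for (;;) {
		call = kzalloc(sizeof(*call), GFP_KERNEL);
		if (!call)
			break;

		if (rxrpc_kernel_charge_accept(rxrpc_sock,
					       my_srv_notify_rx,
					       my_srv_attach_call,
					       (unsigned long)call,
					       GFP_KERNEL,
					       atomic_inc_return(&my_srv_debug_id)) < 0) {
			kfree(call);
			break;
		}
		/* A real service would also track 'call' so it can be freed
		 * if the socket is torn down (see discard_new_call above).
		 */
	}
}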

net/rxrpc/call_accept.c (v5.4)
  1// SPDX-License-Identifier: GPL-2.0-or-later
  2/* incoming call handling
  3 *
  4 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
  5 * Written by David Howells (dhowells@redhat.com)
  6 */
  7
  8#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  9
 10#include <linux/module.h>
 11#include <linux/net.h>
 12#include <linux/skbuff.h>
 13#include <linux/errqueue.h>
 14#include <linux/udp.h>
 15#include <linux/in.h>
 16#include <linux/in6.h>
 17#include <linux/icmp.h>
 18#include <linux/gfp.h>
 19#include <linux/circ_buf.h>
 20#include <net/sock.h>
 21#include <net/af_rxrpc.h>
 22#include <net/ip.h>
 23#include "ar-internal.h"
 24
 25/*
 26 * Preallocate a single service call, connection and peer and, if possible,
 27 * give them a user ID and attach the user's side of the ID to them.
 28 */
 29static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx,
 30				      struct rxrpc_backlog *b,
 31				      rxrpc_notify_rx_t notify_rx,
 32				      rxrpc_user_attach_call_t user_attach_call,
 33				      unsigned long user_call_ID, gfp_t gfp,
 34				      unsigned int debug_id)
 35{
 36	const void *here = __builtin_return_address(0);
 37	struct rxrpc_call *call;
 38	struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));
 39	int max, tmp;
 40	unsigned int size = RXRPC_BACKLOG_MAX;
 41	unsigned int head, tail, call_head, call_tail;
 42
 43	max = rx->sk.sk_max_ack_backlog;
 44	tmp = rx->sk.sk_ack_backlog;
 45	if (tmp >= max) {
 46		_leave(" = -ENOBUFS [full %u]", max);
 47		return -ENOBUFS;
 48	}
 49	max -= tmp;
 50
 51	/* We don't need more conns and peers than we have calls, but on the
 52	 * other hand, we shouldn't ever use more peers than conns or conns
 53	 * than calls.
 54	 */
 55	call_head = b->call_backlog_head;
 56	call_tail = READ_ONCE(b->call_backlog_tail);
 57	tmp = CIRC_CNT(call_head, call_tail, size);
 58	if (tmp >= max) {
 59		_leave(" = -ENOBUFS [enough %u]", tmp);
 60		return -ENOBUFS;
 61	}
 62	max = tmp + 1;
 63
 64	head = b->peer_backlog_head;
 65	tail = READ_ONCE(b->peer_backlog_tail);
 66	if (CIRC_CNT(head, tail, size) < max) {
 67		struct rxrpc_peer *peer = rxrpc_alloc_peer(rx->local, gfp);
 68		if (!peer)
 69			return -ENOMEM;
 70		b->peer_backlog[head] = peer;
 71		smp_store_release(&b->peer_backlog_head,
 72				  (head + 1) & (size - 1));
 73	}
 74
 75	head = b->conn_backlog_head;
 76	tail = READ_ONCE(b->conn_backlog_tail);
 77	if (CIRC_CNT(head, tail, size) < max) {
 78		struct rxrpc_connection *conn;
 79
 80		conn = rxrpc_prealloc_service_connection(rxnet, gfp);
 81		if (!conn)
 82			return -ENOMEM;
 83		b->conn_backlog[head] = conn;
 84		smp_store_release(&b->conn_backlog_head,
 85				  (head + 1) & (size - 1));
 86
 87		trace_rxrpc_conn(conn->debug_id, rxrpc_conn_new_service,
 88				 atomic_read(&conn->usage), here);
 89	}
 90
 91	/* Now it gets complicated, because calls get registered with the
 92	 * socket here, particularly if a user ID is preassigned by the user.
 93	 */
 94	call = rxrpc_alloc_call(rx, gfp, debug_id);
 95	if (!call)
 96		return -ENOMEM;
 97	call->flags |= (1 << RXRPC_CALL_IS_SERVICE);
 98	call->state = RXRPC_CALL_SERVER_PREALLOC;
 99
100	trace_rxrpc_call(call->debug_id, rxrpc_call_new_service,
101			 atomic_read(&call->usage),
102			 here, (const void *)user_call_ID);
103
104	write_lock(&rx->call_lock);
105	if (user_attach_call) {
106		struct rxrpc_call *xcall;
107		struct rb_node *parent, **pp;
108
109		/* Check the user ID isn't already in use */
110		pp = &rx->calls.rb_node;
111		parent = NULL;
112		while (*pp) {
113			parent = *pp;
114			xcall = rb_entry(parent, struct rxrpc_call, sock_node);
115			if (user_call_ID < xcall->user_call_ID)
116				pp = &(*pp)->rb_left;
117			else if (user_call_ID > xcall->user_call_ID)
118				pp = &(*pp)->rb_right;
119			else
120				goto id_in_use;
121		}
122
123		call->user_call_ID = user_call_ID;
124		call->notify_rx = notify_rx;
125		rxrpc_get_call(call, rxrpc_call_got_kernel);
126		user_attach_call(call, user_call_ID);
127		rxrpc_get_call(call, rxrpc_call_got_userid);
128		rb_link_node(&call->sock_node, parent, pp);
129		rb_insert_color(&call->sock_node, &rx->calls);
130		set_bit(RXRPC_CALL_HAS_USERID, &call->flags);
131	}
132
133	list_add(&call->sock_link, &rx->sock_calls);
134
135	write_unlock(&rx->call_lock);
136
137	rxnet = call->rxnet;
138	write_lock(&rxnet->call_lock);
139	list_add_tail(&call->link, &rxnet->calls);
140	write_unlock(&rxnet->call_lock);
141
142	b->call_backlog[call_head] = call;
143	smp_store_release(&b->call_backlog_head, (call_head + 1) & (size - 1));
144	_leave(" = 0 [%d -> %lx]", call->debug_id, user_call_ID);
145	return 0;
146
147id_in_use:
148	write_unlock(&rx->call_lock);
149	rxrpc_cleanup_call(call);
150	_leave(" = -EBADSLT");
151	return -EBADSLT;
152}
153
154/*
155 * Preallocate sufficient service connections, calls and peers to cover the
156 * entire backlog of a socket.  When a new call comes in, if we don't have
157 * sufficient of each available, the call gets rejected as busy or ignored.
158 *
159 * The backlog is replenished when a connection is accepted or rejected.
160 */
161int rxrpc_service_prealloc(struct rxrpc_sock *rx, gfp_t gfp)
162{
163	struct rxrpc_backlog *b = rx->backlog;
164
165	if (!b) {
166		b = kzalloc(sizeof(struct rxrpc_backlog), gfp);
167		if (!b)
168			return -ENOMEM;
169		rx->backlog = b;
170	}
171
172	if (rx->discard_new_call)
173		return 0;
174
175	while (rxrpc_service_prealloc_one(rx, b, NULL, NULL, 0, gfp,
176					  atomic_inc_return(&rxrpc_debug_id)) == 0)
177		;
178
179	return 0;
180}
181
182/*
183 * Discard the preallocation on a service.
184 */
185void rxrpc_discard_prealloc(struct rxrpc_sock *rx)
186{
187	struct rxrpc_backlog *b = rx->backlog;
188	struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));
189	unsigned int size = RXRPC_BACKLOG_MAX, head, tail;
190
191	if (!b)
192		return;
193	rx->backlog = NULL;
194
195	/* Make sure that there aren't any incoming calls in progress before we
196	 * clear the preallocation buffers.
197	 */
198	spin_lock_bh(&rx->incoming_lock);
199	spin_unlock_bh(&rx->incoming_lock);
200
201	head = b->peer_backlog_head;
202	tail = b->peer_backlog_tail;
203	while (CIRC_CNT(head, tail, size) > 0) {
204		struct rxrpc_peer *peer = b->peer_backlog[tail];
205		kfree(peer);
206		tail = (tail + 1) & (size - 1);
207	}
208
209	head = b->conn_backlog_head;
210	tail = b->conn_backlog_tail;
211	while (CIRC_CNT(head, tail, size) > 0) {
212		struct rxrpc_connection *conn = b->conn_backlog[tail];
213		write_lock(&rxnet->conn_lock);
214		list_del(&conn->link);
215		list_del(&conn->proc_link);
216		write_unlock(&rxnet->conn_lock);
217		kfree(conn);
218		if (atomic_dec_and_test(&rxnet->nr_conns))
219			wake_up_var(&rxnet->nr_conns);
220		tail = (tail + 1) & (size - 1);
221	}
222
223	head = b->call_backlog_head;
224	tail = b->call_backlog_tail;
225	while (CIRC_CNT(head, tail, size) > 0) {
226		struct rxrpc_call *call = b->call_backlog[tail];
227		rcu_assign_pointer(call->socket, rx);
228		if (rx->discard_new_call) {
229			_debug("discard %lx", call->user_call_ID);
230			rx->discard_new_call(call, call->user_call_ID);
231			rxrpc_put_call(call, rxrpc_call_put_kernel);
232		}
233		rxrpc_call_completed(call);
234		rxrpc_release_call(rx, call);
235		rxrpc_put_call(call, rxrpc_call_put);
236		tail = (tail + 1) & (size - 1);
237	}
238
239	kfree(b);
240}
241
242/*
243 * Allocate a new incoming call from the prealloc pool, along with a connection
244 * and a peer as necessary.
245 */
246static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
247						    struct rxrpc_local *local,
248						    struct rxrpc_peer *peer,
249						    struct rxrpc_connection *conn,
250						    struct sk_buff *skb)
251{
252	struct rxrpc_backlog *b = rx->backlog;
253	struct rxrpc_call *call;
254	unsigned short call_head, conn_head, peer_head;
255	unsigned short call_tail, conn_tail, peer_tail;
256	unsigned short call_count, conn_count;
257
258	/* #calls >= #conns >= #peers must hold true. */
259	call_head = smp_load_acquire(&b->call_backlog_head);
260	call_tail = b->call_backlog_tail;
261	call_count = CIRC_CNT(call_head, call_tail, RXRPC_BACKLOG_MAX);
262	conn_head = smp_load_acquire(&b->conn_backlog_head);
263	conn_tail = b->conn_backlog_tail;
264	conn_count = CIRC_CNT(conn_head, conn_tail, RXRPC_BACKLOG_MAX);
265	ASSERTCMP(conn_count, >=, call_count);
266	peer_head = smp_load_acquire(&b->peer_backlog_head);
267	peer_tail = b->peer_backlog_tail;
268	ASSERTCMP(CIRC_CNT(peer_head, peer_tail, RXRPC_BACKLOG_MAX), >=,
269		  conn_count);
270
271	if (call_count == 0)
272		return NULL;
273
274	if (!conn) {
275		if (peer && !rxrpc_get_peer_maybe(peer))
276			peer = NULL;
277		if (!peer) {
278			peer = b->peer_backlog[peer_tail];
279			if (rxrpc_extract_addr_from_skb(&peer->srx, skb) < 0)
280				return NULL;
281			b->peer_backlog[peer_tail] = NULL;
282			smp_store_release(&b->peer_backlog_tail,
283					  (peer_tail + 1) &
284					  (RXRPC_BACKLOG_MAX - 1));
285
286			rxrpc_new_incoming_peer(rx, local, peer);
287		}
288
289		/* Now allocate and set up the connection */
290		conn = b->conn_backlog[conn_tail];
291		b->conn_backlog[conn_tail] = NULL;
292		smp_store_release(&b->conn_backlog_tail,
293				  (conn_tail + 1) & (RXRPC_BACKLOG_MAX - 1));
294		conn->params.local = rxrpc_get_local(local);
295		conn->params.peer = peer;
296		rxrpc_see_connection(conn);
297		rxrpc_new_incoming_connection(rx, conn, skb);
298	} else {
299		rxrpc_get_connection(conn);
300	}
301
302	/* And now we can allocate and set up a new call */
303	call = b->call_backlog[call_tail];
304	b->call_backlog[call_tail] = NULL;
305	smp_store_release(&b->call_backlog_tail,
306			  (call_tail + 1) & (RXRPC_BACKLOG_MAX - 1));
307
308	rxrpc_see_call(call);
309	call->conn = conn;
310	call->security = conn->security;
311	call->peer = rxrpc_get_peer(conn->params.peer);
312	call->cong_cwnd = call->peer->cong_cwnd;
313	return call;
314}
315
316/*
317 * Set up a new incoming call.  Called in BH context with the RCU read lock
318 * held.
319 *
320 * If this is for a kernel service, when we allocate the call, it will have
321 * three refs on it: (1) the kernel service, (2) the user_call_ID tree, (3) the
322 * retainer ref obtained from the backlog buffer.  Prealloc calls for userspace
323 * services only have the ref from the backlog buffer.  We want to pass this
324 * ref to non-BH context to dispose of.
325 *
326 * If we want to report an error, we mark the skb with the packet type and
327 * abort code and return NULL.
328 *
329 * The call is returned with the user access mutex held.
330 */
331struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
332					   struct rxrpc_sock *rx,
333					   struct sk_buff *skb)
334{
335	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
336	struct rxrpc_connection *conn;
337	struct rxrpc_peer *peer = NULL;
338	struct rxrpc_call *call;
339
340	_enter("");
341
342	spin_lock(&rx->incoming_lock);
343	if (rx->sk.sk_state == RXRPC_SERVER_LISTEN_DISABLED ||
344	    rx->sk.sk_state == RXRPC_CLOSE) {
345		trace_rxrpc_abort(0, "CLS", sp->hdr.cid, sp->hdr.callNumber,
346				  sp->hdr.seq, RX_INVALID_OPERATION, ESHUTDOWN);
347		skb->mark = RXRPC_SKB_MARK_REJECT_ABORT;
348		skb->priority = RX_INVALID_OPERATION;
349		_leave(" = NULL [close]");
350		call = NULL;
351		goto out;
352	}
353
354	/* The peer, connection and call may all have sprung into existence due
355	 * to a duplicate packet being handled on another CPU in parallel, so
356	 * we have to recheck the routing.  However, we're now holding
357	 * rx->incoming_lock, so the values should remain stable.
358	 */
359	conn = rxrpc_find_connection_rcu(local, skb, &peer);
360
361	call = rxrpc_alloc_incoming_call(rx, local, peer, conn, skb);
362	if (!call) {
363		skb->mark = RXRPC_SKB_MARK_REJECT_BUSY;
364		_leave(" = NULL [busy]");
365		call = NULL;
366		goto out;
367	}
368
369	trace_rxrpc_receive(call, rxrpc_receive_incoming,
370			    sp->hdr.serial, sp->hdr.seq);
371
372	/* Lock the call to prevent rxrpc_kernel_send/recv_data() and
373	 * sendmsg()/recvmsg() inconveniently stealing the mutex once the
374	 * notification is generated.
375	 *
376	 * The BUG should never happen because the kernel should be well
377	 * behaved enough not to access the call before the first notification
378	 * event and userspace is prevented from doing so until the state is
379	 * appropriate.
380	 */
381	if (!mutex_trylock(&call->user_mutex))
382		BUG();
383
384	/* Make the call live. */
385	rxrpc_incoming_call(rx, call, skb);
386	conn = call->conn;
387
388	if (rx->notify_new_call)
389		rx->notify_new_call(&rx->sk, call, call->user_call_ID);
390	else
391		sk_acceptq_added(&rx->sk);
392
393	spin_lock(&conn->state_lock);
394	switch (conn->state) {
395	case RXRPC_CONN_SERVICE_UNSECURED:
396		conn->state = RXRPC_CONN_SERVICE_CHALLENGING;
397		set_bit(RXRPC_CONN_EV_CHALLENGE, &call->conn->events);
398		rxrpc_queue_conn(call->conn);
399		break;
400
401	case RXRPC_CONN_SERVICE:
402		write_lock(&call->state_lock);
403		if (call->state < RXRPC_CALL_COMPLETE) {
404			if (rx->discard_new_call)
405				call->state = RXRPC_CALL_SERVER_RECV_REQUEST;
406			else
407				call->state = RXRPC_CALL_SERVER_ACCEPTING;
408		}
409		write_unlock(&call->state_lock);
410		break;
411
412	case RXRPC_CONN_REMOTELY_ABORTED:
413		rxrpc_set_call_completion(call, RXRPC_CALL_REMOTELY_ABORTED,
414					  conn->abort_code, conn->error);
415		break;
416	case RXRPC_CONN_LOCALLY_ABORTED:
417		rxrpc_abort_call("CON", call, sp->hdr.seq,
418				 conn->abort_code, conn->error);
419		break;
420	default:
421		BUG();
422	}
423	spin_unlock(&conn->state_lock);
424
425	if (call->state == RXRPC_CALL_SERVER_ACCEPTING)
426		rxrpc_notify_socket(call);
427
428	/* We have to discard the prealloc queue's ref here and rely on a
429	 * combination of the RCU read lock and refs held either by the socket
430	 * (recvmsg queue, to-be-accepted queue or user ID tree) or the kernel
431	 * service to prevent the call from being deallocated too early.
432	 */
433	rxrpc_put_call(call, rxrpc_call_put);
434
435	_leave(" = %p{%d}", call, call->debug_id);
436out:
437	spin_unlock(&rx->incoming_lock);
438	return call;
439}
440
441/*
442 * handle acceptance of a call by userspace
443 * - assign the user call ID to the call at the front of the queue
444 * - called with the socket locked.
445 */
446struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *rx,
447				     unsigned long user_call_ID,
448				     rxrpc_notify_rx_t notify_rx)
449	__releases(&rx->sk.sk_lock.slock)
450	__acquires(call->user_mutex)
451{
452	struct rxrpc_call *call;
453	struct rb_node *parent, **pp;
454	int ret;
455
456	_enter(",%lx", user_call_ID);
457
458	ASSERT(!irqs_disabled());
459
460	write_lock(&rx->call_lock);
461
462	if (list_empty(&rx->to_be_accepted)) {
463		write_unlock(&rx->call_lock);
464		release_sock(&rx->sk);
465		kleave(" = -ENODATA [empty]");
466		return ERR_PTR(-ENODATA);
467	}
468
469	/* check the user ID isn't already in use */
470	pp = &rx->calls.rb_node;
471	parent = NULL;
472	while (*pp) {
473		parent = *pp;
474		call = rb_entry(parent, struct rxrpc_call, sock_node);
475
476		if (user_call_ID < call->user_call_ID)
477			pp = &(*pp)->rb_left;
478		else if (user_call_ID > call->user_call_ID)
479			pp = &(*pp)->rb_right;
480		else
481			goto id_in_use;
482	}
483
484	/* Dequeue the first call and check it's still valid.  We gain
485	 * responsibility for the queue's reference.
486	 */
487	call = list_entry(rx->to_be_accepted.next,
488			  struct rxrpc_call, accept_link);
489	write_unlock(&rx->call_lock);
490
491	/* We need to gain the mutex from the interrupt handler without
492	 * upsetting lockdep, so we have to release it there and take it here.
493	 * We are, however, still holding the socket lock, so other accepts
494	 * must wait for us and no one can add the user ID behind our backs.
495	 */
496	if (mutex_lock_interruptible(&call->user_mutex) < 0) {
497		release_sock(&rx->sk);
498		kleave(" = -ERESTARTSYS");
499		return ERR_PTR(-ERESTARTSYS);
500	}
501
502	write_lock(&rx->call_lock);
503	list_del_init(&call->accept_link);
504	sk_acceptq_removed(&rx->sk);
505	rxrpc_see_call(call);
506
507	/* Find the user ID insertion point. */
508	pp = &rx->calls.rb_node;
509	parent = NULL;
510	while (*pp) {
511		parent = *pp;
512		call = rb_entry(parent, struct rxrpc_call, sock_node);
513
514		if (user_call_ID < call->user_call_ID)
515			pp = &(*pp)->rb_left;
516		else if (user_call_ID > call->user_call_ID)
517			pp = &(*pp)->rb_right;
518		else
519			BUG();
520	}
521
522	write_lock_bh(&call->state_lock);
523	switch (call->state) {
524	case RXRPC_CALL_SERVER_ACCEPTING:
525		call->state = RXRPC_CALL_SERVER_RECV_REQUEST;
526		break;
527	case RXRPC_CALL_COMPLETE:
528		ret = call->error;
529		goto out_release;
530	default:
531		BUG();
532	}
533
534	/* formalise the acceptance */
535	call->notify_rx = notify_rx;
536	call->user_call_ID = user_call_ID;
537	rxrpc_get_call(call, rxrpc_call_got_userid);
538	rb_link_node(&call->sock_node, parent, pp);
539	rb_insert_color(&call->sock_node, &rx->calls);
540	if (test_and_set_bit(RXRPC_CALL_HAS_USERID, &call->flags))
541		BUG();
542
543	write_unlock_bh(&call->state_lock);
544	write_unlock(&rx->call_lock);
545	rxrpc_notify_socket(call);
546	rxrpc_service_prealloc(rx, GFP_KERNEL);
547	release_sock(&rx->sk);
548	_leave(" = %p{%d}", call, call->debug_id);
549	return call;
550
551out_release:
552	_debug("release %p", call);
553	write_unlock_bh(&call->state_lock);
554	write_unlock(&rx->call_lock);
555	rxrpc_release_call(rx, call);
556	rxrpc_put_call(call, rxrpc_call_put);
557	goto out;
558
559id_in_use:
560	ret = -EBADSLT;
561	write_unlock(&rx->call_lock);
562out:
563	rxrpc_service_prealloc(rx, GFP_KERNEL);
564	release_sock(&rx->sk);
565	_leave(" = %d", ret);
566	return ERR_PTR(ret);
567}
568
569/*
570 * Handle rejection of a call by userspace
571 * - reject the call at the front of the queue
572 */
573int rxrpc_reject_call(struct rxrpc_sock *rx)
574{
575	struct rxrpc_call *call;
576	bool abort = false;
577	int ret;
578
579	_enter("");
580
581	ASSERT(!irqs_disabled());
582
583	write_lock(&rx->call_lock);
584
585	if (list_empty(&rx->to_be_accepted)) {
586		write_unlock(&rx->call_lock);
587		return -ENODATA;
588	}
589
590	/* Dequeue the first call and check it's still valid.  We gain
591	 * responsibility for the queue's reference.
592	 */
593	call = list_entry(rx->to_be_accepted.next,
594			  struct rxrpc_call, accept_link);
595	list_del_init(&call->accept_link);
596	sk_acceptq_removed(&rx->sk);
597	rxrpc_see_call(call);
598
599	write_lock_bh(&call->state_lock);
600	switch (call->state) {
601	case RXRPC_CALL_SERVER_ACCEPTING:
602		__rxrpc_abort_call("REJ", call, 1, RX_USER_ABORT, -ECONNABORTED);
603		abort = true;
604		/* fall through */
605	case RXRPC_CALL_COMPLETE:
606		ret = call->error;
607		goto out_discard;
608	default:
609		BUG();
610	}
611
612out_discard:
613	write_unlock_bh(&call->state_lock);
614	write_unlock(&rx->call_lock);
615	if (abort) {
616		rxrpc_send_abort_packet(call);
617		rxrpc_release_call(rx, call);
618		rxrpc_put_call(call, rxrpc_call_put);
619	}
620	rxrpc_service_prealloc(rx, GFP_KERNEL);
621	_leave(" = %d", ret);
622	return ret;
623}
624
625/*
626 * rxrpc_kernel_charge_accept - Charge up socket with preallocated calls
627 * @sock: The socket on which to preallocate
628 * @notify_rx: Event notification function for the call
629 * @user_attach_call: Func to attach call to user_call_ID
630 * @user_call_ID: The tag to attach to the preallocated call
631 * @gfp: The allocation conditions.
632 * @debug_id: The tracing debug ID.
633 *
634 * Charge up the socket with preallocated calls, each with a user ID.  A
635 * function should be provided to effect the attachment from the user's side.
636 * The user is given a ref to hold on the call.
637 *
638 * Note that the call may become connected before this function returns.
639 */
640int rxrpc_kernel_charge_accept(struct socket *sock,
641			       rxrpc_notify_rx_t notify_rx,
642			       rxrpc_user_attach_call_t user_attach_call,
643			       unsigned long user_call_ID, gfp_t gfp,
644			       unsigned int debug_id)
645{
646	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
647	struct rxrpc_backlog *b = rx->backlog;
648
649	if (sock->sk->sk_state == RXRPC_CLOSE)
650		return -ESHUTDOWN;
651
652	return rxrpc_service_prealloc_one(rx, b, notify_rx,
653					  user_attach_call, user_call_ID,
654					  gfp, debug_id);
655}
656EXPORT_SYMBOL(rxrpc_kernel_charge_accept);
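
In both versions the peer, connection and call backlogs are simple power-of-two rings driven by the CIRC_CNT() arithmetic from <linux/circ_buf.h>.  The standalone userspace sketch below illustrates that index arithmetic only; SIZE stands in for RXRPC_BACKLOG_MAX, the loop bodies are reduced to bare index updates, and the CIRC_CNT() definition is copied from the kernel header.

#include <stdio.h>

#define SIZE 8	/* stand-in for RXRPC_BACKLOG_MAX; must be a power of two */

/* Same definition as in <linux/circ_buf.h>. */
#define CIRC_CNT(head, tail, size)	(((head) - (tail)) & ((size) - 1))

int main(void)
{
	unsigned int head = 0, tail = 0, accepted = 0;

	/* Producer side (rxrpc_service_prealloc_one): charge entries until
	 * the ring holds SIZE - 1 of them.  One slot is always left unused
	 * so that head == tail unambiguously means "empty".
	 */
	while (CIRC_CNT(head, tail, SIZE) < SIZE - 1)
		head = (head + 1) & (SIZE - 1);

	printf("charged %u of %u slots\n", CIRC_CNT(head, tail, SIZE), SIZE);

	/* Consumer side (rxrpc_alloc_incoming_call): take entries from the
	 * tail until the ring is empty again.
	 */
	while (CIRC_CNT(head, tail, SIZE) > 0) {
		tail = (tail + 1) & (SIZE - 1);
		accepted++;
	}

	printf("accepted %u calls, ring now empty\n", accepted);
	return 0;
}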