// SPDX-License-Identifier: GPL-2.0-or-later
/* incoming call handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/errqueue.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/icmp.h>
#include <linux/gfp.h>
#include <linux/circ_buf.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include <net/ip.h>
#include "ar-internal.h"

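/*
 * A no-op call notification handler.  It is substituted for a kernel
 * service's notify_rx handler when preallocated calls are discarded, so that
 * any notification racing with the discard is silently dropped.
 */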
static void rxrpc_dummy_notify(struct sock *sk, struct rxrpc_call *call,
			       unsigned long user_call_ID)
{
}

/*
 * Preallocate a single service call, connection and peer and, if possible,
 * give them a user ID and attach the user's side of the ID to them.
 */
static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx,
				      struct rxrpc_backlog *b,
				      rxrpc_notify_rx_t notify_rx,
				      rxrpc_user_attach_call_t user_attach_call,
				      unsigned long user_call_ID, gfp_t gfp,
				      unsigned int debug_id)
{
	struct rxrpc_call *call, *xcall;
	struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));
	struct rb_node *parent, **pp;
	int max, tmp;
	unsigned int size = RXRPC_BACKLOG_MAX;
	unsigned int head, tail, call_head, call_tail;

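	/* Don't preallocate beyond the socket's configured accept backlog. */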
	max = rx->sk.sk_max_ack_backlog;
	tmp = rx->sk.sk_ack_backlog;
	if (tmp >= max) {
		_leave(" = -ENOBUFS [full %u]", max);
		return -ENOBUFS;
	}
	max -= tmp;

	/* We don't need more conns and peers than we have calls, but on the
	 * other hand, we shouldn't ever use more peers than conns or conns
	 * than calls.
	 */
	call_head = b->call_backlog_head;
	call_tail = READ_ONCE(b->call_backlog_tail);
	tmp = CIRC_CNT(call_head, call_tail, size);
	if (tmp >= max) {
		_leave(" = -ENOBUFS [enough %u]", tmp);
		return -ENOBUFS;
	}
	max = tmp + 1;

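	/* Each backlog is a power-of-two ring buffer.  This function is the
	 * producer side: it publishes new entries by advancing the head
	 * pointer with smp_store_release(), which pairs with the
	 * smp_load_acquire() done by the consumer in
	 * rxrpc_alloc_incoming_call().
	 */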
	head = b->peer_backlog_head;
	tail = READ_ONCE(b->peer_backlog_tail);
	if (CIRC_CNT(head, tail, size) < max) {
		struct rxrpc_peer *peer;

		peer = rxrpc_alloc_peer(rx->local, gfp, rxrpc_peer_new_prealloc);
		if (!peer)
			return -ENOMEM;
		b->peer_backlog[head] = peer;
		smp_store_release(&b->peer_backlog_head,
				  (head + 1) & (size - 1));
	}

	head = b->conn_backlog_head;
	tail = READ_ONCE(b->conn_backlog_tail);
	if (CIRC_CNT(head, tail, size) < max) {
		struct rxrpc_connection *conn;

		conn = rxrpc_prealloc_service_connection(rxnet, gfp);
		if (!conn)
			return -ENOMEM;
		b->conn_backlog[head] = conn;
		smp_store_release(&b->conn_backlog_head,
				  (head + 1) & (size - 1));
	}

	/* Now it gets complicated, because calls get registered with the
	 * socket here, with a user ID preassigned by the user.
	 */
	call = rxrpc_alloc_call(rx, gfp, debug_id);
	if (!call)
		return -ENOMEM;
	call->flags |= (1 << RXRPC_CALL_IS_SERVICE);
	rxrpc_set_call_state(call, RXRPC_CALL_SERVER_PREALLOC);
	__set_bit(RXRPC_CALL_EV_INITIAL_PING, &call->events);

	trace_rxrpc_call(call->debug_id, refcount_read(&call->ref),
			 user_call_ID, rxrpc_call_new_prealloc_service);

	write_lock(&rx->call_lock);

	/* Check the user ID isn't already in use */
	pp = &rx->calls.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		xcall = rb_entry(parent, struct rxrpc_call, sock_node);
		if (user_call_ID < xcall->user_call_ID)
			pp = &(*pp)->rb_left;
		else if (user_call_ID > xcall->user_call_ID)
			pp = &(*pp)->rb_right;
		else
			goto id_in_use;
	}

	call->user_call_ID = user_call_ID;
	call->notify_rx = notify_rx;
	if (user_attach_call) {
		rxrpc_get_call(call, rxrpc_call_get_kernel_service);
		user_attach_call(call, user_call_ID);
	}

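	/* One ref stays with the backlog ring; take another for the socket's
	 * user ID tree (the kernel service took its own ref above if it
	 * attached to the call).
	 */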
	rxrpc_get_call(call, rxrpc_call_get_userid);
	rb_link_node(&call->sock_node, parent, pp);
	rb_insert_color(&call->sock_node, &rx->calls);
	set_bit(RXRPC_CALL_HAS_USERID, &call->flags);

	list_add(&call->sock_link, &rx->sock_calls);

	write_unlock(&rx->call_lock);

	rxnet = call->rxnet;
	spin_lock(&rxnet->call_lock);
	list_add_tail_rcu(&call->link, &rxnet->calls);
	spin_unlock(&rxnet->call_lock);

	b->call_backlog[call_head] = call;
	smp_store_release(&b->call_backlog_head, (call_head + 1) & (size - 1));
	_leave(" = 0 [%d -> %lx]", call->debug_id, user_call_ID);
	return 0;

id_in_use:
	write_unlock(&rx->call_lock);
	rxrpc_cleanup_call(call);
	_leave(" = -EBADSLT");
	return -EBADSLT;
}

/*
 * Allocate the preallocation buffers for incoming service calls. These must
 * be charged manually.
 */
int rxrpc_service_prealloc(struct rxrpc_sock *rx, gfp_t gfp)
{
	struct rxrpc_backlog *b = rx->backlog;

	if (!b) {
		b = kzalloc(sizeof(struct rxrpc_backlog), gfp);
		if (!b)
			return -ENOMEM;
		rx->backlog = b;
	}

	return 0;
}

/*
 * Discard the preallocation on a service.
 */
void rxrpc_discard_prealloc(struct rxrpc_sock *rx)
{
	struct rxrpc_backlog *b = rx->backlog;
	struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));
	unsigned int size = RXRPC_BACKLOG_MAX, head, tail;

	if (!b)
		return;
	rx->backlog = NULL;

	/* Make sure that there aren't any incoming calls in progress before we
	 * clear the preallocation buffers.
	 */
	spin_lock(&rx->incoming_lock);
	spin_unlock(&rx->incoming_lock);

	head = b->peer_backlog_head;
	tail = b->peer_backlog_tail;
	while (CIRC_CNT(head, tail, size) > 0) {
		struct rxrpc_peer *peer = b->peer_backlog[tail];

		rxrpc_put_local(peer->local, rxrpc_local_put_prealloc_peer);
		kfree(peer);
		tail = (tail + 1) & (size - 1);
	}

	head = b->conn_backlog_head;
	tail = b->conn_backlog_tail;
	while (CIRC_CNT(head, tail, size) > 0) {
		struct rxrpc_connection *conn = b->conn_backlog[tail];

		write_lock(&rxnet->conn_lock);
		list_del(&conn->link);
		list_del(&conn->proc_link);
		write_unlock(&rxnet->conn_lock);
		kfree(conn);
		if (atomic_dec_and_test(&rxnet->nr_conns))
			wake_up_var(&rxnet->nr_conns);
		tail = (tail + 1) & (size - 1);
	}

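	/* Any calls still in the ring hold their preallocation ref.  Let a
	 * kernel service detach its side first, then complete and release
	 * each call.
	 */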
	head = b->call_backlog_head;
	tail = b->call_backlog_tail;
	while (CIRC_CNT(head, tail, size) > 0) {
		struct rxrpc_call *call = b->call_backlog[tail];

		rcu_assign_pointer(call->socket, rx);
		if (rx->discard_new_call) {
			_debug("discard %lx", call->user_call_ID);
			rx->discard_new_call(call, call->user_call_ID);
			if (call->notify_rx)
				call->notify_rx = rxrpc_dummy_notify;
			rxrpc_put_call(call, rxrpc_call_put_kernel);
		}
		rxrpc_call_completed(call);
		rxrpc_release_call(rx, call);
		rxrpc_put_call(call, rxrpc_call_put_discard_prealloc);
		tail = (tail + 1) & (size - 1);
	}

	kfree(b);
}

/*
 * Allocate a new incoming call from the prealloc pool, along with a connection
 * and a peer as necessary.
 */
static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
						    struct rxrpc_local *local,
						    struct rxrpc_peer *peer,
						    struct rxrpc_connection *conn,
						    const struct rxrpc_security *sec,
						    struct sockaddr_rxrpc *peer_srx,
						    struct sk_buff *skb)
{
	struct rxrpc_backlog *b = rx->backlog;
	struct rxrpc_call *call;
	unsigned short call_head, conn_head, peer_head;
	unsigned short call_tail, conn_tail, peer_tail;
	unsigned short call_count, conn_count;

	/* #calls >= #conns >= #peers must hold true. */
	call_head = smp_load_acquire(&b->call_backlog_head);
	call_tail = b->call_backlog_tail;
	call_count = CIRC_CNT(call_head, call_tail, RXRPC_BACKLOG_MAX);
	conn_head = smp_load_acquire(&b->conn_backlog_head);
	conn_tail = b->conn_backlog_tail;
	conn_count = CIRC_CNT(conn_head, conn_tail, RXRPC_BACKLOG_MAX);
	ASSERTCMP(conn_count, >=, call_count);
	peer_head = smp_load_acquire(&b->peer_backlog_head);
	peer_tail = b->peer_backlog_tail;
	ASSERTCMP(CIRC_CNT(peer_head, peer_tail, RXRPC_BACKLOG_MAX), >=,
		  conn_count);

	if (call_count == 0)
		return NULL;

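	/* If the input path didn't find an existing connection, consume a
	 * preallocated connection (and, if the sending peer isn't already
	 * known, a preallocated peer) from the tail of the rings; otherwise
	 * just pin the connection we were given.
	 */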
	if (!conn) {
		if (peer && !rxrpc_get_peer_maybe(peer, rxrpc_peer_get_service_conn))
			peer = NULL;
		if (!peer) {
			peer = b->peer_backlog[peer_tail];
			peer->srx = *peer_srx;
			b->peer_backlog[peer_tail] = NULL;
			smp_store_release(&b->peer_backlog_tail,
					  (peer_tail + 1) &
					  (RXRPC_BACKLOG_MAX - 1));

			rxrpc_new_incoming_peer(local, peer);
		}

		/* Now allocate and set up the connection */
		conn = b->conn_backlog[conn_tail];
		b->conn_backlog[conn_tail] = NULL;
		smp_store_release(&b->conn_backlog_tail,
				  (conn_tail + 1) & (RXRPC_BACKLOG_MAX - 1));
		conn->local = rxrpc_get_local(local, rxrpc_local_get_prealloc_conn);
		conn->peer = peer;
		rxrpc_see_connection(conn, rxrpc_conn_see_new_service_conn);
		rxrpc_new_incoming_connection(rx, conn, sec, skb);
	} else {
		rxrpc_get_connection(conn, rxrpc_conn_get_service_conn);
		atomic_inc(&conn->active);
	}

	/* And now we can allocate and set up a new call */
	call = b->call_backlog[call_tail];
	b->call_backlog[call_tail] = NULL;
	smp_store_release(&b->call_backlog_tail,
			  (call_tail + 1) & (RXRPC_BACKLOG_MAX - 1));

	rxrpc_see_call(call, rxrpc_call_see_accept);
	call->local = rxrpc_get_local(conn->local, rxrpc_local_get_call);
	call->conn = conn;
	call->security = conn->security;
	call->security_ix = conn->security_ix;
	call->peer = rxrpc_get_peer(conn->peer, rxrpc_peer_get_accept);
	call->dest_srx = peer->srx;
	call->cong_ssthresh = call->peer->cong_ssthresh;
	call->tx_last_sent = ktime_get_real();
	return call;
}

/*
 * Set up a new incoming call. Called from the I/O thread.
 *
 * If this is for a kernel service, when we allocate the call, it will have
 * three refs on it: (1) the kernel service, (2) the user_call_ID tree, (3) the
 * retainer ref obtained from the backlog buffer. Prealloc calls for userspace
 * services only have the ref from the backlog buffer.
 *
 * If we want to report an error, we mark the skb with the packet type and
 * abort code and return false.
 */
bool rxrpc_new_incoming_call(struct rxrpc_local *local,
			     struct rxrpc_peer *peer,
			     struct rxrpc_connection *conn,
			     struct sockaddr_rxrpc *peer_srx,
			     struct sk_buff *skb)
{
	const struct rxrpc_security *sec = NULL;
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	struct rxrpc_call *call = NULL;
	struct rxrpc_sock *rx;

	_enter("");

	/* Don't set up a call for anything other than a DATA packet. */
	if (sp->hdr.type != RXRPC_PACKET_TYPE_DATA)
		return rxrpc_protocol_error(skb, rxrpc_eproto_no_service_call);

	read_lock(&local->services_lock);

	/* Weed out packets to services we're not offering. Packets that would
	 * begin a call are explicitly rejected and the rest are just
	 * discarded.
	 */
	rx = local->service;
	if (!rx || (sp->hdr.serviceId != rx->srx.srx_service &&
		    sp->hdr.serviceId != rx->second_service)
	    ) {
		if (sp->hdr.type == RXRPC_PACKET_TYPE_DATA &&
		    sp->hdr.seq == 1)
			goto unsupported_service;
		goto discard;
	}

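	/* A new service connection will need a security class selected from
	 * the incoming packet; an existing connection already has one.
	 */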
	if (!conn) {
		sec = rxrpc_get_incoming_security(rx, skb);
		if (!sec)
			goto unsupported_security;
	}

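	/* incoming_lock serialises this against rxrpc_discard_prealloc()
	 * tearing down the backlog rings under us.
	 */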
	spin_lock(&rx->incoming_lock);
	if (rx->sk.sk_state == RXRPC_SERVER_LISTEN_DISABLED ||
	    rx->sk.sk_state == RXRPC_CLOSE) {
		rxrpc_direct_abort(skb, rxrpc_abort_shut_down,
				   RX_INVALID_OPERATION, -ESHUTDOWN);
		goto no_call;
	}

	call = rxrpc_alloc_incoming_call(rx, local, peer, conn, sec, peer_srx,
					 skb);
	if (!call) {
		skb->mark = RXRPC_SKB_MARK_REJECT_BUSY;
		goto no_call;
	}

	trace_rxrpc_receive(call, rxrpc_receive_incoming,
			    sp->hdr.serial, sp->hdr.seq);

	/* Make the call live. */
	rxrpc_incoming_call(rx, call, skb);
	conn = call->conn;

	if (rx->notify_new_call)
		rx->notify_new_call(&rx->sk, call, call->user_call_ID);

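	/* If the connection is new and as yet unsecured, get the connection
	 * event worker to issue a security challenge to the client.
	 */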
	spin_lock(&conn->state_lock);
	if (conn->state == RXRPC_CONN_SERVICE_UNSECURED) {
		conn->state = RXRPC_CONN_SERVICE_CHALLENGING;
		set_bit(RXRPC_CONN_EV_CHALLENGE, &call->conn->events);
		rxrpc_queue_conn(call->conn, rxrpc_conn_queue_challenge);
	}
	spin_unlock(&conn->state_lock);

	spin_unlock(&rx->incoming_lock);
	read_unlock(&local->services_lock);

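	/* Hook the call into the peer's error distribution list so that
	 * network and ICMP errors reported against the peer are propagated
	 * to it.
	 */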
	if (hlist_unhashed(&call->error_link)) {
		spin_lock(&call->peer->lock);
		hlist_add_head(&call->error_link, &call->peer->error_targets);
		spin_unlock(&call->peer->lock);
	}

	_leave(" = %p{%d}", call, call->debug_id);
	rxrpc_input_call_event(call, skb);
	rxrpc_put_call(call, rxrpc_call_put_input);
	return true;

unsupported_service:
	read_unlock(&local->services_lock);
	return rxrpc_direct_abort(skb, rxrpc_abort_service_not_offered,
				  RX_INVALID_OPERATION, -EOPNOTSUPP);
unsupported_security:
	read_unlock(&local->services_lock);
	return rxrpc_direct_abort(skb, rxrpc_abort_service_not_offered,
				  RX_INVALID_OPERATION, -EKEYREJECTED);
no_call:
	spin_unlock(&rx->incoming_lock);
	read_unlock(&local->services_lock);
	_leave(" = f [%u]", skb->mark);
	return false;
discard:
	read_unlock(&local->services_lock);
	return true;
}

/*
 * Charge up socket with preallocated calls, attaching user call IDs.
 */
int rxrpc_user_charge_accept(struct rxrpc_sock *rx, unsigned long user_call_ID)
{
	struct rxrpc_backlog *b = rx->backlog;

	if (rx->sk.sk_state == RXRPC_CLOSE)
		return -ESHUTDOWN;

	return rxrpc_service_prealloc_one(rx, b, NULL, NULL, user_call_ID,
					  GFP_KERNEL,
					  atomic_inc_return(&rxrpc_debug_id));
}

/**
 * rxrpc_kernel_charge_accept - Charge up socket with preallocated calls
 * @sock: The socket on which to preallocate
 * @notify_rx: Event notification function for the call
 * @user_attach_call: Func to attach call to user_call_ID
 * @user_call_ID: The tag to attach to the preallocated call
 * @gfp: The allocation conditions.
 * @debug_id: The tracing debug ID.
 *
 * Charge up the socket with preallocated calls, each with a user ID. A
 * function should be provided to effect the attachment from the user's side.
 * The user is given a ref to hold on the call.
 *
 * Note that the call may become connected before this function returns.
 */
int rxrpc_kernel_charge_accept(struct socket *sock,
			       rxrpc_notify_rx_t notify_rx,
			       rxrpc_user_attach_call_t user_attach_call,
			       unsigned long user_call_ID, gfp_t gfp,
			       unsigned int debug_id)
{
	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
	struct rxrpc_backlog *b = rx->backlog;

	if (sock->sk->sk_state == RXRPC_CLOSE)
		return -ESHUTDOWN;

	return rxrpc_service_prealloc_one(rx, b, notify_rx,
					  user_attach_call, user_call_ID,
					  gfp, debug_id);
}
EXPORT_SYMBOL(rxrpc_kernel_charge_accept);