Loading...
1// SPDX-License-Identifier: GPL-2.0-or-later
2/* RxRPC recvmsg() implementation
3 *
4 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
5 * Written by David Howells (dhowells@redhat.com)
6 */
7
8#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
9
10#include <linux/net.h>
11#include <linux/skbuff.h>
12#include <linux/export.h>
13#include <linux/sched/signal.h>
14
15#include <net/sock.h>
16#include <net/af_rxrpc.h>
17#include "ar-internal.h"
18
19/*
20 * Post a call for attention by the socket or kernel service. Further
21 * notifications are suppressed by putting recvmsg_link on a dummy queue.
22 */
23void rxrpc_notify_socket(struct rxrpc_call *call)
24{
25 struct rxrpc_sock *rx;
26 struct sock *sk;
27
28 _enter("%d", call->debug_id);
29
30 if (!list_empty(&call->recvmsg_link))
31 return;
32
33 rcu_read_lock();
34
35 rx = rcu_dereference(call->socket);
36 sk = &rx->sk;
37 if (rx && sk->sk_state < RXRPC_CLOSE) {
38 if (call->notify_rx) {
39 spin_lock(&call->notify_lock);
40 call->notify_rx(sk, call, call->user_call_ID);
41 spin_unlock(&call->notify_lock);
42 } else {
43 write_lock(&rx->recvmsg_lock);
44 if (list_empty(&call->recvmsg_link)) {
45 rxrpc_get_call(call, rxrpc_call_get_notify_socket);
46 list_add_tail(&call->recvmsg_link, &rx->recvmsg_q);
47 }
48 write_unlock(&rx->recvmsg_lock);
49
50 if (!sock_flag(sk, SOCK_DEAD)) {
51 _debug("call %ps", sk->sk_data_ready);
52 sk->sk_data_ready(sk);
53 }
54 }
55 }
56
57 rcu_read_unlock();
58 _leave("");
59}
60
61/*
62 * Pass a call terminating message to userspace.
63 */
64static int rxrpc_recvmsg_term(struct rxrpc_call *call, struct msghdr *msg)
65{
66 u32 tmp = 0;
67 int ret;
68
69 switch (call->completion) {
70 case RXRPC_CALL_SUCCEEDED:
71 ret = 0;
72 if (rxrpc_is_service_call(call))
73 ret = put_cmsg(msg, SOL_RXRPC, RXRPC_ACK, 0, &tmp);
74 break;
75 case RXRPC_CALL_REMOTELY_ABORTED:
76 tmp = call->abort_code;
77 ret = put_cmsg(msg, SOL_RXRPC, RXRPC_ABORT, 4, &tmp);
78 break;
79 case RXRPC_CALL_LOCALLY_ABORTED:
80 tmp = call->abort_code;
81 ret = put_cmsg(msg, SOL_RXRPC, RXRPC_ABORT, 4, &tmp);
82 break;
83 case RXRPC_CALL_NETWORK_ERROR:
84 tmp = -call->error;
85 ret = put_cmsg(msg, SOL_RXRPC, RXRPC_NET_ERROR, 4, &tmp);
86 break;
87 case RXRPC_CALL_LOCAL_ERROR:
88 tmp = -call->error;
89 ret = put_cmsg(msg, SOL_RXRPC, RXRPC_LOCAL_ERROR, 4, &tmp);
90 break;
91 default:
92 pr_err("Invalid terminal call state %u\n", call->completion);
93 BUG();
94 break;
95 }
96
97 trace_rxrpc_recvdata(call, rxrpc_recvmsg_terminal,
98 lower_32_bits(atomic64_read(&call->ackr_window)) - 1,
99 call->rx_pkt_offset, call->rx_pkt_len, ret);
100 return ret;
101}
102
103/*
104 * Discard a packet we've used up and advance the Rx window by one.
105 */
106static void rxrpc_rotate_rx_window(struct rxrpc_call *call)
107{
108 struct rxrpc_skb_priv *sp;
109 struct sk_buff *skb;
110 rxrpc_serial_t serial;
111 rxrpc_seq_t old_consumed = call->rx_consumed, tseq;
112 bool last;
113 int acked;
114
115 _enter("%d", call->debug_id);
116
117 skb = skb_dequeue(&call->recvmsg_queue);
118 rxrpc_see_skb(skb, rxrpc_skb_see_rotate);
119
120 sp = rxrpc_skb(skb);
121 tseq = sp->hdr.seq;
122 serial = sp->hdr.serial;
123 last = sp->hdr.flags & RXRPC_LAST_PACKET;
124
125 /* Barrier against rxrpc_input_data(). */
126 if (after(tseq, call->rx_consumed))
127 smp_store_release(&call->rx_consumed, tseq);
128
129 rxrpc_free_skb(skb, rxrpc_skb_put_rotate);
130
131 trace_rxrpc_receive(call, last ? rxrpc_receive_rotate_last : rxrpc_receive_rotate,
132 serial, call->rx_consumed);
133
134 if (last)
135 set_bit(RXRPC_CALL_RECVMSG_READ_ALL, &call->flags);
136
137 /* Check to see if there's an ACK that needs sending. */
138 acked = atomic_add_return(call->rx_consumed - old_consumed,
139 &call->ackr_nr_consumed);
140 if (acked > 2 &&
141 !test_and_set_bit(RXRPC_CALL_RX_IS_IDLE, &call->flags))
142 rxrpc_poke_call(call, rxrpc_call_poke_idle);
143}
144
145/*
146 * Decrypt and verify a DATA packet.
147 */
148static int rxrpc_verify_data(struct rxrpc_call *call, struct sk_buff *skb)
149{
150 struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
151
152 if (sp->flags & RXRPC_RX_VERIFIED)
153 return 0;
154 return call->security->verify_packet(call, skb);
155}
156
157/*
158 * Deliver messages to a call. This keeps processing packets until the buffer
159 * is filled and we find either more DATA (returns 0) or the end of the DATA
160 * (returns 1). If more packets are required, it returns -EAGAIN and if the
161 * call has failed it returns -EIO.
162 */
163static int rxrpc_recvmsg_data(struct socket *sock, struct rxrpc_call *call,
164 struct msghdr *msg, struct iov_iter *iter,
165 size_t len, int flags, size_t *_offset)
166{
167 struct rxrpc_skb_priv *sp;
168 struct sk_buff *skb;
169 rxrpc_seq_t seq = 0;
170 size_t remain;
171 unsigned int rx_pkt_offset, rx_pkt_len;
172 int copy, ret = -EAGAIN, ret2;
173
174 rx_pkt_offset = call->rx_pkt_offset;
175 rx_pkt_len = call->rx_pkt_len;
176
177 if (rxrpc_call_has_failed(call)) {
178 seq = lower_32_bits(atomic64_read(&call->ackr_window)) - 1;
179 ret = -EIO;
180 goto done;
181 }
182
183 if (test_bit(RXRPC_CALL_RECVMSG_READ_ALL, &call->flags)) {
184 seq = lower_32_bits(atomic64_read(&call->ackr_window)) - 1;
185 ret = 1;
186 goto done;
187 }
188
189 /* No one else can be removing stuff from the queue, so we shouldn't
190 * need the Rx lock to walk it.
191 */
192 skb = skb_peek(&call->recvmsg_queue);
193 while (skb) {
194 rxrpc_see_skb(skb, rxrpc_skb_see_recvmsg);
195 sp = rxrpc_skb(skb);
196 seq = sp->hdr.seq;
197
198 if (!(flags & MSG_PEEK))
199 trace_rxrpc_receive(call, rxrpc_receive_front,
200 sp->hdr.serial, seq);
201
202 if (msg)
203 sock_recv_timestamp(msg, sock->sk, skb);
204
205 if (rx_pkt_offset == 0) {
206 ret2 = rxrpc_verify_data(call, skb);
207 trace_rxrpc_recvdata(call, rxrpc_recvmsg_next, seq,
208 sp->offset, sp->len, ret2);
209 if (ret2 < 0) {
210 kdebug("verify = %d", ret2);
211 ret = ret2;
212 goto out;
213 }
214 rx_pkt_offset = sp->offset;
215 rx_pkt_len = sp->len;
216 } else {
217 trace_rxrpc_recvdata(call, rxrpc_recvmsg_cont, seq,
218 rx_pkt_offset, rx_pkt_len, 0);
219 }
220
221 /* We have to handle short, empty and used-up DATA packets. */
222 remain = len - *_offset;
223 copy = rx_pkt_len;
224 if (copy > remain)
225 copy = remain;
226 if (copy > 0) {
227 ret2 = skb_copy_datagram_iter(skb, rx_pkt_offset, iter,
228 copy);
229 if (ret2 < 0) {
230 ret = ret2;
231 goto out;
232 }
233
234 /* handle piecemeal consumption of data packets */
235 rx_pkt_offset += copy;
236 rx_pkt_len -= copy;
237 *_offset += copy;
238 }
239
240 if (rx_pkt_len > 0) {
241 trace_rxrpc_recvdata(call, rxrpc_recvmsg_full, seq,
242 rx_pkt_offset, rx_pkt_len, 0);
243 ASSERTCMP(*_offset, ==, len);
244 ret = 0;
245 break;
246 }
247
248 /* The whole packet has been transferred. */
249 if (sp->hdr.flags & RXRPC_LAST_PACKET)
250 ret = 1;
251 rx_pkt_offset = 0;
252 rx_pkt_len = 0;
253
254 skb = skb_peek_next(skb, &call->recvmsg_queue);
255
256 if (!(flags & MSG_PEEK))
257 rxrpc_rotate_rx_window(call);
258 }
259
260out:
261 if (!(flags & MSG_PEEK)) {
262 call->rx_pkt_offset = rx_pkt_offset;
263 call->rx_pkt_len = rx_pkt_len;
264 }
265done:
266 trace_rxrpc_recvdata(call, rxrpc_recvmsg_data_return, seq,
267 rx_pkt_offset, rx_pkt_len, ret);
268 if (ret == -EAGAIN)
269 set_bit(RXRPC_CALL_RX_IS_IDLE, &call->flags);
270 return ret;
271}
272
273/*
274 * Receive a message from an RxRPC socket
275 * - we need to be careful about two or more threads calling recvmsg
276 * simultaneously
277 */
278int rxrpc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
279 int flags)
280{
281 struct rxrpc_call *call;
282 struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
283 struct list_head *l;
284 unsigned int call_debug_id = 0;
285 size_t copied = 0;
286 long timeo;
287 int ret;
288
289 DEFINE_WAIT(wait);
290
291 trace_rxrpc_recvmsg(0, rxrpc_recvmsg_enter, 0);
292
293 if (flags & (MSG_OOB | MSG_TRUNC))
294 return -EOPNOTSUPP;
295
296 timeo = sock_rcvtimeo(&rx->sk, flags & MSG_DONTWAIT);
297
298try_again:
299 lock_sock(&rx->sk);
300
301 /* Return immediately if a client socket has no outstanding calls */
302 if (RB_EMPTY_ROOT(&rx->calls) &&
303 list_empty(&rx->recvmsg_q) &&
304 rx->sk.sk_state != RXRPC_SERVER_LISTENING) {
305 release_sock(&rx->sk);
306 return -EAGAIN;
307 }
308
309 if (list_empty(&rx->recvmsg_q)) {
310 ret = -EWOULDBLOCK;
311 if (timeo == 0) {
312 call = NULL;
313 goto error_no_call;
314 }
315
316 release_sock(&rx->sk);
317
318 /* Wait for something to happen */
319 prepare_to_wait_exclusive(sk_sleep(&rx->sk), &wait,
320 TASK_INTERRUPTIBLE);
321 ret = sock_error(&rx->sk);
322 if (ret)
323 goto wait_error;
324
325 if (list_empty(&rx->recvmsg_q)) {
326 if (signal_pending(current))
327 goto wait_interrupted;
328 trace_rxrpc_recvmsg(0, rxrpc_recvmsg_wait, 0);
329 timeo = schedule_timeout(timeo);
330 }
331 finish_wait(sk_sleep(&rx->sk), &wait);
332 goto try_again;
333 }
334
335 /* Find the next call and dequeue it if we're not just peeking. If we
336 * do dequeue it, that comes with a ref that we will need to release.
337 */
338 write_lock(&rx->recvmsg_lock);
339 l = rx->recvmsg_q.next;
340 call = list_entry(l, struct rxrpc_call, recvmsg_link);
341 if (!(flags & MSG_PEEK))
342 list_del_init(&call->recvmsg_link);
343 else
344 rxrpc_get_call(call, rxrpc_call_get_recvmsg);
345 write_unlock(&rx->recvmsg_lock);
346
347 call_debug_id = call->debug_id;
348 trace_rxrpc_recvmsg(call_debug_id, rxrpc_recvmsg_dequeue, 0);
349
350 /* We're going to drop the socket lock, so we need to lock the call
351 * against interference by sendmsg.
352 */
353 if (!mutex_trylock(&call->user_mutex)) {
354 ret = -EWOULDBLOCK;
355 if (flags & MSG_DONTWAIT)
356 goto error_requeue_call;
357 ret = -ERESTARTSYS;
358 if (mutex_lock_interruptible(&call->user_mutex) < 0)
359 goto error_requeue_call;
360 }
361
362 release_sock(&rx->sk);
363
364 if (test_bit(RXRPC_CALL_RELEASED, &call->flags))
365 BUG();
366
367 if (test_bit(RXRPC_CALL_HAS_USERID, &call->flags)) {
368 if (flags & MSG_CMSG_COMPAT) {
369 unsigned int id32 = call->user_call_ID;
370
371 ret = put_cmsg(msg, SOL_RXRPC, RXRPC_USER_CALL_ID,
372 sizeof(unsigned int), &id32);
373 } else {
374 unsigned long idl = call->user_call_ID;
375
376 ret = put_cmsg(msg, SOL_RXRPC, RXRPC_USER_CALL_ID,
377 sizeof(unsigned long), &idl);
378 }
379 if (ret < 0)
380 goto error_unlock_call;
381 }
382
383 if (msg->msg_name && call->peer) {
384 size_t len = sizeof(call->dest_srx);
385
386 memcpy(msg->msg_name, &call->dest_srx, len);
387 msg->msg_namelen = len;
388 }
389
390 ret = rxrpc_recvmsg_data(sock, call, msg, &msg->msg_iter, len,
391 flags, &copied);
392 if (ret == -EAGAIN)
393 ret = 0;
394 if (ret == -EIO)
395 goto call_failed;
396 if (ret < 0)
397 goto error_unlock_call;
398
399 if (rxrpc_call_is_complete(call) &&
400 skb_queue_empty(&call->recvmsg_queue))
401 goto call_complete;
402 if (rxrpc_call_has_failed(call))
403 goto call_failed;
404
405 rxrpc_notify_socket(call);
406 goto not_yet_complete;
407
408call_failed:
409 rxrpc_purge_queue(&call->recvmsg_queue);
410call_complete:
411 ret = rxrpc_recvmsg_term(call, msg);
412 if (ret < 0)
413 goto error_unlock_call;
414 if (!(flags & MSG_PEEK))
415 rxrpc_release_call(rx, call);
416 msg->msg_flags |= MSG_EOR;
417 ret = 1;
418
419not_yet_complete:
420 if (ret == 0)
421 msg->msg_flags |= MSG_MORE;
422 else
423 msg->msg_flags &= ~MSG_MORE;
424 ret = copied;
425
426error_unlock_call:
427 mutex_unlock(&call->user_mutex);
428 rxrpc_put_call(call, rxrpc_call_put_recvmsg);
429 trace_rxrpc_recvmsg(call_debug_id, rxrpc_recvmsg_return, ret);
430 return ret;
431
432error_requeue_call:
433 if (!(flags & MSG_PEEK)) {
434 write_lock(&rx->recvmsg_lock);
435 list_add(&call->recvmsg_link, &rx->recvmsg_q);
436 write_unlock(&rx->recvmsg_lock);
437 trace_rxrpc_recvmsg(call_debug_id, rxrpc_recvmsg_requeue, 0);
438 } else {
439 rxrpc_put_call(call, rxrpc_call_put_recvmsg);
440 }
441error_no_call:
442 release_sock(&rx->sk);
443error_trace:
444 trace_rxrpc_recvmsg(call_debug_id, rxrpc_recvmsg_return, ret);
445 return ret;
446
447wait_interrupted:
448 ret = sock_intr_errno(timeo);
449wait_error:
450 finish_wait(sk_sleep(&rx->sk), &wait);
451 call = NULL;
452 goto error_trace;
453}
454
455/**
456 * rxrpc_kernel_recv_data - Allow a kernel service to receive data/info
457 * @sock: The socket that the call exists on
458 * @call: The call to send data through
459 * @iter: The buffer to receive into
460 * @_len: The amount of data we want to receive (decreased on return)
461 * @want_more: True if more data is expected to be read
462 * @_abort: Where the abort code is stored if -ECONNABORTED is returned
463 * @_service: Where to store the actual service ID (may be upgraded)
464 *
465 * Allow a kernel service to receive data and pick up information about the
466 * state of a call. Returns 0 if got what was asked for and there's more
467 * available, 1 if we got what was asked for and we're at the end of the data
468 * and -EAGAIN if we need more data.
469 *
470 * Note that we may return -EAGAIN to drain empty packets at the end of the
471 * data, even if we've already copied over the requested data.
472 *
473 * *_abort should also be initialised to 0.
474 */
475int rxrpc_kernel_recv_data(struct socket *sock, struct rxrpc_call *call,
476 struct iov_iter *iter, size_t *_len,
477 bool want_more, u32 *_abort, u16 *_service)
478{
479 size_t offset = 0;
480 int ret;
481
482 _enter("{%d},%zu,%d", call->debug_id, *_len, want_more);
483
484 mutex_lock(&call->user_mutex);
485
486 ret = rxrpc_recvmsg_data(sock, call, NULL, iter, *_len, 0, &offset);
487 *_len -= offset;
488 if (ret == -EIO)
489 goto call_failed;
490 if (ret < 0)
491 goto out;
492
493 /* We can only reach here with a partially full buffer if we have
494 * reached the end of the data. We must otherwise have a full buffer
495 * or have been given -EAGAIN.
496 */
497 if (ret == 1) {
498 if (iov_iter_count(iter) > 0)
499 goto short_data;
500 if (!want_more)
501 goto read_phase_complete;
502 ret = 0;
503 goto out;
504 }
505
506 if (!want_more)
507 goto excess_data;
508 goto out;
509
510read_phase_complete:
511 ret = 1;
512out:
513 if (_service)
514 *_service = call->dest_srx.srx_service;
515 mutex_unlock(&call->user_mutex);
516 _leave(" = %d [%zu,%d]", ret, iov_iter_count(iter), *_abort);
517 return ret;
518
519short_data:
520 trace_rxrpc_abort(call->debug_id, rxrpc_recvmsg_short_data,
521 call->cid, call->call_id, call->rx_consumed,
522 0, -EBADMSG);
523 ret = -EBADMSG;
524 goto out;
525excess_data:
526 trace_rxrpc_abort(call->debug_id, rxrpc_recvmsg_excess_data,
527 call->cid, call->call_id, call->rx_consumed,
528 0, -EMSGSIZE);
529 ret = -EMSGSIZE;
530 goto out;
531call_failed:
532 *_abort = call->abort_code;
533 ret = call->error;
534 if (call->completion == RXRPC_CALL_SUCCEEDED) {
535 ret = 1;
536 if (iov_iter_count(iter) > 0)
537 ret = -ECONNRESET;
538 }
539 goto out;
540}
541EXPORT_SYMBOL(rxrpc_kernel_recv_data);
1// SPDX-License-Identifier: GPL-2.0-or-later
2/* RxRPC recvmsg() implementation
3 *
4 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
5 * Written by David Howells (dhowells@redhat.com)
6 */
7
8#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
9
10#include <linux/net.h>
11#include <linux/skbuff.h>
12#include <linux/export.h>
13#include <linux/sched/signal.h>
14
15#include <net/sock.h>
16#include <net/af_rxrpc.h>
17#include "ar-internal.h"
18
19/*
20 * Post a call for attention by the socket or kernel service. Further
21 * notifications are suppressed by putting recvmsg_link on a dummy queue.
22 */
23void rxrpc_notify_socket(struct rxrpc_call *call)
24{
25 struct rxrpc_sock *rx;
26 struct sock *sk;
27
28 _enter("%d", call->debug_id);
29
30 if (!list_empty(&call->recvmsg_link))
31 return;
32
33 rcu_read_lock();
34
35 rx = rcu_dereference(call->socket);
36 sk = &rx->sk;
37 if (rx && sk->sk_state < RXRPC_CLOSE) {
38 if (call->notify_rx) {
39 spin_lock_bh(&call->notify_lock);
40 call->notify_rx(sk, call, call->user_call_ID);
41 spin_unlock_bh(&call->notify_lock);
42 } else {
43 write_lock_bh(&rx->recvmsg_lock);
44 if (list_empty(&call->recvmsg_link)) {
45 rxrpc_get_call(call, rxrpc_call_got);
46 list_add_tail(&call->recvmsg_link, &rx->recvmsg_q);
47 }
48 write_unlock_bh(&rx->recvmsg_lock);
49
50 if (!sock_flag(sk, SOCK_DEAD)) {
51 _debug("call %ps", sk->sk_data_ready);
52 sk->sk_data_ready(sk);
53 }
54 }
55 }
56
57 rcu_read_unlock();
58 _leave("");
59}
60
61/*
62 * Pass a call terminating message to userspace.
63 */
64static int rxrpc_recvmsg_term(struct rxrpc_call *call, struct msghdr *msg)
65{
66 u32 tmp = 0;
67 int ret;
68
69 switch (call->completion) {
70 case RXRPC_CALL_SUCCEEDED:
71 ret = 0;
72 if (rxrpc_is_service_call(call))
73 ret = put_cmsg(msg, SOL_RXRPC, RXRPC_ACK, 0, &tmp);
74 break;
75 case RXRPC_CALL_REMOTELY_ABORTED:
76 tmp = call->abort_code;
77 ret = put_cmsg(msg, SOL_RXRPC, RXRPC_ABORT, 4, &tmp);
78 break;
79 case RXRPC_CALL_LOCALLY_ABORTED:
80 tmp = call->abort_code;
81 ret = put_cmsg(msg, SOL_RXRPC, RXRPC_ABORT, 4, &tmp);
82 break;
83 case RXRPC_CALL_NETWORK_ERROR:
84 tmp = -call->error;
85 ret = put_cmsg(msg, SOL_RXRPC, RXRPC_NET_ERROR, 4, &tmp);
86 break;
87 case RXRPC_CALL_LOCAL_ERROR:
88 tmp = -call->error;
89 ret = put_cmsg(msg, SOL_RXRPC, RXRPC_LOCAL_ERROR, 4, &tmp);
90 break;
91 default:
92 pr_err("Invalid terminal call state %u\n", call->state);
93 BUG();
94 break;
95 }
96
97 trace_rxrpc_recvmsg(call, rxrpc_recvmsg_terminal, call->rx_hard_ack,
98 call->rx_pkt_offset, call->rx_pkt_len, ret);
99 return ret;
100}
101
102/*
103 * Pass back notification of a new call. The call is added to the
104 * to-be-accepted list. This means that the next call to be accepted might not
105 * be the last call seen awaiting acceptance, but unless we leave this on the
106 * front of the queue and block all other messages until someone gives us a
107 * user_ID for it, there's not a lot we can do.
108 */
109static int rxrpc_recvmsg_new_call(struct rxrpc_sock *rx,
110 struct rxrpc_call *call,
111 struct msghdr *msg, int flags)
112{
113 int tmp = 0, ret;
114
115 ret = put_cmsg(msg, SOL_RXRPC, RXRPC_NEW_CALL, 0, &tmp);
116
117 if (ret == 0 && !(flags & MSG_PEEK)) {
118 _debug("to be accepted");
119 write_lock_bh(&rx->recvmsg_lock);
120 list_del_init(&call->recvmsg_link);
121 write_unlock_bh(&rx->recvmsg_lock);
122
123 rxrpc_get_call(call, rxrpc_call_got);
124 write_lock(&rx->call_lock);
125 list_add_tail(&call->accept_link, &rx->to_be_accepted);
126 write_unlock(&rx->call_lock);
127 }
128
129 trace_rxrpc_recvmsg(call, rxrpc_recvmsg_to_be_accepted, 1, 0, 0, ret);
130 return ret;
131}
132
133/*
134 * End the packet reception phase.
135 */
136static void rxrpc_end_rx_phase(struct rxrpc_call *call, rxrpc_serial_t serial)
137{
138 _enter("%d,%s", call->debug_id, rxrpc_call_states[call->state]);
139
140 trace_rxrpc_receive(call, rxrpc_receive_end, 0, call->rx_top);
141 ASSERTCMP(call->rx_hard_ack, ==, call->rx_top);
142
143 if (call->state == RXRPC_CALL_CLIENT_RECV_REPLY) {
144 rxrpc_propose_ACK(call, RXRPC_ACK_IDLE, serial, false, true,
145 rxrpc_propose_ack_terminal_ack);
146 //rxrpc_send_ack_packet(call, false, NULL);
147 }
148
149 write_lock_bh(&call->state_lock);
150
151 switch (call->state) {
152 case RXRPC_CALL_CLIENT_RECV_REPLY:
153 __rxrpc_call_completed(call);
154 write_unlock_bh(&call->state_lock);
155 break;
156
157 case RXRPC_CALL_SERVER_RECV_REQUEST:
158 call->tx_phase = true;
159 call->state = RXRPC_CALL_SERVER_ACK_REQUEST;
160 call->expect_req_by = jiffies + MAX_JIFFY_OFFSET;
161 write_unlock_bh(&call->state_lock);
162 rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, serial, false, true,
163 rxrpc_propose_ack_processing_op);
164 break;
165 default:
166 write_unlock_bh(&call->state_lock);
167 break;
168 }
169}
170
171/*
172 * Discard a packet we've used up and advance the Rx window by one.
173 */
174static void rxrpc_rotate_rx_window(struct rxrpc_call *call)
175{
176 struct rxrpc_skb_priv *sp;
177 struct sk_buff *skb;
178 rxrpc_serial_t serial;
179 rxrpc_seq_t hard_ack, top;
180 bool last = false;
181 u8 subpacket;
182 int ix;
183
184 _enter("%d", call->debug_id);
185
186 hard_ack = call->rx_hard_ack;
187 top = smp_load_acquire(&call->rx_top);
188 ASSERT(before(hard_ack, top));
189
190 hard_ack++;
191 ix = hard_ack & RXRPC_RXTX_BUFF_MASK;
192 skb = call->rxtx_buffer[ix];
193 rxrpc_see_skb(skb, rxrpc_skb_rotated);
194 sp = rxrpc_skb(skb);
195
196 subpacket = call->rxtx_annotations[ix] & RXRPC_RX_ANNO_SUBPACKET;
197 serial = sp->hdr.serial + subpacket;
198
199 if (subpacket == sp->nr_subpackets - 1 &&
200 sp->rx_flags & RXRPC_SKB_INCL_LAST)
201 last = true;
202
203 call->rxtx_buffer[ix] = NULL;
204 call->rxtx_annotations[ix] = 0;
205 /* Barrier against rxrpc_input_data(). */
206 smp_store_release(&call->rx_hard_ack, hard_ack);
207
208 rxrpc_free_skb(skb, rxrpc_skb_freed);
209
210 trace_rxrpc_receive(call, rxrpc_receive_rotate, serial, hard_ack);
211 if (last) {
212 rxrpc_end_rx_phase(call, serial);
213 } else {
214 /* Check to see if there's an ACK that needs sending. */
215 if (after_eq(hard_ack, call->ackr_consumed + 2) ||
216 after_eq(top, call->ackr_seen + 2) ||
217 (hard_ack == top && after(hard_ack, call->ackr_consumed)))
218 rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, serial,
219 true, true,
220 rxrpc_propose_ack_rotate_rx);
221 if (call->ackr_reason && call->ackr_reason != RXRPC_ACK_DELAY)
222 rxrpc_send_ack_packet(call, false, NULL);
223 }
224}
225
226/*
227 * Decrypt and verify a (sub)packet. The packet's length may be changed due to
228 * padding, but if this is the case, the packet length will be resident in the
229 * socket buffer. Note that we can't modify the master skb info as the skb may
230 * be the home to multiple subpackets.
231 */
232static int rxrpc_verify_packet(struct rxrpc_call *call, struct sk_buff *skb,
233 u8 annotation,
234 unsigned int offset, unsigned int len)
235{
236 struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
237 rxrpc_seq_t seq = sp->hdr.seq;
238 u16 cksum = sp->hdr.cksum;
239 u8 subpacket = annotation & RXRPC_RX_ANNO_SUBPACKET;
240
241 _enter("");
242
243 /* For all but the head jumbo subpacket, the security checksum is in a
244 * jumbo header immediately prior to the data.
245 */
246 if (subpacket > 0) {
247 __be16 tmp;
248 if (skb_copy_bits(skb, offset - 2, &tmp, 2) < 0)
249 BUG();
250 cksum = ntohs(tmp);
251 seq += subpacket;
252 }
253
254 return call->security->verify_packet(call, skb, offset, len,
255 seq, cksum);
256}
257
258/*
259 * Locate the data within a packet. This is complicated by:
260 *
261 * (1) An skb may contain a jumbo packet - so we have to find the appropriate
262 * subpacket.
263 *
264 * (2) The (sub)packets may be encrypted and, if so, the encrypted portion
265 * contains an extra header which includes the true length of the data,
266 * excluding any encrypted padding.
267 */
268static int rxrpc_locate_data(struct rxrpc_call *call, struct sk_buff *skb,
269 u8 *_annotation,
270 unsigned int *_offset, unsigned int *_len,
271 bool *_last)
272{
273 struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
274 unsigned int offset = sizeof(struct rxrpc_wire_header);
275 unsigned int len;
276 bool last = false;
277 int ret;
278 u8 annotation = *_annotation;
279 u8 subpacket = annotation & RXRPC_RX_ANNO_SUBPACKET;
280
281 /* Locate the subpacket */
282 offset += subpacket * RXRPC_JUMBO_SUBPKTLEN;
283 len = skb->len - offset;
284 if (subpacket < sp->nr_subpackets - 1)
285 len = RXRPC_JUMBO_DATALEN;
286 else if (sp->rx_flags & RXRPC_SKB_INCL_LAST)
287 last = true;
288
289 if (!(annotation & RXRPC_RX_ANNO_VERIFIED)) {
290 ret = rxrpc_verify_packet(call, skb, annotation, offset, len);
291 if (ret < 0)
292 return ret;
293 *_annotation |= RXRPC_RX_ANNO_VERIFIED;
294 }
295
296 *_offset = offset;
297 *_len = len;
298 *_last = last;
299 call->security->locate_data(call, skb, _offset, _len);
300 return 0;
301}
302
303/*
304 * Deliver messages to a call. This keeps processing packets until the buffer
305 * is filled and we find either more DATA (returns 0) or the end of the DATA
306 * (returns 1). If more packets are required, it returns -EAGAIN.
307 */
308static int rxrpc_recvmsg_data(struct socket *sock, struct rxrpc_call *call,
309 struct msghdr *msg, struct iov_iter *iter,
310 size_t len, int flags, size_t *_offset)
311{
312 struct rxrpc_skb_priv *sp;
313 struct sk_buff *skb;
314 rxrpc_serial_t serial;
315 rxrpc_seq_t hard_ack, top, seq;
316 size_t remain;
317 bool rx_pkt_last;
318 unsigned int rx_pkt_offset, rx_pkt_len;
319 int ix, copy, ret = -EAGAIN, ret2;
320
321 if (test_and_clear_bit(RXRPC_CALL_RX_UNDERRUN, &call->flags) &&
322 call->ackr_reason)
323 rxrpc_send_ack_packet(call, false, NULL);
324
325 rx_pkt_offset = call->rx_pkt_offset;
326 rx_pkt_len = call->rx_pkt_len;
327 rx_pkt_last = call->rx_pkt_last;
328
329 if (call->state >= RXRPC_CALL_SERVER_ACK_REQUEST) {
330 seq = call->rx_hard_ack;
331 ret = 1;
332 goto done;
333 }
334
335 /* Barriers against rxrpc_input_data(). */
336 hard_ack = call->rx_hard_ack;
337 seq = hard_ack + 1;
338
339 while (top = smp_load_acquire(&call->rx_top),
340 before_eq(seq, top)
341 ) {
342 ix = seq & RXRPC_RXTX_BUFF_MASK;
343 skb = call->rxtx_buffer[ix];
344 if (!skb) {
345 trace_rxrpc_recvmsg(call, rxrpc_recvmsg_hole, seq,
346 rx_pkt_offset, rx_pkt_len, 0);
347 break;
348 }
349 smp_rmb();
350 rxrpc_see_skb(skb, rxrpc_skb_seen);
351 sp = rxrpc_skb(skb);
352
353 if (!(flags & MSG_PEEK)) {
354 serial = sp->hdr.serial;
355 serial += call->rxtx_annotations[ix] & RXRPC_RX_ANNO_SUBPACKET;
356 trace_rxrpc_receive(call, rxrpc_receive_front,
357 serial, seq);
358 }
359
360 if (msg)
361 sock_recv_timestamp(msg, sock->sk, skb);
362
363 if (rx_pkt_offset == 0) {
364 ret2 = rxrpc_locate_data(call, skb,
365 &call->rxtx_annotations[ix],
366 &rx_pkt_offset, &rx_pkt_len,
367 &rx_pkt_last);
368 trace_rxrpc_recvmsg(call, rxrpc_recvmsg_next, seq,
369 rx_pkt_offset, rx_pkt_len, ret2);
370 if (ret2 < 0) {
371 ret = ret2;
372 goto out;
373 }
374 } else {
375 trace_rxrpc_recvmsg(call, rxrpc_recvmsg_cont, seq,
376 rx_pkt_offset, rx_pkt_len, 0);
377 }
378
379 /* We have to handle short, empty and used-up DATA packets. */
380 remain = len - *_offset;
381 copy = rx_pkt_len;
382 if (copy > remain)
383 copy = remain;
384 if (copy > 0) {
385 ret2 = skb_copy_datagram_iter(skb, rx_pkt_offset, iter,
386 copy);
387 if (ret2 < 0) {
388 ret = ret2;
389 goto out;
390 }
391
392 /* handle piecemeal consumption of data packets */
393 rx_pkt_offset += copy;
394 rx_pkt_len -= copy;
395 *_offset += copy;
396 }
397
398 if (rx_pkt_len > 0) {
399 trace_rxrpc_recvmsg(call, rxrpc_recvmsg_full, seq,
400 rx_pkt_offset, rx_pkt_len, 0);
401 ASSERTCMP(*_offset, ==, len);
402 ret = 0;
403 break;
404 }
405
406 /* The whole packet has been transferred. */
407 if (!(flags & MSG_PEEK))
408 rxrpc_rotate_rx_window(call);
409 rx_pkt_offset = 0;
410 rx_pkt_len = 0;
411
412 if (rx_pkt_last) {
413 ASSERTCMP(seq, ==, READ_ONCE(call->rx_top));
414 ret = 1;
415 goto out;
416 }
417
418 seq++;
419 }
420
421out:
422 if (!(flags & MSG_PEEK)) {
423 call->rx_pkt_offset = rx_pkt_offset;
424 call->rx_pkt_len = rx_pkt_len;
425 call->rx_pkt_last = rx_pkt_last;
426 }
427done:
428 trace_rxrpc_recvmsg(call, rxrpc_recvmsg_data_return, seq,
429 rx_pkt_offset, rx_pkt_len, ret);
430 if (ret == -EAGAIN)
431 set_bit(RXRPC_CALL_RX_UNDERRUN, &call->flags);
432 return ret;
433}
434
435/*
436 * Receive a message from an RxRPC socket
437 * - we need to be careful about two or more threads calling recvmsg
438 * simultaneously
439 */
440int rxrpc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
441 int flags)
442{
443 struct rxrpc_call *call;
444 struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
445 struct list_head *l;
446 size_t copied = 0;
447 long timeo;
448 int ret;
449
450 DEFINE_WAIT(wait);
451
452 trace_rxrpc_recvmsg(NULL, rxrpc_recvmsg_enter, 0, 0, 0, 0);
453
454 if (flags & (MSG_OOB | MSG_TRUNC))
455 return -EOPNOTSUPP;
456
457 timeo = sock_rcvtimeo(&rx->sk, flags & MSG_DONTWAIT);
458
459try_again:
460 lock_sock(&rx->sk);
461
462 /* Return immediately if a client socket has no outstanding calls */
463 if (RB_EMPTY_ROOT(&rx->calls) &&
464 list_empty(&rx->recvmsg_q) &&
465 rx->sk.sk_state != RXRPC_SERVER_LISTENING) {
466 release_sock(&rx->sk);
467 return -ENODATA;
468 }
469
470 if (list_empty(&rx->recvmsg_q)) {
471 ret = -EWOULDBLOCK;
472 if (timeo == 0) {
473 call = NULL;
474 goto error_no_call;
475 }
476
477 release_sock(&rx->sk);
478
479 /* Wait for something to happen */
480 prepare_to_wait_exclusive(sk_sleep(&rx->sk), &wait,
481 TASK_INTERRUPTIBLE);
482 ret = sock_error(&rx->sk);
483 if (ret)
484 goto wait_error;
485
486 if (list_empty(&rx->recvmsg_q)) {
487 if (signal_pending(current))
488 goto wait_interrupted;
489 trace_rxrpc_recvmsg(NULL, rxrpc_recvmsg_wait,
490 0, 0, 0, 0);
491 timeo = schedule_timeout(timeo);
492 }
493 finish_wait(sk_sleep(&rx->sk), &wait);
494 goto try_again;
495 }
496
497 /* Find the next call and dequeue it if we're not just peeking. If we
498 * do dequeue it, that comes with a ref that we will need to release.
499 */
500 write_lock_bh(&rx->recvmsg_lock);
501 l = rx->recvmsg_q.next;
502 call = list_entry(l, struct rxrpc_call, recvmsg_link);
503 if (!(flags & MSG_PEEK))
504 list_del_init(&call->recvmsg_link);
505 else
506 rxrpc_get_call(call, rxrpc_call_got);
507 write_unlock_bh(&rx->recvmsg_lock);
508
509 trace_rxrpc_recvmsg(call, rxrpc_recvmsg_dequeue, 0, 0, 0, 0);
510
511 /* We're going to drop the socket lock, so we need to lock the call
512 * against interference by sendmsg.
513 */
514 if (!mutex_trylock(&call->user_mutex)) {
515 ret = -EWOULDBLOCK;
516 if (flags & MSG_DONTWAIT)
517 goto error_requeue_call;
518 ret = -ERESTARTSYS;
519 if (mutex_lock_interruptible(&call->user_mutex) < 0)
520 goto error_requeue_call;
521 }
522
523 release_sock(&rx->sk);
524
525 if (test_bit(RXRPC_CALL_RELEASED, &call->flags))
526 BUG();
527
528 if (test_bit(RXRPC_CALL_HAS_USERID, &call->flags)) {
529 if (flags & MSG_CMSG_COMPAT) {
530 unsigned int id32 = call->user_call_ID;
531
532 ret = put_cmsg(msg, SOL_RXRPC, RXRPC_USER_CALL_ID,
533 sizeof(unsigned int), &id32);
534 } else {
535 unsigned long idl = call->user_call_ID;
536
537 ret = put_cmsg(msg, SOL_RXRPC, RXRPC_USER_CALL_ID,
538 sizeof(unsigned long), &idl);
539 }
540 if (ret < 0)
541 goto error_unlock_call;
542 }
543
544 if (msg->msg_name) {
545 struct sockaddr_rxrpc *srx = msg->msg_name;
546 size_t len = sizeof(call->peer->srx);
547
548 memcpy(msg->msg_name, &call->peer->srx, len);
549 srx->srx_service = call->service_id;
550 msg->msg_namelen = len;
551 }
552
553 switch (READ_ONCE(call->state)) {
554 case RXRPC_CALL_SERVER_ACCEPTING:
555 ret = rxrpc_recvmsg_new_call(rx, call, msg, flags);
556 break;
557 case RXRPC_CALL_CLIENT_RECV_REPLY:
558 case RXRPC_CALL_SERVER_RECV_REQUEST:
559 case RXRPC_CALL_SERVER_ACK_REQUEST:
560 ret = rxrpc_recvmsg_data(sock, call, msg, &msg->msg_iter, len,
561 flags, &copied);
562 if (ret == -EAGAIN)
563 ret = 0;
564
565 if (after(call->rx_top, call->rx_hard_ack) &&
566 call->rxtx_buffer[(call->rx_hard_ack + 1) & RXRPC_RXTX_BUFF_MASK])
567 rxrpc_notify_socket(call);
568 break;
569 default:
570 ret = 0;
571 break;
572 }
573
574 if (ret < 0)
575 goto error_unlock_call;
576
577 if (call->state == RXRPC_CALL_COMPLETE) {
578 ret = rxrpc_recvmsg_term(call, msg);
579 if (ret < 0)
580 goto error_unlock_call;
581 if (!(flags & MSG_PEEK))
582 rxrpc_release_call(rx, call);
583 msg->msg_flags |= MSG_EOR;
584 ret = 1;
585 }
586
587 if (ret == 0)
588 msg->msg_flags |= MSG_MORE;
589 else
590 msg->msg_flags &= ~MSG_MORE;
591 ret = copied;
592
593error_unlock_call:
594 mutex_unlock(&call->user_mutex);
595 rxrpc_put_call(call, rxrpc_call_put);
596 trace_rxrpc_recvmsg(call, rxrpc_recvmsg_return, 0, 0, 0, ret);
597 return ret;
598
599error_requeue_call:
600 if (!(flags & MSG_PEEK)) {
601 write_lock_bh(&rx->recvmsg_lock);
602 list_add(&call->recvmsg_link, &rx->recvmsg_q);
603 write_unlock_bh(&rx->recvmsg_lock);
604 trace_rxrpc_recvmsg(call, rxrpc_recvmsg_requeue, 0, 0, 0, 0);
605 } else {
606 rxrpc_put_call(call, rxrpc_call_put);
607 }
608error_no_call:
609 release_sock(&rx->sk);
610error_trace:
611 trace_rxrpc_recvmsg(call, rxrpc_recvmsg_return, 0, 0, 0, ret);
612 return ret;
613
614wait_interrupted:
615 ret = sock_intr_errno(timeo);
616wait_error:
617 finish_wait(sk_sleep(&rx->sk), &wait);
618 call = NULL;
619 goto error_trace;
620}
621
622/**
623 * rxrpc_kernel_recv_data - Allow a kernel service to receive data/info
624 * @sock: The socket that the call exists on
625 * @call: The call to send data through
626 * @iter: The buffer to receive into
627 * @want_more: True if more data is expected to be read
628 * @_abort: Where the abort code is stored if -ECONNABORTED is returned
629 * @_service: Where to store the actual service ID (may be upgraded)
630 *
631 * Allow a kernel service to receive data and pick up information about the
632 * state of a call. Returns 0 if got what was asked for and there's more
633 * available, 1 if we got what was asked for and we're at the end of the data
634 * and -EAGAIN if we need more data.
635 *
636 * Note that we may return -EAGAIN to drain empty packets at the end of the
637 * data, even if we've already copied over the requested data.
638 *
639 * *_abort should also be initialised to 0.
640 */
641int rxrpc_kernel_recv_data(struct socket *sock, struct rxrpc_call *call,
642 struct iov_iter *iter,
643 bool want_more, u32 *_abort, u16 *_service)
644{
645 size_t offset = 0;
646 int ret;
647
648 _enter("{%d,%s},%zu,%d",
649 call->debug_id, rxrpc_call_states[call->state],
650 iov_iter_count(iter), want_more);
651
652 ASSERTCMP(call->state, !=, RXRPC_CALL_SERVER_ACCEPTING);
653
654 mutex_lock(&call->user_mutex);
655
656 switch (READ_ONCE(call->state)) {
657 case RXRPC_CALL_CLIENT_RECV_REPLY:
658 case RXRPC_CALL_SERVER_RECV_REQUEST:
659 case RXRPC_CALL_SERVER_ACK_REQUEST:
660 ret = rxrpc_recvmsg_data(sock, call, NULL, iter,
661 iov_iter_count(iter), 0,
662 &offset);
663 if (ret < 0)
664 goto out;
665
666 /* We can only reach here with a partially full buffer if we
667 * have reached the end of the data. We must otherwise have a
668 * full buffer or have been given -EAGAIN.
669 */
670 if (ret == 1) {
671 if (iov_iter_count(iter) > 0)
672 goto short_data;
673 if (!want_more)
674 goto read_phase_complete;
675 ret = 0;
676 goto out;
677 }
678
679 if (!want_more)
680 goto excess_data;
681 goto out;
682
683 case RXRPC_CALL_COMPLETE:
684 goto call_complete;
685
686 default:
687 ret = -EINPROGRESS;
688 goto out;
689 }
690
691read_phase_complete:
692 ret = 1;
693out:
694 switch (call->ackr_reason) {
695 case RXRPC_ACK_IDLE:
696 break;
697 case RXRPC_ACK_DELAY:
698 if (ret != -EAGAIN)
699 break;
700 /* Fall through */
701 default:
702 rxrpc_send_ack_packet(call, false, NULL);
703 }
704
705 if (_service)
706 *_service = call->service_id;
707 mutex_unlock(&call->user_mutex);
708 _leave(" = %d [%zu,%d]", ret, iov_iter_count(iter), *_abort);
709 return ret;
710
711short_data:
712 trace_rxrpc_rx_eproto(call, 0, tracepoint_string("short_data"));
713 ret = -EBADMSG;
714 goto out;
715excess_data:
716 trace_rxrpc_rx_eproto(call, 0, tracepoint_string("excess_data"));
717 ret = -EMSGSIZE;
718 goto out;
719call_complete:
720 *_abort = call->abort_code;
721 ret = call->error;
722 if (call->completion == RXRPC_CALL_SUCCEEDED) {
723 ret = 1;
724 if (iov_iter_count(iter) > 0)
725 ret = -ECONNRESET;
726 }
727 goto out;
728}
729EXPORT_SYMBOL(rxrpc_kernel_recv_data);
730
731/**
732 * rxrpc_kernel_get_reply_time - Get timestamp on first reply packet
733 * @sock: The socket that the call exists on
734 * @call: The call to query
735 * @_ts: Where to put the timestamp
736 *
737 * Retrieve the timestamp from the first DATA packet of the reply if it is
738 * in the ring. Returns true if successful, false if not.
739 */
740bool rxrpc_kernel_get_reply_time(struct socket *sock, struct rxrpc_call *call,
741 ktime_t *_ts)
742{
743 struct sk_buff *skb;
744 rxrpc_seq_t hard_ack, top, seq;
745 bool success = false;
746
747 mutex_lock(&call->user_mutex);
748
749 if (READ_ONCE(call->state) != RXRPC_CALL_CLIENT_RECV_REPLY)
750 goto out;
751
752 hard_ack = call->rx_hard_ack;
753 if (hard_ack != 0)
754 goto out;
755
756 seq = hard_ack + 1;
757 top = smp_load_acquire(&call->rx_top);
758 if (after(seq, top))
759 goto out;
760
761 skb = call->rxtx_buffer[seq & RXRPC_RXTX_BUFF_MASK];
762 if (!skb)
763 goto out;
764
765 *_ts = skb_get_ktime(skb);
766 success = true;
767
768out:
769 mutex_unlock(&call->user_mutex);
770 return success;
771}
772EXPORT_SYMBOL(rxrpc_kernel_get_reply_time);