1/*
2 * Copyright (c) 2006 Oracle. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 */
33#include <linux/kernel.h>
34#include <linux/slab.h>
35#include <net/sock.h>
36#include <linux/in.h>
37#include <linux/export.h>
38
39#include "rds.h"
40
41void rds_inc_init(struct rds_incoming *inc, struct rds_connection *conn,
42 __be32 saddr)
43{
44 atomic_set(&inc->i_refcount, 1);
45 INIT_LIST_HEAD(&inc->i_item);
46 inc->i_conn = conn;
47 inc->i_saddr = saddr;
48 inc->i_rdma_cookie = 0;
49}
50EXPORT_SYMBOL_GPL(rds_inc_init);
51
52static void rds_inc_addref(struct rds_incoming *inc)
53{
54 rdsdebug("addref inc %p ref %d\n", inc, atomic_read(&inc->i_refcount));
55 atomic_inc(&inc->i_refcount);
56}
57
58void rds_inc_put(struct rds_incoming *inc)
59{
60 rdsdebug("put inc %p ref %d\n", inc, atomic_read(&inc->i_refcount));
61 if (atomic_dec_and_test(&inc->i_refcount)) {
62 BUG_ON(!list_empty(&inc->i_item));
63
64 inc->i_conn->c_trans->inc_free(inc);
65 }
66}
67EXPORT_SYMBOL_GPL(rds_inc_put);
68
69static void rds_recv_rcvbuf_delta(struct rds_sock *rs, struct sock *sk,
70 struct rds_cong_map *map,
71 int delta, __be16 port)
72{
73 int now_congested;
74
75 if (delta == 0)
76 return;
77
78 rs->rs_rcv_bytes += delta;
79 now_congested = rs->rs_rcv_bytes > rds_sk_rcvbuf(rs);
80
81 rdsdebug("rs %p (%pI4:%u) recv bytes %d buf %d "
82 "now_cong %d delta %d\n",
83 rs, &rs->rs_bound_addr,
84 ntohs(rs->rs_bound_port), rs->rs_rcv_bytes,
85 rds_sk_rcvbuf(rs), now_congested, delta);
86
87 /* wasn't -> am congested */
88 if (!rs->rs_congested && now_congested) {
89 rs->rs_congested = 1;
90 rds_cong_set_bit(map, port);
91 rds_cong_queue_updates(map);
92 }
93 /* was -> aren't congested */
94 /* Require more free space before reporting uncongested to prevent
95 bouncing cong/uncong state too often */
96 else if (rs->rs_congested && (rs->rs_rcv_bytes < (rds_sk_rcvbuf(rs)/2))) {
97 rs->rs_congested = 0;
98 rds_cong_clear_bit(map, port);
99 rds_cong_queue_updates(map);
100 }
101
102 /* do nothing if no change in cong state */
103}
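/*
 * Worked example of the hysteresis above (the numbers are illustrative,
 * not taken from the code): with rds_sk_rcvbuf(rs) == 65536, the socket
 * becomes congested once rs_rcv_bytes exceeds 65536 and its port bit is
 * set in the map.  The bit is only cleared again once rs_rcv_bytes drops
 * below 65536 / 2 == 32768, so a reader that drains a few bytes per call
 * does not flip the congestion map on every recvmsg().
 */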
104
105/*
106 * Process all extension headers that come with this message.
107 */
108static void rds_recv_incoming_exthdrs(struct rds_incoming *inc, struct rds_sock *rs)
109{
110 struct rds_header *hdr = &inc->i_hdr;
111 unsigned int pos = 0, type, len;
112 union {
113 struct rds_ext_header_version version;
114 struct rds_ext_header_rdma rdma;
115 struct rds_ext_header_rdma_dest rdma_dest;
116 } buffer;
117
118 while (1) {
119 len = sizeof(buffer);
120 type = rds_message_next_extension(hdr, &pos, &buffer, &len);
121 if (type == RDS_EXTHDR_NONE)
122 break;
123 /* Process extension header here */
124 switch (type) {
125 case RDS_EXTHDR_RDMA:
126 rds_rdma_unuse(rs, be32_to_cpu(buffer.rdma.h_rdma_rkey), 0);
127 break;
128
129 case RDS_EXTHDR_RDMA_DEST:
130 /* We ignore the size for now. We could stash it
131 * somewhere and use it for error checking. */
132 inc->i_rdma_cookie = rds_rdma_make_cookie(
133 be32_to_cpu(buffer.rdma_dest.h_rdma_rkey),
134 be32_to_cpu(buffer.rdma_dest.h_rdma_offset));
135
136 break;
137 }
138 }
139}
140
141/*
142 * The transport must make sure that this is serialized against other
143 * rx and conn reset on this specific conn.
144 *
145 * We currently assert that only one fragmented message will be sent
146 * down a connection at a time. This lets us reassemble in the conn
147 * instead of per-flow which means that we don't have to go digging through
148 * flows to tear down partial reassembly progress on conn failure and
149 * we save flow lookup and locking for each frag arrival. It does mean
150 * that small messages will wait behind large ones. Fragmenting at all
151 * is only to reduce the memory consumption of pre-posted buffers.
152 *
153 * The caller passes in saddr and daddr instead of us getting it from the
 154 * conn. This lets loopback, which only has one conn for both directions,
155 * tell us which roles the addrs in the conn are playing for this message.
156 */
157void rds_recv_incoming(struct rds_connection *conn, __be32 saddr, __be32 daddr,
158 struct rds_incoming *inc, gfp_t gfp)
159{
160 struct rds_sock *rs = NULL;
161 struct sock *sk;
162 unsigned long flags;
163
164 inc->i_conn = conn;
165 inc->i_rx_jiffies = jiffies;
166
167 rdsdebug("conn %p next %llu inc %p seq %llu len %u sport %u dport %u "
168 "flags 0x%x rx_jiffies %lu\n", conn,
169 (unsigned long long)conn->c_next_rx_seq,
170 inc,
171 (unsigned long long)be64_to_cpu(inc->i_hdr.h_sequence),
172 be32_to_cpu(inc->i_hdr.h_len),
173 be16_to_cpu(inc->i_hdr.h_sport),
174 be16_to_cpu(inc->i_hdr.h_dport),
175 inc->i_hdr.h_flags,
176 inc->i_rx_jiffies);
177
178 /*
179 * Sequence numbers should only increase. Messages get their
180 * sequence number as they're queued in a sending conn. They
181 * can be dropped, though, if the sending socket is closed before
182 * they hit the wire. So sequence numbers can skip forward
183 * under normal operation. They can also drop back in the conn
184 * failover case as previously sent messages are resent down the
185 * new instance of a conn. We drop those, otherwise we have
186 * to assume that the next valid seq does not come after a
187 * hole in the fragment stream.
188 *
189 * The headers don't give us a way to realize if fragments of
190 * a message have been dropped. We assume that frags that arrive
191 * to a flow are part of the current message on the flow that is
192 * being reassembled. This means that senders can't drop messages
193 * from the sending conn until all their frags are sent.
194 *
195 * XXX we could spend more on the wire to get more robust failure
196 * detection, arguably worth it to avoid data corruption.
197 */
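	/*
	 * Concrete example of the check below (numbers are illustrative
	 * only): if the last frame delivered on this conn had sequence 9
	 * (c_next_rx_seq == 10), a retransmitted frame arriving with
	 * sequence 7 after a failover is dropped, while a new frame with
	 * sequence 12 is accepted and moves c_next_rx_seq to 13.
	 */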
198 if (be64_to_cpu(inc->i_hdr.h_sequence) < conn->c_next_rx_seq &&
199 (inc->i_hdr.h_flags & RDS_FLAG_RETRANSMITTED)) {
200 rds_stats_inc(s_recv_drop_old_seq);
201 goto out;
202 }
203 conn->c_next_rx_seq = be64_to_cpu(inc->i_hdr.h_sequence) + 1;
204
205 if (rds_sysctl_ping_enable && inc->i_hdr.h_dport == 0) {
206 rds_stats_inc(s_recv_ping);
207 rds_send_pong(conn, inc->i_hdr.h_sport);
208 goto out;
209 }
210
211 rs = rds_find_bound(daddr, inc->i_hdr.h_dport);
212 if (!rs) {
213 rds_stats_inc(s_recv_drop_no_sock);
214 goto out;
215 }
216
217 /* Process extension headers */
218 rds_recv_incoming_exthdrs(inc, rs);
219
220 /* We can be racing with rds_release() which marks the socket dead. */
221 sk = rds_rs_to_sk(rs);
222
223 /* serialize with rds_release -> sock_orphan */
224 write_lock_irqsave(&rs->rs_recv_lock, flags);
225 if (!sock_flag(sk, SOCK_DEAD)) {
226 rdsdebug("adding inc %p to rs %p's recv queue\n", inc, rs);
227 rds_stats_inc(s_recv_queued);
228 rds_recv_rcvbuf_delta(rs, sk, inc->i_conn->c_lcong,
229 be32_to_cpu(inc->i_hdr.h_len),
230 inc->i_hdr.h_dport);
231 rds_inc_addref(inc);
232 list_add_tail(&inc->i_item, &rs->rs_recv_queue);
233 __rds_wake_sk_sleep(sk);
234 } else {
235 rds_stats_inc(s_recv_drop_dead_sock);
236 }
237 write_unlock_irqrestore(&rs->rs_recv_lock, flags);
238
239out:
240 if (rs)
241 rds_sock_put(rs);
242}
243EXPORT_SYMBOL_GPL(rds_recv_incoming);
244
245/*
 246 * Be very careful here. This is called as the condition in
 247 * wait_event_*() and so needs to cope with being called many times.
248 */
249static int rds_next_incoming(struct rds_sock *rs, struct rds_incoming **inc)
250{
251 unsigned long flags;
252
253 if (!*inc) {
254 read_lock_irqsave(&rs->rs_recv_lock, flags);
255 if (!list_empty(&rs->rs_recv_queue)) {
256 *inc = list_entry(rs->rs_recv_queue.next,
257 struct rds_incoming,
258 i_item);
259 rds_inc_addref(*inc);
260 }
261 read_unlock_irqrestore(&rs->rs_recv_lock, flags);
262 }
263
264 return *inc != NULL;
265}
266
267static int rds_still_queued(struct rds_sock *rs, struct rds_incoming *inc,
268 int drop)
269{
270 struct sock *sk = rds_rs_to_sk(rs);
271 int ret = 0;
272 unsigned long flags;
273
274 write_lock_irqsave(&rs->rs_recv_lock, flags);
275 if (!list_empty(&inc->i_item)) {
276 ret = 1;
277 if (drop) {
278 /* XXX make sure this i_conn is reliable */
279 rds_recv_rcvbuf_delta(rs, sk, inc->i_conn->c_lcong,
280 -be32_to_cpu(inc->i_hdr.h_len),
281 inc->i_hdr.h_dport);
282 list_del_init(&inc->i_item);
283 rds_inc_put(inc);
284 }
285 }
286 write_unlock_irqrestore(&rs->rs_recv_lock, flags);
287
288 rdsdebug("inc %p rs %p still %d dropped %d\n", inc, rs, ret, drop);
289 return ret;
290}
291
292/*
293 * Pull errors off the error queue.
294 * If msghdr is NULL, we will just purge the error queue.
295 */
296int rds_notify_queue_get(struct rds_sock *rs, struct msghdr *msghdr)
297{
298 struct rds_notifier *notifier;
299 struct rds_rdma_notify cmsg = { 0 }; /* fill holes with zero */
300 unsigned int count = 0, max_messages = ~0U;
301 unsigned long flags;
302 LIST_HEAD(copy);
303 int err = 0;
304
305
306 /* put_cmsg copies to user space and thus may sleep. We can't do this
307 * with rs_lock held, so first grab as many notifications as we can stuff
308 * in the user provided cmsg buffer. We don't try to copy more, to avoid
309 * losing notifications - except when the buffer is so small that it wouldn't
 310 * even hold a single notification. Then we give the caller as much of
 311 * this single message as we can squeeze in, and set MSG_CTRUNC.
312 */
313 if (msghdr) {
314 max_messages = msghdr->msg_controllen / CMSG_SPACE(sizeof(cmsg));
315 if (!max_messages)
316 max_messages = 1;
317 }
318
319 spin_lock_irqsave(&rs->rs_lock, flags);
320 while (!list_empty(&rs->rs_notify_queue) && count < max_messages) {
321 notifier = list_entry(rs->rs_notify_queue.next,
322 struct rds_notifier, n_list);
 323		list_move(&notifier->n_list, &copy);
324 count++;
325 }
326 spin_unlock_irqrestore(&rs->rs_lock, flags);
327
328 if (!count)
329 return 0;
330
 331	while (!list_empty(&copy)) {
332 notifier = list_entry(copy.next, struct rds_notifier, n_list);
333
334 if (msghdr) {
335 cmsg.user_token = notifier->n_user_token;
336 cmsg.status = notifier->n_status;
337
338 err = put_cmsg(msghdr, SOL_RDS, RDS_CMSG_RDMA_STATUS,
339 sizeof(cmsg), &cmsg);
340 if (err)
341 break;
342 }
343
 344		list_del_init(&notifier->n_list);
345 kfree(notifier);
346 }
347
348 /* If we bailed out because of an error in put_cmsg,
349 * we may be left with one or more notifications that we
350 * didn't process. Return them to the head of the list. */
 351	if (!list_empty(&copy)) {
352 spin_lock_irqsave(&rs->rs_lock, flags);
 353		list_splice(&copy, &rs->rs_notify_queue);
354 spin_unlock_irqrestore(&rs->rs_lock, flags);
355 }
356
357 return err;
358}
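/*
 * Userspace usage sketch (illustrative assumption, not part of this file):
 * roughly how an application would drain the RDMA completion notifications
 * queued above.  "fd" is an already-bound RDS socket and handle_rdma_done()
 * is a hypothetical helper; only the cmsg walk is the point here.  The
 * control buffer is sized in CMSG_SPACE(sizeof(struct rds_rdma_notify))
 * units so it lines up with the max_messages calculation above.
 *
 *	char ctl[8 * CMSG_SPACE(sizeof(struct rds_rdma_notify))];
 *	struct msghdr msg = { .msg_control = ctl, .msg_controllen = sizeof(ctl) };
 *	struct rds_rdma_notify note;
 *	struct cmsghdr *c;
 *
 *	if (recvmsg(fd, &msg, 0) < 0)
 *		return;
 *	for (c = CMSG_FIRSTHDR(&msg); c; c = CMSG_NXTHDR(&msg, c)) {
 *		if (c->cmsg_level == SOL_RDS &&
 *		    c->cmsg_type == RDS_CMSG_RDMA_STATUS) {
 *			memcpy(&note, CMSG_DATA(c), sizeof(note));
 *			handle_rdma_done(note.user_token, note.status);
 *		}
 *	}
 */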
359
360/*
361 * Queue a congestion notification
362 */
363static int rds_notify_cong(struct rds_sock *rs, struct msghdr *msghdr)
364{
365 uint64_t notify = rs->rs_cong_notify;
366 unsigned long flags;
367 int err;
368
369 err = put_cmsg(msghdr, SOL_RDS, RDS_CMSG_CONG_UPDATE,
 370		       sizeof(notify), &notify);
371 if (err)
372 return err;
373
374 spin_lock_irqsave(&rs->rs_lock, flags);
375 rs->rs_cong_notify &= ~notify;
376 spin_unlock_irqrestore(&rs->rs_lock, flags);
377
378 return 0;
379}
380
381/*
382 * Receive any control messages.
383 */
384static int rds_cmsg_recv(struct rds_incoming *inc, struct msghdr *msg)
385{
386 int ret = 0;
387
388 if (inc->i_rdma_cookie) {
389 ret = put_cmsg(msg, SOL_RDS, RDS_CMSG_RDMA_DEST,
390 sizeof(inc->i_rdma_cookie), &inc->i_rdma_cookie);
391 if (ret)
392 return ret;
393 }
394
395 return 0;
396}
397
398int rds_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
399 size_t size, int msg_flags)
400{
401 struct sock *sk = sock->sk;
402 struct rds_sock *rs = rds_sk_to_rs(sk);
403 long timeo;
404 int ret = 0, nonblock = msg_flags & MSG_DONTWAIT;
405 struct sockaddr_in *sin;
406 struct rds_incoming *inc = NULL;
407
408 /* udp_recvmsg()->sock_recvtimeo() gets away without locking too.. */
409 timeo = sock_rcvtimeo(sk, nonblock);
410
411 rdsdebug("size %zu flags 0x%x timeo %ld\n", size, msg_flags, timeo);
412
413 msg->msg_namelen = 0;
414
415 if (msg_flags & MSG_OOB)
416 goto out;
417
418 while (1) {
419 /* If there are pending notifications, do those - and nothing else */
420 if (!list_empty(&rs->rs_notify_queue)) {
421 ret = rds_notify_queue_get(rs, msg);
422 break;
423 }
424
425 if (rs->rs_cong_notify) {
426 ret = rds_notify_cong(rs, msg);
427 break;
428 }
429
430 if (!rds_next_incoming(rs, &inc)) {
431 if (nonblock) {
432 ret = -EAGAIN;
433 break;
434 }
435
436 timeo = wait_event_interruptible_timeout(*sk_sleep(sk),
437 (!list_empty(&rs->rs_notify_queue) ||
438 rs->rs_cong_notify ||
439 rds_next_incoming(rs, &inc)), timeo);
440 rdsdebug("recvmsg woke inc %p timeo %ld\n", inc,
441 timeo);
442 if (timeo > 0 || timeo == MAX_SCHEDULE_TIMEOUT)
443 continue;
444
445 ret = timeo;
446 if (ret == 0)
447 ret = -ETIMEDOUT;
448 break;
449 }
450
451 rdsdebug("copying inc %p from %pI4:%u to user\n", inc,
452 &inc->i_conn->c_faddr,
453 ntohs(inc->i_hdr.h_sport));
454 ret = inc->i_conn->c_trans->inc_copy_to_user(inc, msg->msg_iov,
455 size);
456 if (ret < 0)
457 break;
458
459 /*
460 * if the message we just copied isn't at the head of the
461 * recv queue then someone else raced us to return it, try
462 * to get the next message.
463 */
464 if (!rds_still_queued(rs, inc, !(msg_flags & MSG_PEEK))) {
465 rds_inc_put(inc);
466 inc = NULL;
467 rds_stats_inc(s_recv_deliver_raced);
468 continue;
469 }
470
471 if (ret < be32_to_cpu(inc->i_hdr.h_len)) {
472 if (msg_flags & MSG_TRUNC)
473 ret = be32_to_cpu(inc->i_hdr.h_len);
474 msg->msg_flags |= MSG_TRUNC;
475 }
476
477 if (rds_cmsg_recv(inc, msg)) {
478 ret = -EFAULT;
479 goto out;
480 }
481
482 rds_stats_inc(s_recv_delivered);
483
484 sin = (struct sockaddr_in *)msg->msg_name;
485 if (sin) {
486 sin->sin_family = AF_INET;
487 sin->sin_port = inc->i_hdr.h_sport;
488 sin->sin_addr.s_addr = inc->i_saddr;
489 memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
490 msg->msg_namelen = sizeof(*sin);
491 }
492 break;
493 }
494
495 if (inc)
496 rds_inc_put(inc);
497
498out:
499 return ret;
500}
501
502/*
503 * The socket is being shut down and we're asked to drop messages that were
504 * queued for recvmsg. The caller has unbound the socket so the receive path
505 * won't queue any more incoming fragments or messages on the socket.
506 */
507void rds_clear_recv_queue(struct rds_sock *rs)
508{
509 struct sock *sk = rds_rs_to_sk(rs);
510 struct rds_incoming *inc, *tmp;
511 unsigned long flags;
512
513 write_lock_irqsave(&rs->rs_recv_lock, flags);
514 list_for_each_entry_safe(inc, tmp, &rs->rs_recv_queue, i_item) {
515 rds_recv_rcvbuf_delta(rs, sk, inc->i_conn->c_lcong,
516 -be32_to_cpu(inc->i_hdr.h_len),
517 inc->i_hdr.h_dport);
518 list_del_init(&inc->i_item);
519 rds_inc_put(inc);
520 }
521 write_unlock_irqrestore(&rs->rs_recv_lock, flags);
522}
523
524/*
525 * inc->i_saddr isn't used here because it is only set in the receive
526 * path.
527 */
528void rds_inc_info_copy(struct rds_incoming *inc,
529 struct rds_info_iterator *iter,
530 __be32 saddr, __be32 daddr, int flip)
531{
532 struct rds_info_message minfo;
533
534 minfo.seq = be64_to_cpu(inc->i_hdr.h_sequence);
535 minfo.len = be32_to_cpu(inc->i_hdr.h_len);
536
537 if (flip) {
538 minfo.laddr = daddr;
539 minfo.faddr = saddr;
540 minfo.lport = inc->i_hdr.h_dport;
541 minfo.fport = inc->i_hdr.h_sport;
542 } else {
543 minfo.laddr = saddr;
544 minfo.faddr = daddr;
545 minfo.lport = inc->i_hdr.h_sport;
546 minfo.fport = inc->i_hdr.h_dport;
547 }
548
549 rds_info_copy(iter, &minfo, sizeof(minfo));
550}
1/*
2 * Copyright (c) 2006 Oracle. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 */
33#include <linux/kernel.h>
34#include <linux/slab.h>
35#include <net/sock.h>
36#include <linux/in.h>
37#include <linux/export.h>
38#include <linux/time.h>
39#include <linux/rds.h>
40
41#include "rds.h"
42
43void rds_inc_init(struct rds_incoming *inc, struct rds_connection *conn,
44 __be32 saddr)
45{
46 int i;
47
48 refcount_set(&inc->i_refcount, 1);
49 INIT_LIST_HEAD(&inc->i_item);
50 inc->i_conn = conn;
51 inc->i_saddr = saddr;
52 inc->i_rdma_cookie = 0;
53 inc->i_rx_tstamp.tv_sec = 0;
54 inc->i_rx_tstamp.tv_usec = 0;
55
56 for (i = 0; i < RDS_RX_MAX_TRACES; i++)
57 inc->i_rx_lat_trace[i] = 0;
58}
59EXPORT_SYMBOL_GPL(rds_inc_init);
60
61void rds_inc_path_init(struct rds_incoming *inc, struct rds_conn_path *cp,
62 __be32 saddr)
63{
64 refcount_set(&inc->i_refcount, 1);
65 INIT_LIST_HEAD(&inc->i_item);
66 inc->i_conn = cp->cp_conn;
67 inc->i_conn_path = cp;
68 inc->i_saddr = saddr;
69 inc->i_rdma_cookie = 0;
70 inc->i_rx_tstamp.tv_sec = 0;
71 inc->i_rx_tstamp.tv_usec = 0;
72}
73EXPORT_SYMBOL_GPL(rds_inc_path_init);
74
75static void rds_inc_addref(struct rds_incoming *inc)
76{
77 rdsdebug("addref inc %p ref %d\n", inc, refcount_read(&inc->i_refcount));
78 refcount_inc(&inc->i_refcount);
79}
80
81void rds_inc_put(struct rds_incoming *inc)
82{
83 rdsdebug("put inc %p ref %d\n", inc, refcount_read(&inc->i_refcount));
84 if (refcount_dec_and_test(&inc->i_refcount)) {
85 BUG_ON(!list_empty(&inc->i_item));
86
87 inc->i_conn->c_trans->inc_free(inc);
88 }
89}
90EXPORT_SYMBOL_GPL(rds_inc_put);
91
92static void rds_recv_rcvbuf_delta(struct rds_sock *rs, struct sock *sk,
93 struct rds_cong_map *map,
94 int delta, __be16 port)
95{
96 int now_congested;
97
98 if (delta == 0)
99 return;
100
101 rs->rs_rcv_bytes += delta;
102 if (delta > 0)
103 rds_stats_add(s_recv_bytes_added_to_socket, delta);
104 else
105 rds_stats_add(s_recv_bytes_removed_from_socket, -delta);
106 now_congested = rs->rs_rcv_bytes > rds_sk_rcvbuf(rs);
107
108 rdsdebug("rs %p (%pI4:%u) recv bytes %d buf %d "
109 "now_cong %d delta %d\n",
110 rs, &rs->rs_bound_addr,
111 ntohs(rs->rs_bound_port), rs->rs_rcv_bytes,
112 rds_sk_rcvbuf(rs), now_congested, delta);
113
114 /* wasn't -> am congested */
115 if (!rs->rs_congested && now_congested) {
116 rs->rs_congested = 1;
117 rds_cong_set_bit(map, port);
118 rds_cong_queue_updates(map);
119 }
120 /* was -> aren't congested */
121 /* Require more free space before reporting uncongested to prevent
122 bouncing cong/uncong state too often */
123 else if (rs->rs_congested && (rs->rs_rcv_bytes < (rds_sk_rcvbuf(rs)/2))) {
124 rs->rs_congested = 0;
125 rds_cong_clear_bit(map, port);
126 rds_cong_queue_updates(map);
127 }
128
129 /* do nothing if no change in cong state */
130}
131
132static void rds_conn_peer_gen_update(struct rds_connection *conn,
133 u32 peer_gen_num)
134{
135 int i;
136 struct rds_message *rm, *tmp;
137 unsigned long flags;
138
139 WARN_ON(conn->c_trans->t_type != RDS_TRANS_TCP);
140 if (peer_gen_num != 0) {
141 if (conn->c_peer_gen_num != 0 &&
142 peer_gen_num != conn->c_peer_gen_num) {
143 for (i = 0; i < RDS_MPATH_WORKERS; i++) {
144 struct rds_conn_path *cp;
145
146 cp = &conn->c_path[i];
147 spin_lock_irqsave(&cp->cp_lock, flags);
148 cp->cp_next_tx_seq = 1;
149 cp->cp_next_rx_seq = 0;
150 list_for_each_entry_safe(rm, tmp,
151 &cp->cp_retrans,
152 m_conn_item) {
153 set_bit(RDS_MSG_FLUSH, &rm->m_flags);
154 }
155 spin_unlock_irqrestore(&cp->cp_lock, flags);
156 }
157 }
158 conn->c_peer_gen_num = peer_gen_num;
159 }
160}
161
162/*
163 * Process all extension headers that come with this message.
164 */
165static void rds_recv_incoming_exthdrs(struct rds_incoming *inc, struct rds_sock *rs)
166{
167 struct rds_header *hdr = &inc->i_hdr;
168 unsigned int pos = 0, type, len;
169 union {
170 struct rds_ext_header_version version;
171 struct rds_ext_header_rdma rdma;
172 struct rds_ext_header_rdma_dest rdma_dest;
173 } buffer;
174
175 while (1) {
176 len = sizeof(buffer);
177 type = rds_message_next_extension(hdr, &pos, &buffer, &len);
178 if (type == RDS_EXTHDR_NONE)
179 break;
180 /* Process extension header here */
181 switch (type) {
182 case RDS_EXTHDR_RDMA:
183 rds_rdma_unuse(rs, be32_to_cpu(buffer.rdma.h_rdma_rkey), 0);
184 break;
185
186 case RDS_EXTHDR_RDMA_DEST:
187 /* We ignore the size for now. We could stash it
188 * somewhere and use it for error checking. */
189 inc->i_rdma_cookie = rds_rdma_make_cookie(
190 be32_to_cpu(buffer.rdma_dest.h_rdma_rkey),
191 be32_to_cpu(buffer.rdma_dest.h_rdma_offset));
192
193 break;
194 }
195 }
196}
197
198static void rds_recv_hs_exthdrs(struct rds_header *hdr,
199 struct rds_connection *conn)
200{
201 unsigned int pos = 0, type, len;
202 union {
203 struct rds_ext_header_version version;
204 u16 rds_npaths;
205 u32 rds_gen_num;
206 } buffer;
207 u32 new_peer_gen_num = 0;
208
209 while (1) {
210 len = sizeof(buffer);
211 type = rds_message_next_extension(hdr, &pos, &buffer, &len);
212 if (type == RDS_EXTHDR_NONE)
213 break;
214 /* Process extension header here */
215 switch (type) {
216 case RDS_EXTHDR_NPATHS:
217 conn->c_npaths = min_t(int, RDS_MPATH_WORKERS,
218 be16_to_cpu(buffer.rds_npaths));
219 break;
220 case RDS_EXTHDR_GEN_NUM:
221 new_peer_gen_num = be32_to_cpu(buffer.rds_gen_num);
222 break;
223 default:
224 pr_warn_ratelimited("ignoring unknown exthdr type "
225 "0x%x\n", type);
226 }
227 }
228 /* if RDS_EXTHDR_NPATHS was not found, default to a single-path */
229 conn->c_npaths = max_t(int, conn->c_npaths, 1);
230 conn->c_ping_triggered = 0;
231 rds_conn_peer_gen_update(conn, new_peer_gen_num);
232}
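/*
 * Example of the negotiation in rds_recv_hs_exthdrs(): a peer that
 * advertises 4 paths via RDS_EXTHDR_NPATHS on a host with more than 4
 * RDS_MPATH_WORKERS ends up with c_npaths == 4 (the min_t() above), while
 * a legacy peer that never sends RDS_EXTHDR_NPATHS leaves c_npaths at its
 * old value, which the max_t() above then clamps to at least 1, i.e. a
 * single path.
 */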
233
234/* rds_start_mprds() will synchronously start multiple paths when appropriate.
235 * The scheme is based on the following rules:
236 *
237 * 1. rds_sendmsg on first connect attempt sends the probe ping, with the
238 * sender's npaths (s_npaths)
239 * 2. rcvr of probe-ping knows the mprds_paths = min(s_npaths, r_npaths). It
240 * sends back a probe-pong with r_npaths. After that, if rcvr is the
241 * smaller ip addr, it starts rds_conn_path_connect_if_down on all
242 * mprds_paths.
243 * 3. sender gets woken up, and can move to rds_conn_path_connect_if_down.
244 * If it is the smaller ipaddr, rds_conn_path_connect_if_down can be
245 * called after reception of the probe-pong on all mprds_paths.
246 * Otherwise (sender of probe-ping is not the smaller ip addr): just call
247 * rds_conn_path_connect_if_down on the hashed path. (see rule 4)
248 * 4. rds_connect_worker must only trigger a connection if laddr < faddr.
 249 * 5. sender may end up queuing the packet on the cp; it will get sent out
 250 *    later, when the connection is completed. See the example below.
251 */
252static void rds_start_mprds(struct rds_connection *conn)
253{
254 int i;
255 struct rds_conn_path *cp;
256
257 if (conn->c_npaths > 1 &&
258 IS_CANONICAL(conn->c_laddr, conn->c_faddr)) {
259 for (i = 0; i < conn->c_npaths; i++) {
260 cp = &conn->c_path[i];
261 rds_conn_path_connect_if_down(cp);
262 }
263 }
264}
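/*
 * Sketch of how the rules above compose (the addresses are made up for
 * illustration): host A (192.168.1.1) rds_sendmsg()s to host B
 * (192.168.1.2) for the first time and emits the probe ping carrying its
 * own npaths (rule 1).  B answers with a probe pong carrying its npaths;
 * since B is not the smaller address, rds_start_mprds() does nothing on
 * its side (rule 2).  When the pong reaches A, A is the smaller address,
 * so rds_start_mprds() walks the min(s_npaths, r_npaths) paths and calls
 * rds_conn_path_connect_if_down() on each (rule 3).  B's side of every
 * path only ever comes up passively, because rds_connect_worker() refuses
 * to initiate when laddr > faddr (rule 4).
 */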
265
266/*
267 * The transport must make sure that this is serialized against other
268 * rx and conn reset on this specific conn.
269 *
270 * We currently assert that only one fragmented message will be sent
271 * down a connection at a time. This lets us reassemble in the conn
272 * instead of per-flow which means that we don't have to go digging through
273 * flows to tear down partial reassembly progress on conn failure and
274 * we save flow lookup and locking for each frag arrival. It does mean
275 * that small messages will wait behind large ones. Fragmenting at all
276 * is only to reduce the memory consumption of pre-posted buffers.
277 *
278 * The caller passes in saddr and daddr instead of us getting it from the
 279 * conn. This lets loopback, which only has one conn for both directions,
280 * tell us which roles the addrs in the conn are playing for this message.
281 */
282void rds_recv_incoming(struct rds_connection *conn, __be32 saddr, __be32 daddr,
283 struct rds_incoming *inc, gfp_t gfp)
284{
285 struct rds_sock *rs = NULL;
286 struct sock *sk;
287 unsigned long flags;
288 struct rds_conn_path *cp;
289
290 inc->i_conn = conn;
291 inc->i_rx_jiffies = jiffies;
292 if (conn->c_trans->t_mp_capable)
293 cp = inc->i_conn_path;
294 else
295 cp = &conn->c_path[0];
296
297 rdsdebug("conn %p next %llu inc %p seq %llu len %u sport %u dport %u "
298 "flags 0x%x rx_jiffies %lu\n", conn,
299 (unsigned long long)cp->cp_next_rx_seq,
300 inc,
301 (unsigned long long)be64_to_cpu(inc->i_hdr.h_sequence),
302 be32_to_cpu(inc->i_hdr.h_len),
303 be16_to_cpu(inc->i_hdr.h_sport),
304 be16_to_cpu(inc->i_hdr.h_dport),
305 inc->i_hdr.h_flags,
306 inc->i_rx_jiffies);
307
308 /*
309 * Sequence numbers should only increase. Messages get their
310 * sequence number as they're queued in a sending conn. They
311 * can be dropped, though, if the sending socket is closed before
312 * they hit the wire. So sequence numbers can skip forward
313 * under normal operation. They can also drop back in the conn
314 * failover case as previously sent messages are resent down the
315 * new instance of a conn. We drop those, otherwise we have
316 * to assume that the next valid seq does not come after a
317 * hole in the fragment stream.
318 *
319 * The headers don't give us a way to realize if fragments of
320 * a message have been dropped. We assume that frags that arrive
321 * to a flow are part of the current message on the flow that is
322 * being reassembled. This means that senders can't drop messages
323 * from the sending conn until all their frags are sent.
324 *
325 * XXX we could spend more on the wire to get more robust failure
326 * detection, arguably worth it to avoid data corruption.
327 */
328 if (be64_to_cpu(inc->i_hdr.h_sequence) < cp->cp_next_rx_seq &&
329 (inc->i_hdr.h_flags & RDS_FLAG_RETRANSMITTED)) {
330 rds_stats_inc(s_recv_drop_old_seq);
331 goto out;
332 }
333 cp->cp_next_rx_seq = be64_to_cpu(inc->i_hdr.h_sequence) + 1;
334
335 if (rds_sysctl_ping_enable && inc->i_hdr.h_dport == 0) {
336 if (inc->i_hdr.h_sport == 0) {
337 rdsdebug("ignore ping with 0 sport from 0x%x\n", saddr);
338 goto out;
339 }
340 rds_stats_inc(s_recv_ping);
341 rds_send_pong(cp, inc->i_hdr.h_sport);
342 /* if this is a handshake ping, start multipath if necessary */
343 if (RDS_HS_PROBE(be16_to_cpu(inc->i_hdr.h_sport),
344 be16_to_cpu(inc->i_hdr.h_dport))) {
345 rds_recv_hs_exthdrs(&inc->i_hdr, cp->cp_conn);
346 rds_start_mprds(cp->cp_conn);
347 }
348 goto out;
349 }
350
351 if (be16_to_cpu(inc->i_hdr.h_dport) == RDS_FLAG_PROBE_PORT &&
352 inc->i_hdr.h_sport == 0) {
353 rds_recv_hs_exthdrs(&inc->i_hdr, cp->cp_conn);
354 /* if this is a handshake pong, start multipath if necessary */
355 rds_start_mprds(cp->cp_conn);
356 wake_up(&cp->cp_conn->c_hs_waitq);
357 goto out;
358 }
359
360 rs = rds_find_bound(daddr, inc->i_hdr.h_dport);
361 if (!rs) {
362 rds_stats_inc(s_recv_drop_no_sock);
363 goto out;
364 }
365
366 /* Process extension headers */
367 rds_recv_incoming_exthdrs(inc, rs);
368
369 /* We can be racing with rds_release() which marks the socket dead. */
370 sk = rds_rs_to_sk(rs);
371
372 /* serialize with rds_release -> sock_orphan */
373 write_lock_irqsave(&rs->rs_recv_lock, flags);
374 if (!sock_flag(sk, SOCK_DEAD)) {
375 rdsdebug("adding inc %p to rs %p's recv queue\n", inc, rs);
376 rds_stats_inc(s_recv_queued);
377 rds_recv_rcvbuf_delta(rs, sk, inc->i_conn->c_lcong,
378 be32_to_cpu(inc->i_hdr.h_len),
379 inc->i_hdr.h_dport);
380 if (sock_flag(sk, SOCK_RCVTSTAMP))
381 do_gettimeofday(&inc->i_rx_tstamp);
382 rds_inc_addref(inc);
383 inc->i_rx_lat_trace[RDS_MSG_RX_END] = local_clock();
384 list_add_tail(&inc->i_item, &rs->rs_recv_queue);
385 __rds_wake_sk_sleep(sk);
386 } else {
387 rds_stats_inc(s_recv_drop_dead_sock);
388 }
389 write_unlock_irqrestore(&rs->rs_recv_lock, flags);
390
391out:
392 if (rs)
393 rds_sock_put(rs);
394}
395EXPORT_SYMBOL_GPL(rds_recv_incoming);
396
397/*
 398 * Be very careful here. This is called as the condition in
 399 * wait_event_*() and so needs to cope with being called many times.
400 */
401static int rds_next_incoming(struct rds_sock *rs, struct rds_incoming **inc)
402{
403 unsigned long flags;
404
405 if (!*inc) {
406 read_lock_irqsave(&rs->rs_recv_lock, flags);
407 if (!list_empty(&rs->rs_recv_queue)) {
408 *inc = list_entry(rs->rs_recv_queue.next,
409 struct rds_incoming,
410 i_item);
411 rds_inc_addref(*inc);
412 }
413 read_unlock_irqrestore(&rs->rs_recv_lock, flags);
414 }
415
416 return *inc != NULL;
417}
418
419static int rds_still_queued(struct rds_sock *rs, struct rds_incoming *inc,
420 int drop)
421{
422 struct sock *sk = rds_rs_to_sk(rs);
423 int ret = 0;
424 unsigned long flags;
425
426 write_lock_irqsave(&rs->rs_recv_lock, flags);
427 if (!list_empty(&inc->i_item)) {
428 ret = 1;
429 if (drop) {
430 /* XXX make sure this i_conn is reliable */
431 rds_recv_rcvbuf_delta(rs, sk, inc->i_conn->c_lcong,
432 -be32_to_cpu(inc->i_hdr.h_len),
433 inc->i_hdr.h_dport);
434 list_del_init(&inc->i_item);
435 rds_inc_put(inc);
436 }
437 }
438 write_unlock_irqrestore(&rs->rs_recv_lock, flags);
439
440 rdsdebug("inc %p rs %p still %d dropped %d\n", inc, rs, ret, drop);
441 return ret;
442}
443
444/*
445 * Pull errors off the error queue.
446 * If msghdr is NULL, we will just purge the error queue.
447 */
448int rds_notify_queue_get(struct rds_sock *rs, struct msghdr *msghdr)
449{
450 struct rds_notifier *notifier;
451 struct rds_rdma_notify cmsg = { 0 }; /* fill holes with zero */
452 unsigned int count = 0, max_messages = ~0U;
453 unsigned long flags;
454 LIST_HEAD(copy);
455 int err = 0;
456
457
458 /* put_cmsg copies to user space and thus may sleep. We can't do this
459 * with rs_lock held, so first grab as many notifications as we can stuff
460 * in the user provided cmsg buffer. We don't try to copy more, to avoid
461 * losing notifications - except when the buffer is so small that it wouldn't
 462 * even hold a single notification. Then we give the caller as much of
 463 * this single message as we can squeeze in, and set MSG_CTRUNC.
464 */
465 if (msghdr) {
466 max_messages = msghdr->msg_controllen / CMSG_SPACE(sizeof(cmsg));
467 if (!max_messages)
468 max_messages = 1;
469 }
470
471 spin_lock_irqsave(&rs->rs_lock, flags);
472 while (!list_empty(&rs->rs_notify_queue) && count < max_messages) {
473 notifier = list_entry(rs->rs_notify_queue.next,
474 struct rds_notifier, n_list);
 475		list_move(&notifier->n_list, &copy);
476 count++;
477 }
478 spin_unlock_irqrestore(&rs->rs_lock, flags);
479
480 if (!count)
481 return 0;
482
 483	while (!list_empty(&copy)) {
484 notifier = list_entry(copy.next, struct rds_notifier, n_list);
485
486 if (msghdr) {
487 cmsg.user_token = notifier->n_user_token;
488 cmsg.status = notifier->n_status;
489
490 err = put_cmsg(msghdr, SOL_RDS, RDS_CMSG_RDMA_STATUS,
491 sizeof(cmsg), &cmsg);
492 if (err)
493 break;
494 }
495
 496		list_del_init(&notifier->n_list);
497 kfree(notifier);
498 }
499
500 /* If we bailed out because of an error in put_cmsg,
501 * we may be left with one or more notifications that we
502 * didn't process. Return them to the head of the list. */
 503	if (!list_empty(&copy)) {
504 spin_lock_irqsave(&rs->rs_lock, flags);
 505		list_splice(&copy, &rs->rs_notify_queue);
506 spin_unlock_irqrestore(&rs->rs_lock, flags);
507 }
508
509 return err;
510}
511
512/*
513 * Queue a congestion notification
514 */
515static int rds_notify_cong(struct rds_sock *rs, struct msghdr *msghdr)
516{
517 uint64_t notify = rs->rs_cong_notify;
518 unsigned long flags;
519 int err;
520
521 err = put_cmsg(msghdr, SOL_RDS, RDS_CMSG_CONG_UPDATE,
 522		       sizeof(notify), &notify);
523 if (err)
524 return err;
525
526 spin_lock_irqsave(&rs->rs_lock, flags);
527 rs->rs_cong_notify &= ~notify;
528 spin_unlock_irqrestore(&rs->rs_lock, flags);
529
530 return 0;
531}
532
533/*
534 * Receive any control messages.
535 */
536static int rds_cmsg_recv(struct rds_incoming *inc, struct msghdr *msg,
537 struct rds_sock *rs)
538{
539 int ret = 0;
540
541 if (inc->i_rdma_cookie) {
542 ret = put_cmsg(msg, SOL_RDS, RDS_CMSG_RDMA_DEST,
543 sizeof(inc->i_rdma_cookie), &inc->i_rdma_cookie);
544 if (ret)
545 goto out;
546 }
547
548 if ((inc->i_rx_tstamp.tv_sec != 0) &&
549 sock_flag(rds_rs_to_sk(rs), SOCK_RCVTSTAMP)) {
550 ret = put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMP,
551 sizeof(struct timeval),
552 &inc->i_rx_tstamp);
553 if (ret)
554 goto out;
555 }
556
557 if (rs->rs_rx_traces) {
558 struct rds_cmsg_rx_trace t;
559 int i, j;
560
561 memset(&t, 0, sizeof(t));
562 inc->i_rx_lat_trace[RDS_MSG_RX_CMSG] = local_clock();
563 t.rx_traces = rs->rs_rx_traces;
564 for (i = 0; i < rs->rs_rx_traces; i++) {
565 j = rs->rs_rx_trace[i];
566 t.rx_trace_pos[i] = j;
567 t.rx_trace[i] = inc->i_rx_lat_trace[j + 1] -
568 inc->i_rx_lat_trace[j];
569 }
570
571 ret = put_cmsg(msg, SOL_RDS, RDS_CMSG_RXPATH_LATENCY,
572 sizeof(t), &t);
573 if (ret)
574 goto out;
575 }
576
577out:
578 return ret;
579}
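/*
 * Userspace note (illustrative assumption, not part of this file): the
 * SCM_TIMESTAMP control message above carries the struct timeval stamped
 * in rds_recv_incoming(), and is only emitted once the application has
 * turned on timestamping; "fd" is a placeholder for an RDS socket.
 *
 *	int on = 1;
 *
 *	setsockopt(fd, SOL_SOCKET, SO_TIMESTAMP, &on, sizeof(on));
 *
 * Every subsequent recvmsg() that provides msg_control space then gets an
 * SCM_TIMESTAMP cmsg whose CMSG_DATA() is the struct timeval to copy out.
 */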
580
581static bool rds_recvmsg_zcookie(struct rds_sock *rs, struct msghdr *msg)
582{
583 struct rds_msg_zcopy_queue *q = &rs->rs_zcookie_queue;
584 struct rds_msg_zcopy_info *info = NULL;
585 struct rds_zcopy_cookies *done;
586 unsigned long flags;
587
588 if (!msg->msg_control)
589 return false;
590
591 if (!sock_flag(rds_rs_to_sk(rs), SOCK_ZEROCOPY) ||
592 msg->msg_controllen < CMSG_SPACE(sizeof(*done)))
593 return false;
594
595 spin_lock_irqsave(&q->lock, flags);
596 if (!list_empty(&q->zcookie_head)) {
597 info = list_entry(q->zcookie_head.next,
598 struct rds_msg_zcopy_info, rs_zcookie_next);
599 list_del(&info->rs_zcookie_next);
600 }
601 spin_unlock_irqrestore(&q->lock, flags);
602 if (!info)
603 return false;
604 done = &info->zcookies;
605 if (put_cmsg(msg, SOL_RDS, RDS_CMSG_ZCOPY_COMPLETION, sizeof(*done),
606 done)) {
607 spin_lock_irqsave(&q->lock, flags);
608 list_add(&info->rs_zcookie_next, &q->zcookie_head);
609 spin_unlock_irqrestore(&q->lock, flags);
610 return false;
611 }
612 kfree(info);
613 return true;
614}
615
616int rds_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
617 int msg_flags)
618{
619 struct sock *sk = sock->sk;
620 struct rds_sock *rs = rds_sk_to_rs(sk);
621 long timeo;
622 int ret = 0, nonblock = msg_flags & MSG_DONTWAIT;
623 DECLARE_SOCKADDR(struct sockaddr_in *, sin, msg->msg_name);
624 struct rds_incoming *inc = NULL;
625
626 /* udp_recvmsg()->sock_recvtimeo() gets away without locking too.. */
627 timeo = sock_rcvtimeo(sk, nonblock);
628
629 rdsdebug("size %zu flags 0x%x timeo %ld\n", size, msg_flags, timeo);
630
631 if (msg_flags & MSG_OOB)
632 goto out;
633 if (msg_flags & MSG_ERRQUEUE)
634 return sock_recv_errqueue(sk, msg, size, SOL_IP, IP_RECVERR);
635
636 while (1) {
637 /* If there are pending notifications, do those - and nothing else */
638 if (!list_empty(&rs->rs_notify_queue)) {
639 ret = rds_notify_queue_get(rs, msg);
640 break;
641 }
642
643 if (rs->rs_cong_notify) {
644 ret = rds_notify_cong(rs, msg);
645 break;
646 }
647
648 if (!rds_next_incoming(rs, &inc)) {
649 if (nonblock) {
650 bool reaped = rds_recvmsg_zcookie(rs, msg);
651
652 ret = reaped ? 0 : -EAGAIN;
653 break;
654 }
655
656 timeo = wait_event_interruptible_timeout(*sk_sleep(sk),
657 (!list_empty(&rs->rs_notify_queue) ||
658 rs->rs_cong_notify ||
659 rds_next_incoming(rs, &inc)), timeo);
660 rdsdebug("recvmsg woke inc %p timeo %ld\n", inc,
661 timeo);
662 if (timeo > 0 || timeo == MAX_SCHEDULE_TIMEOUT)
663 continue;
664
665 ret = timeo;
666 if (ret == 0)
667 ret = -ETIMEDOUT;
668 break;
669 }
670
671 rdsdebug("copying inc %p from %pI4:%u to user\n", inc,
672 &inc->i_conn->c_faddr,
673 ntohs(inc->i_hdr.h_sport));
674 ret = inc->i_conn->c_trans->inc_copy_to_user(inc, &msg->msg_iter);
675 if (ret < 0)
676 break;
677
678 /*
679 * if the message we just copied isn't at the head of the
680 * recv queue then someone else raced us to return it, try
681 * to get the next message.
682 */
683 if (!rds_still_queued(rs, inc, !(msg_flags & MSG_PEEK))) {
684 rds_inc_put(inc);
685 inc = NULL;
686 rds_stats_inc(s_recv_deliver_raced);
687 iov_iter_revert(&msg->msg_iter, ret);
688 continue;
689 }
690
691 if (ret < be32_to_cpu(inc->i_hdr.h_len)) {
692 if (msg_flags & MSG_TRUNC)
693 ret = be32_to_cpu(inc->i_hdr.h_len);
694 msg->msg_flags |= MSG_TRUNC;
695 }
696
697 if (rds_cmsg_recv(inc, msg, rs)) {
698 ret = -EFAULT;
699 goto out;
700 }
701 rds_recvmsg_zcookie(rs, msg);
702
703 rds_stats_inc(s_recv_delivered);
704
705 if (sin) {
706 sin->sin_family = AF_INET;
707 sin->sin_port = inc->i_hdr.h_sport;
708 sin->sin_addr.s_addr = inc->i_saddr;
709 memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
710 msg->msg_namelen = sizeof(*sin);
711 }
712 break;
713 }
714
715 if (inc)
716 rds_inc_put(inc);
717
718out:
719 return ret;
720}
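/*
 * Userspace usage sketch (illustrative assumption, not part of this file):
 * a minimal blocking receive against the loop above.  "fd" is an RDS
 * socket already bound with bind(); buf/len belong to the caller.  Note
 * the MSG_TRUNC semantics implemented above: if the datagram is larger
 * than the iovec, the return value is what was actually copied unless the
 * caller passed MSG_TRUNC (then the full datagram length is returned),
 * and MSG_TRUNC is set in msg_flags either way.
 *
 *	struct sockaddr_in from;
 *	struct iovec iov = { .iov_base = buf, .iov_len = len };
 *	struct msghdr msg = {
 *		.msg_name	= &from,
 *		.msg_namelen	= sizeof(from),
 *		.msg_iov	= &iov,
 *		.msg_iovlen	= 1,
 *	};
 *	ssize_t n;
 *
 *	n = recvmsg(fd, &msg, 0);
 *	if (n >= 0)
 *		printf("%zd bytes from %s:%u\n", n,
 *		       inet_ntoa(from.sin_addr), ntohs(from.sin_port));
 */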
721
722/*
723 * The socket is being shut down and we're asked to drop messages that were
724 * queued for recvmsg. The caller has unbound the socket so the receive path
725 * won't queue any more incoming fragments or messages on the socket.
726 */
727void rds_clear_recv_queue(struct rds_sock *rs)
728{
729 struct sock *sk = rds_rs_to_sk(rs);
730 struct rds_incoming *inc, *tmp;
731 unsigned long flags;
732
733 write_lock_irqsave(&rs->rs_recv_lock, flags);
734 list_for_each_entry_safe(inc, tmp, &rs->rs_recv_queue, i_item) {
735 rds_recv_rcvbuf_delta(rs, sk, inc->i_conn->c_lcong,
736 -be32_to_cpu(inc->i_hdr.h_len),
737 inc->i_hdr.h_dport);
738 list_del_init(&inc->i_item);
739 rds_inc_put(inc);
740 }
741 write_unlock_irqrestore(&rs->rs_recv_lock, flags);
742}
743
744/*
745 * inc->i_saddr isn't used here because it is only set in the receive
746 * path.
747 */
748void rds_inc_info_copy(struct rds_incoming *inc,
749 struct rds_info_iterator *iter,
750 __be32 saddr, __be32 daddr, int flip)
751{
752 struct rds_info_message minfo;
753
754 minfo.seq = be64_to_cpu(inc->i_hdr.h_sequence);
755 minfo.len = be32_to_cpu(inc->i_hdr.h_len);
756
757 if (flip) {
758 minfo.laddr = daddr;
759 minfo.faddr = saddr;
760 minfo.lport = inc->i_hdr.h_dport;
761 minfo.fport = inc->i_hdr.h_sport;
762 } else {
763 minfo.laddr = saddr;
764 minfo.faddr = daddr;
765 minfo.lport = inc->i_hdr.h_sport;
766 minfo.fport = inc->i_hdr.h_dport;
767 }
768
769 minfo.flags = 0;
770
771 rds_info_copy(iter, &minfo, sizeof(minfo));
772}