v3.1
  1/*
  2 * Copyright (c) 2006 Oracle.  All rights reserved.
  3 *
  4 * This software is available to you under a choice of one of two
  5 * licenses.  You may choose to be licensed under the terms of the GNU
  6 * General Public License (GPL) Version 2, available from the file
  7 * COPYING in the main directory of this source tree, or the
  8 * OpenIB.org BSD license below:
  9 *
 10 *     Redistribution and use in source and binary forms, with or
 11 *     without modification, are permitted provided that the following
 12 *     conditions are met:
 13 *
 14 *      - Redistributions of source code must retain the above
 15 *        copyright notice, this list of conditions and the following
 16 *        disclaimer.
 17 *
 18 *      - Redistributions in binary form must reproduce the above
 19 *        copyright notice, this list of conditions and the following
 20 *        disclaimer in the documentation and/or other materials
 21 *        provided with the distribution.
 22 *
 23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 30 * SOFTWARE.
 31 *
 32 */
 33#include <linux/kernel.h>
 34#include <linux/slab.h>
 35#include <net/sock.h>
 36#include <linux/in.h>
 37
 38#include "rds.h"
 39
 40void rds_inc_init(struct rds_incoming *inc, struct rds_connection *conn,
 41		  __be32 saddr)
 42{
 43	atomic_set(&inc->i_refcount, 1);
 44	INIT_LIST_HEAD(&inc->i_item);
 45	inc->i_conn = conn;
 46	inc->i_saddr = saddr;
 47	inc->i_rdma_cookie = 0;
 48}
 49EXPORT_SYMBOL_GPL(rds_inc_init);
 50
 51static void rds_inc_addref(struct rds_incoming *inc)
 52{
 53	rdsdebug("addref inc %p ref %d\n", inc, atomic_read(&inc->i_refcount));
 54	atomic_inc(&inc->i_refcount);
 55}
 56
 57void rds_inc_put(struct rds_incoming *inc)
 58{
 59	rdsdebug("put inc %p ref %d\n", inc, atomic_read(&inc->i_refcount));
 60	if (atomic_dec_and_test(&inc->i_refcount)) {
 61		BUG_ON(!list_empty(&inc->i_item));
 62
 63		inc->i_conn->c_trans->inc_free(inc);
 64	}
 65}
 66EXPORT_SYMBOL_GPL(rds_inc_put);
 67
 68static void rds_recv_rcvbuf_delta(struct rds_sock *rs, struct sock *sk,
 69				  struct rds_cong_map *map,
 70				  int delta, __be16 port)
 71{
 72	int now_congested;
 73
 74	if (delta == 0)
 75		return;
 76
 77	rs->rs_rcv_bytes += delta;
 78	now_congested = rs->rs_rcv_bytes > rds_sk_rcvbuf(rs);
 79
 80	rdsdebug("rs %p (%pI4:%u) recv bytes %d buf %d "
 81	  "now_cong %d delta %d\n",
 82	  rs, &rs->rs_bound_addr,
 83	  ntohs(rs->rs_bound_port), rs->rs_rcv_bytes,
 84	  rds_sk_rcvbuf(rs), now_congested, delta);
 85
 86	/* wasn't -> am congested */
 87	if (!rs->rs_congested && now_congested) {
 88		rs->rs_congested = 1;
 89		rds_cong_set_bit(map, port);
 90		rds_cong_queue_updates(map);
 91	}
 92	/* was -> aren't congested */
 93	/* Require more free space before reporting uncongested to prevent
 94	   bouncing cong/uncong state too often */
 95	else if (rs->rs_congested && (rs->rs_rcv_bytes < (rds_sk_rcvbuf(rs)/2))) {
 96		rs->rs_congested = 0;
 97		rds_cong_clear_bit(map, port);
 98		rds_cong_queue_updates(map);
 99	}
100
101	/* do nothing if no change in cong state */
102}
103
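The hysteresis above is easier to see in isolation. Below is an editor's sketch (not kernel code; all names are made up) of the same two thresholds: the socket is reported congested as soon as the queued bytes exceed its receive buffer, and only reported uncongested again once the backlog falls below half the buffer, so the per-port congestion bit does not bounce on every small delta.

#include <stdbool.h>

/* Illustration of the thresholds used by rds_recv_rcvbuf_delta(). */
struct toy_sock {
	int rcv_bytes;		/* bytes queued waiting for recvmsg */
	int rcvbuf;		/* receive buffer limit */
	bool congested;		/* state last reported to the cong map */
};

/* Returns true when the reported congestion state changes. */
static bool toy_rcvbuf_delta(struct toy_sock *s, int delta)
{
	if (delta == 0)
		return false;
	s->rcv_bytes += delta;
	if (!s->congested && s->rcv_bytes > s->rcvbuf) {
		s->congested = true;		/* wasn't -> am congested */
		return true;
	}
	if (s->congested && s->rcv_bytes < s->rcvbuf / 2) {
		s->congested = false;		/* was -> aren't congested */
		return true;
	}
	return false;			/* no change in cong state */
}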
104/*
105 * Process all extension headers that come with this message.
106 */
107static void rds_recv_incoming_exthdrs(struct rds_incoming *inc, struct rds_sock *rs)
108{
109	struct rds_header *hdr = &inc->i_hdr;
110	unsigned int pos = 0, type, len;
111	union {
112		struct rds_ext_header_version version;
113		struct rds_ext_header_rdma rdma;
114		struct rds_ext_header_rdma_dest rdma_dest;
115	} buffer;
116
117	while (1) {
118		len = sizeof(buffer);
119		type = rds_message_next_extension(hdr, &pos, &buffer, &len);
120		if (type == RDS_EXTHDR_NONE)
121			break;
122		/* Process extension header here */
123		switch (type) {
124		case RDS_EXTHDR_RDMA:
125			rds_rdma_unuse(rs, be32_to_cpu(buffer.rdma.h_rdma_rkey), 0);
126			break;
127
128		case RDS_EXTHDR_RDMA_DEST:
129			/* We ignore the size for now. We could stash it
130			 * somewhere and use it for error checking. */
131			inc->i_rdma_cookie = rds_rdma_make_cookie(
132					be32_to_cpu(buffer.rdma_dest.h_rdma_rkey),
133					be32_to_cpu(buffer.rdma_dest.h_rdma_offset));
134
135			break;
136		}
137	}
138}
139
140/*
141 * The transport must make sure that this is serialized against other
142 * rx and conn reset on this specific conn.
143 *
144 * We currently assert that only one fragmented message will be sent
145 * down a connection at a time.  This lets us reassemble in the conn
146 * instead of per-flow which means that we don't have to go digging through
147 * flows to tear down partial reassembly progress on conn failure and
148 * we save flow lookup and locking for each frag arrival.  It does mean
149 * that small messages will wait behind large ones.  Fragmenting at all
150 * is only to reduce the memory consumption of pre-posted buffers.
151 *
152 * The caller passes in saddr and daddr instead of us getting it from the
153 * conn.  This lets loopback, who only has one conn for both directions,
154 * tell us which roles the addrs in the conn are playing for this message.
155 */
156void rds_recv_incoming(struct rds_connection *conn, __be32 saddr, __be32 daddr,
157		       struct rds_incoming *inc, gfp_t gfp, enum km_type km)
158{
159	struct rds_sock *rs = NULL;
160	struct sock *sk;
161	unsigned long flags;
162
163	inc->i_conn = conn;
164	inc->i_rx_jiffies = jiffies;
165
166	rdsdebug("conn %p next %llu inc %p seq %llu len %u sport %u dport %u "
167		 "flags 0x%x rx_jiffies %lu\n", conn,
168		 (unsigned long long)conn->c_next_rx_seq,
169		 inc,
170		 (unsigned long long)be64_to_cpu(inc->i_hdr.h_sequence),
171		 be32_to_cpu(inc->i_hdr.h_len),
172		 be16_to_cpu(inc->i_hdr.h_sport),
173		 be16_to_cpu(inc->i_hdr.h_dport),
174		 inc->i_hdr.h_flags,
175		 inc->i_rx_jiffies);
176
177	/*
178	 * Sequence numbers should only increase.  Messages get their
179	 * sequence number as they're queued in a sending conn.  They
180	 * can be dropped, though, if the sending socket is closed before
181	 * they hit the wire.  So sequence numbers can skip forward
182	 * under normal operation.  They can also drop back in the conn
183	 * failover case as previously sent messages are resent down the
184	 * new instance of a conn.  We drop those, otherwise we have
185	 * to assume that the next valid seq does not come after a
186	 * hole in the fragment stream.
187	 *
188	 * The headers don't give us a way to realize if fragments of
189	 * a message have been dropped.  We assume that frags that arrive
190	 * to a flow are part of the current message on the flow that is
191	 * being reassembled.  This means that senders can't drop messages
192	 * from the sending conn until all their frags are sent.
193	 *
194	 * XXX we could spend more on the wire to get more robust failure
195	 * detection, arguably worth it to avoid data corruption.
196	 */
197	if (be64_to_cpu(inc->i_hdr.h_sequence) < conn->c_next_rx_seq &&
198	    (inc->i_hdr.h_flags & RDS_FLAG_RETRANSMITTED)) {
199		rds_stats_inc(s_recv_drop_old_seq);
200		goto out;
201	}
202	conn->c_next_rx_seq = be64_to_cpu(inc->i_hdr.h_sequence) + 1;
203
204	if (rds_sysctl_ping_enable && inc->i_hdr.h_dport == 0) {
205		rds_stats_inc(s_recv_ping);
206		rds_send_pong(conn, inc->i_hdr.h_sport);
207		goto out;
208	}
209
210	rs = rds_find_bound(daddr, inc->i_hdr.h_dport);
211	if (!rs) {
212		rds_stats_inc(s_recv_drop_no_sock);
213		goto out;
214	}
215
216	/* Process extension headers */
217	rds_recv_incoming_exthdrs(inc, rs);
218
219	/* We can be racing with rds_release() which marks the socket dead. */
220	sk = rds_rs_to_sk(rs);
221
222	/* serialize with rds_release -> sock_orphan */
223	write_lock_irqsave(&rs->rs_recv_lock, flags);
224	if (!sock_flag(sk, SOCK_DEAD)) {
225		rdsdebug("adding inc %p to rs %p's recv queue\n", inc, rs);
226		rds_stats_inc(s_recv_queued);
227		rds_recv_rcvbuf_delta(rs, sk, inc->i_conn->c_lcong,
228				      be32_to_cpu(inc->i_hdr.h_len),
229				      inc->i_hdr.h_dport);
230		rds_inc_addref(inc);
231		list_add_tail(&inc->i_item, &rs->rs_recv_queue);
232		__rds_wake_sk_sleep(sk);
233	} else {
234		rds_stats_inc(s_recv_drop_dead_sock);
235	}
236	write_unlock_irqrestore(&rs->rs_recv_lock, flags);
237
238out:
239	if (rs)
240		rds_sock_put(rs);
241}
242EXPORT_SYMBOL_GPL(rds_recv_incoming);
243
244/*
245 * be very careful here.  This is being called as the condition in
246 * wait_event_*() needs to cope with being called many times.
247 */
248static int rds_next_incoming(struct rds_sock *rs, struct rds_incoming **inc)
249{
250	unsigned long flags;
251
252	if (!*inc) {
253		read_lock_irqsave(&rs->rs_recv_lock, flags);
254		if (!list_empty(&rs->rs_recv_queue)) {
255			*inc = list_entry(rs->rs_recv_queue.next,
256					  struct rds_incoming,
257					  i_item);
258			rds_inc_addref(*inc);
259		}
260		read_unlock_irqrestore(&rs->rs_recv_lock, flags);
261	}
262
263	return *inc != NULL;
264}
265
266static int rds_still_queued(struct rds_sock *rs, struct rds_incoming *inc,
267			    int drop)
268{
269	struct sock *sk = rds_rs_to_sk(rs);
270	int ret = 0;
271	unsigned long flags;
272
273	write_lock_irqsave(&rs->rs_recv_lock, flags);
274	if (!list_empty(&inc->i_item)) {
275		ret = 1;
276		if (drop) {
277			/* XXX make sure this i_conn is reliable */
278			rds_recv_rcvbuf_delta(rs, sk, inc->i_conn->c_lcong,
279					      -be32_to_cpu(inc->i_hdr.h_len),
280					      inc->i_hdr.h_dport);
281			list_del_init(&inc->i_item);
282			rds_inc_put(inc);
283		}
284	}
285	write_unlock_irqrestore(&rs->rs_recv_lock, flags);
286
287	rdsdebug("inc %p rs %p still %d dropped %d\n", inc, rs, ret, drop);
288	return ret;
289}
290
291/*
292 * Pull errors off the error queue.
293 * If msghdr is NULL, we will just purge the error queue.
294 */
295int rds_notify_queue_get(struct rds_sock *rs, struct msghdr *msghdr)
296{
297	struct rds_notifier *notifier;
298	struct rds_rdma_notify cmsg = { 0 }; /* fill holes with zero */
299	unsigned int count = 0, max_messages = ~0U;
300	unsigned long flags;
301	LIST_HEAD(copy);
302	int err = 0;
303
304
305	/* put_cmsg copies to user space and thus may sleep. We can't do this
306	 * with rs_lock held, so first grab as many notifications as we can stuff
307	 * in the user provided cmsg buffer. We don't try to copy more, to avoid
308	 * losing notifications - except when the buffer is so small that it wouldn't
309	 * even hold a single notification. Then we give him as much of this single
310	 * msg as we can squeeze in, and set MSG_CTRUNC.
311	 */
312	if (msghdr) {
313		max_messages = msghdr->msg_controllen / CMSG_SPACE(sizeof(cmsg));
314		if (!max_messages)
315			max_messages = 1;
316	}
317
318	spin_lock_irqsave(&rs->rs_lock, flags);
319	while (!list_empty(&rs->rs_notify_queue) && count < max_messages) {
320		notifier = list_entry(rs->rs_notify_queue.next,
321				struct rds_notifier, n_list);
322		list_move(&notifier->n_list, &copy);
323		count++;
324	}
325	spin_unlock_irqrestore(&rs->rs_lock, flags);
326
327	if (!count)
328		return 0;
329
330	while (!list_empty(&copy)) {
331		notifier = list_entry(copy.next, struct rds_notifier, n_list);
332
333		if (msghdr) {
334			cmsg.user_token = notifier->n_user_token;
335			cmsg.status = notifier->n_status;
336
337			err = put_cmsg(msghdr, SOL_RDS, RDS_CMSG_RDMA_STATUS,
338				       sizeof(cmsg), &cmsg);
339			if (err)
340				break;
341		}
342
343		list_del_init(&notifier->n_list);
344		kfree(notifier);
345	}
346
347	/* If we bailed out because of an error in put_cmsg,
348	 * we may be left with one or more notifications that we
349	 * didn't process. Return them to the head of the list. */
350	if (!list_empty(&copy)) {
351		spin_lock_irqsave(&rs->rs_lock, flags);
352		list_splice(&copy, &rs->rs_notify_queue);
353		spin_unlock_irqrestore(&rs->rs_lock, flags);
354	}
355
356	return err;
357}
358
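A minimal userspace sketch of the consumer side of this notify queue follows. It assumes the uapi definitions in <linux/rds.h> (SOL_RDS, RDS_CMSG_RDMA_STATUS, struct rds_rdma_notify) and an already created and bound RDS socket fd; the helper name is made up.

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <linux/rds.h>

/* Sketch: drain queued RDMA completion notifications.  A zero-length
 * recvmsg() is enough because rds_recvmsg() services the notify queue
 * before the receive queue; MSG_PEEK keeps a queued data message from
 * being consumed by this probe. */
static void drain_rdma_notifications(int fd)
{
	char cbuf[CMSG_SPACE(sizeof(struct rds_rdma_notify)) * 16];
	struct msghdr msg;
	struct cmsghdr *cmsg;

	memset(&msg, 0, sizeof(msg));
	msg.msg_control = cbuf;
	msg.msg_controllen = sizeof(cbuf);

	if (recvmsg(fd, &msg, MSG_PEEK | MSG_DONTWAIT) < 0)
		return;

	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
		struct rds_rdma_notify note;

		if (cmsg->cmsg_level != SOL_RDS ||
		    cmsg->cmsg_type != RDS_CMSG_RDMA_STATUS)
			continue;
		memcpy(&note, CMSG_DATA(cmsg), sizeof(note));
		printf("RDMA op %llu completed, status %d\n",
		       (unsigned long long)note.user_token, (int)note.status);
	}
}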
359/*
360 * Queue a congestion notification
361 */
362static int rds_notify_cong(struct rds_sock *rs, struct msghdr *msghdr)
363{
364	uint64_t notify = rs->rs_cong_notify;
365	unsigned long flags;
366	int err;
367
368	err = put_cmsg(msghdr, SOL_RDS, RDS_CMSG_CONG_UPDATE,
369			sizeof(notify), &notify);
370	if (err)
371		return err;
372
373	spin_lock_irqsave(&rs->rs_lock, flags);
374	rs->rs_cong_notify &= ~notify;
375	spin_unlock_irqrestore(&rs->rs_lock, flags);
376
377	return 0;
378}
379
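rds_notify_cong() hands userspace a single 64-bit mask in one control message. A hypothetical helper to extract it might look like the sketch below; it assumes congestion monitoring was requested with the RDS_CONG_MONITOR socket option and that RDS_CMSG_CONG_UPDATE comes from <linux/rds.h>.

#include <stdint.h>
#include <string.h>
#include <sys/socket.h>
#include <linux/rds.h>

/* Sketch: pull out the 64-bit mask queued by rds_notify_cong(). */
static uint64_t cong_update_mask(struct msghdr *msg)
{
	struct cmsghdr *cmsg;
	uint64_t mask = 0;

	for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
		if (cmsg->cmsg_level == SOL_RDS &&
		    cmsg->cmsg_type == RDS_CMSG_CONG_UPDATE)
			memcpy(&mask, CMSG_DATA(cmsg), sizeof(mask));
	}
	/* Set bits name destination port groups the socket had previously
	 * seen as congested (exact bit layout is defined by the RDS
	 * congestion monitor). */
	return mask;
}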
380/*
381 * Receive any control messages.
382 */
383static int rds_cmsg_recv(struct rds_incoming *inc, struct msghdr *msg)
384{
385	int ret = 0;
386
387	if (inc->i_rdma_cookie) {
388		ret = put_cmsg(msg, SOL_RDS, RDS_CMSG_RDMA_DEST,
389				sizeof(inc->i_rdma_cookie), &inc->i_rdma_cookie);
390		if (ret)
391			return ret;
392	}
393
394	return 0;
395}
396
397int rds_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
398		size_t size, int msg_flags)
399{
400	struct sock *sk = sock->sk;
401	struct rds_sock *rs = rds_sk_to_rs(sk);
402	long timeo;
403	int ret = 0, nonblock = msg_flags & MSG_DONTWAIT;
404	struct sockaddr_in *sin;
405	struct rds_incoming *inc = NULL;
406
407	/* udp_recvmsg()->sock_recvtimeo() gets away without locking too.. */
408	timeo = sock_rcvtimeo(sk, nonblock);
409
410	rdsdebug("size %zu flags 0x%x timeo %ld\n", size, msg_flags, timeo);
411
412	if (msg_flags & MSG_OOB)
413		goto out;
414
415	while (1) {
416		/* If there are pending notifications, do those - and nothing else */
417		if (!list_empty(&rs->rs_notify_queue)) {
418			ret = rds_notify_queue_get(rs, msg);
419			break;
420		}
421
422		if (rs->rs_cong_notify) {
423			ret = rds_notify_cong(rs, msg);
424			break;
425		}
426
427		if (!rds_next_incoming(rs, &inc)) {
428			if (nonblock) {
429				ret = -EAGAIN;
430				break;
431			}
432
433			timeo = wait_event_interruptible_timeout(*sk_sleep(sk),
434					(!list_empty(&rs->rs_notify_queue) ||
435					 rs->rs_cong_notify ||
436					 rds_next_incoming(rs, &inc)), timeo);
437			rdsdebug("recvmsg woke inc %p timeo %ld\n", inc,
438				 timeo);
439			if (timeo > 0 || timeo == MAX_SCHEDULE_TIMEOUT)
440				continue;
441
442			ret = timeo;
443			if (ret == 0)
444				ret = -ETIMEDOUT;
445			break;
446		}
447
448		rdsdebug("copying inc %p from %pI4:%u to user\n", inc,
449			 &inc->i_conn->c_faddr,
450			 ntohs(inc->i_hdr.h_sport));
451		ret = inc->i_conn->c_trans->inc_copy_to_user(inc, msg->msg_iov,
452							     size);
453		if (ret < 0)
454			break;
455
456		/*
457		 * if the message we just copied isn't at the head of the
458		 * recv queue then someone else raced us to return it, try
459		 * to get the next message.
460		 */
461		if (!rds_still_queued(rs, inc, !(msg_flags & MSG_PEEK))) {
462			rds_inc_put(inc);
463			inc = NULL;
464			rds_stats_inc(s_recv_deliver_raced);
465			continue;
466		}
467
468		if (ret < be32_to_cpu(inc->i_hdr.h_len)) {
469			if (msg_flags & MSG_TRUNC)
470				ret = be32_to_cpu(inc->i_hdr.h_len);
471			msg->msg_flags |= MSG_TRUNC;
472		}
473
474		if (rds_cmsg_recv(inc, msg)) {
475			ret = -EFAULT;
476			goto out;
477		}
478
479		rds_stats_inc(s_recv_delivered);
480
481		sin = (struct sockaddr_in *)msg->msg_name;
482		if (sin) {
483			sin->sin_family = AF_INET;
484			sin->sin_port = inc->i_hdr.h_sport;
485			sin->sin_addr.s_addr = inc->i_saddr;
486			memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
487		}
488		break;
489	}
490
491	if (inc)
492		rds_inc_put(inc);
493
494out:
495	return ret;
496}
497
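From userspace the whole path above boils down to an ordinary recvmsg() call. A minimal sketch follows; creating and binding the RDS socket is assumed, and the helper name is made up. Each call returns one complete RDS message, with MSG_TRUNC set in msg_flags when the supplied buffer was smaller than the message (mirroring the h_len check above).

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <netinet/in.h>
#include <arpa/inet.h>

/* Sketch: blocking receive of one message on a bound RDS socket. */
static ssize_t rds_recv_one(int fd, void *buf, size_t len)
{
	struct sockaddr_in from;
	struct iovec iov = { .iov_base = buf, .iov_len = len };
	struct msghdr msg = {
		.msg_name	= &from,
		.msg_namelen	= sizeof(from),
		.msg_iov	= &iov,
		.msg_iovlen	= 1,
	};
	ssize_t ret = recvmsg(fd, &msg, 0);

	if (ret < 0)
		return ret;
	if (msg.msg_flags & MSG_TRUNC)
		fprintf(stderr, "message truncated\n");
	printf("%zd bytes from %s:%u\n", ret,
	       inet_ntoa(from.sin_addr), ntohs(from.sin_port));
	return ret;
}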
498/*
499 * The socket is being shut down and we're asked to drop messages that were
500 * queued for recvmsg.  The caller has unbound the socket so the receive path
501 * won't queue any more incoming fragments or messages on the socket.
502 */
503void rds_clear_recv_queue(struct rds_sock *rs)
504{
505	struct sock *sk = rds_rs_to_sk(rs);
506	struct rds_incoming *inc, *tmp;
507	unsigned long flags;
508
509	write_lock_irqsave(&rs->rs_recv_lock, flags);
510	list_for_each_entry_safe(inc, tmp, &rs->rs_recv_queue, i_item) {
511		rds_recv_rcvbuf_delta(rs, sk, inc->i_conn->c_lcong,
512				      -be32_to_cpu(inc->i_hdr.h_len),
513				      inc->i_hdr.h_dport);
514		list_del_init(&inc->i_item);
515		rds_inc_put(inc);
516	}
517	write_unlock_irqrestore(&rs->rs_recv_lock, flags);
518}
519
520/*
521 * inc->i_saddr isn't used here because it is only set in the receive
522 * path.
523 */
524void rds_inc_info_copy(struct rds_incoming *inc,
525		       struct rds_info_iterator *iter,
526		       __be32 saddr, __be32 daddr, int flip)
527{
528	struct rds_info_message minfo;
529
530	minfo.seq = be64_to_cpu(inc->i_hdr.h_sequence);
531	minfo.len = be32_to_cpu(inc->i_hdr.h_len);
532
533	if (flip) {
534		minfo.laddr = daddr;
535		minfo.faddr = saddr;
536		minfo.lport = inc->i_hdr.h_dport;
537		minfo.fport = inc->i_hdr.h_sport;
538	} else {
539		minfo.laddr = saddr;
540		minfo.faddr = daddr;
541		minfo.lport = inc->i_hdr.h_sport;
542		minfo.fport = inc->i_hdr.h_dport;
543	}
544
545	rds_info_copy(iter, &minfo, sizeof(minfo));
546}
v4.17
  1/*
  2 * Copyright (c) 2006 Oracle.  All rights reserved.
  3 *
  4 * This software is available to you under a choice of one of two
  5 * licenses.  You may choose to be licensed under the terms of the GNU
  6 * General Public License (GPL) Version 2, available from the file
  7 * COPYING in the main directory of this source tree, or the
  8 * OpenIB.org BSD license below:
  9 *
 10 *     Redistribution and use in source and binary forms, with or
 11 *     without modification, are permitted provided that the following
 12 *     conditions are met:
 13 *
 14 *      - Redistributions of source code must retain the above
 15 *        copyright notice, this list of conditions and the following
 16 *        disclaimer.
 17 *
 18 *      - Redistributions in binary form must reproduce the above
 19 *        copyright notice, this list of conditions and the following
 20 *        disclaimer in the documentation and/or other materials
 21 *        provided with the distribution.
 22 *
 23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 30 * SOFTWARE.
 31 *
 32 */
 33#include <linux/kernel.h>
 34#include <linux/slab.h>
 35#include <net/sock.h>
 36#include <linux/in.h>
 37#include <linux/export.h>
 38#include <linux/time.h>
 39#include <linux/rds.h>
 40
 41#include "rds.h"
 42
 43void rds_inc_init(struct rds_incoming *inc, struct rds_connection *conn,
 44		  __be32 saddr)
 45{
 46	int i;
 47
 48	refcount_set(&inc->i_refcount, 1);
 49	INIT_LIST_HEAD(&inc->i_item);
 50	inc->i_conn = conn;
 51	inc->i_saddr = saddr;
 52	inc->i_rdma_cookie = 0;
 53	inc->i_rx_tstamp.tv_sec = 0;
 54	inc->i_rx_tstamp.tv_usec = 0;
 55
 56	for (i = 0; i < RDS_RX_MAX_TRACES; i++)
 57		inc->i_rx_lat_trace[i] = 0;
 58}
 59EXPORT_SYMBOL_GPL(rds_inc_init);
 60
 61void rds_inc_path_init(struct rds_incoming *inc, struct rds_conn_path *cp,
 62		       __be32 saddr)
 63{
 64	refcount_set(&inc->i_refcount, 1);
 65	INIT_LIST_HEAD(&inc->i_item);
 66	inc->i_conn = cp->cp_conn;
 67	inc->i_conn_path = cp;
 68	inc->i_saddr = saddr;
 69	inc->i_rdma_cookie = 0;
 70	inc->i_rx_tstamp.tv_sec = 0;
 71	inc->i_rx_tstamp.tv_usec = 0;
 72}
 73EXPORT_SYMBOL_GPL(rds_inc_path_init);
 74
 75static void rds_inc_addref(struct rds_incoming *inc)
 76{
 77	rdsdebug("addref inc %p ref %d\n", inc, refcount_read(&inc->i_refcount));
 78	refcount_inc(&inc->i_refcount);
 79}
 80
 81void rds_inc_put(struct rds_incoming *inc)
 82{
 83	rdsdebug("put inc %p ref %d\n", inc, refcount_read(&inc->i_refcount));
 84	if (refcount_dec_and_test(&inc->i_refcount)) {
 85		BUG_ON(!list_empty(&inc->i_item));
 86
 87		inc->i_conn->c_trans->inc_free(inc);
 88	}
 89}
 90EXPORT_SYMBOL_GPL(rds_inc_put);
 91
 92static void rds_recv_rcvbuf_delta(struct rds_sock *rs, struct sock *sk,
 93				  struct rds_cong_map *map,
 94				  int delta, __be16 port)
 95{
 96	int now_congested;
 97
 98	if (delta == 0)
 99		return;
100
101	rs->rs_rcv_bytes += delta;
102	if (delta > 0)
103		rds_stats_add(s_recv_bytes_added_to_socket, delta);
104	else
105		rds_stats_add(s_recv_bytes_removed_from_socket, -delta);
106	now_congested = rs->rs_rcv_bytes > rds_sk_rcvbuf(rs);
107
108	rdsdebug("rs %p (%pI4:%u) recv bytes %d buf %d "
109	  "now_cong %d delta %d\n",
110	  rs, &rs->rs_bound_addr,
111	  ntohs(rs->rs_bound_port), rs->rs_rcv_bytes,
112	  rds_sk_rcvbuf(rs), now_congested, delta);
113
114	/* wasn't -> am congested */
115	if (!rs->rs_congested && now_congested) {
116		rs->rs_congested = 1;
117		rds_cong_set_bit(map, port);
118		rds_cong_queue_updates(map);
119	}
120	/* was -> aren't congested */
121	/* Require more free space before reporting uncongested to prevent
122	   bouncing cong/uncong state too often */
123	else if (rs->rs_congested && (rs->rs_rcv_bytes < (rds_sk_rcvbuf(rs)/2))) {
124		rs->rs_congested = 0;
125		rds_cong_clear_bit(map, port);
126		rds_cong_queue_updates(map);
127	}
128
129	/* do nothing if no change in cong state */
130}
131
132static void rds_conn_peer_gen_update(struct rds_connection *conn,
133				     u32 peer_gen_num)
134{
135	int i;
136	struct rds_message *rm, *tmp;
137	unsigned long flags;
138
139	WARN_ON(conn->c_trans->t_type != RDS_TRANS_TCP);
140	if (peer_gen_num != 0) {
141		if (conn->c_peer_gen_num != 0 &&
142		    peer_gen_num != conn->c_peer_gen_num) {
143			for (i = 0; i < RDS_MPATH_WORKERS; i++) {
144				struct rds_conn_path *cp;
145
146				cp = &conn->c_path[i];
147				spin_lock_irqsave(&cp->cp_lock, flags);
148				cp->cp_next_tx_seq = 1;
149				cp->cp_next_rx_seq = 0;
150				list_for_each_entry_safe(rm, tmp,
151							 &cp->cp_retrans,
152							 m_conn_item) {
153					set_bit(RDS_MSG_FLUSH, &rm->m_flags);
154				}
155				spin_unlock_irqrestore(&cp->cp_lock, flags);
156			}
157		}
158		conn->c_peer_gen_num = peer_gen_num;
159	}
160}
161
162/*
163 * Process all extension headers that come with this message.
164 */
165static void rds_recv_incoming_exthdrs(struct rds_incoming *inc, struct rds_sock *rs)
166{
167	struct rds_header *hdr = &inc->i_hdr;
168	unsigned int pos = 0, type, len;
169	union {
170		struct rds_ext_header_version version;
171		struct rds_ext_header_rdma rdma;
172		struct rds_ext_header_rdma_dest rdma_dest;
173	} buffer;
174
175	while (1) {
176		len = sizeof(buffer);
177		type = rds_message_next_extension(hdr, &pos, &buffer, &len);
178		if (type == RDS_EXTHDR_NONE)
179			break;
180		/* Process extension header here */
181		switch (type) {
182		case RDS_EXTHDR_RDMA:
183			rds_rdma_unuse(rs, be32_to_cpu(buffer.rdma.h_rdma_rkey), 0);
184			break;
185
186		case RDS_EXTHDR_RDMA_DEST:
187			/* We ignore the size for now. We could stash it
188			 * somewhere and use it for error checking. */
189			inc->i_rdma_cookie = rds_rdma_make_cookie(
190					be32_to_cpu(buffer.rdma_dest.h_rdma_rkey),
191					be32_to_cpu(buffer.rdma_dest.h_rdma_offset));
192
193			break;
194		}
195	}
196}
197
198static void rds_recv_hs_exthdrs(struct rds_header *hdr,
199				struct rds_connection *conn)
200{
201	unsigned int pos = 0, type, len;
202	union {
203		struct rds_ext_header_version version;
204		u16 rds_npaths;
205		u32 rds_gen_num;
206	} buffer;
207	u32 new_peer_gen_num = 0;
208
209	while (1) {
210		len = sizeof(buffer);
211		type = rds_message_next_extension(hdr, &pos, &buffer, &len);
212		if (type == RDS_EXTHDR_NONE)
213			break;
214		/* Process extension header here */
215		switch (type) {
216		case RDS_EXTHDR_NPATHS:
217			conn->c_npaths = min_t(int, RDS_MPATH_WORKERS,
218					       be16_to_cpu(buffer.rds_npaths));
219			break;
220		case RDS_EXTHDR_GEN_NUM:
221			new_peer_gen_num = be32_to_cpu(buffer.rds_gen_num);
222			break;
223		default:
224			pr_warn_ratelimited("ignoring unknown exthdr type "
225					     "0x%x\n", type);
226		}
227	}
228	/* if RDS_EXTHDR_NPATHS was not found, default to a single-path */
229	conn->c_npaths = max_t(int, conn->c_npaths, 1);
230	conn->c_ping_triggered = 0;
231	rds_conn_peer_gen_update(conn, new_peer_gen_num);
232}
233
234/* rds_start_mprds() will synchronously start multiple paths when appropriate.
235 * The scheme is based on the following rules:
236 *
237 * 1. rds_sendmsg on first connect attempt sends the probe ping, with the
238 *    sender's npaths (s_npaths)
239 * 2. rcvr of probe-ping knows the mprds_paths = min(s_npaths, r_npaths). It
240 *    sends back a probe-pong with r_npaths. After that, if rcvr is the
241 *    smaller ip addr, it starts rds_conn_path_connect_if_down on all
242 *    mprds_paths.
243 * 3. sender gets woken up, and can move to rds_conn_path_connect_if_down.
244 *    If it is the smaller ipaddr, rds_conn_path_connect_if_down can be
245 *    called after reception of the probe-pong on all mprds_paths.
246 *    Otherwise (sender of probe-ping is not the smaller ip addr): just call
247 *    rds_conn_path_connect_if_down on the hashed path. (see rule 4)
248 * 4. rds_connect_worker must only trigger a connection if laddr < faddr.
249 * 5. sender may end up queuing the packet on the cp. will get sent out later.
250 *    when connection is completed.
251 */
252static void rds_start_mprds(struct rds_connection *conn)
253{
254	int i;
255	struct rds_conn_path *cp;
256
257	if (conn->c_npaths > 1 &&
258	    IS_CANONICAL(conn->c_laddr, conn->c_faddr)) {
259		for (i = 0; i < conn->c_npaths; i++) {
260			cp = &conn->c_path[i];
261			rds_conn_path_connect_if_down(cp);
262		}
263	}
264}
265
266/*
267 * The transport must make sure that this is serialized against other
268 * rx and conn reset on this specific conn.
269 *
270 * We currently assert that only one fragmented message will be sent
271 * down a connection at a time.  This lets us reassemble in the conn
272 * instead of per-flow which means that we don't have to go digging through
273 * flows to tear down partial reassembly progress on conn failure and
274 * we save flow lookup and locking for each frag arrival.  It does mean
275 * that small messages will wait behind large ones.  Fragmenting at all
276 * is only to reduce the memory consumption of pre-posted buffers.
277 *
278 * The caller passes in saddr and daddr instead of us getting it from the
279 * conn.  This lets loopback, who only has one conn for both directions,
280 * tell us which roles the addrs in the conn are playing for this message.
281 */
282void rds_recv_incoming(struct rds_connection *conn, __be32 saddr, __be32 daddr,
283		       struct rds_incoming *inc, gfp_t gfp)
284{
285	struct rds_sock *rs = NULL;
286	struct sock *sk;
287	unsigned long flags;
288	struct rds_conn_path *cp;
289
290	inc->i_conn = conn;
291	inc->i_rx_jiffies = jiffies;
292	if (conn->c_trans->t_mp_capable)
293		cp = inc->i_conn_path;
294	else
295		cp = &conn->c_path[0];
296
297	rdsdebug("conn %p next %llu inc %p seq %llu len %u sport %u dport %u "
298		 "flags 0x%x rx_jiffies %lu\n", conn,
299		 (unsigned long long)cp->cp_next_rx_seq,
300		 inc,
301		 (unsigned long long)be64_to_cpu(inc->i_hdr.h_sequence),
302		 be32_to_cpu(inc->i_hdr.h_len),
303		 be16_to_cpu(inc->i_hdr.h_sport),
304		 be16_to_cpu(inc->i_hdr.h_dport),
305		 inc->i_hdr.h_flags,
306		 inc->i_rx_jiffies);
307
308	/*
309	 * Sequence numbers should only increase.  Messages get their
310	 * sequence number as they're queued in a sending conn.  They
311	 * can be dropped, though, if the sending socket is closed before
312	 * they hit the wire.  So sequence numbers can skip forward
313	 * under normal operation.  They can also drop back in the conn
314	 * failover case as previously sent messages are resent down the
315	 * new instance of a conn.  We drop those, otherwise we have
316	 * to assume that the next valid seq does not come after a
317	 * hole in the fragment stream.
318	 *
319	 * The headers don't give us a way to realize if fragments of
320	 * a message have been dropped.  We assume that frags that arrive
321	 * to a flow are part of the current message on the flow that is
322	 * being reassembled.  This means that senders can't drop messages
323	 * from the sending conn until all their frags are sent.
324	 *
325	 * XXX we could spend more on the wire to get more robust failure
326	 * detection, arguably worth it to avoid data corruption.
327	 */
328	if (be64_to_cpu(inc->i_hdr.h_sequence) < cp->cp_next_rx_seq &&
329	    (inc->i_hdr.h_flags & RDS_FLAG_RETRANSMITTED)) {
330		rds_stats_inc(s_recv_drop_old_seq);
331		goto out;
332	}
333	cp->cp_next_rx_seq = be64_to_cpu(inc->i_hdr.h_sequence) + 1;
334
335	if (rds_sysctl_ping_enable && inc->i_hdr.h_dport == 0) {
336		if (inc->i_hdr.h_sport == 0) {
337			rdsdebug("ignore ping with 0 sport from 0x%x\n", saddr);
338			goto out;
339		}
340		rds_stats_inc(s_recv_ping);
341		rds_send_pong(cp, inc->i_hdr.h_sport);
342		/* if this is a handshake ping, start multipath if necessary */
343		if (RDS_HS_PROBE(be16_to_cpu(inc->i_hdr.h_sport),
344				 be16_to_cpu(inc->i_hdr.h_dport))) {
345			rds_recv_hs_exthdrs(&inc->i_hdr, cp->cp_conn);
346			rds_start_mprds(cp->cp_conn);
347		}
348		goto out;
349	}
350
351	if (be16_to_cpu(inc->i_hdr.h_dport) ==  RDS_FLAG_PROBE_PORT &&
352	    inc->i_hdr.h_sport == 0) {
353		rds_recv_hs_exthdrs(&inc->i_hdr, cp->cp_conn);
354		/* if this is a handshake pong, start multipath if necessary */
355		rds_start_mprds(cp->cp_conn);
356		wake_up(&cp->cp_conn->c_hs_waitq);
357		goto out;
358	}
359
360	rs = rds_find_bound(daddr, inc->i_hdr.h_dport);
361	if (!rs) {
362		rds_stats_inc(s_recv_drop_no_sock);
363		goto out;
364	}
365
366	/* Process extension headers */
367	rds_recv_incoming_exthdrs(inc, rs);
368
369	/* We can be racing with rds_release() which marks the socket dead. */
370	sk = rds_rs_to_sk(rs);
371
372	/* serialize with rds_release -> sock_orphan */
373	write_lock_irqsave(&rs->rs_recv_lock, flags);
374	if (!sock_flag(sk, SOCK_DEAD)) {
375		rdsdebug("adding inc %p to rs %p's recv queue\n", inc, rs);
376		rds_stats_inc(s_recv_queued);
377		rds_recv_rcvbuf_delta(rs, sk, inc->i_conn->c_lcong,
378				      be32_to_cpu(inc->i_hdr.h_len),
379				      inc->i_hdr.h_dport);
380		if (sock_flag(sk, SOCK_RCVTSTAMP))
381			do_gettimeofday(&inc->i_rx_tstamp);
382		rds_inc_addref(inc);
383		inc->i_rx_lat_trace[RDS_MSG_RX_END] = local_clock();
384		list_add_tail(&inc->i_item, &rs->rs_recv_queue);
385		__rds_wake_sk_sleep(sk);
386	} else {
387		rds_stats_inc(s_recv_drop_dead_sock);
388	}
389	write_unlock_irqrestore(&rs->rs_recv_lock, flags);
390
391out:
392	if (rs)
393		rds_sock_put(rs);
394}
395EXPORT_SYMBOL_GPL(rds_recv_incoming);
396
397/*
398 * be very careful here.  This is being called as the condition in
399 * wait_event_*() needs to cope with being called many times.
400 */
401static int rds_next_incoming(struct rds_sock *rs, struct rds_incoming **inc)
402{
403	unsigned long flags;
404
405	if (!*inc) {
406		read_lock_irqsave(&rs->rs_recv_lock, flags);
407		if (!list_empty(&rs->rs_recv_queue)) {
408			*inc = list_entry(rs->rs_recv_queue.next,
409					  struct rds_incoming,
410					  i_item);
411			rds_inc_addref(*inc);
412		}
413		read_unlock_irqrestore(&rs->rs_recv_lock, flags);
414	}
415
416	return *inc != NULL;
417}
418
419static int rds_still_queued(struct rds_sock *rs, struct rds_incoming *inc,
420			    int drop)
421{
422	struct sock *sk = rds_rs_to_sk(rs);
423	int ret = 0;
424	unsigned long flags;
425
426	write_lock_irqsave(&rs->rs_recv_lock, flags);
427	if (!list_empty(&inc->i_item)) {
428		ret = 1;
429		if (drop) {
430			/* XXX make sure this i_conn is reliable */
431			rds_recv_rcvbuf_delta(rs, sk, inc->i_conn->c_lcong,
432					      -be32_to_cpu(inc->i_hdr.h_len),
433					      inc->i_hdr.h_dport);
434			list_del_init(&inc->i_item);
435			rds_inc_put(inc);
436		}
437	}
438	write_unlock_irqrestore(&rs->rs_recv_lock, flags);
439
440	rdsdebug("inc %p rs %p still %d dropped %d\n", inc, rs, ret, drop);
441	return ret;
442}
443
444/*
445 * Pull errors off the error queue.
446 * If msghdr is NULL, we will just purge the error queue.
447 */
448int rds_notify_queue_get(struct rds_sock *rs, struct msghdr *msghdr)
449{
450	struct rds_notifier *notifier;
451	struct rds_rdma_notify cmsg = { 0 }; /* fill holes with zero */
452	unsigned int count = 0, max_messages = ~0U;
453	unsigned long flags;
454	LIST_HEAD(copy);
455	int err = 0;
456
457
458	/* put_cmsg copies to user space and thus may sleep. We can't do this
459	 * with rs_lock held, so first grab as many notifications as we can stuff
460	 * in the user provided cmsg buffer. We don't try to copy more, to avoid
461	 * losing notifications - except when the buffer is so small that it wouldn't
462	 * even hold a single notification. Then we give him as much of this single
463	 * msg as we can squeeze in, and set MSG_CTRUNC.
464	 */
465	if (msghdr) {
466		max_messages = msghdr->msg_controllen / CMSG_SPACE(sizeof(cmsg));
467		if (!max_messages)
468			max_messages = 1;
469	}
470
471	spin_lock_irqsave(&rs->rs_lock, flags);
472	while (!list_empty(&rs->rs_notify_queue) && count < max_messages) {
473		notifier = list_entry(rs->rs_notify_queue.next,
474				struct rds_notifier, n_list);
475		list_move(&notifier->n_list, &copy);
476		count++;
477	}
478	spin_unlock_irqrestore(&rs->rs_lock, flags);
479
480	if (!count)
481		return 0;
482
483	while (!list_empty(&copy)) {
484		notifier = list_entry(copy.next, struct rds_notifier, n_list);
485
486		if (msghdr) {
487			cmsg.user_token = notifier->n_user_token;
488			cmsg.status = notifier->n_status;
489
490			err = put_cmsg(msghdr, SOL_RDS, RDS_CMSG_RDMA_STATUS,
491				       sizeof(cmsg), &cmsg);
492			if (err)
493				break;
494		}
495
496		list_del_init(&notifier->n_list);
497		kfree(notifier);
498	}
499
500	/* If we bailed out because of an error in put_cmsg,
501	 * we may be left with one or more notifications that we
502	 * didn't process. Return them to the head of the list. */
503	if (!list_empty(&copy)) {
504		spin_lock_irqsave(&rs->rs_lock, flags);
505		list_splice(&copy, &rs->rs_notify_queue);
506		spin_unlock_irqrestore(&rs->rs_lock, flags);
507	}
508
509	return err;
510}
511
512/*
513 * Queue a congestion notification
514 */
515static int rds_notify_cong(struct rds_sock *rs, struct msghdr *msghdr)
516{
517	uint64_t notify = rs->rs_cong_notify;
518	unsigned long flags;
519	int err;
520
521	err = put_cmsg(msghdr, SOL_RDS, RDS_CMSG_CONG_UPDATE,
522			sizeof(notify), &notify);
523	if (err)
524		return err;
525
526	spin_lock_irqsave(&rs->rs_lock, flags);
527	rs->rs_cong_notify &= ~notify;
528	spin_unlock_irqrestore(&rs->rs_lock, flags);
529
530	return 0;
531}
532
533/*
534 * Receive any control messages.
535 */
536static int rds_cmsg_recv(struct rds_incoming *inc, struct msghdr *msg,
537			 struct rds_sock *rs)
538{
539	int ret = 0;
540
541	if (inc->i_rdma_cookie) {
542		ret = put_cmsg(msg, SOL_RDS, RDS_CMSG_RDMA_DEST,
543				sizeof(inc->i_rdma_cookie), &inc->i_rdma_cookie);
544		if (ret)
545			goto out;
546	}
547
548	if ((inc->i_rx_tstamp.tv_sec != 0) &&
549	    sock_flag(rds_rs_to_sk(rs), SOCK_RCVTSTAMP)) {
550		ret = put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMP,
551			       sizeof(struct timeval),
552			       &inc->i_rx_tstamp);
553		if (ret)
554			goto out;
555	}
556
557	if (rs->rs_rx_traces) {
558		struct rds_cmsg_rx_trace t;
559		int i, j;
560
561		memset(&t, 0, sizeof(t));
562		inc->i_rx_lat_trace[RDS_MSG_RX_CMSG] = local_clock();
563		t.rx_traces =  rs->rs_rx_traces;
564		for (i = 0; i < rs->rs_rx_traces; i++) {
565			j = rs->rs_rx_trace[i];
566			t.rx_trace_pos[i] = j;
567			t.rx_trace[i] = inc->i_rx_lat_trace[j + 1] -
568					  inc->i_rx_lat_trace[j];
569		}
570
571		ret = put_cmsg(msg, SOL_RDS, RDS_CMSG_RXPATH_LATENCY,
572			       sizeof(t), &t);
573		if (ret)
574			goto out;
575	}
576
577out:
578	return ret;
579}
580
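New in this version relative to v3.1: when SOCK_RCVTSTAMP is set, the receive path records i_rx_tstamp and returns it here as a standard SCM_TIMESTAMP control message. A minimal sketch of the userspace side, using only the ordinary socket API (the helper name is made up); note the option must already be enabled when the message arrives, since the timestamp is captured at queue time.

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <sys/time.h>

/* Sketch: receive one message and print its kernel receive timestamp. */
static void print_rx_timestamp(int fd)
{
	char buf[1024];
	char cbuf[CMSG_SPACE(sizeof(struct timeval))];
	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
	};
	int on = 1;
	struct cmsghdr *cmsg;

	/* SO_TIMESTAMP sets SOCK_RCVTSTAMP, which rds_recv_incoming()
	 * checks before filling in i_rx_tstamp. */
	setsockopt(fd, SOL_SOCKET, SO_TIMESTAMP, &on, sizeof(on));
	if (recvmsg(fd, &msg, 0) < 0)
		return;
	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
		if (cmsg->cmsg_level == SOL_SOCKET &&
		    cmsg->cmsg_type == SCM_TIMESTAMP) {
			struct timeval tv;

			memcpy(&tv, CMSG_DATA(cmsg), sizeof(tv));
			printf("received at %ld.%06ld\n",
			       (long)tv.tv_sec, (long)tv.tv_usec);
		}
	}
}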
581static bool rds_recvmsg_zcookie(struct rds_sock *rs, struct msghdr *msg)
582{
583	struct rds_msg_zcopy_queue *q = &rs->rs_zcookie_queue;
584	struct rds_msg_zcopy_info *info = NULL;
585	struct rds_zcopy_cookies *done;
586	unsigned long flags;
587
588	if (!msg->msg_control)
589		return false;
590
591	if (!sock_flag(rds_rs_to_sk(rs), SOCK_ZEROCOPY) ||
592	    msg->msg_controllen < CMSG_SPACE(sizeof(*done)))
593		return false;
594
595	spin_lock_irqsave(&q->lock, flags);
596	if (!list_empty(&q->zcookie_head)) {
597		info = list_entry(q->zcookie_head.next,
598				  struct rds_msg_zcopy_info, rs_zcookie_next);
599		list_del(&info->rs_zcookie_next);
600	}
601	spin_unlock_irqrestore(&q->lock, flags);
602	if (!info)
603		return false;
604	done = &info->zcookies;
605	if (put_cmsg(msg, SOL_RDS, RDS_CMSG_ZCOPY_COMPLETION, sizeof(*done),
606		     done)) {
607		spin_lock_irqsave(&q->lock, flags);
608		list_add(&info->rs_zcookie_next, &q->zcookie_head);
609		spin_unlock_irqrestore(&q->lock, flags);
610		return false;
611	}
612	kfree(info);
613	return true;
614}
615
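Also new here is the zerocopy completion queue: after zerocopy sends, cookies identifying reusable send buffers are handed back on RDS_CMSG_ZCOPY_COMPLETION. The sketch below shows one way to reap them; it assumes the struct rds_zcopy_cookies layout from <linux/rds.h> (a count plus an array of 32-bit cookies), and the helper name is made up.

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <linux/rds.h>

/* Sketch: reap zerocopy completion cookies.  A zero-length, non-blocking
 * recvmsg() is enough; as in rds_recvmsg() above, it returns 0 instead of
 * -EAGAIN when cookies were placed in the control message.  MSG_PEEK keeps
 * a queued data message from being consumed by this probe. */
static void reap_zcopy_cookies(int fd)
{
	char cbuf[CMSG_SPACE(sizeof(struct rds_zcopy_cookies))];
	struct msghdr msg = {
		.msg_control = cbuf,
		.msg_controllen = sizeof(cbuf),
	};
	struct cmsghdr *cmsg;

	if (recvmsg(fd, &msg, MSG_PEEK | MSG_DONTWAIT) < 0)
		return;
	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
		struct rds_zcopy_cookies done;
		unsigned int i;

		if (cmsg->cmsg_level != SOL_RDS ||
		    cmsg->cmsg_type != RDS_CMSG_ZCOPY_COMPLETION)
			continue;
		memcpy(&done, CMSG_DATA(cmsg), sizeof(done));
		for (i = 0; i < done.num; i++)
			printf("send buffer %u can be reused\n", done.cookies[i]);
	}
}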
616int rds_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
617		int msg_flags)
618{
619	struct sock *sk = sock->sk;
620	struct rds_sock *rs = rds_sk_to_rs(sk);
621	long timeo;
622	int ret = 0, nonblock = msg_flags & MSG_DONTWAIT;
623	DECLARE_SOCKADDR(struct sockaddr_in *, sin, msg->msg_name);
624	struct rds_incoming *inc = NULL;
625
626	/* udp_recvmsg()->sock_recvtimeo() gets away without locking too.. */
627	timeo = sock_rcvtimeo(sk, nonblock);
628
629	rdsdebug("size %zu flags 0x%x timeo %ld\n", size, msg_flags, timeo);
630
631	if (msg_flags & MSG_OOB)
632		goto out;
633	if (msg_flags & MSG_ERRQUEUE)
634		return sock_recv_errqueue(sk, msg, size, SOL_IP, IP_RECVERR);
635
636	while (1) {
637		/* If there are pending notifications, do those - and nothing else */
638		if (!list_empty(&rs->rs_notify_queue)) {
639			ret = rds_notify_queue_get(rs, msg);
640			break;
641		}
642
643		if (rs->rs_cong_notify) {
644			ret = rds_notify_cong(rs, msg);
645			break;
646		}
647
648		if (!rds_next_incoming(rs, &inc)) {
649			if (nonblock) {
650				bool reaped = rds_recvmsg_zcookie(rs, msg);
651
652				ret = reaped ?  0 : -EAGAIN;
653				break;
654			}
655
656			timeo = wait_event_interruptible_timeout(*sk_sleep(sk),
657					(!list_empty(&rs->rs_notify_queue) ||
658					 rs->rs_cong_notify ||
659					 rds_next_incoming(rs, &inc)), timeo);
660			rdsdebug("recvmsg woke inc %p timeo %ld\n", inc,
661				 timeo);
662			if (timeo > 0 || timeo == MAX_SCHEDULE_TIMEOUT)
663				continue;
664
665			ret = timeo;
666			if (ret == 0)
667				ret = -ETIMEDOUT;
668			break;
669		}
670
671		rdsdebug("copying inc %p from %pI4:%u to user\n", inc,
672			 &inc->i_conn->c_faddr,
673			 ntohs(inc->i_hdr.h_sport));
674		ret = inc->i_conn->c_trans->inc_copy_to_user(inc, &msg->msg_iter);
675		if (ret < 0)
676			break;
677
678		/*
679		 * if the message we just copied isn't at the head of the
680		 * recv queue then someone else raced us to return it, try
681		 * to get the next message.
682		 */
683		if (!rds_still_queued(rs, inc, !(msg_flags & MSG_PEEK))) {
684			rds_inc_put(inc);
685			inc = NULL;
686			rds_stats_inc(s_recv_deliver_raced);
687			iov_iter_revert(&msg->msg_iter, ret);
688			continue;
689		}
690
691		if (ret < be32_to_cpu(inc->i_hdr.h_len)) {
692			if (msg_flags & MSG_TRUNC)
693				ret = be32_to_cpu(inc->i_hdr.h_len);
694			msg->msg_flags |= MSG_TRUNC;
695		}
696
697		if (rds_cmsg_recv(inc, msg, rs)) {
698			ret = -EFAULT;
699			goto out;
700		}
701		rds_recvmsg_zcookie(rs, msg);
702
703		rds_stats_inc(s_recv_delivered);
704
705		if (sin) {
706			sin->sin_family = AF_INET;
707			sin->sin_port = inc->i_hdr.h_sport;
708			sin->sin_addr.s_addr = inc->i_saddr;
709			memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
710			msg->msg_namelen = sizeof(*sin);
711		}
712		break;
713	}
714
715	if (inc)
716		rds_inc_put(inc);
717
718out:
719	return ret;
720}
721
722/*
723 * The socket is being shut down and we're asked to drop messages that were
724 * queued for recvmsg.  The caller has unbound the socket so the receive path
725 * won't queue any more incoming fragments or messages on the socket.
726 */
727void rds_clear_recv_queue(struct rds_sock *rs)
728{
729	struct sock *sk = rds_rs_to_sk(rs);
730	struct rds_incoming *inc, *tmp;
731	unsigned long flags;
732
733	write_lock_irqsave(&rs->rs_recv_lock, flags);
734	list_for_each_entry_safe(inc, tmp, &rs->rs_recv_queue, i_item) {
735		rds_recv_rcvbuf_delta(rs, sk, inc->i_conn->c_lcong,
736				      -be32_to_cpu(inc->i_hdr.h_len),
737				      inc->i_hdr.h_dport);
738		list_del_init(&inc->i_item);
739		rds_inc_put(inc);
740	}
741	write_unlock_irqrestore(&rs->rs_recv_lock, flags);
742}
743
744/*
745 * inc->i_saddr isn't used here because it is only set in the receive
746 * path.
747 */
748void rds_inc_info_copy(struct rds_incoming *inc,
749		       struct rds_info_iterator *iter,
750		       __be32 saddr, __be32 daddr, int flip)
751{
752	struct rds_info_message minfo;
753
754	minfo.seq = be64_to_cpu(inc->i_hdr.h_sequence);
755	minfo.len = be32_to_cpu(inc->i_hdr.h_len);
756
757	if (flip) {
758		minfo.laddr = daddr;
759		minfo.faddr = saddr;
760		minfo.lport = inc->i_hdr.h_dport;
761		minfo.fport = inc->i_hdr.h_sport;
762	} else {
763		minfo.laddr = saddr;
764		minfo.faddr = daddr;
765		minfo.lport = inc->i_hdr.h_sport;
766		minfo.fport = inc->i_hdr.h_dport;
767	}
768
769	minfo.flags = 0;
770
771	rds_info_copy(iter, &minfo, sizeof(minfo));
772}