net/rds/recv.c (v5.9)
  1/*
  2 * Copyright (c) 2006, 2019 Oracle and/or its affiliates. All rights reserved.
  3 *
  4 * This software is available to you under a choice of one of two
  5 * licenses.  You may choose to be licensed under the terms of the GNU
  6 * General Public License (GPL) Version 2, available from the file
  7 * COPYING in the main directory of this source tree, or the
  8 * OpenIB.org BSD license below:
  9 *
 10 *     Redistribution and use in source and binary forms, with or
 11 *     without modification, are permitted provided that the following
 12 *     conditions are met:
 13 *
 14 *      - Redistributions of source code must retain the above
 15 *        copyright notice, this list of conditions and the following
 16 *        disclaimer.
 17 *
 18 *      - Redistributions in binary form must reproduce the above
 19 *        copyright notice, this list of conditions and the following
 20 *        disclaimer in the documentation and/or other materials
 21 *        provided with the distribution.
 22 *
 23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 30 * SOFTWARE.
 31 *
 32 */
 33#include <linux/kernel.h>
 34#include <linux/slab.h>
 35#include <net/sock.h>
 36#include <linux/in.h>
 37#include <linux/export.h>
 38#include <linux/time.h>
 39#include <linux/rds.h>
 40
 41#include "rds.h"
 42
 43void rds_inc_init(struct rds_incoming *inc, struct rds_connection *conn,
 44		 struct in6_addr *saddr)
 45{
 46	refcount_set(&inc->i_refcount, 1);
 47	INIT_LIST_HEAD(&inc->i_item);
 48	inc->i_conn = conn;
 49	inc->i_saddr = *saddr;
 50	inc->i_usercopy.rdma_cookie = 0;
 51	inc->i_usercopy.rx_tstamp = ktime_set(0, 0);
 52
 53	memset(inc->i_rx_lat_trace, 0, sizeof(inc->i_rx_lat_trace));
 54}
 55EXPORT_SYMBOL_GPL(rds_inc_init);
 56
 57void rds_inc_path_init(struct rds_incoming *inc, struct rds_conn_path *cp,
 58		       struct in6_addr  *saddr)
 59{
 60	refcount_set(&inc->i_refcount, 1);
 61	INIT_LIST_HEAD(&inc->i_item);
 62	inc->i_conn = cp->cp_conn;
 63	inc->i_conn_path = cp;
 64	inc->i_saddr = *saddr;
 65	inc->i_usercopy.rdma_cookie = 0;
 66	inc->i_usercopy.rx_tstamp = ktime_set(0, 0);
 67}
 68EXPORT_SYMBOL_GPL(rds_inc_path_init);
 69
 70static void rds_inc_addref(struct rds_incoming *inc)
 71{
 72	rdsdebug("addref inc %p ref %d\n", inc, refcount_read(&inc->i_refcount));
 73	refcount_inc(&inc->i_refcount);
 74}
 75
 76void rds_inc_put(struct rds_incoming *inc)
 77{
 78	rdsdebug("put inc %p ref %d\n", inc, refcount_read(&inc->i_refcount));
 79	if (refcount_dec_and_test(&inc->i_refcount)) {
 80		BUG_ON(!list_empty(&inc->i_item));
 81
 82		inc->i_conn->c_trans->inc_free(inc);
 83	}
 84}
 85EXPORT_SYMBOL_GPL(rds_inc_put);
 86
 87static void rds_recv_rcvbuf_delta(struct rds_sock *rs, struct sock *sk,
 88				  struct rds_cong_map *map,
 89				  int delta, __be16 port)
 90{
 91	int now_congested;
 92
 93	if (delta == 0)
 94		return;
 95
 96	rs->rs_rcv_bytes += delta;
 97	if (delta > 0)
 98		rds_stats_add(s_recv_bytes_added_to_socket, delta);
 99	else
100		rds_stats_add(s_recv_bytes_removed_from_socket, -delta);
101
102	/* loop transport doesn't send/recv congestion updates */
103	if (rs->rs_transport->t_type == RDS_TRANS_LOOP)
104		return;
105
106	now_congested = rs->rs_rcv_bytes > rds_sk_rcvbuf(rs);
107
108	rdsdebug("rs %p (%pI6c:%u) recv bytes %d buf %d "
109	  "now_cong %d delta %d\n",
110	  rs, &rs->rs_bound_addr,
111	  ntohs(rs->rs_bound_port), rs->rs_rcv_bytes,
112	  rds_sk_rcvbuf(rs), now_congested, delta);
113
114	/* wasn't -> am congested */
115	if (!rs->rs_congested && now_congested) {
116		rs->rs_congested = 1;
117		rds_cong_set_bit(map, port);
118		rds_cong_queue_updates(map);
119	}
120	/* was -> aren't congested */
121	/* Require more free space before reporting uncongested to prevent
122	   bouncing cong/uncong state too often */
123	else if (rs->rs_congested && (rs->rs_rcv_bytes < (rds_sk_rcvbuf(rs)/2))) {
124		rs->rs_congested = 0;
125		rds_cong_clear_bit(map, port);
126		rds_cong_queue_updates(map);
127	}
128
129	/* do nothing if no change in cong state */
130}
131
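The congestion handling in rds_recv_rcvbuf_delta() above is a hysteresis: the socket is marked congested as soon as its queued receive bytes exceed rds_sk_rcvbuf(), but it is only reported uncongested again once usage falls below half of that limit, so a socket hovering around the limit does not flip the congestion map bit on every message. A minimal, self-contained sketch of the same pattern (hypothetical names, not part of this file):

/* Illustrative hysteresis: set the flag above the high watermark, clear it
 * only below half of it, so small oscillations do not toggle the state.
 */
struct rcv_hysteresis {
	int	congested;	/* current state */
	int	bytes;		/* current usage */
	int	limit;		/* high watermark, rds_sk_rcvbuf() in RDS */
};

static int rcv_hysteresis_update(struct rcv_hysteresis *h, int delta)
{
	h->bytes += delta;
	if (!h->congested && h->bytes > h->limit)
		h->congested = 1;		/* wasn't -> am congested */
	else if (h->congested && h->bytes < h->limit / 2)
		h->congested = 0;		/* was -> aren't congested */
	return h->congested;
}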
132static void rds_conn_peer_gen_update(struct rds_connection *conn,
133				     u32 peer_gen_num)
134{
135	int i;
136	struct rds_message *rm, *tmp;
137	unsigned long flags;
138
139	WARN_ON(conn->c_trans->t_type != RDS_TRANS_TCP);
140	if (peer_gen_num != 0) {
141		if (conn->c_peer_gen_num != 0 &&
142		    peer_gen_num != conn->c_peer_gen_num) {
143			for (i = 0; i < RDS_MPATH_WORKERS; i++) {
144				struct rds_conn_path *cp;
145
146				cp = &conn->c_path[i];
147				spin_lock_irqsave(&cp->cp_lock, flags);
148				cp->cp_next_tx_seq = 1;
149				cp->cp_next_rx_seq = 0;
150				list_for_each_entry_safe(rm, tmp,
151							 &cp->cp_retrans,
152							 m_conn_item) {
153					set_bit(RDS_MSG_FLUSH, &rm->m_flags);
154				}
155				spin_unlock_irqrestore(&cp->cp_lock, flags);
156			}
157		}
158		conn->c_peer_gen_num = peer_gen_num;
159	}
160}
161
162/*
163 * Process all extension headers that come with this message.
164 */
165static void rds_recv_incoming_exthdrs(struct rds_incoming *inc, struct rds_sock *rs)
166{
167	struct rds_header *hdr = &inc->i_hdr;
168	unsigned int pos = 0, type, len;
169	union {
170		struct rds_ext_header_version version;
171		struct rds_ext_header_rdma rdma;
172		struct rds_ext_header_rdma_dest rdma_dest;
173	} buffer;
174
175	while (1) {
176		len = sizeof(buffer);
177		type = rds_message_next_extension(hdr, &pos, &buffer, &len);
178		if (type == RDS_EXTHDR_NONE)
179			break;
180		/* Process extension header here */
181		switch (type) {
182		case RDS_EXTHDR_RDMA:
183			rds_rdma_unuse(rs, be32_to_cpu(buffer.rdma.h_rdma_rkey), 0);
184			break;
185
186		case RDS_EXTHDR_RDMA_DEST:
187			/* We ignore the size for now. We could stash it
188			 * somewhere and use it for error checking. */
189			inc->i_usercopy.rdma_cookie = rds_rdma_make_cookie(
190					be32_to_cpu(buffer.rdma_dest.h_rdma_rkey),
191					be32_to_cpu(buffer.rdma_dest.h_rdma_offset));
192
193			break;
194		}
195	}
196}
197
198static void rds_recv_hs_exthdrs(struct rds_header *hdr,
199				struct rds_connection *conn)
200{
201	unsigned int pos = 0, type, len;
202	union {
203		struct rds_ext_header_version version;
204		u16 rds_npaths;
205		u32 rds_gen_num;
206	} buffer;
207	u32 new_peer_gen_num = 0;
208
209	while (1) {
210		len = sizeof(buffer);
211		type = rds_message_next_extension(hdr, &pos, &buffer, &len);
212		if (type == RDS_EXTHDR_NONE)
213			break;
214		/* Process extension header here */
215		switch (type) {
216		case RDS_EXTHDR_NPATHS:
217			conn->c_npaths = min_t(int, RDS_MPATH_WORKERS,
218					       be16_to_cpu(buffer.rds_npaths));
219			break;
220		case RDS_EXTHDR_GEN_NUM:
221			new_peer_gen_num = be32_to_cpu(buffer.rds_gen_num);
222			break;
223		default:
224			pr_warn_ratelimited("ignoring unknown exthdr type "
225					     "0x%x\n", type);
226		}
227	}
228	/* if RDS_EXTHDR_NPATHS was not found, default to a single-path */
229	conn->c_npaths = max_t(int, conn->c_npaths, 1);
230	conn->c_ping_triggered = 0;
231	rds_conn_peer_gen_update(conn, new_peer_gen_num);
232}
233
234/* rds_start_mprds() will synchronously start multiple paths when appropriate.
235 * The scheme is based on the following rules:
236 *
237 * 1. rds_sendmsg on first connect attempt sends the probe ping, with the
238 *    sender's npaths (s_npaths)
239 * 2. rcvr of probe-ping knows the mprds_paths = min(s_npaths, r_npaths). It
240 *    sends back a probe-pong with r_npaths. After that, if rcvr is the
241 *    smaller ip addr, it starts rds_conn_path_connect_if_down on all
242 *    mprds_paths.
243 * 3. sender gets woken up, and can move to rds_conn_path_connect_if_down.
244 *    If it is the smaller ipaddr, rds_conn_path_connect_if_down can be
245 *    called after reception of the probe-pong on all mprds_paths.
246 *    Otherwise (sender of probe-ping is not the smaller ip addr): just call
247 *    rds_conn_path_connect_if_down on the hashed path. (see rule 4)
248 * 4. rds_connect_worker must only trigger a connection if laddr < faddr.
249 * 5. sender may end up queuing the packet on the cp; it will get sent
250 *    out later, when the connection is completed.
251 */
252static void rds_start_mprds(struct rds_connection *conn)
253{
254	int i;
255	struct rds_conn_path *cp;
256
257	if (conn->c_npaths > 1 &&
258	    rds_addr_cmp(&conn->c_laddr, &conn->c_faddr) < 0) {
259		for (i = 0; i < conn->c_npaths; i++) {
260			cp = &conn->c_path[i];
261			rds_conn_path_connect_if_down(cp);
262		}
263	}
264}
265
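The multipath rules in the comment above boil down to a simple negotiation: each side learns the peer's advertised path count from the RDS_EXTHDR_NPATHS extension, uses the smaller of the two values (capped at RDS_MPATH_WORKERS), falls back to a single path when the extension is absent, and only the endpoint with the smaller address actively brings the extra paths up. A hypothetical helper illustrating the resulting path count (semantics paraphrased from the comment, not code from this file):

/* Illustration only: effective number of MP-RDS paths after the probe
 * ping/pong exchange handled by rds_recv_hs_exthdrs() above.
 */
static int mprds_effective_paths(int local_npaths, int peer_npaths,
				 int max_workers)
{
	/* a peer that never sent RDS_EXTHDR_NPATHS counts as single-path */
	int n = peer_npaths ? (local_npaths < peer_npaths ?
			       local_npaths : peer_npaths) : 1;

	if (n < 1)
		n = 1;
	if (n > max_workers)
		n = max_workers;
	return n;
}

/* e.g. mprds_effective_paths(8, 2, 8) == 2, and only the side for which
 * rds_addr_cmp(&laddr, &faddr) < 0 calls rds_conn_path_connect_if_down()
 * on all of those paths.
 */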
266/*
267 * The transport must make sure that this is serialized against other
268 * rx and conn reset on this specific conn.
269 *
270 * We currently assert that only one fragmented message will be sent
271 * down a connection at a time.  This lets us reassemble in the conn
272 * instead of per-flow which means that we don't have to go digging through
273 * flows to tear down partial reassembly progress on conn failure and
274 * we save flow lookup and locking for each frag arrival.  It does mean
275 * that small messages will wait behind large ones.  Fragmenting at all
276 * is only to reduce the memory consumption of pre-posted buffers.
277 *
278 * The caller passes in saddr and daddr instead of us getting it from the
279 * conn.  This lets loopback, who only has one conn for both directions,
280 * tell us which roles the addrs in the conn are playing for this message.
281 */
282void rds_recv_incoming(struct rds_connection *conn, struct in6_addr *saddr,
283		       struct in6_addr *daddr,
284		       struct rds_incoming *inc, gfp_t gfp)
285{
286	struct rds_sock *rs = NULL;
287	struct sock *sk;
288	unsigned long flags;
289	struct rds_conn_path *cp;
290
291	inc->i_conn = conn;
292	inc->i_rx_jiffies = jiffies;
293	if (conn->c_trans->t_mp_capable)
294		cp = inc->i_conn_path;
295	else
296		cp = &conn->c_path[0];
297
298	rdsdebug("conn %p next %llu inc %p seq %llu len %u sport %u dport %u "
299		 "flags 0x%x rx_jiffies %lu\n", conn,
300		 (unsigned long long)cp->cp_next_rx_seq,
301		 inc,
302		 (unsigned long long)be64_to_cpu(inc->i_hdr.h_sequence),
303		 be32_to_cpu(inc->i_hdr.h_len),
304		 be16_to_cpu(inc->i_hdr.h_sport),
305		 be16_to_cpu(inc->i_hdr.h_dport),
306		 inc->i_hdr.h_flags,
307		 inc->i_rx_jiffies);
308
309	/*
310	 * Sequence numbers should only increase.  Messages get their
311	 * sequence number as they're queued in a sending conn.  They
312	 * can be dropped, though, if the sending socket is closed before
313	 * they hit the wire.  So sequence numbers can skip forward
314	 * under normal operation.  They can also drop back in the conn
315	 * failover case as previously sent messages are resent down the
316	 * new instance of a conn.  We drop those, otherwise we have
317	 * to assume that the next valid seq does not come after a
318	 * hole in the fragment stream.
319	 *
320	 * The headers don't give us a way to realize if fragments of
321	 * a message have been dropped.  We assume that frags that arrive
322	 * to a flow are part of the current message on the flow that is
323	 * being reassembled.  This means that senders can't drop messages
324	 * from the sending conn until all their frags are sent.
325	 *
326	 * XXX we could spend more on the wire to get more robust failure
327	 * detection, arguably worth it to avoid data corruption.
328	 */
329	if (be64_to_cpu(inc->i_hdr.h_sequence) < cp->cp_next_rx_seq &&
330	    (inc->i_hdr.h_flags & RDS_FLAG_RETRANSMITTED)) {
331		rds_stats_inc(s_recv_drop_old_seq);
332		goto out;
333	}
334	cp->cp_next_rx_seq = be64_to_cpu(inc->i_hdr.h_sequence) + 1;
335
336	if (rds_sysctl_ping_enable && inc->i_hdr.h_dport == 0) {
337		if (inc->i_hdr.h_sport == 0) {
338			rdsdebug("ignore ping with 0 sport from %pI6c\n",
339				 saddr);
340			goto out;
341		}
342		rds_stats_inc(s_recv_ping);
343		rds_send_pong(cp, inc->i_hdr.h_sport);
344		/* if this is a handshake ping, start multipath if necessary */
345		if (RDS_HS_PROBE(be16_to_cpu(inc->i_hdr.h_sport),
346				 be16_to_cpu(inc->i_hdr.h_dport))) {
347			rds_recv_hs_exthdrs(&inc->i_hdr, cp->cp_conn);
348			rds_start_mprds(cp->cp_conn);
349		}
350		goto out;
351	}
352
353	if (be16_to_cpu(inc->i_hdr.h_dport) ==  RDS_FLAG_PROBE_PORT &&
354	    inc->i_hdr.h_sport == 0) {
355		rds_recv_hs_exthdrs(&inc->i_hdr, cp->cp_conn);
356		/* if this is a handshake pong, start multipath if necessary */
357		rds_start_mprds(cp->cp_conn);
358		wake_up(&cp->cp_conn->c_hs_waitq);
359		goto out;
360	}
361
362	rs = rds_find_bound(daddr, inc->i_hdr.h_dport, conn->c_bound_if);
363	if (!rs) {
364		rds_stats_inc(s_recv_drop_no_sock);
365		goto out;
366	}
367
368	/* Process extension headers */
369	rds_recv_incoming_exthdrs(inc, rs);
370
371	/* We can be racing with rds_release() which marks the socket dead. */
372	sk = rds_rs_to_sk(rs);
373
374	/* serialize with rds_release -> sock_orphan */
375	write_lock_irqsave(&rs->rs_recv_lock, flags);
376	if (!sock_flag(sk, SOCK_DEAD)) {
377		rdsdebug("adding inc %p to rs %p's recv queue\n", inc, rs);
378		rds_stats_inc(s_recv_queued);
379		rds_recv_rcvbuf_delta(rs, sk, inc->i_conn->c_lcong,
380				      be32_to_cpu(inc->i_hdr.h_len),
381				      inc->i_hdr.h_dport);
382		if (sock_flag(sk, SOCK_RCVTSTAMP))
383			inc->i_usercopy.rx_tstamp = ktime_get_real();
384		rds_inc_addref(inc);
385		inc->i_rx_lat_trace[RDS_MSG_RX_END] = local_clock();
386		list_add_tail(&inc->i_item, &rs->rs_recv_queue);
387		__rds_wake_sk_sleep(sk);
388	} else {
389		rds_stats_inc(s_recv_drop_dead_sock);
390	}
391	write_unlock_irqrestore(&rs->rs_recv_lock, flags);
392
393out:
394	if (rs)
395		rds_sock_put(rs);
396}
397EXPORT_SYMBOL_GPL(rds_recv_incoming);
398
399/*
400 * be very careful here.  This is being called as the condition in
401 * wait_event_*() and needs to cope with being called many times.
402 */
403static int rds_next_incoming(struct rds_sock *rs, struct rds_incoming **inc)
404{
405	unsigned long flags;
406
407	if (!*inc) {
408		read_lock_irqsave(&rs->rs_recv_lock, flags);
409		if (!list_empty(&rs->rs_recv_queue)) {
410			*inc = list_entry(rs->rs_recv_queue.next,
411					  struct rds_incoming,
412					  i_item);
413			rds_inc_addref(*inc);
414		}
415		read_unlock_irqrestore(&rs->rs_recv_lock, flags);
416	}
417
418	return *inc != NULL;
419}
420
421static int rds_still_queued(struct rds_sock *rs, struct rds_incoming *inc,
422			    int drop)
423{
424	struct sock *sk = rds_rs_to_sk(rs);
425	int ret = 0;
426	unsigned long flags;
427
428	write_lock_irqsave(&rs->rs_recv_lock, flags);
429	if (!list_empty(&inc->i_item)) {
430		ret = 1;
431		if (drop) {
432			/* XXX make sure this i_conn is reliable */
433			rds_recv_rcvbuf_delta(rs, sk, inc->i_conn->c_lcong,
434					      -be32_to_cpu(inc->i_hdr.h_len),
435					      inc->i_hdr.h_dport);
436			list_del_init(&inc->i_item);
437			rds_inc_put(inc);
438		}
439	}
440	write_unlock_irqrestore(&rs->rs_recv_lock, flags);
441
442	rdsdebug("inc %p rs %p still %d dropped %d\n", inc, rs, ret, drop);
443	return ret;
444}
445
446/*
447 * Pull errors off the error queue.
448 * If msghdr is NULL, we will just purge the error queue.
449 */
450int rds_notify_queue_get(struct rds_sock *rs, struct msghdr *msghdr)
451{
452	struct rds_notifier *notifier;
453	struct rds_rdma_notify cmsg;
454	unsigned int count = 0, max_messages = ~0U;
455	unsigned long flags;
456	LIST_HEAD(copy);
457	int err = 0;
458
459	memset(&cmsg, 0, sizeof(cmsg));	/* fill holes with zero */
460
461	/* put_cmsg copies to user space and thus may sleep. We can't do this
462	 * with rs_lock held, so first grab as many notifications as we can stuff
463	 * in the user provided cmsg buffer. We don't try to copy more, to avoid
464	 * losing notifications - except when the buffer is so small that it wouldn't
465	 * even hold a single notification. Then we give him as much of this single
466	 * msg as we can squeeze in, and set MSG_CTRUNC.
467	 */
468	if (msghdr) {
469		max_messages = msghdr->msg_controllen / CMSG_SPACE(sizeof(cmsg));
470		if (!max_messages)
471			max_messages = 1;
472	}
473
474	spin_lock_irqsave(&rs->rs_lock, flags);
475	while (!list_empty(&rs->rs_notify_queue) && count < max_messages) {
476		notifier = list_entry(rs->rs_notify_queue.next,
477				struct rds_notifier, n_list);
478		list_move(&notifier->n_list, &copy);
479		count++;
480	}
481	spin_unlock_irqrestore(&rs->rs_lock, flags);
482
483	if (!count)
484		return 0;
485
486	while (!list_empty(&copy)) {
487		notifier = list_entry(copy.next, struct rds_notifier, n_list);
488
489		if (msghdr) {
490			cmsg.user_token = notifier->n_user_token;
491			cmsg.status = notifier->n_status;
492
493			err = put_cmsg(msghdr, SOL_RDS, RDS_CMSG_RDMA_STATUS,
494				       sizeof(cmsg), &cmsg);
495			if (err)
496				break;
497		}
498
499		list_del_init(&notifier->n_list);
500		kfree(notifier);
501	}
502
503	/* If we bailed out because of an error in put_cmsg,
504	 * we may be left with one or more notifications that we
505	 * didn't process. Return them to the head of the list. */
506	if (!list_empty(&copy)) {
507		spin_lock_irqsave(&rs->rs_lock, flags);
508		list_splice(&copy, &rs->rs_notify_queue);
509		spin_unlock_irqrestore(&rs->rs_lock, flags);
510	}
511
512	return err;
513}
514
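rds_notify_queue_get() above follows a common pattern for handing queued items to user space: grab a bounded batch off the shared queue onto a private list while holding rs_lock, drop the lock before put_cmsg() (which copies to user space and may sleep), and splice anything that could not be delivered back onto the queue. A condensed, self-contained sketch of that pattern with hypothetical names:

/* Sketch: drain up to 'max' items under the lock, deliver them without it,
 * and return whatever was not delivered to the shared queue.
 */
static int deliver_batch(struct list_head *queue, spinlock_t *lock,
			 unsigned int max,
			 int (*deliver)(struct list_head *item))
{
	struct list_head *item, *tmp;
	unsigned int count = 0;
	unsigned long flags;
	LIST_HEAD(copy);
	int err = 0;

	spin_lock_irqsave(lock, flags);
	list_for_each_safe(item, tmp, queue) {
		if (count++ >= max)
			break;
		list_move_tail(item, &copy);
	}
	spin_unlock_irqrestore(lock, flags);

	list_for_each_safe(item, tmp, &copy) {
		err = deliver(item);		/* may sleep, like put_cmsg() */
		if (err)
			break;
		list_del_init(item);		/* delivered; RDS frees the notifier here */
	}

	if (!list_empty(&copy)) {		/* undelivered items go back */
		spin_lock_irqsave(lock, flags);
		list_splice(&copy, queue);
		spin_unlock_irqrestore(lock, flags);
	}
	return err;
}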
515/*
516 * Queue a congestion notification
517 */
518static int rds_notify_cong(struct rds_sock *rs, struct msghdr *msghdr)
519{
520	uint64_t notify = rs->rs_cong_notify;
521	unsigned long flags;
522	int err;
523
524	err = put_cmsg(msghdr, SOL_RDS, RDS_CMSG_CONG_UPDATE,
525			sizeof(notify), &notify);
526	if (err)
527		return err;
528
529	spin_lock_irqsave(&rs->rs_lock, flags);
530	rs->rs_cong_notify &= ~notify;
531	spin_unlock_irqrestore(&rs->rs_lock, flags);
532
533	return 0;
534}
535
536/*
537 * Receive any control messages.
538 */
539static int rds_cmsg_recv(struct rds_incoming *inc, struct msghdr *msg,
540			 struct rds_sock *rs)
541{
542	int ret = 0;
543
544	if (inc->i_usercopy.rdma_cookie) {
545		ret = put_cmsg(msg, SOL_RDS, RDS_CMSG_RDMA_DEST,
546				sizeof(inc->i_usercopy.rdma_cookie),
547				&inc->i_usercopy.rdma_cookie);
548		if (ret)
549			goto out;
550	}
551
552	if ((inc->i_usercopy.rx_tstamp != 0) &&
553	    sock_flag(rds_rs_to_sk(rs), SOCK_RCVTSTAMP)) {
554		struct __kernel_old_timeval tv =
555			ns_to_kernel_old_timeval(inc->i_usercopy.rx_tstamp);
556
557		if (!sock_flag(rds_rs_to_sk(rs), SOCK_TSTAMP_NEW)) {
558			ret = put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMP_OLD,
559				       sizeof(tv), &tv);
560		} else {
561			struct __kernel_sock_timeval sk_tv;
562
563			sk_tv.tv_sec = tv.tv_sec;
564			sk_tv.tv_usec = tv.tv_usec;
565
566			ret = put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMP_NEW,
567				       sizeof(sk_tv), &sk_tv);
568		}
569
570		if (ret)
571			goto out;
572	}
573
574	if (rs->rs_rx_traces) {
575		struct rds_cmsg_rx_trace t;
576		int i, j;
577
578		memset(&t, 0, sizeof(t));
579		inc->i_rx_lat_trace[RDS_MSG_RX_CMSG] = local_clock();
580		t.rx_traces =  rs->rs_rx_traces;
581		for (i = 0; i < rs->rs_rx_traces; i++) {
582			j = rs->rs_rx_trace[i];
583			t.rx_trace_pos[i] = j;
584			t.rx_trace[i] = inc->i_rx_lat_trace[j + 1] -
585					  inc->i_rx_lat_trace[j];
586		}
587
588		ret = put_cmsg(msg, SOL_RDS, RDS_CMSG_RXPATH_LATENCY,
589			       sizeof(t), &t);
590		if (ret)
591			goto out;
592	}
593
594out:
595	return ret;
596}
597
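The ancillary data produced by rds_cmsg_recv() above shows up on the user-space side of recvmsg() as ordinary control messages. A minimal sketch of walking them is shown below; it assumes the uapi <linux/rds.h> definitions and a socket with the old-style SO_TIMESTAMP option enabled, and it leaves out error handling. SOL_RDS comes from recent libc headers; the fallback value matches the kernel's definition.

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <linux/rds.h>

#ifndef SOL_RDS
#define SOL_RDS 276	/* same value as the kernel's include/linux/socket.h */
#endif

/* Walk the control messages filled in by rds_cmsg_recv(). */
static void dump_rds_cmsgs(struct msghdr *msg)
{
	struct cmsghdr *cmsg;

	for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
		if (cmsg->cmsg_level == SOL_SOCKET &&
		    cmsg->cmsg_type == SCM_TIMESTAMP) {
			struct timeval tv;

			memcpy(&tv, CMSG_DATA(cmsg), sizeof(tv));
			printf("rx timestamp %ld.%06ld\n",
			       (long)tv.tv_sec, (long)tv.tv_usec);
		} else if (cmsg->cmsg_level == SOL_RDS &&
			   cmsg->cmsg_type == RDS_CMSG_RDMA_DEST) {
			rds_rdma_cookie_t cookie;

			memcpy(&cookie, CMSG_DATA(cmsg), sizeof(cookie));
			printf("rdma dest cookie 0x%llx\n",
			       (unsigned long long)cookie);
		}
	}
}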
598static bool rds_recvmsg_zcookie(struct rds_sock *rs, struct msghdr *msg)
599{
600	struct rds_msg_zcopy_queue *q = &rs->rs_zcookie_queue;
601	struct rds_msg_zcopy_info *info = NULL;
602	struct rds_zcopy_cookies *done;
603	unsigned long flags;
604
605	if (!msg->msg_control)
606		return false;
607
608	if (!sock_flag(rds_rs_to_sk(rs), SOCK_ZEROCOPY) ||
609	    msg->msg_controllen < CMSG_SPACE(sizeof(*done)))
610		return false;
611
612	spin_lock_irqsave(&q->lock, flags);
613	if (!list_empty(&q->zcookie_head)) {
614		info = list_entry(q->zcookie_head.next,
615				  struct rds_msg_zcopy_info, rs_zcookie_next);
616		list_del(&info->rs_zcookie_next);
617	}
618	spin_unlock_irqrestore(&q->lock, flags);
619	if (!info)
620		return false;
621	done = &info->zcookies;
622	if (put_cmsg(msg, SOL_RDS, RDS_CMSG_ZCOPY_COMPLETION, sizeof(*done),
623		     done)) {
624		spin_lock_irqsave(&q->lock, flags);
625		list_add(&info->rs_zcookie_next, &q->zcookie_head);
626		spin_unlock_irqrestore(&q->lock, flags);
627		return false;
628	}
629	kfree(info);
630	return true;
631}
632
633int rds_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
634		int msg_flags)
635{
636	struct sock *sk = sock->sk;
637	struct rds_sock *rs = rds_sk_to_rs(sk);
638	long timeo;
639	int ret = 0, nonblock = msg_flags & MSG_DONTWAIT;
640	DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
641	DECLARE_SOCKADDR(struct sockaddr_in *, sin, msg->msg_name);
642	struct rds_incoming *inc = NULL;
643
644	/* udp_recvmsg()->sock_recvtimeo() gets away without locking too.. */
645	timeo = sock_rcvtimeo(sk, nonblock);
646
647	rdsdebug("size %zu flags 0x%x timeo %ld\n", size, msg_flags, timeo);
648
649	if (msg_flags & MSG_OOB)
650		goto out;
651	if (msg_flags & MSG_ERRQUEUE)
652		return sock_recv_errqueue(sk, msg, size, SOL_IP, IP_RECVERR);
653
654	while (1) {
655		/* If there are pending notifications, do those - and nothing else */
656		if (!list_empty(&rs->rs_notify_queue)) {
657			ret = rds_notify_queue_get(rs, msg);
658			break;
659		}
660
661		if (rs->rs_cong_notify) {
662			ret = rds_notify_cong(rs, msg);
663			break;
664		}
665
666		if (!rds_next_incoming(rs, &inc)) {
667			if (nonblock) {
668				bool reaped = rds_recvmsg_zcookie(rs, msg);
669
670				ret = reaped ?  0 : -EAGAIN;
671				break;
672			}
673
674			timeo = wait_event_interruptible_timeout(*sk_sleep(sk),
675					(!list_empty(&rs->rs_notify_queue) ||
676					 rs->rs_cong_notify ||
677					 rds_next_incoming(rs, &inc)), timeo);
678			rdsdebug("recvmsg woke inc %p timeo %ld\n", inc,
679				 timeo);
680			if (timeo > 0 || timeo == MAX_SCHEDULE_TIMEOUT)
681				continue;
682
683			ret = timeo;
684			if (ret == 0)
685				ret = -ETIMEDOUT;
686			break;
687		}
688
689		rdsdebug("copying inc %p from %pI6c:%u to user\n", inc,
690			 &inc->i_conn->c_faddr,
691			 ntohs(inc->i_hdr.h_sport));
692		ret = inc->i_conn->c_trans->inc_copy_to_user(inc, &msg->msg_iter);
693		if (ret < 0)
694			break;
695
696		/*
697		 * if the message we just copied isn't at the head of the
698		 * recv queue then someone else raced us to return it, try
699		 * to get the next message.
700		 */
701		if (!rds_still_queued(rs, inc, !(msg_flags & MSG_PEEK))) {
702			rds_inc_put(inc);
703			inc = NULL;
704			rds_stats_inc(s_recv_deliver_raced);
705			iov_iter_revert(&msg->msg_iter, ret);
706			continue;
707		}
708
709		if (ret < be32_to_cpu(inc->i_hdr.h_len)) {
710			if (msg_flags & MSG_TRUNC)
711				ret = be32_to_cpu(inc->i_hdr.h_len);
712			msg->msg_flags |= MSG_TRUNC;
713		}
714
715		if (rds_cmsg_recv(inc, msg, rs)) {
716			ret = -EFAULT;
717			goto out;
718		}
719		rds_recvmsg_zcookie(rs, msg);
720
721		rds_stats_inc(s_recv_delivered);
722
723		if (msg->msg_name) {
724			if (ipv6_addr_v4mapped(&inc->i_saddr)) {
725				sin = (struct sockaddr_in *)msg->msg_name;
726
727				sin->sin_family = AF_INET;
728				sin->sin_port = inc->i_hdr.h_sport;
729				sin->sin_addr.s_addr =
730				    inc->i_saddr.s6_addr32[3];
731				memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
732				msg->msg_namelen = sizeof(*sin);
733			} else {
734				sin6 = (struct sockaddr_in6 *)msg->msg_name;
735
736				sin6->sin6_family = AF_INET6;
737				sin6->sin6_port = inc->i_hdr.h_sport;
738				sin6->sin6_addr = inc->i_saddr;
739				sin6->sin6_flowinfo = 0;
740				sin6->sin6_scope_id = rs->rs_bound_scope_id;
741				msg->msg_namelen = sizeof(*sin6);
742			}
743		}
744		break;
745	}
746
747	if (inc)
748		rds_inc_put(inc);
749
750out:
751	return ret;
752}
753
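For context, here is a minimal sketch of the user-space caller that ends up in rds_recvmsg(): an RDS socket is created with PF_RDS and SOCK_SEQPACKET, bound to a local IP address and port, and read with recvmsg(); msg_name receives the sender's address and msg_control the ancillary data discussed above. The address and port below are placeholders and error handling is omitted.

#include <stdio.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>

#ifndef PF_RDS
#define PF_RDS 21	/* AF_RDS; may be missing from older libc headers */
#endif

int main(void)
{
	struct sockaddr_in laddr = { .sin_family = AF_INET };
	struct sockaddr_in faddr;
	char payload[4096], cbuf[512];
	struct iovec iov = { .iov_base = payload, .iov_len = sizeof(payload) };
	struct msghdr msg = {
		.msg_name = &faddr, .msg_namelen = sizeof(faddr),
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
	};
	ssize_t n;
	int fd;

	fd = socket(PF_RDS, SOCK_SEQPACKET, 0);
	laddr.sin_addr.s_addr = inet_addr("192.0.2.1");	/* placeholder local IP */
	laddr.sin_port = htons(18634);			/* placeholder port */
	bind(fd, (struct sockaddr *)&laddr, sizeof(laddr));

	n = recvmsg(fd, &msg, 0);	/* blocks in rds_recvmsg() until data */
	if (n >= 0)
		printf("%zd bytes from %s:%u\n", n,
		       inet_ntoa(faddr.sin_addr), ntohs(faddr.sin_port));

	close(fd);
	return 0;
}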
754/*
755 * The socket is being shut down and we're asked to drop messages that were
756 * queued for recvmsg.  The caller has unbound the socket so the receive path
757 * won't queue any more incoming fragments or messages on the socket.
758 */
759void rds_clear_recv_queue(struct rds_sock *rs)
760{
761	struct sock *sk = rds_rs_to_sk(rs);
762	struct rds_incoming *inc, *tmp;
763	unsigned long flags;
764
765	write_lock_irqsave(&rs->rs_recv_lock, flags);
766	list_for_each_entry_safe(inc, tmp, &rs->rs_recv_queue, i_item) {
767		rds_recv_rcvbuf_delta(rs, sk, inc->i_conn->c_lcong,
768				      -be32_to_cpu(inc->i_hdr.h_len),
769				      inc->i_hdr.h_dport);
770		list_del_init(&inc->i_item);
771		rds_inc_put(inc);
772	}
773	write_unlock_irqrestore(&rs->rs_recv_lock, flags);
774}
775
776/*
777 * inc->i_saddr isn't used here because it is only set in the receive
778 * path.
779 */
780void rds_inc_info_copy(struct rds_incoming *inc,
781		       struct rds_info_iterator *iter,
782		       __be32 saddr, __be32 daddr, int flip)
783{
784	struct rds_info_message minfo;
785
786	minfo.seq = be64_to_cpu(inc->i_hdr.h_sequence);
787	minfo.len = be32_to_cpu(inc->i_hdr.h_len);
788	minfo.tos = inc->i_conn->c_tos;
789
790	if (flip) {
791		minfo.laddr = daddr;
792		minfo.faddr = saddr;
793		minfo.lport = inc->i_hdr.h_dport;
794		minfo.fport = inc->i_hdr.h_sport;
795	} else {
796		minfo.laddr = saddr;
797		minfo.faddr = daddr;
798		minfo.lport = inc->i_hdr.h_sport;
799		minfo.fport = inc->i_hdr.h_dport;
800	}
801
802	minfo.flags = 0;
803
804	rds_info_copy(iter, &minfo, sizeof(minfo));
805}
806
807#if IS_ENABLED(CONFIG_IPV6)
808void rds6_inc_info_copy(struct rds_incoming *inc,
809			struct rds_info_iterator *iter,
810			struct in6_addr *saddr, struct in6_addr *daddr,
811			int flip)
812{
813	struct rds6_info_message minfo6;
814
815	minfo6.seq = be64_to_cpu(inc->i_hdr.h_sequence);
816	minfo6.len = be32_to_cpu(inc->i_hdr.h_len);
817	minfo6.tos = inc->i_conn->c_tos;
818
819	if (flip) {
820		minfo6.laddr = *daddr;
821		minfo6.faddr = *saddr;
822		minfo6.lport = inc->i_hdr.h_dport;
823		minfo6.fport = inc->i_hdr.h_sport;
824	} else {
825		minfo6.laddr = *saddr;
826		minfo6.faddr = *daddr;
827		minfo6.lport = inc->i_hdr.h_sport;
828		minfo6.fport = inc->i_hdr.h_dport;
829	}
830
831	minfo6.flags = 0;
832
833	rds_info_copy(iter, &minfo6, sizeof(minfo6));
834}
835#endif
net/rds/recv.c (v4.17)
  1/*
  2 * Copyright (c) 2006 Oracle.  All rights reserved.
  3 *
  4 * This software is available to you under a choice of one of two
  5 * licenses.  You may choose to be licensed under the terms of the GNU
  6 * General Public License (GPL) Version 2, available from the file
  7 * COPYING in the main directory of this source tree, or the
  8 * OpenIB.org BSD license below:
  9 *
 10 *     Redistribution and use in source and binary forms, with or
 11 *     without modification, are permitted provided that the following
 12 *     conditions are met:
 13 *
 14 *      - Redistributions of source code must retain the above
 15 *        copyright notice, this list of conditions and the following
 16 *        disclaimer.
 17 *
 18 *      - Redistributions in binary form must reproduce the above
 19 *        copyright notice, this list of conditions and the following
 20 *        disclaimer in the documentation and/or other materials
 21 *        provided with the distribution.
 22 *
 23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 30 * SOFTWARE.
 31 *
 32 */
 33#include <linux/kernel.h>
 34#include <linux/slab.h>
 35#include <net/sock.h>
 36#include <linux/in.h>
 37#include <linux/export.h>
 38#include <linux/time.h>
 39#include <linux/rds.h>
 40
 41#include "rds.h"
 42
 43void rds_inc_init(struct rds_incoming *inc, struct rds_connection *conn,
 44		  __be32 saddr)
 45{
 46	int i;
 47
 48	refcount_set(&inc->i_refcount, 1);
 49	INIT_LIST_HEAD(&inc->i_item);
 50	inc->i_conn = conn;
 51	inc->i_saddr = saddr;
 52	inc->i_rdma_cookie = 0;
 53	inc->i_rx_tstamp.tv_sec = 0;
 54	inc->i_rx_tstamp.tv_usec = 0;
 55
 56	for (i = 0; i < RDS_RX_MAX_TRACES; i++)
 57		inc->i_rx_lat_trace[i] = 0;
 58}
 59EXPORT_SYMBOL_GPL(rds_inc_init);
 60
 61void rds_inc_path_init(struct rds_incoming *inc, struct rds_conn_path *cp,
 62		       __be32 saddr)
 63{
 64	refcount_set(&inc->i_refcount, 1);
 65	INIT_LIST_HEAD(&inc->i_item);
 66	inc->i_conn = cp->cp_conn;
 67	inc->i_conn_path = cp;
 68	inc->i_saddr = saddr;
 69	inc->i_rdma_cookie = 0;
 70	inc->i_rx_tstamp.tv_sec = 0;
 71	inc->i_rx_tstamp.tv_usec = 0;
 72}
 73EXPORT_SYMBOL_GPL(rds_inc_path_init);
 74
 75static void rds_inc_addref(struct rds_incoming *inc)
 76{
 77	rdsdebug("addref inc %p ref %d\n", inc, refcount_read(&inc->i_refcount));
 78	refcount_inc(&inc->i_refcount);
 79}
 80
 81void rds_inc_put(struct rds_incoming *inc)
 82{
 83	rdsdebug("put inc %p ref %d\n", inc, refcount_read(&inc->i_refcount));
 84	if (refcount_dec_and_test(&inc->i_refcount)) {
 85		BUG_ON(!list_empty(&inc->i_item));
 86
 87		inc->i_conn->c_trans->inc_free(inc);
 88	}
 89}
 90EXPORT_SYMBOL_GPL(rds_inc_put);
 91
 92static void rds_recv_rcvbuf_delta(struct rds_sock *rs, struct sock *sk,
 93				  struct rds_cong_map *map,
 94				  int delta, __be16 port)
 95{
 96	int now_congested;
 97
 98	if (delta == 0)
 99		return;
100
101	rs->rs_rcv_bytes += delta;
102	if (delta > 0)
103		rds_stats_add(s_recv_bytes_added_to_socket, delta);
104	else
105		rds_stats_add(s_recv_bytes_removed_from_socket, -delta);
106	now_congested = rs->rs_rcv_bytes > rds_sk_rcvbuf(rs);
107
108	rdsdebug("rs %p (%pI4:%u) recv bytes %d buf %d "
109	  "now_cong %d delta %d\n",
110	  rs, &rs->rs_bound_addr,
111	  ntohs(rs->rs_bound_port), rs->rs_rcv_bytes,
112	  rds_sk_rcvbuf(rs), now_congested, delta);
113
114	/* wasn't -> am congested */
115	if (!rs->rs_congested && now_congested) {
116		rs->rs_congested = 1;
117		rds_cong_set_bit(map, port);
118		rds_cong_queue_updates(map);
119	}
120	/* was -> aren't congested */
121	/* Require more free space before reporting uncongested to prevent
122	   bouncing cong/uncong state too often */
123	else if (rs->rs_congested && (rs->rs_rcv_bytes < (rds_sk_rcvbuf(rs)/2))) {
124		rs->rs_congested = 0;
125		rds_cong_clear_bit(map, port);
126		rds_cong_queue_updates(map);
127	}
128
129	/* do nothing if no change in cong state */
130}
131
132static void rds_conn_peer_gen_update(struct rds_connection *conn,
133				     u32 peer_gen_num)
134{
135	int i;
136	struct rds_message *rm, *tmp;
137	unsigned long flags;
138
139	WARN_ON(conn->c_trans->t_type != RDS_TRANS_TCP);
140	if (peer_gen_num != 0) {
141		if (conn->c_peer_gen_num != 0 &&
142		    peer_gen_num != conn->c_peer_gen_num) {
143			for (i = 0; i < RDS_MPATH_WORKERS; i++) {
144				struct rds_conn_path *cp;
145
146				cp = &conn->c_path[i];
147				spin_lock_irqsave(&cp->cp_lock, flags);
148				cp->cp_next_tx_seq = 1;
149				cp->cp_next_rx_seq = 0;
150				list_for_each_entry_safe(rm, tmp,
151							 &cp->cp_retrans,
152							 m_conn_item) {
153					set_bit(RDS_MSG_FLUSH, &rm->m_flags);
154				}
155				spin_unlock_irqrestore(&cp->cp_lock, flags);
156			}
157		}
158		conn->c_peer_gen_num = peer_gen_num;
159	}
160}
161
162/*
163 * Process all extension headers that come with this message.
164 */
165static void rds_recv_incoming_exthdrs(struct rds_incoming *inc, struct rds_sock *rs)
166{
167	struct rds_header *hdr = &inc->i_hdr;
168	unsigned int pos = 0, type, len;
169	union {
170		struct rds_ext_header_version version;
171		struct rds_ext_header_rdma rdma;
172		struct rds_ext_header_rdma_dest rdma_dest;
173	} buffer;
174
175	while (1) {
176		len = sizeof(buffer);
177		type = rds_message_next_extension(hdr, &pos, &buffer, &len);
178		if (type == RDS_EXTHDR_NONE)
179			break;
180		/* Process extension header here */
181		switch (type) {
182		case RDS_EXTHDR_RDMA:
183			rds_rdma_unuse(rs, be32_to_cpu(buffer.rdma.h_rdma_rkey), 0);
184			break;
185
186		case RDS_EXTHDR_RDMA_DEST:
187			/* We ignore the size for now. We could stash it
188			 * somewhere and use it for error checking. */
189			inc->i_rdma_cookie = rds_rdma_make_cookie(
190					be32_to_cpu(buffer.rdma_dest.h_rdma_rkey),
191					be32_to_cpu(buffer.rdma_dest.h_rdma_offset));
192
193			break;
194		}
195	}
196}
197
198static void rds_recv_hs_exthdrs(struct rds_header *hdr,
199				struct rds_connection *conn)
200{
201	unsigned int pos = 0, type, len;
202	union {
203		struct rds_ext_header_version version;
204		u16 rds_npaths;
205		u32 rds_gen_num;
206	} buffer;
207	u32 new_peer_gen_num = 0;
208
209	while (1) {
210		len = sizeof(buffer);
211		type = rds_message_next_extension(hdr, &pos, &buffer, &len);
212		if (type == RDS_EXTHDR_NONE)
213			break;
214		/* Process extension header here */
215		switch (type) {
216		case RDS_EXTHDR_NPATHS:
217			conn->c_npaths = min_t(int, RDS_MPATH_WORKERS,
218					       be16_to_cpu(buffer.rds_npaths));
219			break;
220		case RDS_EXTHDR_GEN_NUM:
221			new_peer_gen_num = be32_to_cpu(buffer.rds_gen_num);
222			break;
223		default:
224			pr_warn_ratelimited("ignoring unknown exthdr type "
225					     "0x%x\n", type);
226		}
227	}
228	/* if RDS_EXTHDR_NPATHS was not found, default to a single-path */
229	conn->c_npaths = max_t(int, conn->c_npaths, 1);
230	conn->c_ping_triggered = 0;
231	rds_conn_peer_gen_update(conn, new_peer_gen_num);
232}
233
234/* rds_start_mprds() will synchronously start multiple paths when appropriate.
235 * The scheme is based on the following rules:
236 *
237 * 1. rds_sendmsg on first connect attempt sends the probe ping, with the
238 *    sender's npaths (s_npaths)
239 * 2. rcvr of probe-ping knows the mprds_paths = min(s_npaths, r_npaths). It
240 *    sends back a probe-pong with r_npaths. After that, if rcvr is the
241 *    smaller ip addr, it starts rds_conn_path_connect_if_down on all
242 *    mprds_paths.
243 * 3. sender gets woken up, and can move to rds_conn_path_connect_if_down.
244 *    If it is the smaller ipaddr, rds_conn_path_connect_if_down can be
245 *    called after reception of the probe-pong on all mprds_paths.
246 *    Otherwise (sender of probe-ping is not the smaller ip addr): just call
247 *    rds_conn_path_connect_if_down on the hashed path. (see rule 4)
248 * 4. rds_connect_worker must only trigger a connection if laddr < faddr.
249 * 5. sender may end up queuing the packet on the cp; it will get sent
250 *    out later, when the connection is completed.
251 */
252static void rds_start_mprds(struct rds_connection *conn)
253{
254	int i;
255	struct rds_conn_path *cp;
256
257	if (conn->c_npaths > 1 &&
258	    IS_CANONICAL(conn->c_laddr, conn->c_faddr)) {
259		for (i = 0; i < conn->c_npaths; i++) {
260			cp = &conn->c_path[i];
261			rds_conn_path_connect_if_down(cp);
262		}
263	}
264}
265
266/*
267 * The transport must make sure that this is serialized against other
268 * rx and conn reset on this specific conn.
269 *
270 * We currently assert that only one fragmented message will be sent
271 * down a connection at a time.  This lets us reassemble in the conn
272 * instead of per-flow which means that we don't have to go digging through
273 * flows to tear down partial reassembly progress on conn failure and
274 * we save flow lookup and locking for each frag arrival.  It does mean
275 * that small messages will wait behind large ones.  Fragmenting at all
276 * is only to reduce the memory consumption of pre-posted buffers.
277 *
278 * The caller passes in saddr and daddr instead of us getting it from the
279 * conn.  This lets loopback, who only has one conn for both directions,
280 * tell us which roles the addrs in the conn are playing for this message.
281 */
282void rds_recv_incoming(struct rds_connection *conn, __be32 saddr, __be32 daddr,
283		       struct rds_incoming *inc, gfp_t gfp)
284{
285	struct rds_sock *rs = NULL;
286	struct sock *sk;
287	unsigned long flags;
288	struct rds_conn_path *cp;
289
290	inc->i_conn = conn;
291	inc->i_rx_jiffies = jiffies;
292	if (conn->c_trans->t_mp_capable)
293		cp = inc->i_conn_path;
294	else
295		cp = &conn->c_path[0];
296
297	rdsdebug("conn %p next %llu inc %p seq %llu len %u sport %u dport %u "
298		 "flags 0x%x rx_jiffies %lu\n", conn,
299		 (unsigned long long)cp->cp_next_rx_seq,
300		 inc,
301		 (unsigned long long)be64_to_cpu(inc->i_hdr.h_sequence),
302		 be32_to_cpu(inc->i_hdr.h_len),
303		 be16_to_cpu(inc->i_hdr.h_sport),
304		 be16_to_cpu(inc->i_hdr.h_dport),
305		 inc->i_hdr.h_flags,
306		 inc->i_rx_jiffies);
307
308	/*
309	 * Sequence numbers should only increase.  Messages get their
310	 * sequence number as they're queued in a sending conn.  They
311	 * can be dropped, though, if the sending socket is closed before
312	 * they hit the wire.  So sequence numbers can skip forward
313	 * under normal operation.  They can also drop back in the conn
314	 * failover case as previously sent messages are resent down the
315	 * new instance of a conn.  We drop those, otherwise we have
316	 * to assume that the next valid seq does not come after a
317	 * hole in the fragment stream.
318	 *
319	 * The headers don't give us a way to realize if fragments of
320	 * a message have been dropped.  We assume that frags that arrive
321	 * to a flow are part of the current message on the flow that is
322	 * being reassembled.  This means that senders can't drop messages
323	 * from the sending conn until all their frags are sent.
324	 *
325	 * XXX we could spend more on the wire to get more robust failure
326	 * detection, arguably worth it to avoid data corruption.
327	 */
328	if (be64_to_cpu(inc->i_hdr.h_sequence) < cp->cp_next_rx_seq &&
329	    (inc->i_hdr.h_flags & RDS_FLAG_RETRANSMITTED)) {
330		rds_stats_inc(s_recv_drop_old_seq);
331		goto out;
332	}
333	cp->cp_next_rx_seq = be64_to_cpu(inc->i_hdr.h_sequence) + 1;
334
335	if (rds_sysctl_ping_enable && inc->i_hdr.h_dport == 0) {
336		if (inc->i_hdr.h_sport == 0) {
337			rdsdebug("ignore ping with 0 sport from 0x%x\n", saddr);
338			goto out;
339		}
340		rds_stats_inc(s_recv_ping);
341		rds_send_pong(cp, inc->i_hdr.h_sport);
342		/* if this is a handshake ping, start multipath if necessary */
343		if (RDS_HS_PROBE(be16_to_cpu(inc->i_hdr.h_sport),
344				 be16_to_cpu(inc->i_hdr.h_dport))) {
345			rds_recv_hs_exthdrs(&inc->i_hdr, cp->cp_conn);
346			rds_start_mprds(cp->cp_conn);
347		}
348		goto out;
349	}
350
351	if (be16_to_cpu(inc->i_hdr.h_dport) ==  RDS_FLAG_PROBE_PORT &&
352	    inc->i_hdr.h_sport == 0) {
353		rds_recv_hs_exthdrs(&inc->i_hdr, cp->cp_conn);
354		/* if this is a handshake pong, start multipath if necessary */
355		rds_start_mprds(cp->cp_conn);
356		wake_up(&cp->cp_conn->c_hs_waitq);
357		goto out;
358	}
359
360	rs = rds_find_bound(daddr, inc->i_hdr.h_dport);
361	if (!rs) {
362		rds_stats_inc(s_recv_drop_no_sock);
363		goto out;
364	}
365
366	/* Process extension headers */
367	rds_recv_incoming_exthdrs(inc, rs);
368
369	/* We can be racing with rds_release() which marks the socket dead. */
370	sk = rds_rs_to_sk(rs);
371
372	/* serialize with rds_release -> sock_orphan */
373	write_lock_irqsave(&rs->rs_recv_lock, flags);
374	if (!sock_flag(sk, SOCK_DEAD)) {
375		rdsdebug("adding inc %p to rs %p's recv queue\n", inc, rs);
376		rds_stats_inc(s_recv_queued);
377		rds_recv_rcvbuf_delta(rs, sk, inc->i_conn->c_lcong,
378				      be32_to_cpu(inc->i_hdr.h_len),
379				      inc->i_hdr.h_dport);
380		if (sock_flag(sk, SOCK_RCVTSTAMP))
381			do_gettimeofday(&inc->i_rx_tstamp);
382		rds_inc_addref(inc);
383		inc->i_rx_lat_trace[RDS_MSG_RX_END] = local_clock();
384		list_add_tail(&inc->i_item, &rs->rs_recv_queue);
385		__rds_wake_sk_sleep(sk);
386	} else {
387		rds_stats_inc(s_recv_drop_dead_sock);
388	}
389	write_unlock_irqrestore(&rs->rs_recv_lock, flags);
390
391out:
392	if (rs)
393		rds_sock_put(rs);
394}
395EXPORT_SYMBOL_GPL(rds_recv_incoming);
396
397/*
398 * be very careful here.  This is being called as the condition in
399 * wait_event_*() and needs to cope with being called many times.
400 */
401static int rds_next_incoming(struct rds_sock *rs, struct rds_incoming **inc)
402{
403	unsigned long flags;
404
405	if (!*inc) {
406		read_lock_irqsave(&rs->rs_recv_lock, flags);
407		if (!list_empty(&rs->rs_recv_queue)) {
408			*inc = list_entry(rs->rs_recv_queue.next,
409					  struct rds_incoming,
410					  i_item);
411			rds_inc_addref(*inc);
412		}
413		read_unlock_irqrestore(&rs->rs_recv_lock, flags);
414	}
415
416	return *inc != NULL;
417}
418
419static int rds_still_queued(struct rds_sock *rs, struct rds_incoming *inc,
420			    int drop)
421{
422	struct sock *sk = rds_rs_to_sk(rs);
423	int ret = 0;
424	unsigned long flags;
425
426	write_lock_irqsave(&rs->rs_recv_lock, flags);
427	if (!list_empty(&inc->i_item)) {
428		ret = 1;
429		if (drop) {
430			/* XXX make sure this i_conn is reliable */
431			rds_recv_rcvbuf_delta(rs, sk, inc->i_conn->c_lcong,
432					      -be32_to_cpu(inc->i_hdr.h_len),
433					      inc->i_hdr.h_dport);
434			list_del_init(&inc->i_item);
435			rds_inc_put(inc);
436		}
437	}
438	write_unlock_irqrestore(&rs->rs_recv_lock, flags);
439
440	rdsdebug("inc %p rs %p still %d dropped %d\n", inc, rs, ret, drop);
441	return ret;
442}
443
444/*
445 * Pull errors off the error queue.
446 * If msghdr is NULL, we will just purge the error queue.
447 */
448int rds_notify_queue_get(struct rds_sock *rs, struct msghdr *msghdr)
449{
450	struct rds_notifier *notifier;
451	struct rds_rdma_notify cmsg = { 0 }; /* fill holes with zero */
452	unsigned int count = 0, max_messages = ~0U;
453	unsigned long flags;
454	LIST_HEAD(copy);
455	int err = 0;
456
457
458	/* put_cmsg copies to user space and thus may sleep. We can't do this
459	 * with rs_lock held, so first grab as many notifications as we can stuff
460	 * in the user provided cmsg buffer. We don't try to copy more, to avoid
461	 * losing notifications - except when the buffer is so small that it wouldn't
462	 * even hold a single notification. Then we give him as much of this single
463	 * msg as we can squeeze in, and set MSG_CTRUNC.
464	 */
465	if (msghdr) {
466		max_messages = msghdr->msg_controllen / CMSG_SPACE(sizeof(cmsg));
467		if (!max_messages)
468			max_messages = 1;
469	}
470
471	spin_lock_irqsave(&rs->rs_lock, flags);
472	while (!list_empty(&rs->rs_notify_queue) && count < max_messages) {
473		notifier = list_entry(rs->rs_notify_queue.next,
474				struct rds_notifier, n_list);
475		list_move(&notifier->n_list, &copy);
476		count++;
477	}
478	spin_unlock_irqrestore(&rs->rs_lock, flags);
479
480	if (!count)
481		return 0;
482
483	while (!list_empty(&copy)) {
484		notifier = list_entry(copy.next, struct rds_notifier, n_list);
485
486		if (msghdr) {
487			cmsg.user_token = notifier->n_user_token;
488			cmsg.status = notifier->n_status;
489
490			err = put_cmsg(msghdr, SOL_RDS, RDS_CMSG_RDMA_STATUS,
491				       sizeof(cmsg), &cmsg);
492			if (err)
493				break;
494		}
495
496		list_del_init(&notifier->n_list);
497		kfree(notifier);
498	}
499
500	/* If we bailed out because of an error in put_cmsg,
501	 * we may be left with one or more notifications that we
502	 * didn't process. Return them to the head of the list. */
503	if (!list_empty(&copy)) {
504		spin_lock_irqsave(&rs->rs_lock, flags);
505		list_splice(&copy, &rs->rs_notify_queue);
506		spin_unlock_irqrestore(&rs->rs_lock, flags);
507	}
508
509	return err;
510}
511
512/*
513 * Queue a congestion notification
514 */
515static int rds_notify_cong(struct rds_sock *rs, struct msghdr *msghdr)
516{
517	uint64_t notify = rs->rs_cong_notify;
518	unsigned long flags;
519	int err;
520
521	err = put_cmsg(msghdr, SOL_RDS, RDS_CMSG_CONG_UPDATE,
522			sizeof(notify), &notify);
523	if (err)
524		return err;
525
526	spin_lock_irqsave(&rs->rs_lock, flags);
527	rs->rs_cong_notify &= ~notify;
528	spin_unlock_irqrestore(&rs->rs_lock, flags);
529
530	return 0;
531}
532
533/*
534 * Receive any control messages.
535 */
536static int rds_cmsg_recv(struct rds_incoming *inc, struct msghdr *msg,
537			 struct rds_sock *rs)
538{
539	int ret = 0;
540
541	if (inc->i_rdma_cookie) {
542		ret = put_cmsg(msg, SOL_RDS, RDS_CMSG_RDMA_DEST,
543				sizeof(inc->i_rdma_cookie), &inc->i_rdma_cookie);
544		if (ret)
545			goto out;
546	}
547
548	if ((inc->i_rx_tstamp.tv_sec != 0) &&
549	    sock_flag(rds_rs_to_sk(rs), SOCK_RCVTSTAMP)) {
550		ret = put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMP,
551			       sizeof(struct timeval),
552			       &inc->i_rx_tstamp);
553		if (ret)
554			goto out;
555	}
556
557	if (rs->rs_rx_traces) {
558		struct rds_cmsg_rx_trace t;
559		int i, j;
560
561		memset(&t, 0, sizeof(t));
562		inc->i_rx_lat_trace[RDS_MSG_RX_CMSG] = local_clock();
563		t.rx_traces =  rs->rs_rx_traces;
564		for (i = 0; i < rs->rs_rx_traces; i++) {
565			j = rs->rs_rx_trace[i];
566			t.rx_trace_pos[i] = j;
567			t.rx_trace[i] = inc->i_rx_lat_trace[j + 1] -
568					  inc->i_rx_lat_trace[j];
569		}
570
571		ret = put_cmsg(msg, SOL_RDS, RDS_CMSG_RXPATH_LATENCY,
572			       sizeof(t), &t);
573		if (ret)
574			goto out;
575	}
576
577out:
578	return ret;
579}
580
581static bool rds_recvmsg_zcookie(struct rds_sock *rs, struct msghdr *msg)
582{
583	struct rds_msg_zcopy_queue *q = &rs->rs_zcookie_queue;
584	struct rds_msg_zcopy_info *info = NULL;
585	struct rds_zcopy_cookies *done;
586	unsigned long flags;
587
588	if (!msg->msg_control)
589		return false;
590
591	if (!sock_flag(rds_rs_to_sk(rs), SOCK_ZEROCOPY) ||
592	    msg->msg_controllen < CMSG_SPACE(sizeof(*done)))
593		return false;
594
595	spin_lock_irqsave(&q->lock, flags);
596	if (!list_empty(&q->zcookie_head)) {
597		info = list_entry(q->zcookie_head.next,
598				  struct rds_msg_zcopy_info, rs_zcookie_next);
599		list_del(&info->rs_zcookie_next);
600	}
601	spin_unlock_irqrestore(&q->lock, flags);
602	if (!info)
603		return false;
604	done = &info->zcookies;
605	if (put_cmsg(msg, SOL_RDS, RDS_CMSG_ZCOPY_COMPLETION, sizeof(*done),
606		     done)) {
607		spin_lock_irqsave(&q->lock, flags);
608		list_add(&info->rs_zcookie_next, &q->zcookie_head);
609		spin_unlock_irqrestore(&q->lock, flags);
610		return false;
611	}
612	kfree(info);
613	return true;
614}
615
616int rds_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
617		int msg_flags)
618{
619	struct sock *sk = sock->sk;
620	struct rds_sock *rs = rds_sk_to_rs(sk);
621	long timeo;
622	int ret = 0, nonblock = msg_flags & MSG_DONTWAIT;
623	DECLARE_SOCKADDR(struct sockaddr_in *, sin, msg->msg_name);
624	struct rds_incoming *inc = NULL;
625
626	/* udp_recvmsg()->sock_recvtimeo() gets away without locking too.. */
627	timeo = sock_rcvtimeo(sk, nonblock);
628
629	rdsdebug("size %zu flags 0x%x timeo %ld\n", size, msg_flags, timeo);
630
631	if (msg_flags & MSG_OOB)
632		goto out;
633	if (msg_flags & MSG_ERRQUEUE)
634		return sock_recv_errqueue(sk, msg, size, SOL_IP, IP_RECVERR);
635
636	while (1) {
637		/* If there are pending notifications, do those - and nothing else */
638		if (!list_empty(&rs->rs_notify_queue)) {
639			ret = rds_notify_queue_get(rs, msg);
640			break;
641		}
642
643		if (rs->rs_cong_notify) {
644			ret = rds_notify_cong(rs, msg);
645			break;
646		}
647
648		if (!rds_next_incoming(rs, &inc)) {
649			if (nonblock) {
650				bool reaped = rds_recvmsg_zcookie(rs, msg);
651
652				ret = reaped ?  0 : -EAGAIN;
653				break;
654			}
655
656			timeo = wait_event_interruptible_timeout(*sk_sleep(sk),
657					(!list_empty(&rs->rs_notify_queue) ||
658					 rs->rs_cong_notify ||
659					 rds_next_incoming(rs, &inc)), timeo);
660			rdsdebug("recvmsg woke inc %p timeo %ld\n", inc,
661				 timeo);
662			if (timeo > 0 || timeo == MAX_SCHEDULE_TIMEOUT)
663				continue;
664
665			ret = timeo;
666			if (ret == 0)
667				ret = -ETIMEDOUT;
668			break;
669		}
670
671		rdsdebug("copying inc %p from %pI4:%u to user\n", inc,
672			 &inc->i_conn->c_faddr,
673			 ntohs(inc->i_hdr.h_sport));
674		ret = inc->i_conn->c_trans->inc_copy_to_user(inc, &msg->msg_iter);
675		if (ret < 0)
676			break;
677
678		/*
679		 * if the message we just copied isn't at the head of the
680		 * recv queue then someone else raced us to return it, try
681		 * to get the next message.
682		 */
683		if (!rds_still_queued(rs, inc, !(msg_flags & MSG_PEEK))) {
684			rds_inc_put(inc);
685			inc = NULL;
686			rds_stats_inc(s_recv_deliver_raced);
687			iov_iter_revert(&msg->msg_iter, ret);
688			continue;
689		}
690
691		if (ret < be32_to_cpu(inc->i_hdr.h_len)) {
692			if (msg_flags & MSG_TRUNC)
693				ret = be32_to_cpu(inc->i_hdr.h_len);
694			msg->msg_flags |= MSG_TRUNC;
695		}
696
697		if (rds_cmsg_recv(inc, msg, rs)) {
698			ret = -EFAULT;
699			goto out;
700		}
701		rds_recvmsg_zcookie(rs, msg);
702
703		rds_stats_inc(s_recv_delivered);
704
705		if (sin) {
706			sin->sin_family = AF_INET;
707			sin->sin_port = inc->i_hdr.h_sport;
708			sin->sin_addr.s_addr = inc->i_saddr;
709			memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
710			msg->msg_namelen = sizeof(*sin);
711		}
712		break;
713	}
714
715	if (inc)
716		rds_inc_put(inc);
717
718out:
719	return ret;
720}
721
722/*
723 * The socket is being shut down and we're asked to drop messages that were
724 * queued for recvmsg.  The caller has unbound the socket so the receive path
725 * won't queue any more incoming fragments or messages on the socket.
726 */
727void rds_clear_recv_queue(struct rds_sock *rs)
728{
729	struct sock *sk = rds_rs_to_sk(rs);
730	struct rds_incoming *inc, *tmp;
731	unsigned long flags;
732
733	write_lock_irqsave(&rs->rs_recv_lock, flags);
734	list_for_each_entry_safe(inc, tmp, &rs->rs_recv_queue, i_item) {
735		rds_recv_rcvbuf_delta(rs, sk, inc->i_conn->c_lcong,
736				      -be32_to_cpu(inc->i_hdr.h_len),
737				      inc->i_hdr.h_dport);
738		list_del_init(&inc->i_item);
739		rds_inc_put(inc);
740	}
741	write_unlock_irqrestore(&rs->rs_recv_lock, flags);
742}
743
744/*
745 * inc->i_saddr isn't used here because it is only set in the receive
746 * path.
747 */
748void rds_inc_info_copy(struct rds_incoming *inc,
749		       struct rds_info_iterator *iter,
750		       __be32 saddr, __be32 daddr, int flip)
751{
752	struct rds_info_message minfo;
753
754	minfo.seq = be64_to_cpu(inc->i_hdr.h_sequence);
755	minfo.len = be32_to_cpu(inc->i_hdr.h_len);
756
757	if (flip) {
758		minfo.laddr = daddr;
759		minfo.faddr = saddr;
760		minfo.lport = inc->i_hdr.h_dport;
761		minfo.fport = inc->i_hdr.h_sport;
762	} else {
763		minfo.laddr = saddr;
764		minfo.faddr = daddr;
765		minfo.lport = inc->i_hdr.h_sport;
766		minfo.fport = inc->i_hdr.h_dport;
767	}
768
769	minfo.flags = 0;
770
771	rds_info_copy(iter, &minfo, sizeof(minfo));
772}