net/rds/send.c (Linux v3.5.6)
   1/*
   2 * Copyright (c) 2006 Oracle.  All rights reserved.
   3 *
   4 * This software is available to you under a choice of one of two
   5 * licenses.  You may choose to be licensed under the terms of the GNU
   6 * General Public License (GPL) Version 2, available from the file
   7 * COPYING in the main directory of this source tree, or the
   8 * OpenIB.org BSD license below:
   9 *
  10 *     Redistribution and use in source and binary forms, with or
  11 *     without modification, are permitted provided that the following
  12 *     conditions are met:
  13 *
  14 *      - Redistributions of source code must retain the above
  15 *        copyright notice, this list of conditions and the following
  16 *        disclaimer.
  17 *
  18 *      - Redistributions in binary form must reproduce the above
  19 *        copyright notice, this list of conditions and the following
  20 *        disclaimer in the documentation and/or other materials
  21 *        provided with the distribution.
  22 *
  23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  30 * SOFTWARE.
  31 *
  32 */
  33#include <linux/kernel.h>
  34#include <linux/moduleparam.h>
  35#include <linux/gfp.h>
  36#include <net/sock.h>
  37#include <linux/in.h>
  38#include <linux/list.h>
  39#include <linux/ratelimit.h>
  40#include <linux/export.h>
  41
  42#include "rds.h"
  43
  44/* When transmitting messages in rds_send_xmit, we need to emerge from
   45 * time to time and briefly release the CPU. Otherwise the softlockup watchdog
  46 * will kick our shin.
  47 * Also, it seems fairer to not let one busy connection stall all the
  48 * others.
  49 *
  50 * send_batch_count is the number of times we'll loop in send_xmit. Setting
  51 * it to 0 will restore the old behavior (where we looped until we had
  52 * drained the queue).
  53 */
  54static int send_batch_count = 64;
  55module_param(send_batch_count, int, 0444);
  56MODULE_PARM_DESC(send_batch_count, " batch factor when working the send queue");
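     /* Note that in this version of the file the transmit loop in
      * rds_send_xmit() below does not appear to consult send_batch_count;
      * the knob is still exported read-only via module_param above. */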
  57
  58static void rds_send_remove_from_sock(struct list_head *messages, int status);
  59
  60/*
  61 * Reset the send state.  Callers must ensure that this doesn't race with
  62 * rds_send_xmit().
  63 */
  64void rds_send_reset(struct rds_connection *conn)
  65{
  66	struct rds_message *rm, *tmp;
  67	unsigned long flags;
  68
  69	if (conn->c_xmit_rm) {
  70		rm = conn->c_xmit_rm;
  71		conn->c_xmit_rm = NULL;
  72		/* Tell the user the RDMA op is no longer mapped by the
  73		 * transport. This isn't entirely true (it's flushed out
  74		 * independently) but as the connection is down, there's
  75		 * no ongoing RDMA to/from that memory */
  76		rds_message_unmapped(rm);
  77		rds_message_put(rm);
  78	}
  79
  80	conn->c_xmit_sg = 0;
  81	conn->c_xmit_hdr_off = 0;
  82	conn->c_xmit_data_off = 0;
  83	conn->c_xmit_atomic_sent = 0;
  84	conn->c_xmit_rdma_sent = 0;
  85	conn->c_xmit_data_sent = 0;
  86
  87	conn->c_map_queued = 0;
  88
  89	conn->c_unacked_packets = rds_sysctl_max_unacked_packets;
  90	conn->c_unacked_bytes = rds_sysctl_max_unacked_bytes;
  91
  92	/* Mark messages as retransmissions, and move them to the send q */
  93	spin_lock_irqsave(&conn->c_lock, flags);
  94	list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
  95		set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);
  96		set_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags);
  97	}
  98	list_splice_init(&conn->c_retrans, &conn->c_send_queue);
  99	spin_unlock_irqrestore(&conn->c_lock, flags);
 100}
 101
 102static int acquire_in_xmit(struct rds_connection *conn)
 103{
 104	return test_and_set_bit(RDS_IN_XMIT, &conn->c_flags) == 0;
 105}
 106
 107static void release_in_xmit(struct rds_connection *conn)
 108{
 109	clear_bit(RDS_IN_XMIT, &conn->c_flags);
 110	smp_mb__after_clear_bit();
 111	/*
 112	 * We don't use wait_on_bit()/wake_up_bit() because our waking is in a
 113	 * hot path and finding waiters is very rare.  We don't want to walk
 114	 * the system-wide hashed waitqueue buckets in the fast path only to
 115	 * almost never find waiters.
 116	 */
 117	if (waitqueue_active(&conn->c_waitq))
 118		wake_up_all(&conn->c_waitq);
 119}
 120
 121/*
 122 * We're making the conscious trade-off here to only send one message
 123 * down the connection at a time.
 124 *   Pro:
 125 *      - tx queueing is a simple fifo list
 126 *   	- reassembly is optional and easily done by transports per conn
 127 *      - no per flow rx lookup at all, straight to the socket
 128 *   	- less per-frag memory and wire overhead
 129 *   Con:
 130 *      - queued acks can be delayed behind large messages
 131 *   Depends:
 132 *      - small message latency is higher behind queued large messages
 133 *      - large message latency isn't starved by intervening small sends
 134 */
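     /*
      * Push queued messages into the transport until the send queue drains
      * or the transport stops making forward progress (an error from an
      * xmit hook, or a data xmit that moves nothing, breaks out of the
      * loop).  Callers are serialized by RDS_IN_XMIT: a caller that loses
      * the race backs off, and the winner re-checks the send queue after
      * dropping the bit so nothing queued in the meantime is stranded.
      */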
 135int rds_send_xmit(struct rds_connection *conn)
 136{
 137	struct rds_message *rm;
 138	unsigned long flags;
 139	unsigned int tmp;
 140	struct scatterlist *sg;
 141	int ret = 0;
 142	LIST_HEAD(to_be_dropped);
 143
 144restart:
 145
 146	/*
 147	 * sendmsg calls here after having queued its message on the send
 148	 * queue.  We only have one task feeding the connection at a time.  If
 149	 * another thread is already feeding the queue then we back off.  This
 150	 * avoids blocking the caller and trading per-connection data between
 151	 * caches per message.
 152	 */
 153	if (!acquire_in_xmit(conn)) {
 154		rds_stats_inc(s_send_lock_contention);
 155		ret = -ENOMEM;
 156		goto out;
 157	}
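     	/*
     	 * The -ENOMEM above isn't an allocation failure; it just tells the
     	 * caller that another task already owns the transmit path.  Anything
     	 * the caller already queued will be picked up when that task
     	 * re-checks the send queue after clearing RDS_IN_XMIT (see the end
     	 * of this function).
     	 */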
 158
 159	/*
 160	 * rds_conn_shutdown() sets the conn state and then tests RDS_IN_XMIT,
 161	 * we do the opposite to avoid races.
 162	 */
 163	if (!rds_conn_up(conn)) {
 164		release_in_xmit(conn);
 165		ret = 0;
 166		goto out;
 167	}
 168
 169	if (conn->c_trans->xmit_prepare)
 170		conn->c_trans->xmit_prepare(conn);
 171
 172	/*
 173	 * spin trying to push headers and data down the connection until
 174	 * the connection doesn't make forward progress.
 175	 */
 176	while (1) {
 177
 178		rm = conn->c_xmit_rm;
 179
 180		/*
 181		 * If between sending messages, we can send a pending congestion
 182		 * map update.
 183		 */
 184		if (!rm && test_and_clear_bit(0, &conn->c_map_queued)) {
 185			rm = rds_cong_update_alloc(conn);
 186			if (IS_ERR(rm)) {
 187				ret = PTR_ERR(rm);
 188				break;
 189			}
 190			rm->data.op_active = 1;
 191
 192			conn->c_xmit_rm = rm;
 193		}
 194
 195		/*
 196		 * If not already working on one, grab the next message.
 197		 *
 198		 * c_xmit_rm holds a ref while we're sending this message down
  199		 * the connection.  We can use this ref while we hold
  200		 * RDS_IN_XMIT; rds_send_reset() is serialized with it.
 201		 */
 202		if (!rm) {
 203			unsigned int len;
 204
 205			spin_lock_irqsave(&conn->c_lock, flags);
 206
 207			if (!list_empty(&conn->c_send_queue)) {
 208				rm = list_entry(conn->c_send_queue.next,
 209						struct rds_message,
 210						m_conn_item);
 211				rds_message_addref(rm);
 212
 213				/*
 214				 * Move the message from the send queue to the retransmit
 215				 * list right away.
 216				 */
 217				list_move_tail(&rm->m_conn_item, &conn->c_retrans);
 218			}
 219
 220			spin_unlock_irqrestore(&conn->c_lock, flags);
 221
 222			if (!rm)
 223				break;
 224
 225			/* Unfortunately, the way Infiniband deals with
 226			 * RDMA to a bad MR key is by moving the entire
  227			 * queue pair to error state. We could possibly
 228			 * recover from that, but right now we drop the
 229			 * connection.
 230			 * Therefore, we never retransmit messages with RDMA ops.
 231			 */
 232			if (rm->rdma.op_active &&
 233			    test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags)) {
 234				spin_lock_irqsave(&conn->c_lock, flags);
 235				if (test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags))
 236					list_move(&rm->m_conn_item, &to_be_dropped);
 237				spin_unlock_irqrestore(&conn->c_lock, flags);
 238				continue;
 239			}
 240
 241			/* Require an ACK every once in a while */
 242			len = ntohl(rm->m_inc.i_hdr.h_len);
 243			if (conn->c_unacked_packets == 0 ||
 244			    conn->c_unacked_bytes < len) {
 245				__set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);
 246
 247				conn->c_unacked_packets = rds_sysctl_max_unacked_packets;
 248				conn->c_unacked_bytes = rds_sysctl_max_unacked_bytes;
 249				rds_stats_inc(s_send_ack_required);
 250			} else {
 251				conn->c_unacked_bytes -= len;
 252				conn->c_unacked_packets--;
 253			}
 254
 255			conn->c_xmit_rm = rm;
 256		}
 257
 258		/* The transport either sends the whole rdma or none of it */
 259		if (rm->rdma.op_active && !conn->c_xmit_rdma_sent) {
 260			rm->m_final_op = &rm->rdma;
 261			ret = conn->c_trans->xmit_rdma(conn, &rm->rdma);
 262			if (ret)
 263				break;
 264			conn->c_xmit_rdma_sent = 1;
 265
 266			/* The transport owns the mapped memory for now.
 267			 * You can't unmap it while it's on the send queue */
 268			set_bit(RDS_MSG_MAPPED, &rm->m_flags);
 269		}
 270
 271		if (rm->atomic.op_active && !conn->c_xmit_atomic_sent) {
 272			rm->m_final_op = &rm->atomic;
 273			ret = conn->c_trans->xmit_atomic(conn, &rm->atomic);
 274			if (ret)
 275				break;
 276			conn->c_xmit_atomic_sent = 1;
 277
 278			/* The transport owns the mapped memory for now.
 279			 * You can't unmap it while it's on the send queue */
 280			set_bit(RDS_MSG_MAPPED, &rm->m_flags);
 281		}
 282
 283		/*
 284		 * A number of cases require an RDS header to be sent
 285		 * even if there is no data.
 286		 * We permit 0-byte sends; rds-ping depends on this.
 287		 * However, if there are exclusively attached silent ops,
 288		 * we skip the hdr/data send, to enable silent operation.
 289		 */
 290		if (rm->data.op_nents == 0) {
 291			int ops_present;
 292			int all_ops_are_silent = 1;
 293
 294			ops_present = (rm->atomic.op_active || rm->rdma.op_active);
 295			if (rm->atomic.op_active && !rm->atomic.op_silent)
 296				all_ops_are_silent = 0;
 297			if (rm->rdma.op_active && !rm->rdma.op_silent)
 298				all_ops_are_silent = 0;
 299
 300			if (ops_present && all_ops_are_silent
 301			    && !rm->m_rdma_cookie)
 302				rm->data.op_active = 0;
 303		}
 304
 305		if (rm->data.op_active && !conn->c_xmit_data_sent) {
 306			rm->m_final_op = &rm->data;
 307			ret = conn->c_trans->xmit(conn, rm,
 308						  conn->c_xmit_hdr_off,
 309						  conn->c_xmit_sg,
 310						  conn->c_xmit_data_off);
 311			if (ret <= 0)
 312				break;
 313
 314			if (conn->c_xmit_hdr_off < sizeof(struct rds_header)) {
 315				tmp = min_t(int, ret,
 316					    sizeof(struct rds_header) -
 317					    conn->c_xmit_hdr_off);
 318				conn->c_xmit_hdr_off += tmp;
 319				ret -= tmp;
 320			}
 321
 322			sg = &rm->data.op_sg[conn->c_xmit_sg];
 323			while (ret) {
 324				tmp = min_t(int, ret, sg->length -
 325						      conn->c_xmit_data_off);
 326				conn->c_xmit_data_off += tmp;
 327				ret -= tmp;
 328				if (conn->c_xmit_data_off == sg->length) {
 329					conn->c_xmit_data_off = 0;
 330					sg++;
 331					conn->c_xmit_sg++;
 332					BUG_ON(ret != 0 &&
 333					       conn->c_xmit_sg == rm->data.op_nents);
 334				}
 335			}
 336
 337			if (conn->c_xmit_hdr_off == sizeof(struct rds_header) &&
 338			    (conn->c_xmit_sg == rm->data.op_nents))
 339				conn->c_xmit_data_sent = 1;
 340		}
 341
 342		/*
  343	 * An rm will only take multiple trips through this loop
  344	 * if it has a data op. Thus, if the data is sent (or there was
 345		 * none), then we're done with the rm.
 346		 */
 347		if (!rm->data.op_active || conn->c_xmit_data_sent) {
 348			conn->c_xmit_rm = NULL;
 349			conn->c_xmit_sg = 0;
 350			conn->c_xmit_hdr_off = 0;
 351			conn->c_xmit_data_off = 0;
 352			conn->c_xmit_rdma_sent = 0;
 353			conn->c_xmit_atomic_sent = 0;
 354			conn->c_xmit_data_sent = 0;
 355
 356			rds_message_put(rm);
 357		}
 358	}
 359
 360	if (conn->c_trans->xmit_complete)
 361		conn->c_trans->xmit_complete(conn);
 362
 363	release_in_xmit(conn);
 364
 365	/* Nuke any messages we decided not to retransmit. */
 366	if (!list_empty(&to_be_dropped)) {
 367		/* irqs on here, so we can put(), unlike above */
 368		list_for_each_entry(rm, &to_be_dropped, m_conn_item)
 369			rds_message_put(rm);
 370		rds_send_remove_from_sock(&to_be_dropped, RDS_RDMA_DROPPED);
 371	}
 372
 373	/*
 374	 * Other senders can queue a message after we last test the send queue
 375	 * but before we clear RDS_IN_XMIT.  In that case they'd back off and
 376	 * not try and send their newly queued message.  We need to check the
 377	 * send queue after having cleared RDS_IN_XMIT so that their message
 378	 * doesn't get stuck on the send queue.
 379	 *
  380	 * If the transport cannot continue (i.e. ret != 0), then it must
 381	 * call us when more room is available, such as from the tx
 382	 * completion handler.
 383	 */
 384	if (ret == 0) {
 385		smp_mb();
 386		if (!list_empty(&conn->c_send_queue)) {
 387			rds_stats_inc(s_send_lock_queue_raced);
 388			goto restart;
 389		}
 390	}
 391out:
 392	return ret;
 393}
 394
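     /*
      * Credit a message's payload length back to the socket's send-buffer
      * accounting.  The caller must already hold rs_lock, hence the assert.
      */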
 395static void rds_send_sndbuf_remove(struct rds_sock *rs, struct rds_message *rm)
 396{
 397	u32 len = be32_to_cpu(rm->m_inc.i_hdr.h_len);
 398
 399	assert_spin_locked(&rs->rs_lock);
 400
 401	BUG_ON(rs->rs_snd_bytes < len);
 402	rs->rs_snd_bytes -= len;
 403
 404	if (rs->rs_snd_bytes == 0)
 405		rds_stats_inc(s_send_queue_empty);
 406}
 407
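     /*
      * A message counts as acked either by the transport's own is_acked()
      * callback, when one is supplied, or by default when its header
      * sequence number is at or below the acked sequence number.
      */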
 408static inline int rds_send_is_acked(struct rds_message *rm, u64 ack,
 409				    is_acked_func is_acked)
 410{
 411	if (is_acked)
 412		return is_acked(rm, ack);
 413	return be64_to_cpu(rm->m_inc.i_hdr.h_sequence) <= ack;
 414}
 415
 416/*
 417 * This is pretty similar to what happens below in the ACK
 418 * handling code - except that we call here as soon as we get
 419 * the IB send completion on the RDMA op and the accompanying
 420 * message.
 421 */
 422void rds_rdma_send_complete(struct rds_message *rm, int status)
 423{
 424	struct rds_sock *rs = NULL;
 425	struct rm_rdma_op *ro;
 426	struct rds_notifier *notifier;
 427	unsigned long flags;
 428
 429	spin_lock_irqsave(&rm->m_rs_lock, flags);
 430
 431	ro = &rm->rdma;
 432	if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags) &&
 433	    ro->op_active && ro->op_notify && ro->op_notifier) {
 434		notifier = ro->op_notifier;
 435		rs = rm->m_rs;
 436		sock_hold(rds_rs_to_sk(rs));
 437
 438		notifier->n_status = status;
 439		spin_lock(&rs->rs_lock);
 440		list_add_tail(&notifier->n_list, &rs->rs_notify_queue);
 441		spin_unlock(&rs->rs_lock);
 442
 443		ro->op_notifier = NULL;
 444	}
 445
 446	spin_unlock_irqrestore(&rm->m_rs_lock, flags);
 447
 448	if (rs) {
 449		rds_wake_sk_sleep(rs);
 450		sock_put(rds_rs_to_sk(rs));
 451	}
 452}
 453EXPORT_SYMBOL_GPL(rds_rdma_send_complete);
 454
 455/*
 456 * Just like above, except looks at atomic op
 457 */
 458void rds_atomic_send_complete(struct rds_message *rm, int status)
 459{
 460	struct rds_sock *rs = NULL;
 461	struct rm_atomic_op *ao;
 462	struct rds_notifier *notifier;
 463	unsigned long flags;
 464
 465	spin_lock_irqsave(&rm->m_rs_lock, flags);
 466
 467	ao = &rm->atomic;
 468	if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags)
 469	    && ao->op_active && ao->op_notify && ao->op_notifier) {
 470		notifier = ao->op_notifier;
 471		rs = rm->m_rs;
 472		sock_hold(rds_rs_to_sk(rs));
 473
 474		notifier->n_status = status;
 475		spin_lock(&rs->rs_lock);
 476		list_add_tail(&notifier->n_list, &rs->rs_notify_queue);
 477		spin_unlock(&rs->rs_lock);
 478
 479		ao->op_notifier = NULL;
 480	}
 481
 482	spin_unlock_irqrestore(&rm->m_rs_lock, flags);
 483
 484	if (rs) {
 485		rds_wake_sk_sleep(rs);
 486		sock_put(rds_rs_to_sk(rs));
 487	}
 488}
 489EXPORT_SYMBOL_GPL(rds_atomic_send_complete);
 490
 491/*
 492 * This is the same as rds_rdma_send_complete except we
 493 * don't do any locking - we have all the ingredients (message,
 494 * socket, socket lock) and can just move the notifier.
 495 */
 496static inline void
 497__rds_send_complete(struct rds_sock *rs, struct rds_message *rm, int status)
 498{
 499	struct rm_rdma_op *ro;
 500	struct rm_atomic_op *ao;
 501
 502	ro = &rm->rdma;
 503	if (ro->op_active && ro->op_notify && ro->op_notifier) {
 504		ro->op_notifier->n_status = status;
 505		list_add_tail(&ro->op_notifier->n_list, &rs->rs_notify_queue);
 506		ro->op_notifier = NULL;
 507	}
 508
 509	ao = &rm->atomic;
 510	if (ao->op_active && ao->op_notify && ao->op_notifier) {
 511		ao->op_notifier->n_status = status;
 512		list_add_tail(&ao->op_notifier->n_list, &rs->rs_notify_queue);
 513		ao->op_notifier = NULL;
 514	}
 515
 516	/* No need to wake the app - caller does this */
 517}
 518
 519/*
 520 * This is called from the IB send completion when we detect
 521 * a RDMA operation that failed with remote access error.
 522 * So speed is not an issue here.
 523 */
 524struct rds_message *rds_send_get_message(struct rds_connection *conn,
 525					 struct rm_rdma_op *op)
 526{
 527	struct rds_message *rm, *tmp, *found = NULL;
 528	unsigned long flags;
 529
 530	spin_lock_irqsave(&conn->c_lock, flags);
 531
 532	list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
 533		if (&rm->rdma == op) {
 534			atomic_inc(&rm->m_refcount);
 535			found = rm;
 536			goto out;
 537		}
 538	}
 539
 540	list_for_each_entry_safe(rm, tmp, &conn->c_send_queue, m_conn_item) {
 541		if (&rm->rdma == op) {
 542			atomic_inc(&rm->m_refcount);
 543			found = rm;
 544			break;
 545		}
 546	}
 547
 548out:
 549	spin_unlock_irqrestore(&conn->c_lock, flags);
 550
 551	return found;
 552}
 553EXPORT_SYMBOL_GPL(rds_send_get_message);
 554
 555/*
 556 * This removes messages from the socket's list if they're on it.  The list
 557 * argument must be private to the caller, we must be able to modify it
 558 * without locks.  The messages must have a reference held for their
 559 * position on the list.  This function will drop that reference after
 560 * removing the messages from the 'messages' list regardless of if it found
 561 * the messages on the socket list or not.
 562 */
 563static void rds_send_remove_from_sock(struct list_head *messages, int status)
 564{
 565	unsigned long flags;
 566	struct rds_sock *rs = NULL;
 567	struct rds_message *rm;
 568
 569	while (!list_empty(messages)) {
 570		int was_on_sock = 0;
 571
 572		rm = list_entry(messages->next, struct rds_message,
 573				m_conn_item);
 574		list_del_init(&rm->m_conn_item);
 575
 576		/*
 577		 * If we see this flag cleared then we're *sure* that someone
 578		 * else beat us to removing it from the sock.  If we race
 579		 * with their flag update we'll get the lock and then really
 580		 * see that the flag has been cleared.
 581		 *
 582		 * The message spinlock makes sure nobody clears rm->m_rs
 583		 * while we're messing with it. It does not prevent the
 584		 * message from being removed from the socket, though.
 585		 */
 586		spin_lock_irqsave(&rm->m_rs_lock, flags);
 587		if (!test_bit(RDS_MSG_ON_SOCK, &rm->m_flags))
 588			goto unlock_and_drop;
 589
 590		if (rs != rm->m_rs) {
 591			if (rs) {
 592				rds_wake_sk_sleep(rs);
 593				sock_put(rds_rs_to_sk(rs));
 594			}
 595			rs = rm->m_rs;
 596			sock_hold(rds_rs_to_sk(rs));
 597		}
 598		spin_lock(&rs->rs_lock);
 599
 600		if (test_and_clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags)) {
 601			struct rm_rdma_op *ro = &rm->rdma;
 602			struct rds_notifier *notifier;
 603
 604			list_del_init(&rm->m_sock_item);
 605			rds_send_sndbuf_remove(rs, rm);
 606
 607			if (ro->op_active && ro->op_notifier &&
 608			       (ro->op_notify || (ro->op_recverr && status))) {
 609				notifier = ro->op_notifier;
 610				list_add_tail(&notifier->n_list,
 611						&rs->rs_notify_queue);
 612				if (!notifier->n_status)
 613					notifier->n_status = status;
 614				rm->rdma.op_notifier = NULL;
 615			}
 616			was_on_sock = 1;
 617			rm->m_rs = NULL;
 618		}
 619		spin_unlock(&rs->rs_lock);
 620
 621unlock_and_drop:
 622		spin_unlock_irqrestore(&rm->m_rs_lock, flags);
 623		rds_message_put(rm);
 624		if (was_on_sock)
 625			rds_message_put(rm);
 626	}
 627
 628	if (rs) {
 629		rds_wake_sk_sleep(rs);
 630		sock_put(rds_rs_to_sk(rs));
 631	}
 632}
 633
 634/*
 635 * Transports call here when they've determined that the receiver queued
 636 * messages up to, and including, the given sequence number.  Messages are
 637 * moved to the retrans queue when rds_send_xmit picks them off the send
 638 * queue. This means that in the TCP case, the message may not have been
 639 * assigned the m_ack_seq yet - but that's fine as long as tcp_is_acked
 640 * checks the RDS_MSG_HAS_ACK_SEQ bit.
 641 *
 642 * XXX It's not clear to me how this is safely serialized with socket
 643 * destruction.  Maybe it should bail if it sees SOCK_DEAD.
 644 */
 645void rds_send_drop_acked(struct rds_connection *conn, u64 ack,
 646			 is_acked_func is_acked)
 647{
 648	struct rds_message *rm, *tmp;
 649	unsigned long flags;
 650	LIST_HEAD(list);
 651
 652	spin_lock_irqsave(&conn->c_lock, flags);
 653
 654	list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
 655		if (!rds_send_is_acked(rm, ack, is_acked))
 656			break;
 657
 658		list_move(&rm->m_conn_item, &list);
 659		clear_bit(RDS_MSG_ON_CONN, &rm->m_flags);
 660	}
 661
 662	/* order flag updates with spin locks */
 663	if (!list_empty(&list))
 664		smp_mb__after_clear_bit();
 665
 666	spin_unlock_irqrestore(&conn->c_lock, flags);
 667
 668	/* now remove the messages from the sock list as needed */
 669	rds_send_remove_from_sock(&list, RDS_RDMA_SUCCESS);
 670}
 671EXPORT_SYMBOL_GPL(rds_send_drop_acked);
 672
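     /*
      * Drop queued messages addressed to 'dest', or every queued message
      * when dest is NULL.  Each match is unlinked from the socket and
      * connection queues, its notifiers complete with RDS_RDMA_CANCELED,
      * and we wait for any transmit in flight to finish before the final
      * put.
      */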
 673void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest)
 674{
 675	struct rds_message *rm, *tmp;
 676	struct rds_connection *conn;
 677	unsigned long flags;
 678	LIST_HEAD(list);
 679
 680	/* get all the messages we're dropping under the rs lock */
 681	spin_lock_irqsave(&rs->rs_lock, flags);
 682
 683	list_for_each_entry_safe(rm, tmp, &rs->rs_send_queue, m_sock_item) {
 684		if (dest && (dest->sin_addr.s_addr != rm->m_daddr ||
 685			     dest->sin_port != rm->m_inc.i_hdr.h_dport))
 686			continue;
 687
 688		list_move(&rm->m_sock_item, &list);
 689		rds_send_sndbuf_remove(rs, rm);
 690		clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags);
 691	}
 692
 693	/* order flag updates with the rs lock */
 694	smp_mb__after_clear_bit();
 695
 696	spin_unlock_irqrestore(&rs->rs_lock, flags);
 697
 698	if (list_empty(&list))
 699		return;
 700
 701	/* Remove the messages from the conn */
 702	list_for_each_entry(rm, &list, m_sock_item) {
 703
 704		conn = rm->m_inc.i_conn;
 705
 706		spin_lock_irqsave(&conn->c_lock, flags);
 707		/*
 708		 * Maybe someone else beat us to removing rm from the conn.
 709		 * If we race with their flag update we'll get the lock and
 710		 * then really see that the flag has been cleared.
 711		 */
 712		if (!test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags)) {
 713			spin_unlock_irqrestore(&conn->c_lock, flags);
 714			continue;
 715		}
 716		list_del_init(&rm->m_conn_item);
 717		spin_unlock_irqrestore(&conn->c_lock, flags);
 718
 719		/*
 720		 * Couldn't grab m_rs_lock in top loop (lock ordering),
 721		 * but we can now.
 722		 */
 723		spin_lock_irqsave(&rm->m_rs_lock, flags);
 724
 725		spin_lock(&rs->rs_lock);
 726		__rds_send_complete(rs, rm, RDS_RDMA_CANCELED);
 727		spin_unlock(&rs->rs_lock);
 728
 729		rm->m_rs = NULL;
 730		spin_unlock_irqrestore(&rm->m_rs_lock, flags);
 731
 732		rds_message_put(rm);
 733	}
 734
 735	rds_wake_sk_sleep(rs);
 736
 737	while (!list_empty(&list)) {
 738		rm = list_entry(list.next, struct rds_message, m_sock_item);
 739		list_del_init(&rm->m_sock_item);
 740
 741		rds_message_wait(rm);
 742		rds_message_put(rm);
 743	}
 744}
 745
 746/*
  747 * we only want this to fire once so we use the caller's 'queued'.  It's
 748 * possible that another thread can race with us and remove the
 749 * message from the flow with RDS_CANCEL_SENT_TO.
 750 */
 751static int rds_send_queue_rm(struct rds_sock *rs, struct rds_connection *conn,
 752			     struct rds_message *rm, __be16 sport,
 753			     __be16 dport, int *queued)
 754{
 755	unsigned long flags;
 756	u32 len;
 757
 758	if (*queued)
 759		goto out;
 760
 761	len = be32_to_cpu(rm->m_inc.i_hdr.h_len);
 762
 763	/* this is the only place which holds both the socket's rs_lock
 764	 * and the connection's c_lock */
 765	spin_lock_irqsave(&rs->rs_lock, flags);
 766
 767	/*
 768	 * If there is a little space in sndbuf, we don't queue anything,
 769	 * and userspace gets -EAGAIN. But poll() indicates there's send
 770	 * room. This can lead to bad behavior (spinning) if snd_bytes isn't
 771	 * freed up by incoming acks. So we check the *old* value of
 772	 * rs_snd_bytes here to allow the last msg to exceed the buffer,
 773	 * and poll() now knows no more data can be sent.
 774	 */
 775	if (rs->rs_snd_bytes < rds_sk_sndbuf(rs)) {
 776		rs->rs_snd_bytes += len;
 777
 778		/* let recv side know we are close to send space exhaustion.
 779		 * This is probably not the optimal way to do it, as this
 780		 * means we set the flag on *all* messages as soon as our
 781		 * throughput hits a certain threshold.
 782		 */
 783		if (rs->rs_snd_bytes >= rds_sk_sndbuf(rs) / 2)
 784			__set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);
 785
 786		list_add_tail(&rm->m_sock_item, &rs->rs_send_queue);
 787		set_bit(RDS_MSG_ON_SOCK, &rm->m_flags);
 788		rds_message_addref(rm);
 789		rm->m_rs = rs;
 790
 791		/* The code ordering is a little weird, but we're
 792		   trying to minimize the time we hold c_lock */
 793		rds_message_populate_header(&rm->m_inc.i_hdr, sport, dport, 0);
 794		rm->m_inc.i_conn = conn;
 795		rds_message_addref(rm);
 796
 797		spin_lock(&conn->c_lock);
 798		rm->m_inc.i_hdr.h_sequence = cpu_to_be64(conn->c_next_tx_seq++);
 799		list_add_tail(&rm->m_conn_item, &conn->c_send_queue);
 800		set_bit(RDS_MSG_ON_CONN, &rm->m_flags);
 801		spin_unlock(&conn->c_lock);
 802
 803		rdsdebug("queued msg %p len %d, rs %p bytes %d seq %llu\n",
 804			 rm, len, rs, rs->rs_snd_bytes,
 805			 (unsigned long long)be64_to_cpu(rm->m_inc.i_hdr.h_sequence));
 806
 807		*queued = 1;
 808	}
 809
 810	spin_unlock_irqrestore(&rs->rs_lock, flags);
 811out:
 812	return *queued;
 813}
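     /*
      * Note the return value is simply *queued, which lets rds_sendmsg()
      * pass this function directly as the condition of
      * wait_event_interruptible_timeout() and retry until the sndbuf
      * accounting admits the message.
      */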
 814
 815/*
 816 * rds_message is getting to be quite complicated, and we'd like to allocate
 817 * it all in one go. This figures out how big it needs to be up front.
 818 */
 819static int rds_rm_size(struct msghdr *msg, int data_len)
 820{
 821	struct cmsghdr *cmsg;
 822	int size = 0;
 823	int cmsg_groups = 0;
 824	int retval;
 825
 826	for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
 827		if (!CMSG_OK(msg, cmsg))
 828			return -EINVAL;
 829
 830		if (cmsg->cmsg_level != SOL_RDS)
 831			continue;
 832
 833		switch (cmsg->cmsg_type) {
 834		case RDS_CMSG_RDMA_ARGS:
 835			cmsg_groups |= 1;
 836			retval = rds_rdma_extra_size(CMSG_DATA(cmsg));
 837			if (retval < 0)
 838				return retval;
 839			size += retval;
 840
 841			break;
 842
 843		case RDS_CMSG_RDMA_DEST:
 844		case RDS_CMSG_RDMA_MAP:
 845			cmsg_groups |= 2;
  846			/* these are valid but do not add any size */
 847			break;
 848
 849		case RDS_CMSG_ATOMIC_CSWP:
 850		case RDS_CMSG_ATOMIC_FADD:
 851		case RDS_CMSG_MASKED_ATOMIC_CSWP:
 852		case RDS_CMSG_MASKED_ATOMIC_FADD:
 853			cmsg_groups |= 1;
 854			size += sizeof(struct scatterlist);
 855			break;
 856
 857		default:
 858			return -EINVAL;
 859		}
 860
 861	}
 862
 863	size += ceil(data_len, PAGE_SIZE) * sizeof(struct scatterlist);
 864
 865	/* Ensure (DEST, MAP) are never used with (ARGS, ATOMIC) */
 866	if (cmsg_groups == 3)
 867		return -EINVAL;
 868
 869	return size;
 870}
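     /*
      * Worked example: a payload spanning two pages plus a single
      * RDS_CMSG_ATOMIC_CSWP control message sizes out to
      * 3 * sizeof(struct scatterlist): two sg entries for the data
      * (ceil(data_len, PAGE_SIZE)) and one more for the atomic op.
      */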
 871
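     /*
      * Walk the control messages supplied with a sendmsg() call and attach
      * the requested RDMA, MR, and atomic operations to the rds_message.
      * Any cmsg type we don't recognize fails the send with -EINVAL.
      */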
 872static int rds_cmsg_send(struct rds_sock *rs, struct rds_message *rm,
 873			 struct msghdr *msg, int *allocated_mr)
 874{
 875	struct cmsghdr *cmsg;
 876	int ret = 0;
 877
 878	for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
 879		if (!CMSG_OK(msg, cmsg))
 880			return -EINVAL;
 881
 882		if (cmsg->cmsg_level != SOL_RDS)
 883			continue;
 884
 885		/* As a side effect, RDMA_DEST and RDMA_MAP will set
 886		 * rm->rdma.m_rdma_cookie and rm->rdma.m_rdma_mr.
 887		 */
 888		switch (cmsg->cmsg_type) {
 889		case RDS_CMSG_RDMA_ARGS:
 890			ret = rds_cmsg_rdma_args(rs, rm, cmsg);
 891			break;
 892
 893		case RDS_CMSG_RDMA_DEST:
 894			ret = rds_cmsg_rdma_dest(rs, rm, cmsg);
 895			break;
 896
 897		case RDS_CMSG_RDMA_MAP:
 898			ret = rds_cmsg_rdma_map(rs, rm, cmsg);
 899			if (!ret)
 900				*allocated_mr = 1;
 901			break;
 902		case RDS_CMSG_ATOMIC_CSWP:
 903		case RDS_CMSG_ATOMIC_FADD:
 904		case RDS_CMSG_MASKED_ATOMIC_CSWP:
 905		case RDS_CMSG_MASKED_ATOMIC_FADD:
 906			ret = rds_cmsg_atomic(rs, rm, cmsg);
 907			break;
 908
 909		default:
 910			return -EINVAL;
 911		}
 912
 913		if (ret)
 914			break;
 915	}
 916
 917	return ret;
 918}
 919
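     /*
      * sendmsg() entry point.  Roughly: size and allocate the rds_message,
      * copy in the payload, parse any control messages, find or create the
      * connection for the destination, wait for congestion and sndbuf
      * space, queue the message and give rds_send_xmit() a kick.
      *
      * For reference, a minimal userspace sketch of what reaches this path
      * (a hypothetical illustration: local_sin, dest_sin, buf and len are
      * placeholders and error handling is omitted; the socket must be
      * bound, and the destination must be a sockaddr_in with
      * sin_family == AF_INET, as checked below):
      *
      *	int fd = socket(PF_RDS, SOCK_SEQPACKET, 0);
      *	bind(fd, (struct sockaddr *)&local_sin, sizeof(local_sin));
      *
      *	struct iovec iov = { .iov_base = buf, .iov_len = len };
      *	struct msghdr msg = {
      *		.msg_name	= &dest_sin,
      *		.msg_namelen	= sizeof(dest_sin),
      *		.msg_iov	= &iov,
      *		.msg_iovlen	= 1,
      *	};
      *	sendmsg(fd, &msg, 0);
      */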
 920int rds_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
 921		size_t payload_len)
 922{
 923	struct sock *sk = sock->sk;
 924	struct rds_sock *rs = rds_sk_to_rs(sk);
 925	struct sockaddr_in *usin = (struct sockaddr_in *)msg->msg_name;
 926	__be32 daddr;
 927	__be16 dport;
 928	struct rds_message *rm = NULL;
 929	struct rds_connection *conn;
 930	int ret = 0;
 931	int queued = 0, allocated_mr = 0;
 932	int nonblock = msg->msg_flags & MSG_DONTWAIT;
 933	long timeo = sock_sndtimeo(sk, nonblock);
 934
  935	/* Mirror how Linux UDP mirrors BSD error message compatibility */
 936	/* XXX: Perhaps MSG_MORE someday */
  937	if (msg->msg_flags & ~(MSG_DONTWAIT | MSG_CMSG_COMPAT)) {
  938		ret = -EOPNOTSUPP;
 939		goto out;
 940	}
 941
 942	if (msg->msg_namelen) {
 943		/* XXX fail non-unicast destination IPs? */
 944		if (msg->msg_namelen < sizeof(*usin) || usin->sin_family != AF_INET) {
 945			ret = -EINVAL;
 946			goto out;
 947		}
 948		daddr = usin->sin_addr.s_addr;
 949		dport = usin->sin_port;
 950	} else {
 951		/* We only care about consistency with ->connect() */
 952		lock_sock(sk);
 953		daddr = rs->rs_conn_addr;
 954		dport = rs->rs_conn_port;
 955		release_sock(sk);
 956	}
 957
 958	/* racing with another thread binding seems ok here */
 959	if (daddr == 0 || rs->rs_bound_addr == 0) {
 960		ret = -ENOTCONN; /* XXX not a great errno */
 961		goto out;
 962	}
 963
 964	/* size of rm including all sgs */
 965	ret = rds_rm_size(msg, payload_len);
 966	if (ret < 0)
 967		goto out;
 968
 969	rm = rds_message_alloc(ret, GFP_KERNEL);
 970	if (!rm) {
 971		ret = -ENOMEM;
 972		goto out;
 973	}
 974
 975	/* Attach data to the rm */
 976	if (payload_len) {
 977		rm->data.op_sg = rds_message_alloc_sgs(rm, ceil(payload_len, PAGE_SIZE));
 978		if (!rm->data.op_sg) {
 979			ret = -ENOMEM;
 980			goto out;
 981		}
 982		ret = rds_message_copy_from_user(rm, msg->msg_iov, payload_len);
 983		if (ret)
 984			goto out;
 985	}
 986	rm->data.op_active = 1;
 987
 988	rm->m_daddr = daddr;
 989
 990	/* rds_conn_create has a spinlock that runs with IRQ off.
 991	 * Caching the conn in the socket helps a lot. */
 992	if (rs->rs_conn && rs->rs_conn->c_faddr == daddr)
 993		conn = rs->rs_conn;
 994	else {
 995		conn = rds_conn_create_outgoing(rs->rs_bound_addr, daddr,
 996					rs->rs_transport,
 997					sock->sk->sk_allocation);
 998		if (IS_ERR(conn)) {
 999			ret = PTR_ERR(conn);
1000			goto out;
1001		}
1002		rs->rs_conn = conn;
1003	}
1004
1005	/* Parse any control messages the user may have included. */
1006	ret = rds_cmsg_send(rs, rm, msg, &allocated_mr);
1007	if (ret)
1008		goto out;
1009
1010	if (rm->rdma.op_active && !conn->c_trans->xmit_rdma) {
1011		printk_ratelimited(KERN_NOTICE "rdma_op %p conn xmit_rdma %p\n",
1012			       &rm->rdma, conn->c_trans->xmit_rdma);
1013		ret = -EOPNOTSUPP;
1014		goto out;
1015	}
1016
1017	if (rm->atomic.op_active && !conn->c_trans->xmit_atomic) {
1018		printk_ratelimited(KERN_NOTICE "atomic_op %p conn xmit_atomic %p\n",
1019			       &rm->atomic, conn->c_trans->xmit_atomic);
1020		ret = -EOPNOTSUPP;
1021		goto out;
1022	}
1023
1024	rds_conn_connect_if_down(conn);
1025
1026	ret = rds_cong_wait(conn->c_fcong, dport, nonblock, rs);
1027	if (ret) {
1028		rs->rs_seen_congestion = 1;
1029		goto out;
1030	}
1031
1032	while (!rds_send_queue_rm(rs, conn, rm, rs->rs_bound_port,
1033				  dport, &queued)) {
1034		rds_stats_inc(s_send_queue_full);
1035		/* XXX make sure this is reasonable */
1036		if (payload_len > rds_sk_sndbuf(rs)) {
1037			ret = -EMSGSIZE;
1038			goto out;
1039		}
1040		if (nonblock) {
1041			ret = -EAGAIN;
1042			goto out;
1043		}
1044
1045		timeo = wait_event_interruptible_timeout(*sk_sleep(sk),
1046					rds_send_queue_rm(rs, conn, rm,
1047							  rs->rs_bound_port,
1048							  dport,
1049							  &queued),
1050					timeo);
1051		rdsdebug("sendmsg woke queued %d timeo %ld\n", queued, timeo);
1052		if (timeo > 0 || timeo == MAX_SCHEDULE_TIMEOUT)
1053			continue;
1054
1055		ret = timeo;
1056		if (ret == 0)
1057			ret = -ETIMEDOUT;
1058		goto out;
1059	}
1060
1061	/*
1062	 * By now we've committed to the send.  We reuse rds_send_worker()
1063	 * to retry sends in the rds thread if the transport asks us to.
1064	 */
1065	rds_stats_inc(s_send_queued);
1066
1067	if (!test_bit(RDS_LL_SEND_FULL, &conn->c_flags))
1068		rds_send_xmit(conn);
1069
1070	rds_message_put(rm);
1071	return payload_len;
1072
1073out:
1074	/* If the user included a RDMA_MAP cmsg, we allocated a MR on the fly.
1075	 * If the sendmsg goes through, we keep the MR. If it fails with EAGAIN
1076	 * or in any other way, we need to destroy the MR again */
1077	if (allocated_mr)
1078		rds_rdma_unuse(rs, rds_rdma_cookie_key(rm->m_rdma_cookie), 1);
1079
1080	if (rm)
1081		rds_message_put(rm);
1082	return ret;
1083}
1084
1085/*
1086 * Reply to a ping packet.
1087 */
1088int
1089rds_send_pong(struct rds_connection *conn, __be16 dport)
1090{
1091	struct rds_message *rm;
1092	unsigned long flags;
1093	int ret = 0;
1094
1095	rm = rds_message_alloc(0, GFP_ATOMIC);
1096	if (!rm) {
1097		ret = -ENOMEM;
1098		goto out;
1099	}
1100
1101	rm->m_daddr = conn->c_faddr;
1102	rm->data.op_active = 1;
1103
1104	rds_conn_connect_if_down(conn);
1105
1106	ret = rds_cong_wait(conn->c_fcong, dport, 1, NULL);
1107	if (ret)
1108		goto out;
1109
1110	spin_lock_irqsave(&conn->c_lock, flags);
1111	list_add_tail(&rm->m_conn_item, &conn->c_send_queue);
1112	set_bit(RDS_MSG_ON_CONN, &rm->m_flags);
1113	rds_message_addref(rm);
1114	rm->m_inc.i_conn = conn;
1115
1116	rds_message_populate_header(&rm->m_inc.i_hdr, 0, dport,
1117				    conn->c_next_tx_seq);
1118	conn->c_next_tx_seq++;
1119	spin_unlock_irqrestore(&conn->c_lock, flags);
1120
1121	rds_stats_inc(s_send_queued);
1122	rds_stats_inc(s_send_pong);
1123
1124	if (!test_bit(RDS_LL_SEND_FULL, &conn->c_flags))
1125		rds_send_xmit(conn);
1126
1127	rds_message_put(rm);
1128	return 0;
1129
1130out:
1131	if (rm)
1132		rds_message_put(rm);
1133	return ret;
1134}
v3.1
   1/*
   2 * Copyright (c) 2006 Oracle.  All rights reserved.
   3 *
   4 * This software is available to you under a choice of one of two
   5 * licenses.  You may choose to be licensed under the terms of the GNU
   6 * General Public License (GPL) Version 2, available from the file
   7 * COPYING in the main directory of this source tree, or the
   8 * OpenIB.org BSD license below:
   9 *
  10 *     Redistribution and use in source and binary forms, with or
  11 *     without modification, are permitted provided that the following
  12 *     conditions are met:
  13 *
  14 *      - Redistributions of source code must retain the above
  15 *        copyright notice, this list of conditions and the following
  16 *        disclaimer.
  17 *
  18 *      - Redistributions in binary form must reproduce the above
  19 *        copyright notice, this list of conditions and the following
  20 *        disclaimer in the documentation and/or other materials
  21 *        provided with the distribution.
  22 *
  23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  30 * SOFTWARE.
  31 *
  32 */
  33#include <linux/kernel.h>
 
  34#include <linux/gfp.h>
  35#include <net/sock.h>
  36#include <linux/in.h>
  37#include <linux/list.h>
  38#include <linux/ratelimit.h>
 
  39
  40#include "rds.h"
  41
  42/* When transmitting messages in rds_send_xmit, we need to emerge from
  43 * time to time and briefly release the CPU. Otherwise the softlock watchdog
  44 * will kick our shin.
  45 * Also, it seems fairer to not let one busy connection stall all the
  46 * others.
  47 *
  48 * send_batch_count is the number of times we'll loop in send_xmit. Setting
  49 * it to 0 will restore the old behavior (where we looped until we had
  50 * drained the queue).
  51 */
  52static int send_batch_count = 64;
  53module_param(send_batch_count, int, 0444);
  54MODULE_PARM_DESC(send_batch_count, " batch factor when working the send queue");
  55
  56static void rds_send_remove_from_sock(struct list_head *messages, int status);
  57
  58/*
  59 * Reset the send state.  Callers must ensure that this doesn't race with
  60 * rds_send_xmit().
  61 */
  62void rds_send_reset(struct rds_connection *conn)
  63{
  64	struct rds_message *rm, *tmp;
  65	unsigned long flags;
  66
  67	if (conn->c_xmit_rm) {
  68		rm = conn->c_xmit_rm;
  69		conn->c_xmit_rm = NULL;
  70		/* Tell the user the RDMA op is no longer mapped by the
  71		 * transport. This isn't entirely true (it's flushed out
  72		 * independently) but as the connection is down, there's
  73		 * no ongoing RDMA to/from that memory */
  74		rds_message_unmapped(rm);
  75		rds_message_put(rm);
  76	}
  77
  78	conn->c_xmit_sg = 0;
  79	conn->c_xmit_hdr_off = 0;
  80	conn->c_xmit_data_off = 0;
  81	conn->c_xmit_atomic_sent = 0;
  82	conn->c_xmit_rdma_sent = 0;
  83	conn->c_xmit_data_sent = 0;
  84
  85	conn->c_map_queued = 0;
  86
  87	conn->c_unacked_packets = rds_sysctl_max_unacked_packets;
  88	conn->c_unacked_bytes = rds_sysctl_max_unacked_bytes;
  89
  90	/* Mark messages as retransmissions, and move them to the send q */
  91	spin_lock_irqsave(&conn->c_lock, flags);
  92	list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
  93		set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);
  94		set_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags);
  95	}
  96	list_splice_init(&conn->c_retrans, &conn->c_send_queue);
  97	spin_unlock_irqrestore(&conn->c_lock, flags);
  98}
  99
 100static int acquire_in_xmit(struct rds_connection *conn)
 101{
 102	return test_and_set_bit(RDS_IN_XMIT, &conn->c_flags) == 0;
 103}
 104
 105static void release_in_xmit(struct rds_connection *conn)
 106{
 107	clear_bit(RDS_IN_XMIT, &conn->c_flags);
 108	smp_mb__after_clear_bit();
 109	/*
 110	 * We don't use wait_on_bit()/wake_up_bit() because our waking is in a
 111	 * hot path and finding waiters is very rare.  We don't want to walk
 112	 * the system-wide hashed waitqueue buckets in the fast path only to
 113	 * almost never find waiters.
 114	 */
 115	if (waitqueue_active(&conn->c_waitq))
 116		wake_up_all(&conn->c_waitq);
 117}
 118
 119/*
 120 * We're making the conscious trade-off here to only send one message
 121 * down the connection at a time.
 122 *   Pro:
 123 *      - tx queueing is a simple fifo list
 124 *   	- reassembly is optional and easily done by transports per conn
 125 *      - no per flow rx lookup at all, straight to the socket
 126 *   	- less per-frag memory and wire overhead
 127 *   Con:
 128 *      - queued acks can be delayed behind large messages
 129 *   Depends:
 130 *      - small message latency is higher behind queued large messages
 131 *      - large message latency isn't starved by intervening small sends
 132 */
 133int rds_send_xmit(struct rds_connection *conn)
 134{
 135	struct rds_message *rm;
 136	unsigned long flags;
 137	unsigned int tmp;
 138	struct scatterlist *sg;
 139	int ret = 0;
 140	LIST_HEAD(to_be_dropped);
 141
 142restart:
 143
 144	/*
 145	 * sendmsg calls here after having queued its message on the send
 146	 * queue.  We only have one task feeding the connection at a time.  If
 147	 * another thread is already feeding the queue then we back off.  This
 148	 * avoids blocking the caller and trading per-connection data between
 149	 * caches per message.
 150	 */
 151	if (!acquire_in_xmit(conn)) {
 152		rds_stats_inc(s_send_lock_contention);
 153		ret = -ENOMEM;
 154		goto out;
 155	}
 156
 157	/*
 158	 * rds_conn_shutdown() sets the conn state and then tests RDS_IN_XMIT,
 159	 * we do the opposite to avoid races.
 160	 */
 161	if (!rds_conn_up(conn)) {
 162		release_in_xmit(conn);
 163		ret = 0;
 164		goto out;
 165	}
 166
 167	if (conn->c_trans->xmit_prepare)
 168		conn->c_trans->xmit_prepare(conn);
 169
 170	/*
 171	 * spin trying to push headers and data down the connection until
 172	 * the connection doesn't make forward progress.
 173	 */
 174	while (1) {
 175
 176		rm = conn->c_xmit_rm;
 177
 178		/*
 179		 * If between sending messages, we can send a pending congestion
 180		 * map update.
 181		 */
 182		if (!rm && test_and_clear_bit(0, &conn->c_map_queued)) {
 183			rm = rds_cong_update_alloc(conn);
 184			if (IS_ERR(rm)) {
 185				ret = PTR_ERR(rm);
 186				break;
 187			}
 188			rm->data.op_active = 1;
 189
 190			conn->c_xmit_rm = rm;
 191		}
 192
 193		/*
 194		 * If not already working on one, grab the next message.
 195		 *
 196		 * c_xmit_rm holds a ref while we're sending this message down
 197		 * the connction.  We can use this ref while holding the
 198		 * send_sem.. rds_send_reset() is serialized with it.
 199		 */
 200		if (!rm) {
 201			unsigned int len;
 202
 203			spin_lock_irqsave(&conn->c_lock, flags);
 204
 205			if (!list_empty(&conn->c_send_queue)) {
 206				rm = list_entry(conn->c_send_queue.next,
 207						struct rds_message,
 208						m_conn_item);
 209				rds_message_addref(rm);
 210
 211				/*
 212				 * Move the message from the send queue to the retransmit
 213				 * list right away.
 214				 */
 215				list_move_tail(&rm->m_conn_item, &conn->c_retrans);
 216			}
 217
 218			spin_unlock_irqrestore(&conn->c_lock, flags);
 219
 220			if (!rm)
 221				break;
 222
 223			/* Unfortunately, the way Infiniband deals with
 224			 * RDMA to a bad MR key is by moving the entire
 225			 * queue pair to error state. We cold possibly
 226			 * recover from that, but right now we drop the
 227			 * connection.
 228			 * Therefore, we never retransmit messages with RDMA ops.
 229			 */
 230			if (rm->rdma.op_active &&
 231			    test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags)) {
 232				spin_lock_irqsave(&conn->c_lock, flags);
 233				if (test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags))
 234					list_move(&rm->m_conn_item, &to_be_dropped);
 235				spin_unlock_irqrestore(&conn->c_lock, flags);
 236				continue;
 237			}
 238
 239			/* Require an ACK every once in a while */
 240			len = ntohl(rm->m_inc.i_hdr.h_len);
 241			if (conn->c_unacked_packets == 0 ||
 242			    conn->c_unacked_bytes < len) {
 243				__set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);
 244
 245				conn->c_unacked_packets = rds_sysctl_max_unacked_packets;
 246				conn->c_unacked_bytes = rds_sysctl_max_unacked_bytes;
 247				rds_stats_inc(s_send_ack_required);
 248			} else {
 249				conn->c_unacked_bytes -= len;
 250				conn->c_unacked_packets--;
 251			}
 252
 253			conn->c_xmit_rm = rm;
 254		}
 255
 256		/* The transport either sends the whole rdma or none of it */
 257		if (rm->rdma.op_active && !conn->c_xmit_rdma_sent) {
 258			rm->m_final_op = &rm->rdma;
 259			ret = conn->c_trans->xmit_rdma(conn, &rm->rdma);
 260			if (ret)
 261				break;
 262			conn->c_xmit_rdma_sent = 1;
 263
 264			/* The transport owns the mapped memory for now.
 265			 * You can't unmap it while it's on the send queue */
 266			set_bit(RDS_MSG_MAPPED, &rm->m_flags);
 267		}
 268
 269		if (rm->atomic.op_active && !conn->c_xmit_atomic_sent) {
 270			rm->m_final_op = &rm->atomic;
 271			ret = conn->c_trans->xmit_atomic(conn, &rm->atomic);
 272			if (ret)
 273				break;
 274			conn->c_xmit_atomic_sent = 1;
 275
 276			/* The transport owns the mapped memory for now.
 277			 * You can't unmap it while it's on the send queue */
 278			set_bit(RDS_MSG_MAPPED, &rm->m_flags);
 279		}
 280
 281		/*
 282		 * A number of cases require an RDS header to be sent
 283		 * even if there is no data.
 284		 * We permit 0-byte sends; rds-ping depends on this.
 285		 * However, if there are exclusively attached silent ops,
 286		 * we skip the hdr/data send, to enable silent operation.
 287		 */
 288		if (rm->data.op_nents == 0) {
 289			int ops_present;
 290			int all_ops_are_silent = 1;
 291
 292			ops_present = (rm->atomic.op_active || rm->rdma.op_active);
 293			if (rm->atomic.op_active && !rm->atomic.op_silent)
 294				all_ops_are_silent = 0;
 295			if (rm->rdma.op_active && !rm->rdma.op_silent)
 296				all_ops_are_silent = 0;
 297
 298			if (ops_present && all_ops_are_silent
 299			    && !rm->m_rdma_cookie)
 300				rm->data.op_active = 0;
 301		}
 302
 303		if (rm->data.op_active && !conn->c_xmit_data_sent) {
 304			rm->m_final_op = &rm->data;
 305			ret = conn->c_trans->xmit(conn, rm,
 306						  conn->c_xmit_hdr_off,
 307						  conn->c_xmit_sg,
 308						  conn->c_xmit_data_off);
 309			if (ret <= 0)
 310				break;
 311
 312			if (conn->c_xmit_hdr_off < sizeof(struct rds_header)) {
 313				tmp = min_t(int, ret,
 314					    sizeof(struct rds_header) -
 315					    conn->c_xmit_hdr_off);
 316				conn->c_xmit_hdr_off += tmp;
 317				ret -= tmp;
 318			}
 319
 320			sg = &rm->data.op_sg[conn->c_xmit_sg];
 321			while (ret) {
 322				tmp = min_t(int, ret, sg->length -
 323						      conn->c_xmit_data_off);
 324				conn->c_xmit_data_off += tmp;
 325				ret -= tmp;
 326				if (conn->c_xmit_data_off == sg->length) {
 327					conn->c_xmit_data_off = 0;
 328					sg++;
 329					conn->c_xmit_sg++;
 330					BUG_ON(ret != 0 &&
 331					       conn->c_xmit_sg == rm->data.op_nents);
 332				}
 333			}
 334
 335			if (conn->c_xmit_hdr_off == sizeof(struct rds_header) &&
 336			    (conn->c_xmit_sg == rm->data.op_nents))
 337				conn->c_xmit_data_sent = 1;
 338		}
 339
 340		/*
 341		 * A rm will only take multiple times through this loop
 342		 * if there is a data op. Thus, if the data is sent (or there was
 343		 * none), then we're done with the rm.
 344		 */
 345		if (!rm->data.op_active || conn->c_xmit_data_sent) {
 346			conn->c_xmit_rm = NULL;
 347			conn->c_xmit_sg = 0;
 348			conn->c_xmit_hdr_off = 0;
 349			conn->c_xmit_data_off = 0;
 350			conn->c_xmit_rdma_sent = 0;
 351			conn->c_xmit_atomic_sent = 0;
 352			conn->c_xmit_data_sent = 0;
 353
 354			rds_message_put(rm);
 355		}
 356	}
 357
 358	if (conn->c_trans->xmit_complete)
 359		conn->c_trans->xmit_complete(conn);
 360
 361	release_in_xmit(conn);
 362
 363	/* Nuke any messages we decided not to retransmit. */
 364	if (!list_empty(&to_be_dropped)) {
 365		/* irqs on here, so we can put(), unlike above */
 366		list_for_each_entry(rm, &to_be_dropped, m_conn_item)
 367			rds_message_put(rm);
 368		rds_send_remove_from_sock(&to_be_dropped, RDS_RDMA_DROPPED);
 369	}
 370
 371	/*
 372	 * Other senders can queue a message after we last test the send queue
 373	 * but before we clear RDS_IN_XMIT.  In that case they'd back off and
 374	 * not try and send their newly queued message.  We need to check the
 375	 * send queue after having cleared RDS_IN_XMIT so that their message
 376	 * doesn't get stuck on the send queue.
 377	 *
 378	 * If the transport cannot continue (i.e ret != 0), then it must
 379	 * call us when more room is available, such as from the tx
 380	 * completion handler.
 381	 */
 382	if (ret == 0) {
 383		smp_mb();
 384		if (!list_empty(&conn->c_send_queue)) {
 385			rds_stats_inc(s_send_lock_queue_raced);
 386			goto restart;
 387		}
 388	}
 389out:
 390	return ret;
 391}
 392
 393static void rds_send_sndbuf_remove(struct rds_sock *rs, struct rds_message *rm)
 394{
 395	u32 len = be32_to_cpu(rm->m_inc.i_hdr.h_len);
 396
 397	assert_spin_locked(&rs->rs_lock);
 398
 399	BUG_ON(rs->rs_snd_bytes < len);
 400	rs->rs_snd_bytes -= len;
 401
 402	if (rs->rs_snd_bytes == 0)
 403		rds_stats_inc(s_send_queue_empty);
 404}
 405
 406static inline int rds_send_is_acked(struct rds_message *rm, u64 ack,
 407				    is_acked_func is_acked)
 408{
 409	if (is_acked)
 410		return is_acked(rm, ack);
 411	return be64_to_cpu(rm->m_inc.i_hdr.h_sequence) <= ack;
 412}
 413
 414/*
 415 * This is pretty similar to what happens below in the ACK
 416 * handling code - except that we call here as soon as we get
 417 * the IB send completion on the RDMA op and the accompanying
 418 * message.
 419 */
 420void rds_rdma_send_complete(struct rds_message *rm, int status)
 421{
 422	struct rds_sock *rs = NULL;
 423	struct rm_rdma_op *ro;
 424	struct rds_notifier *notifier;
 425	unsigned long flags;
 426
 427	spin_lock_irqsave(&rm->m_rs_lock, flags);
 428
 429	ro = &rm->rdma;
 430	if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags) &&
 431	    ro->op_active && ro->op_notify && ro->op_notifier) {
 432		notifier = ro->op_notifier;
 433		rs = rm->m_rs;
 434		sock_hold(rds_rs_to_sk(rs));
 435
 436		notifier->n_status = status;
 437		spin_lock(&rs->rs_lock);
 438		list_add_tail(&notifier->n_list, &rs->rs_notify_queue);
 439		spin_unlock(&rs->rs_lock);
 440
 441		ro->op_notifier = NULL;
 442	}
 443
 444	spin_unlock_irqrestore(&rm->m_rs_lock, flags);
 445
 446	if (rs) {
 447		rds_wake_sk_sleep(rs);
 448		sock_put(rds_rs_to_sk(rs));
 449	}
 450}
 451EXPORT_SYMBOL_GPL(rds_rdma_send_complete);
 452
 453/*
 454 * Just like above, except looks at atomic op
 455 */
 456void rds_atomic_send_complete(struct rds_message *rm, int status)
 457{
 458	struct rds_sock *rs = NULL;
 459	struct rm_atomic_op *ao;
 460	struct rds_notifier *notifier;
 461	unsigned long flags;
 462
 463	spin_lock_irqsave(&rm->m_rs_lock, flags);
 464
 465	ao = &rm->atomic;
 466	if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags)
 467	    && ao->op_active && ao->op_notify && ao->op_notifier) {
 468		notifier = ao->op_notifier;
 469		rs = rm->m_rs;
 470		sock_hold(rds_rs_to_sk(rs));
 471
 472		notifier->n_status = status;
 473		spin_lock(&rs->rs_lock);
 474		list_add_tail(&notifier->n_list, &rs->rs_notify_queue);
 475		spin_unlock(&rs->rs_lock);
 476
 477		ao->op_notifier = NULL;
 478	}
 479
 480	spin_unlock_irqrestore(&rm->m_rs_lock, flags);
 481
 482	if (rs) {
 483		rds_wake_sk_sleep(rs);
 484		sock_put(rds_rs_to_sk(rs));
 485	}
 486}
 487EXPORT_SYMBOL_GPL(rds_atomic_send_complete);
 488
 489/*
 490 * This is the same as rds_rdma_send_complete except we
 491 * don't do any locking - we have all the ingredients (message,
 492 * socket, socket lock) and can just move the notifier.
 493 */
 494static inline void
 495__rds_send_complete(struct rds_sock *rs, struct rds_message *rm, int status)
 496{
 497	struct rm_rdma_op *ro;
 498	struct rm_atomic_op *ao;
 499
 500	ro = &rm->rdma;
 501	if (ro->op_active && ro->op_notify && ro->op_notifier) {
 502		ro->op_notifier->n_status = status;
 503		list_add_tail(&ro->op_notifier->n_list, &rs->rs_notify_queue);
 504		ro->op_notifier = NULL;
 505	}
 506
 507	ao = &rm->atomic;
 508	if (ao->op_active && ao->op_notify && ao->op_notifier) {
 509		ao->op_notifier->n_status = status;
 510		list_add_tail(&ao->op_notifier->n_list, &rs->rs_notify_queue);
 511		ao->op_notifier = NULL;
 512	}
 513
 514	/* No need to wake the app - caller does this */
 515}
 516
 517/*
 518 * This is called from the IB send completion when we detect
 519 * a RDMA operation that failed with remote access error.
 520 * So speed is not an issue here.
 521 */
 522struct rds_message *rds_send_get_message(struct rds_connection *conn,
 523					 struct rm_rdma_op *op)
 524{
 525	struct rds_message *rm, *tmp, *found = NULL;
 526	unsigned long flags;
 527
 528	spin_lock_irqsave(&conn->c_lock, flags);
 529
 530	list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
 531		if (&rm->rdma == op) {
 532			atomic_inc(&rm->m_refcount);
 533			found = rm;
 534			goto out;
 535		}
 536	}
 537
 538	list_for_each_entry_safe(rm, tmp, &conn->c_send_queue, m_conn_item) {
 539		if (&rm->rdma == op) {
 540			atomic_inc(&rm->m_refcount);
 541			found = rm;
 542			break;
 543		}
 544	}
 545
 546out:
 547	spin_unlock_irqrestore(&conn->c_lock, flags);
 548
 549	return found;
 550}
 551EXPORT_SYMBOL_GPL(rds_send_get_message);
 552
 553/*
 554 * This removes messages from the socket's list if they're on it.  The list
 555 * argument must be private to the caller, we must be able to modify it
 556 * without locks.  The messages must have a reference held for their
 557 * position on the list.  This function will drop that reference after
 558 * removing the messages from the 'messages' list regardless of if it found
 559 * the messages on the socket list or not.
 560 */
 561static void rds_send_remove_from_sock(struct list_head *messages, int status)
 562{
 563	unsigned long flags;
 564	struct rds_sock *rs = NULL;
 565	struct rds_message *rm;
 566
 567	while (!list_empty(messages)) {
 568		int was_on_sock = 0;
 569
 570		rm = list_entry(messages->next, struct rds_message,
 571				m_conn_item);
 572		list_del_init(&rm->m_conn_item);
 573
 574		/*
 575		 * If we see this flag cleared then we're *sure* that someone
 576		 * else beat us to removing it from the sock.  If we race
 577		 * with their flag update we'll get the lock and then really
 578		 * see that the flag has been cleared.
 579		 *
 580		 * The message spinlock makes sure nobody clears rm->m_rs
 581		 * while we're messing with it. It does not prevent the
 582		 * message from being removed from the socket, though.
 583		 */
 584		spin_lock_irqsave(&rm->m_rs_lock, flags);
 585		if (!test_bit(RDS_MSG_ON_SOCK, &rm->m_flags))
 586			goto unlock_and_drop;
 587
 588		if (rs != rm->m_rs) {
 589			if (rs) {
 590				rds_wake_sk_sleep(rs);
 591				sock_put(rds_rs_to_sk(rs));
 592			}
 593			rs = rm->m_rs;
 594			sock_hold(rds_rs_to_sk(rs));
 595		}
 596		spin_lock(&rs->rs_lock);
 597
 598		if (test_and_clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags)) {
 599			struct rm_rdma_op *ro = &rm->rdma;
 600			struct rds_notifier *notifier;
 601
 602			list_del_init(&rm->m_sock_item);
 603			rds_send_sndbuf_remove(rs, rm);
 604
 605			if (ro->op_active && ro->op_notifier &&
 606			       (ro->op_notify || (ro->op_recverr && status))) {
 607				notifier = ro->op_notifier;
 608				list_add_tail(&notifier->n_list,
 609						&rs->rs_notify_queue);
 610				if (!notifier->n_status)
 611					notifier->n_status = status;
 612				rm->rdma.op_notifier = NULL;
 613			}
 614			was_on_sock = 1;
 615			rm->m_rs = NULL;
 616		}
 617		spin_unlock(&rs->rs_lock);
 618
 619unlock_and_drop:
 620		spin_unlock_irqrestore(&rm->m_rs_lock, flags);
 621		rds_message_put(rm);
 622		if (was_on_sock)
 623			rds_message_put(rm);
 624	}
 625
 626	if (rs) {
 627		rds_wake_sk_sleep(rs);
 628		sock_put(rds_rs_to_sk(rs));
 629	}
 630}
 631
 632/*
 633 * Transports call here when they've determined that the receiver queued
 634 * messages up to, and including, the given sequence number.  Messages are
 635 * moved to the retrans queue when rds_send_xmit picks them off the send
 636 * queue. This means that in the TCP case, the message may not have been
 637 * assigned the m_ack_seq yet - but that's fine as long as tcp_is_acked
 638 * checks the RDS_MSG_HAS_ACK_SEQ bit.
 639 *
 640 * XXX It's not clear to me how this is safely serialized with socket
 641 * destruction.  Maybe it should bail if it sees SOCK_DEAD.
 642 */
 643void rds_send_drop_acked(struct rds_connection *conn, u64 ack,
 644			 is_acked_func is_acked)
 645{
 646	struct rds_message *rm, *tmp;
 647	unsigned long flags;
 648	LIST_HEAD(list);
 649
 650	spin_lock_irqsave(&conn->c_lock, flags);
 651
 652	list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
 653		if (!rds_send_is_acked(rm, ack, is_acked))
 654			break;
 655
 656		list_move(&rm->m_conn_item, &list);
 657		clear_bit(RDS_MSG_ON_CONN, &rm->m_flags);
 658	}
 659
 660	/* order flag updates with spin locks */
 661	if (!list_empty(&list))
 662		smp_mb__after_clear_bit();
 663
 664	spin_unlock_irqrestore(&conn->c_lock, flags);
 665
 666	/* now remove the messages from the sock list as needed */
 667	rds_send_remove_from_sock(&list, RDS_RDMA_SUCCESS);
 668}
 669EXPORT_SYMBOL_GPL(rds_send_drop_acked);
 670
 671void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest)
 672{
 673	struct rds_message *rm, *tmp;
 674	struct rds_connection *conn;
 675	unsigned long flags;
 676	LIST_HEAD(list);
 677
 678	/* get all the messages we're dropping under the rs lock */
 679	spin_lock_irqsave(&rs->rs_lock, flags);
 680
 681	list_for_each_entry_safe(rm, tmp, &rs->rs_send_queue, m_sock_item) {
 682		if (dest && (dest->sin_addr.s_addr != rm->m_daddr ||
 683			     dest->sin_port != rm->m_inc.i_hdr.h_dport))
 684			continue;
 685
 686		list_move(&rm->m_sock_item, &list);
 687		rds_send_sndbuf_remove(rs, rm);
 688		clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags);
 689	}
 690
 691	/* order flag updates with the rs lock */
 692	smp_mb__after_clear_bit();
 693
 694	spin_unlock_irqrestore(&rs->rs_lock, flags);
 695
 696	if (list_empty(&list))
 697		return;
 698
 699	/* Remove the messages from the conn */
 700	list_for_each_entry(rm, &list, m_sock_item) {
 701
 702		conn = rm->m_inc.i_conn;
 703
 704		spin_lock_irqsave(&conn->c_lock, flags);
 705		/*
 706		 * Maybe someone else beat us to removing rm from the conn.
 707		 * If we race with their flag update we'll get the lock and
 708		 * then really see that the flag has been cleared.
 709		 */
 710		if (!test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags)) {
 711			spin_unlock_irqrestore(&conn->c_lock, flags);
 712			continue;
 713		}
 714		list_del_init(&rm->m_conn_item);
 715		spin_unlock_irqrestore(&conn->c_lock, flags);
 716
 717		/*
 718		 * Couldn't grab m_rs_lock in top loop (lock ordering),
 719		 * but we can now.
 720		 */
 721		spin_lock_irqsave(&rm->m_rs_lock, flags);
 722
 723		spin_lock(&rs->rs_lock);
 724		__rds_send_complete(rs, rm, RDS_RDMA_CANCELED);
 725		spin_unlock(&rs->rs_lock);
 726
 727		rm->m_rs = NULL;
 728		spin_unlock_irqrestore(&rm->m_rs_lock, flags);
 729
 730		rds_message_put(rm);
 731	}
 732
 733	rds_wake_sk_sleep(rs);
 734
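    	/* rds_message_wait() lets any transport use of the message finish
    	 * before the socket queue's reference is finally dropped. */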
 735	while (!list_empty(&list)) {
 736		rm = list_entry(list.next, struct rds_message, m_sock_item);
 737		list_del_init(&rm->m_sock_item);
 738
 739		rds_message_wait(rm);
 740		rds_message_put(rm);
 741	}
 742}
 743
 744/*
 745 * We only want this to fire once, so we use the caller's 'queued'.  It's
 746 * possible that another thread can race with us and remove the
 747 * message from the flow with RDS_CANCEL_SENT_TO.
 748 */
 749static int rds_send_queue_rm(struct rds_sock *rs, struct rds_connection *conn,
 750			     struct rds_message *rm, __be16 sport,
 751			     __be16 dport, int *queued)
 752{
 753	unsigned long flags;
 754	u32 len;
 755
 756	if (*queued)
 757		goto out;
 758
 759	len = be32_to_cpu(rm->m_inc.i_hdr.h_len);
 760
 761	/* this is the only place which holds both the socket's rs_lock
 762	 * and the connection's c_lock */
 763	spin_lock_irqsave(&rs->rs_lock, flags);
 764
 765	/*
 766	 * If we refused to queue while only a little sndbuf space is left,
 767	 * userspace would get -EAGAIN even though poll() still reports send
 768	 * room, which can lead to spinning if rs_snd_bytes isn't freed up by
 769	 * incoming acks. So we test the *old* value of rs_snd_bytes: the last
 770	 * message is allowed to exceed the buffer, and afterwards poll()
 771	 * knows that no more data can be sent.
 772	 */
 773	if (rs->rs_snd_bytes < rds_sk_sndbuf(rs)) {
 774		rs->rs_snd_bytes += len;
 775
 776		/* let recv side know we are close to send space exhaustion.
 777		 * This is probably not the optimal way to do it, as this
 778		 * means we set the flag on *all* messages as soon as our
 779		 * throughput hits a certain threshold.
 780		 */
 781		if (rs->rs_snd_bytes >= rds_sk_sndbuf(rs) / 2)
 782			__set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);
 783
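    		/* The socket send queue here and the connection send queue
    		 * below each take their own reference on the message. */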
 784		list_add_tail(&rm->m_sock_item, &rs->rs_send_queue);
 785		set_bit(RDS_MSG_ON_SOCK, &rm->m_flags);
 786		rds_message_addref(rm);
 787		rm->m_rs = rs;
 788
 789		/* The code ordering is a little weird, but we're
 790		   trying to minimize the time we hold c_lock */
 791		rds_message_populate_header(&rm->m_inc.i_hdr, sport, dport, 0);
 792		rm->m_inc.i_conn = conn;
 793		rds_message_addref(rm);
 794
 795		spin_lock(&conn->c_lock);
 796		rm->m_inc.i_hdr.h_sequence = cpu_to_be64(conn->c_next_tx_seq++);
 797		list_add_tail(&rm->m_conn_item, &conn->c_send_queue);
 798		set_bit(RDS_MSG_ON_CONN, &rm->m_flags);
 799		spin_unlock(&conn->c_lock);
 800
 801		rdsdebug("queued msg %p len %d, rs %p bytes %d seq %llu\n",
 802			 rm, len, rs, rs->rs_snd_bytes,
 803			 (unsigned long long)be64_to_cpu(rm->m_inc.i_hdr.h_sequence));
 804
 805		*queued = 1;
 806	}
 807
 808	spin_unlock_irqrestore(&rs->rs_lock, flags);
 809out:
 810	return *queued;
 811}
 812
 813/*
 814 * rds_message is getting to be quite complicated, and we'd like to allocate
 815 * it all in one go. This figures out how big it needs to be up front.
 816 */
 817static int rds_rm_size(struct msghdr *msg, int data_len)
 818{
 819	struct cmsghdr *cmsg;
 820	int size = 0;
 821	int cmsg_groups = 0;
 822	int retval;
 823
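    	/* cmsg_groups is a small bitmask: bit 0 is set by cmsgs that add
    	 * work to this message (RDMA_ARGS and the atomics), bit 1 by the
    	 * MR-related cmsgs (RDMA_DEST, RDMA_MAP).  Mixing the two groups
    	 * is rejected below. */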
 824	for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
 825		if (!CMSG_OK(msg, cmsg))
 826			return -EINVAL;
 827
 828		if (cmsg->cmsg_level != SOL_RDS)
 829			continue;
 830
 831		switch (cmsg->cmsg_type) {
 832		case RDS_CMSG_RDMA_ARGS:
 833			cmsg_groups |= 1;
 834			retval = rds_rdma_extra_size(CMSG_DATA(cmsg));
 835			if (retval < 0)
 836				return retval;
 837			size += retval;
 838
 839			break;
 840
 841		case RDS_CMSG_RDMA_DEST:
 842		case RDS_CMSG_RDMA_MAP:
 843			cmsg_groups |= 2;
 844			/* these are valid but do no add any size */
 845			break;
 846
 847		case RDS_CMSG_ATOMIC_CSWP:
 848		case RDS_CMSG_ATOMIC_FADD:
 849		case RDS_CMSG_MASKED_ATOMIC_CSWP:
 850		case RDS_CMSG_MASKED_ATOMIC_FADD:
 851			cmsg_groups |= 1;
 852			size += sizeof(struct scatterlist);
 853			break;
 854
 855		default:
 856			return -EINVAL;
 857		}
 858
 859	}
 860
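    	/* Reserve one scatterlist entry per page of immediate data. */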
 861	size += ceil(data_len, PAGE_SIZE) * sizeof(struct scatterlist);
 862
 863	/* Ensure (DEST, MAP) are never used with (ARGS, ATOMIC) */
 864	if (cmsg_groups == 3)
 865		return -EINVAL;
 866
 867	return size;
 868}
 869
 870static int rds_cmsg_send(struct rds_sock *rs, struct rds_message *rm,
 871			 struct msghdr *msg, int *allocated_mr)
 872{
 873	struct cmsghdr *cmsg;
 874	int ret = 0;
 875
 876	for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
 877		if (!CMSG_OK(msg, cmsg))
 878			return -EINVAL;
 879
 880		if (cmsg->cmsg_level != SOL_RDS)
 881			continue;
 882
 883		/* As a side effect, RDMA_DEST and RDMA_MAP will set
 884		 * rm->rdma.m_rdma_cookie and rm->rdma.m_rdma_mr.
 885		 */
 886		switch (cmsg->cmsg_type) {
 887		case RDS_CMSG_RDMA_ARGS:
 888			ret = rds_cmsg_rdma_args(rs, rm, cmsg);
 889			break;
 890
 891		case RDS_CMSG_RDMA_DEST:
 892			ret = rds_cmsg_rdma_dest(rs, rm, cmsg);
 893			break;
 894
 895		case RDS_CMSG_RDMA_MAP:
 896			ret = rds_cmsg_rdma_map(rs, rm, cmsg);
 897			if (!ret)
 898				*allocated_mr = 1;
 899			break;
 900		case RDS_CMSG_ATOMIC_CSWP:
 901		case RDS_CMSG_ATOMIC_FADD:
 902		case RDS_CMSG_MASKED_ATOMIC_CSWP:
 903		case RDS_CMSG_MASKED_ATOMIC_FADD:
 904			ret = rds_cmsg_atomic(rs, rm, cmsg);
 905			break;
 906
 907		default:
 908			return -EINVAL;
 909		}
 910
 911		if (ret)
 912			break;
 913	}
 914
 915	return ret;
 916}
 917
 918int rds_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
 919		size_t payload_len)
 920{
 921	struct sock *sk = sock->sk;
 922	struct rds_sock *rs = rds_sk_to_rs(sk);
 923	struct sockaddr_in *usin = (struct sockaddr_in *)msg->msg_name;
 924	__be32 daddr;
 925	__be16 dport;
 926	struct rds_message *rm = NULL;
 927	struct rds_connection *conn;
 928	int ret = 0;
 929	int queued = 0, allocated_mr = 0;
 930	int nonblock = msg->msg_flags & MSG_DONTWAIT;
 931	long timeo = sock_sndtimeo(sk, nonblock);
 932
 933	/* Mirror how Linux UDP mirrors BSD's error message compatibility */
 934	/* XXX: Perhaps MSG_MORE someday */
 935	if (msg->msg_flags & ~(MSG_DONTWAIT | MSG_CMSG_COMPAT)) {
 936		printk(KERN_INFO "msg_flags 0x%08X\n", msg->msg_flags);
 937		ret = -EOPNOTSUPP;
 938		goto out;
 939	}
 940
 941	if (msg->msg_namelen) {
 942		/* XXX fail non-unicast destination IPs? */
 943		if (msg->msg_namelen < sizeof(*usin) || usin->sin_family != AF_INET) {
 944			ret = -EINVAL;
 945			goto out;
 946		}
 947		daddr = usin->sin_addr.s_addr;
 948		dport = usin->sin_port;
 949	} else {
 950		/* We only care about consistency with ->connect() */
 951		lock_sock(sk);
 952		daddr = rs->rs_conn_addr;
 953		dport = rs->rs_conn_port;
 954		release_sock(sk);
 955	}
 956
 957	/* racing with another thread binding seems ok here */
 958	if (daddr == 0 || rs->rs_bound_addr == 0) {
 959		ret = -ENOTCONN; /* XXX not a great errno */
 960		goto out;
 961	}
 962
 963	/* size of rm including all sgs */
 964	ret = rds_rm_size(msg, payload_len);
 965	if (ret < 0)
 966		goto out;
 967
 968	rm = rds_message_alloc(ret, GFP_KERNEL);
 969	if (!rm) {
 970		ret = -ENOMEM;
 971		goto out;
 972	}
 973
 974	/* Attach data to the rm */
 975	if (payload_len) {
 976		rm->data.op_sg = rds_message_alloc_sgs(rm, ceil(payload_len, PAGE_SIZE));
 977		if (!rm->data.op_sg) {
 978			ret = -ENOMEM;
 979			goto out;
 980		}
 981		ret = rds_message_copy_from_user(rm, msg->msg_iov, payload_len);
 982		if (ret)
 983			goto out;
 984	}
 985	rm->data.op_active = 1;
 986
 987	rm->m_daddr = daddr;
 988
 989	/* rds_conn_create has a spinlock that runs with IRQ off.
 990	 * Caching the conn in the socket helps a lot. */
 991	if (rs->rs_conn && rs->rs_conn->c_faddr == daddr)
 992		conn = rs->rs_conn;
 993	else {
 994		conn = rds_conn_create_outgoing(rs->rs_bound_addr, daddr,
 995					rs->rs_transport,
 996					sock->sk->sk_allocation);
 997		if (IS_ERR(conn)) {
 998			ret = PTR_ERR(conn);
 999			goto out;
1000		}
1001		rs->rs_conn = conn;
1002	}
1003
1004	/* Parse any control messages the user may have included. */
1005	ret = rds_cmsg_send(rs, rm, msg, &allocated_mr);
1006	if (ret)
1007		goto out;
1008
1009	if (rm->rdma.op_active && !conn->c_trans->xmit_rdma) {
1010		printk_ratelimited(KERN_NOTICE "rdma_op %p conn xmit_rdma %p\n",
1011			       &rm->rdma, conn->c_trans->xmit_rdma);
1012		ret = -EOPNOTSUPP;
1013		goto out;
1014	}
1015
1016	if (rm->atomic.op_active && !conn->c_trans->xmit_atomic) {
1017		printk_ratelimited(KERN_NOTICE "atomic_op %p conn xmit_atomic %p\n",
1018			       &rm->atomic, conn->c_trans->xmit_atomic);
1019		ret = -EOPNOTSUPP;
1020		goto out;
1021	}
1022
1023	rds_conn_connect_if_down(conn);
1024
1025	ret = rds_cong_wait(conn->c_fcong, dport, nonblock, rs);
1026	if (ret) {
1027		rs->rs_seen_congestion = 1;
1028		goto out;
1029	}
1030
1031	while (!rds_send_queue_rm(rs, conn, rm, rs->rs_bound_port,
1032				  dport, &queued)) {
1033		rds_stats_inc(s_send_queue_full);
1034		/* XXX make sure this is reasonable */
1035		if (payload_len > rds_sk_sndbuf(rs)) {
1036			ret = -EMSGSIZE;
1037			goto out;
1038		}
1039		if (nonblock) {
1040			ret = -EAGAIN;
1041			goto out;
1042		}
1043
1044		timeo = wait_event_interruptible_timeout(*sk_sleep(sk),
1045					rds_send_queue_rm(rs, conn, rm,
1046							  rs->rs_bound_port,
1047							  dport,
1048							  &queued),
1049					timeo);
1050		rdsdebug("sendmsg woke queued %d timeo %ld\n", queued, timeo);
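    		/* timeo is the remaining timeout: a positive value (or
    		 * MAX_SCHEDULE_TIMEOUT on a blocking socket) means retry the
    		 * queue attempt, 0 means we timed out, and a negative value
    		 * is -ERESTARTSYS from a caught signal. */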
1051		if (timeo > 0 || timeo == MAX_SCHEDULE_TIMEOUT)
1052			continue;
1053
1054		ret = timeo;
1055		if (ret == 0)
1056			ret = -ETIMEDOUT;
1057		goto out;
1058	}
1059
1060	/*
1061	 * By now we've committed to the send.  We reuse rds_send_worker()
1062	 * to retry sends in the rds thread if the transport asks us to.
1063	 */
1064	rds_stats_inc(s_send_queued);
1065
1066	if (!test_bit(RDS_LL_SEND_FULL, &conn->c_flags))
1067		rds_send_xmit(conn);
1068
1069	rds_message_put(rm);
1070	return payload_len;
1071
1072out:
1073	/* If the user included an RDMA_MAP cmsg, we allocated an MR on the fly.
1074	 * If the sendmsg goes through, we keep the MR. If it fails with EAGAIN
1075	 * or in any other way, we need to destroy the MR again. */
1076	if (allocated_mr)
1077		rds_rdma_unuse(rs, rds_rdma_cookie_key(rm->m_rdma_cookie), 1);
1078
1079	if (rm)
1080		rds_message_put(rm);
1081	return ret;
1082}
1083
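    /*
     * For reference, a minimal userspace sketch of the path into
     * rds_sendmsg() above.  Illustrative only: the address and port are
     * made up, error handling is omitted, and the socket must already
     * have been bound to a local RDS address (otherwise we return
     * -ENOTCONN above).
     *
     *	int fd = socket(PF_RDS, SOCK_SEQPACKET, 0);
     *	struct sockaddr_in dst = {
     *		.sin_family = AF_INET,
     *		.sin_addr.s_addr = inet_addr("10.0.0.2"),
     *		.sin_port = htons(18634),
     *	};
     *	struct iovec iov = { .iov_base = buf, .iov_len = buf_len };
     *	struct msghdr msg = {
     *		.msg_name = &dst, .msg_namelen = sizeof(dst),
     *		.msg_iov = &iov, .msg_iovlen = 1,
     *	};
     *	sendmsg(fd, &msg, 0);
     *
     * RDMA and atomic operations are attached as SOL_RDS control messages
     * (RDS_CMSG_RDMA_ARGS, RDS_CMSG_ATOMIC_*, ...) and are parsed by
     * rds_cmsg_send() above; only MSG_DONTWAIT and MSG_CMSG_COMPAT are
     * accepted in msg_flags.
     */
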
1084/*
1085 * Reply to a ping packet.
1086 */
1087int
1088rds_send_pong(struct rds_connection *conn, __be16 dport)
1089{
1090	struct rds_message *rm;
1091	unsigned long flags;
1092	int ret = 0;
1093
1094	rm = rds_message_alloc(0, GFP_ATOMIC);
1095	if (!rm) {
1096		ret = -ENOMEM;
1097		goto out;
1098	}
1099
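    	/* A pong is a header-only message: no payload, sent from port 0
    	 * to the given destination port. */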
1100	rm->m_daddr = conn->c_faddr;
1101	rm->data.op_active = 1;
1102
1103	rds_conn_connect_if_down(conn);
1104
1105	ret = rds_cong_wait(conn->c_fcong, dport, 1, NULL);
1106	if (ret)
1107		goto out;
1108
1109	spin_lock_irqsave(&conn->c_lock, flags);
1110	list_add_tail(&rm->m_conn_item, &conn->c_send_queue);
1111	set_bit(RDS_MSG_ON_CONN, &rm->m_flags);
1112	rds_message_addref(rm);
1113	rm->m_inc.i_conn = conn;
1114
1115	rds_message_populate_header(&rm->m_inc.i_hdr, 0, dport,
1116				    conn->c_next_tx_seq);
1117	conn->c_next_tx_seq++;
1118	spin_unlock_irqrestore(&conn->c_lock, flags);
1119
1120	rds_stats_inc(s_send_queued);
1121	rds_stats_inc(s_send_pong);
1122
1123	if (!test_bit(RDS_LL_SEND_FULL, &conn->c_flags))
1124		rds_send_xmit(conn);
1125
1126	rds_message_put(rm);
1127	return 0;
1128
1129out:
1130	if (rm)
1131		rds_message_put(rm);
1132	return ret;
1133}