   1/*
   2 * Copyright (c) 2006 Oracle.  All rights reserved.
   3 *
   4 * This software is available to you under a choice of one of two
   5 * licenses.  You may choose to be licensed under the terms of the GNU
   6 * General Public License (GPL) Version 2, available from the file
   7 * COPYING in the main directory of this source tree, or the
   8 * OpenIB.org BSD license below:
   9 *
  10 *     Redistribution and use in source and binary forms, with or
  11 *     without modification, are permitted provided that the following
  12 *     conditions are met:
  13 *
  14 *      - Redistributions of source code must retain the above
  15 *        copyright notice, this list of conditions and the following
  16 *        disclaimer.
  17 *
  18 *      - Redistributions in binary form must reproduce the above
  19 *        copyright notice, this list of conditions and the following
  20 *        disclaimer in the documentation and/or other materials
  21 *        provided with the distribution.
  22 *
  23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  30 * SOFTWARE.
  31 *
  32 */
  33#include <linux/kernel.h>
  34#include <linux/moduleparam.h>
  35#include <linux/gfp.h>
  36#include <net/sock.h>
  37#include <linux/in.h>
  38#include <linux/list.h>
  39#include <linux/ratelimit.h>
  40#include <linux/export.h>
  41#include <linux/sizes.h>
  42
  43#include "rds.h"
  44
  45/* When transmitting messages in rds_send_xmit, we need to emerge from
   46 * time to time and briefly release the CPU. Otherwise the softlockup watchdog
  47 * will kick our shin.
  48 * Also, it seems fairer to not let one busy connection stall all the
  49 * others.
  50 *
  51 * send_batch_count is the number of times we'll loop in send_xmit. Setting
  52 * it to 0 will restore the old behavior (where we looped until we had
  53 * drained the queue).
  54 */
  55static int send_batch_count = SZ_1K;
  56module_param(send_batch_count, int, 0444);
  57MODULE_PARM_DESC(send_batch_count, " batch factor when working the send queue");
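     /* The 0444 permission makes this read-only at runtime; as a usage
      * sketch, it is typically set at module load time (e.g.
      * "modprobe rds send_batch_count=1024") and can be inspected under
      * /sys/module/rds/parameters/send_batch_count.
      */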
  58
  59static void rds_send_remove_from_sock(struct list_head *messages, int status);
  60
  61/*
  62 * Reset the send state.  Callers must ensure that this doesn't race with
  63 * rds_send_xmit().
  64 */
  65void rds_send_reset(struct rds_connection *conn)
  66{
  67	struct rds_message *rm, *tmp;
  68	unsigned long flags;
  69
  70	if (conn->c_xmit_rm) {
  71		rm = conn->c_xmit_rm;
  72		conn->c_xmit_rm = NULL;
  73		/* Tell the user the RDMA op is no longer mapped by the
  74		 * transport. This isn't entirely true (it's flushed out
  75		 * independently) but as the connection is down, there's
  76		 * no ongoing RDMA to/from that memory */
  77		rds_message_unmapped(rm);
  78		rds_message_put(rm);
  79	}
  80
  81	conn->c_xmit_sg = 0;
  82	conn->c_xmit_hdr_off = 0;
  83	conn->c_xmit_data_off = 0;
  84	conn->c_xmit_atomic_sent = 0;
  85	conn->c_xmit_rdma_sent = 0;
  86	conn->c_xmit_data_sent = 0;
  87
  88	conn->c_map_queued = 0;
  89
  90	conn->c_unacked_packets = rds_sysctl_max_unacked_packets;
  91	conn->c_unacked_bytes = rds_sysctl_max_unacked_bytes;
  92
  93	/* Mark messages as retransmissions, and move them to the send q */
  94	spin_lock_irqsave(&conn->c_lock, flags);
  95	list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
  96		set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);
  97		set_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags);
  98	}
  99	list_splice_init(&conn->c_retrans, &conn->c_send_queue);
 100	spin_unlock_irqrestore(&conn->c_lock, flags);
 101}
 102
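     /* RDS_IN_XMIT serializes transmitters on a connection: only the caller
      * that wins the test_and_set_bit() below may walk the send queue in
      * rds_send_xmit(); everyone else backs off until release_in_xmit().
      */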
 103static int acquire_in_xmit(struct rds_connection *conn)
 104{
 105	return test_and_set_bit(RDS_IN_XMIT, &conn->c_flags) == 0;
 106}
 107
 108static void release_in_xmit(struct rds_connection *conn)
 109{
 110	clear_bit(RDS_IN_XMIT, &conn->c_flags);
 111	smp_mb__after_atomic();
 112	/*
 113	 * We don't use wait_on_bit()/wake_up_bit() because our waking is in a
 114	 * hot path and finding waiters is very rare.  We don't want to walk
 115	 * the system-wide hashed waitqueue buckets in the fast path only to
 116	 * almost never find waiters.
 117	 */
 118	if (waitqueue_active(&conn->c_waitq))
 119		wake_up_all(&conn->c_waitq);
 120}
 121
 122/*
 123 * We're making the conscious trade-off here to only send one message
 124 * down the connection at a time.
 125 *   Pro:
 126 *      - tx queueing is a simple fifo list
 127 *   	- reassembly is optional and easily done by transports per conn
 128 *      - no per flow rx lookup at all, straight to the socket
 129 *   	- less per-frag memory and wire overhead
 130 *   Con:
 131 *      - queued acks can be delayed behind large messages
 132 *   Depends:
 133 *      - small message latency is higher behind queued large messages
 134 *      - large message latency isn't starved by intervening small sends
 135 */
 136int rds_send_xmit(struct rds_connection *conn)
 137{
 138	struct rds_message *rm;
 139	unsigned long flags;
 140	unsigned int tmp;
 141	struct scatterlist *sg;
 142	int ret = 0;
 143	LIST_HEAD(to_be_dropped);
 144	int batch_count;
 145	unsigned long send_gen = 0;
 146
 147restart:
 148	batch_count = 0;
 149
 150	/*
 151	 * sendmsg calls here after having queued its message on the send
 152	 * queue.  We only have one task feeding the connection at a time.  If
 153	 * another thread is already feeding the queue then we back off.  This
 154	 * avoids blocking the caller and trading per-connection data between
 155	 * caches per message.
 156	 */
 157	if (!acquire_in_xmit(conn)) {
 158		rds_stats_inc(s_send_lock_contention);
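     		/* Losing the race is not fatal: returning -ENOMEM makes
     		 * rds_sendmsg() below kick the send worker, which retries
     		 * once the current transmitter has finished.
     		 */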
 159		ret = -ENOMEM;
 160		goto out;
 161	}
 162
 163	/*
 164	 * we record the send generation after doing the xmit acquire.
 165	 * if someone else manages to jump in and do some work, we'll use
 166	 * this to avoid a goto restart farther down.
 167	 *
 168	 * The acquire_in_xmit() check above ensures that only one
 169	 * caller can increment c_send_gen at any time.
 170	 */
 171	conn->c_send_gen++;
 172	send_gen = conn->c_send_gen;
 173
 174	/*
 175	 * rds_conn_shutdown() sets the conn state and then tests RDS_IN_XMIT,
 176	 * we do the opposite to avoid races.
 177	 */
 178	if (!rds_conn_up(conn)) {
 179		release_in_xmit(conn);
 180		ret = 0;
 181		goto out;
 182	}
 183
 184	if (conn->c_trans->xmit_prepare)
 185		conn->c_trans->xmit_prepare(conn);
 186
 187	/*
 188	 * spin trying to push headers and data down the connection until
 189	 * the connection doesn't make forward progress.
 190	 */
 191	while (1) {
 192
 193		rm = conn->c_xmit_rm;
 194
 195		/*
 196		 * If between sending messages, we can send a pending congestion
 197		 * map update.
 198		 */
 199		if (!rm && test_and_clear_bit(0, &conn->c_map_queued)) {
 200			rm = rds_cong_update_alloc(conn);
 201			if (IS_ERR(rm)) {
 202				ret = PTR_ERR(rm);
 203				break;
 204			}
 205			rm->data.op_active = 1;
 206
 207			conn->c_xmit_rm = rm;
 208		}
 209
 210		/*
 211		 * If not already working on one, grab the next message.
 212		 *
 213		 * c_xmit_rm holds a ref while we're sending this message down
  214		 * the connection.  We can use this ref while holding the
  215		 * send_sem; rds_send_reset() is serialized with it.
 216		 */
 217		if (!rm) {
 218			unsigned int len;
 219
 220			batch_count++;
 221
 222			/* we want to process as big a batch as we can, but
 223			 * we also want to avoid softlockups.  If we've been
  224		 * through a lot of messages, let's back off and see
 225			 * if anyone else jumps in
 226			 */
 227			if (batch_count >= send_batch_count)
 228				goto over_batch;
 229
 230			spin_lock_irqsave(&conn->c_lock, flags);
 231
 232			if (!list_empty(&conn->c_send_queue)) {
 233				rm = list_entry(conn->c_send_queue.next,
 234						struct rds_message,
 235						m_conn_item);
 236				rds_message_addref(rm);
 237
 238				/*
 239				 * Move the message from the send queue to the retransmit
 240				 * list right away.
 241				 */
 242				list_move_tail(&rm->m_conn_item, &conn->c_retrans);
 243			}
 244
 245			spin_unlock_irqrestore(&conn->c_lock, flags);
 246
 247			if (!rm)
 248				break;
 249
  250			/* Unfortunately, the way InfiniBand deals with
  251			 * RDMA to a bad MR key is by moving the entire
  252			 * queue pair to error state. We could possibly
 253			 * recover from that, but right now we drop the
 254			 * connection.
 255			 * Therefore, we never retransmit messages with RDMA ops.
 256			 */
 257			if (rm->rdma.op_active &&
 258			    test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags)) {
 259				spin_lock_irqsave(&conn->c_lock, flags);
 260				if (test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags))
 261					list_move(&rm->m_conn_item, &to_be_dropped);
 262				spin_unlock_irqrestore(&conn->c_lock, flags);
 263				continue;
 264			}
 265
 266			/* Require an ACK every once in a while */
 267			len = ntohl(rm->m_inc.i_hdr.h_len);
 268			if (conn->c_unacked_packets == 0 ||
 269			    conn->c_unacked_bytes < len) {
 270				__set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);
 271
 272				conn->c_unacked_packets = rds_sysctl_max_unacked_packets;
 273				conn->c_unacked_bytes = rds_sysctl_max_unacked_bytes;
 274				rds_stats_inc(s_send_ack_required);
 275			} else {
 276				conn->c_unacked_bytes -= len;
 277				conn->c_unacked_packets--;
 278			}
 279
 280			conn->c_xmit_rm = rm;
 281		}
 282
 283		/* The transport either sends the whole rdma or none of it */
 284		if (rm->rdma.op_active && !conn->c_xmit_rdma_sent) {
 285			rm->m_final_op = &rm->rdma;
 286			/* The transport owns the mapped memory for now.
 287			 * You can't unmap it while it's on the send queue
 288			 */
 289			set_bit(RDS_MSG_MAPPED, &rm->m_flags);
 290			ret = conn->c_trans->xmit_rdma(conn, &rm->rdma);
 291			if (ret) {
 292				clear_bit(RDS_MSG_MAPPED, &rm->m_flags);
 293				wake_up_interruptible(&rm->m_flush_wait);
 294				break;
 295			}
 296			conn->c_xmit_rdma_sent = 1;
 297
 298		}
 299
 300		if (rm->atomic.op_active && !conn->c_xmit_atomic_sent) {
 301			rm->m_final_op = &rm->atomic;
 302			/* The transport owns the mapped memory for now.
 303			 * You can't unmap it while it's on the send queue
 304			 */
 305			set_bit(RDS_MSG_MAPPED, &rm->m_flags);
 306			ret = conn->c_trans->xmit_atomic(conn, &rm->atomic);
 307			if (ret) {
 308				clear_bit(RDS_MSG_MAPPED, &rm->m_flags);
 309				wake_up_interruptible(&rm->m_flush_wait);
 310				break;
 311			}
 312			conn->c_xmit_atomic_sent = 1;
 313
 314		}
 315
 316		/*
 317		 * A number of cases require an RDS header to be sent
 318		 * even if there is no data.
 319		 * We permit 0-byte sends; rds-ping depends on this.
 320		 * However, if there are exclusively attached silent ops,
 321		 * we skip the hdr/data send, to enable silent operation.
 322		 */
 323		if (rm->data.op_nents == 0) {
 324			int ops_present;
 325			int all_ops_are_silent = 1;
 326
 327			ops_present = (rm->atomic.op_active || rm->rdma.op_active);
 328			if (rm->atomic.op_active && !rm->atomic.op_silent)
 329				all_ops_are_silent = 0;
 330			if (rm->rdma.op_active && !rm->rdma.op_silent)
 331				all_ops_are_silent = 0;
 332
 333			if (ops_present && all_ops_are_silent
 334			    && !rm->m_rdma_cookie)
 335				rm->data.op_active = 0;
 336		}
 337
 338		if (rm->data.op_active && !conn->c_xmit_data_sent) {
 339			rm->m_final_op = &rm->data;
 340			ret = conn->c_trans->xmit(conn, rm,
 341						  conn->c_xmit_hdr_off,
 342						  conn->c_xmit_sg,
 343						  conn->c_xmit_data_off);
 344			if (ret <= 0)
 345				break;
 346
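     			/* ret is the number of bytes the transport consumed;
     			 * credit it against any unsent header bytes first,
     			 * then walk the remainder across the data scatterlist.
     			 */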
 347			if (conn->c_xmit_hdr_off < sizeof(struct rds_header)) {
 348				tmp = min_t(int, ret,
 349					    sizeof(struct rds_header) -
 350					    conn->c_xmit_hdr_off);
 351				conn->c_xmit_hdr_off += tmp;
 352				ret -= tmp;
 353			}
 354
 355			sg = &rm->data.op_sg[conn->c_xmit_sg];
 356			while (ret) {
 357				tmp = min_t(int, ret, sg->length -
 358						      conn->c_xmit_data_off);
 359				conn->c_xmit_data_off += tmp;
 360				ret -= tmp;
 361				if (conn->c_xmit_data_off == sg->length) {
 362					conn->c_xmit_data_off = 0;
 363					sg++;
 364					conn->c_xmit_sg++;
 365					BUG_ON(ret != 0 &&
 366					       conn->c_xmit_sg == rm->data.op_nents);
 367				}
 368			}
 369
 370			if (conn->c_xmit_hdr_off == sizeof(struct rds_header) &&
 371			    (conn->c_xmit_sg == rm->data.op_nents))
 372				conn->c_xmit_data_sent = 1;
 373		}
 374
 375		/*
  376		 * An rm will only take multiple passes through this loop
  377		 * if it has a data op. Thus, if the data is sent (or there was
 378		 * none), then we're done with the rm.
 379		 */
 380		if (!rm->data.op_active || conn->c_xmit_data_sent) {
 381			conn->c_xmit_rm = NULL;
 382			conn->c_xmit_sg = 0;
 383			conn->c_xmit_hdr_off = 0;
 384			conn->c_xmit_data_off = 0;
 385			conn->c_xmit_rdma_sent = 0;
 386			conn->c_xmit_atomic_sent = 0;
 387			conn->c_xmit_data_sent = 0;
 388
 389			rds_message_put(rm);
 390		}
 391	}
 392
 393over_batch:
 394	if (conn->c_trans->xmit_complete)
 395		conn->c_trans->xmit_complete(conn);
 396	release_in_xmit(conn);
 397
 398	/* Nuke any messages we decided not to retransmit. */
 399	if (!list_empty(&to_be_dropped)) {
 400		/* irqs on here, so we can put(), unlike above */
 401		list_for_each_entry(rm, &to_be_dropped, m_conn_item)
 402			rds_message_put(rm);
 403		rds_send_remove_from_sock(&to_be_dropped, RDS_RDMA_DROPPED);
 404	}
 405
 406	/*
 407	 * Other senders can queue a message after we last test the send queue
 408	 * but before we clear RDS_IN_XMIT.  In that case they'd back off and
 409	 * not try and send their newly queued message.  We need to check the
 410	 * send queue after having cleared RDS_IN_XMIT so that their message
 411	 * doesn't get stuck on the send queue.
 412	 *
  413	 * If the transport cannot continue (i.e. ret != 0), then it must
 414	 * call us when more room is available, such as from the tx
 415	 * completion handler.
 416	 *
 417	 * We have an extra generation check here so that if someone manages
 418	 * to jump in after our release_in_xmit, we'll see that they have done
 419	 * some work and we will skip our goto
 420	 */
 421	if (ret == 0) {
 422		smp_mb();
 423		if ((test_bit(0, &conn->c_map_queued) ||
 424		     !list_empty(&conn->c_send_queue)) &&
 425		    send_gen == conn->c_send_gen) {
 426			rds_stats_inc(s_send_lock_queue_raced);
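     			/* Loop back ourselves only while we still have batch
     			 * budget; otherwise hand the remaining work to the
     			 * send worker so one connection can't monopolize
     			 * this CPU.
     			 */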
 427			if (batch_count < send_batch_count)
 428				goto restart;
 429			queue_delayed_work(rds_wq, &conn->c_send_w, 1);
 430		}
 431	}
 432out:
 433	return ret;
 434}
 435EXPORT_SYMBOL_GPL(rds_send_xmit);
 436
 437static void rds_send_sndbuf_remove(struct rds_sock *rs, struct rds_message *rm)
 438{
 439	u32 len = be32_to_cpu(rm->m_inc.i_hdr.h_len);
 440
 441	assert_spin_locked(&rs->rs_lock);
 442
 443	BUG_ON(rs->rs_snd_bytes < len);
 444	rs->rs_snd_bytes -= len;
 445
 446	if (rs->rs_snd_bytes == 0)
 447		rds_stats_inc(s_send_queue_empty);
 448}
 449
 450static inline int rds_send_is_acked(struct rds_message *rm, u64 ack,
 451				    is_acked_func is_acked)
 452{
 453	if (is_acked)
 454		return is_acked(rm, ack);
 455	return be64_to_cpu(rm->m_inc.i_hdr.h_sequence) <= ack;
 456}
 457
 458/*
 459 * This is pretty similar to what happens below in the ACK
 460 * handling code - except that we call here as soon as we get
 461 * the IB send completion on the RDMA op and the accompanying
 462 * message.
 463 */
 464void rds_rdma_send_complete(struct rds_message *rm, int status)
 465{
 466	struct rds_sock *rs = NULL;
 467	struct rm_rdma_op *ro;
 468	struct rds_notifier *notifier;
 469	unsigned long flags;
 470
 471	spin_lock_irqsave(&rm->m_rs_lock, flags);
 472
 473	ro = &rm->rdma;
 474	if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags) &&
 475	    ro->op_active && ro->op_notify && ro->op_notifier) {
 476		notifier = ro->op_notifier;
 477		rs = rm->m_rs;
 478		sock_hold(rds_rs_to_sk(rs));
 479
 480		notifier->n_status = status;
 481		spin_lock(&rs->rs_lock);
 482		list_add_tail(&notifier->n_list, &rs->rs_notify_queue);
 483		spin_unlock(&rs->rs_lock);
 484
 485		ro->op_notifier = NULL;
 486	}
 487
 488	spin_unlock_irqrestore(&rm->m_rs_lock, flags);
 489
 490	if (rs) {
 491		rds_wake_sk_sleep(rs);
 492		sock_put(rds_rs_to_sk(rs));
 493	}
 494}
 495EXPORT_SYMBOL_GPL(rds_rdma_send_complete);
 496
 497/*
 498 * Just like above, except looks at atomic op
 499 */
 500void rds_atomic_send_complete(struct rds_message *rm, int status)
 501{
 502	struct rds_sock *rs = NULL;
 503	struct rm_atomic_op *ao;
 504	struct rds_notifier *notifier;
 505	unsigned long flags;
 506
 507	spin_lock_irqsave(&rm->m_rs_lock, flags);
 508
 509	ao = &rm->atomic;
 510	if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags)
 511	    && ao->op_active && ao->op_notify && ao->op_notifier) {
 512		notifier = ao->op_notifier;
 513		rs = rm->m_rs;
 514		sock_hold(rds_rs_to_sk(rs));
 515
 516		notifier->n_status = status;
 517		spin_lock(&rs->rs_lock);
 518		list_add_tail(&notifier->n_list, &rs->rs_notify_queue);
 519		spin_unlock(&rs->rs_lock);
 520
 521		ao->op_notifier = NULL;
 522	}
 523
 524	spin_unlock_irqrestore(&rm->m_rs_lock, flags);
 525
 526	if (rs) {
 527		rds_wake_sk_sleep(rs);
 528		sock_put(rds_rs_to_sk(rs));
 529	}
 530}
 531EXPORT_SYMBOL_GPL(rds_atomic_send_complete);
 532
 533/*
 534 * This is the same as rds_rdma_send_complete except we
 535 * don't do any locking - we have all the ingredients (message,
 536 * socket, socket lock) and can just move the notifier.
 537 */
 538static inline void
 539__rds_send_complete(struct rds_sock *rs, struct rds_message *rm, int status)
 540{
 541	struct rm_rdma_op *ro;
 542	struct rm_atomic_op *ao;
 543
 544	ro = &rm->rdma;
 545	if (ro->op_active && ro->op_notify && ro->op_notifier) {
 546		ro->op_notifier->n_status = status;
 547		list_add_tail(&ro->op_notifier->n_list, &rs->rs_notify_queue);
 548		ro->op_notifier = NULL;
 549	}
 550
 551	ao = &rm->atomic;
 552	if (ao->op_active && ao->op_notify && ao->op_notifier) {
 553		ao->op_notifier->n_status = status;
 554		list_add_tail(&ao->op_notifier->n_list, &rs->rs_notify_queue);
 555		ao->op_notifier = NULL;
 556	}
 557
 558	/* No need to wake the app - caller does this */
 559}
 560
 561/*
 562 * This is called from the IB send completion when we detect
  563 * an RDMA operation that failed with a remote access error.
 564 * So speed is not an issue here.
 565 */
 566struct rds_message *rds_send_get_message(struct rds_connection *conn,
 567					 struct rm_rdma_op *op)
 568{
 569	struct rds_message *rm, *tmp, *found = NULL;
 570	unsigned long flags;
 571
 572	spin_lock_irqsave(&conn->c_lock, flags);
 573
 574	list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
 575		if (&rm->rdma == op) {
 576			atomic_inc(&rm->m_refcount);
 577			found = rm;
 578			goto out;
 579		}
 580	}
 581
 582	list_for_each_entry_safe(rm, tmp, &conn->c_send_queue, m_conn_item) {
 583		if (&rm->rdma == op) {
 584			atomic_inc(&rm->m_refcount);
 585			found = rm;
 586			break;
 587		}
 588	}
 589
 590out:
 591	spin_unlock_irqrestore(&conn->c_lock, flags);
 592
 593	return found;
 594}
 595EXPORT_SYMBOL_GPL(rds_send_get_message);
 596
 597/*
 598 * This removes messages from the socket's list if they're on it.  The list
  599 * argument must be private to the caller; we must be able to modify it
 600 * without locks.  The messages must have a reference held for their
 601 * position on the list.  This function will drop that reference after
  602 * removing the messages from the 'messages' list regardless of whether it found
 603 * the messages on the socket list or not.
 604 */
 605static void rds_send_remove_from_sock(struct list_head *messages, int status)
 606{
 607	unsigned long flags;
 608	struct rds_sock *rs = NULL;
 609	struct rds_message *rm;
 610
 611	while (!list_empty(messages)) {
 612		int was_on_sock = 0;
 613
 614		rm = list_entry(messages->next, struct rds_message,
 615				m_conn_item);
 616		list_del_init(&rm->m_conn_item);
 617
 618		/*
 619		 * If we see this flag cleared then we're *sure* that someone
 620		 * else beat us to removing it from the sock.  If we race
 621		 * with their flag update we'll get the lock and then really
 622		 * see that the flag has been cleared.
 623		 *
 624		 * The message spinlock makes sure nobody clears rm->m_rs
 625		 * while we're messing with it. It does not prevent the
 626		 * message from being removed from the socket, though.
 627		 */
 628		spin_lock_irqsave(&rm->m_rs_lock, flags);
 629		if (!test_bit(RDS_MSG_ON_SOCK, &rm->m_flags))
 630			goto unlock_and_drop;
 631
 632		if (rs != rm->m_rs) {
 633			if (rs) {
 634				rds_wake_sk_sleep(rs);
 635				sock_put(rds_rs_to_sk(rs));
 636			}
 637			rs = rm->m_rs;
 638			if (rs)
 639				sock_hold(rds_rs_to_sk(rs));
 640		}
 641		if (!rs)
 642			goto unlock_and_drop;
 643		spin_lock(&rs->rs_lock);
 644
 645		if (test_and_clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags)) {
 646			struct rm_rdma_op *ro = &rm->rdma;
 647			struct rds_notifier *notifier;
 648
 649			list_del_init(&rm->m_sock_item);
 650			rds_send_sndbuf_remove(rs, rm);
 651
 652			if (ro->op_active && ro->op_notifier &&
 653			       (ro->op_notify || (ro->op_recverr && status))) {
 654				notifier = ro->op_notifier;
 655				list_add_tail(&notifier->n_list,
 656						&rs->rs_notify_queue);
 657				if (!notifier->n_status)
 658					notifier->n_status = status;
 659				rm->rdma.op_notifier = NULL;
 660			}
 661			was_on_sock = 1;
 662			rm->m_rs = NULL;
 663		}
 664		spin_unlock(&rs->rs_lock);
 665
 666unlock_and_drop:
 667		spin_unlock_irqrestore(&rm->m_rs_lock, flags);
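     		/* Drop the reference held for the message's place on the
     		 * caller's private list and, if we just took it off the
     		 * socket, the reference the socket list held as well.
     		 */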
 668		rds_message_put(rm);
 669		if (was_on_sock)
 670			rds_message_put(rm);
 671	}
 672
 673	if (rs) {
 674		rds_wake_sk_sleep(rs);
 675		sock_put(rds_rs_to_sk(rs));
 676	}
 677}
 678
 679/*
 680 * Transports call here when they've determined that the receiver queued
 681 * messages up to, and including, the given sequence number.  Messages are
 682 * moved to the retrans queue when rds_send_xmit picks them off the send
 683 * queue. This means that in the TCP case, the message may not have been
 684 * assigned the m_ack_seq yet - but that's fine as long as tcp_is_acked
 685 * checks the RDS_MSG_HAS_ACK_SEQ bit.
 686 */
 687void rds_send_drop_acked(struct rds_connection *conn, u64 ack,
 688			 is_acked_func is_acked)
 689{
 690	struct rds_message *rm, *tmp;
 691	unsigned long flags;
 692	LIST_HEAD(list);
 693
 694	spin_lock_irqsave(&conn->c_lock, flags);
 695
 696	list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
 697		if (!rds_send_is_acked(rm, ack, is_acked))
 698			break;
 699
 700		list_move(&rm->m_conn_item, &list);
 701		clear_bit(RDS_MSG_ON_CONN, &rm->m_flags);
 702	}
 703
 704	/* order flag updates with spin locks */
 705	if (!list_empty(&list))
 706		smp_mb__after_atomic();
 707
 708	spin_unlock_irqrestore(&conn->c_lock, flags);
 709
 710	/* now remove the messages from the sock list as needed */
 711	rds_send_remove_from_sock(&list, RDS_RDMA_SUCCESS);
 712}
 713EXPORT_SYMBOL_GPL(rds_send_drop_acked);
 714
 715void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest)
 716{
 717	struct rds_message *rm, *tmp;
 718	struct rds_connection *conn;
 719	unsigned long flags;
 720	LIST_HEAD(list);
 721
 722	/* get all the messages we're dropping under the rs lock */
 723	spin_lock_irqsave(&rs->rs_lock, flags);
 724
 725	list_for_each_entry_safe(rm, tmp, &rs->rs_send_queue, m_sock_item) {
 726		if (dest && (dest->sin_addr.s_addr != rm->m_daddr ||
 727			     dest->sin_port != rm->m_inc.i_hdr.h_dport))
 728			continue;
 729
 730		list_move(&rm->m_sock_item, &list);
 731		rds_send_sndbuf_remove(rs, rm);
 732		clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags);
 733	}
 734
 735	/* order flag updates with the rs lock */
 736	smp_mb__after_atomic();
 737
 738	spin_unlock_irqrestore(&rs->rs_lock, flags);
 739
 740	if (list_empty(&list))
 741		return;
 742
 743	/* Remove the messages from the conn */
 744	list_for_each_entry(rm, &list, m_sock_item) {
 745
 746		conn = rm->m_inc.i_conn;
 747
 748		spin_lock_irqsave(&conn->c_lock, flags);
 749		/*
 750		 * Maybe someone else beat us to removing rm from the conn.
 751		 * If we race with their flag update we'll get the lock and
 752		 * then really see that the flag has been cleared.
 753		 */
 754		if (!test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags)) {
 755			spin_unlock_irqrestore(&conn->c_lock, flags);
 756			spin_lock_irqsave(&rm->m_rs_lock, flags);
 757			rm->m_rs = NULL;
 758			spin_unlock_irqrestore(&rm->m_rs_lock, flags);
 759			continue;
 760		}
 761		list_del_init(&rm->m_conn_item);
 762		spin_unlock_irqrestore(&conn->c_lock, flags);
 763
 764		/*
 765		 * Couldn't grab m_rs_lock in top loop (lock ordering),
 766		 * but we can now.
 767		 */
 768		spin_lock_irqsave(&rm->m_rs_lock, flags);
 769
 770		spin_lock(&rs->rs_lock);
 771		__rds_send_complete(rs, rm, RDS_RDMA_CANCELED);
 772		spin_unlock(&rs->rs_lock);
 773
 774		rm->m_rs = NULL;
 775		spin_unlock_irqrestore(&rm->m_rs_lock, flags);
 776
 777		rds_message_put(rm);
 778	}
 779
 780	rds_wake_sk_sleep(rs);
 781
 782	while (!list_empty(&list)) {
 783		rm = list_entry(list.next, struct rds_message, m_sock_item);
 784		list_del_init(&rm->m_sock_item);
 785		rds_message_wait(rm);
 786
 787		/* just in case the code above skipped this message
  788		 * because RDS_MSG_ON_CONN wasn't set, run it again here;
  789		 * taking m_rs_lock is the only thing that keeps us
 790		 * from racing with ack processing.
 791		 */
 792		spin_lock_irqsave(&rm->m_rs_lock, flags);
 793
 794		spin_lock(&rs->rs_lock);
 795		__rds_send_complete(rs, rm, RDS_RDMA_CANCELED);
 796		spin_unlock(&rs->rs_lock);
 797
 798		rm->m_rs = NULL;
 799		spin_unlock_irqrestore(&rm->m_rs_lock, flags);
 800
 801		rds_message_put(rm);
 802	}
 803}
 804
 805/*
  806 * we only want this to fire once so we use the caller's 'queued'.  It's
 807 * possible that another thread can race with us and remove the
 808 * message from the flow with RDS_CANCEL_SENT_TO.
 809 */
 810static int rds_send_queue_rm(struct rds_sock *rs, struct rds_connection *conn,
 811			     struct rds_message *rm, __be16 sport,
 812			     __be16 dport, int *queued)
 813{
 814	unsigned long flags;
 815	u32 len;
 816
 817	if (*queued)
 818		goto out;
 819
 820	len = be32_to_cpu(rm->m_inc.i_hdr.h_len);
 821
 822	/* this is the only place which holds both the socket's rs_lock
 823	 * and the connection's c_lock */
 824	spin_lock_irqsave(&rs->rs_lock, flags);
 825
 826	/*
  827	 * If there is only a little space left in sndbuf, we don't queue anything,
 828	 * and userspace gets -EAGAIN. But poll() indicates there's send
 829	 * room. This can lead to bad behavior (spinning) if snd_bytes isn't
 830	 * freed up by incoming acks. So we check the *old* value of
 831	 * rs_snd_bytes here to allow the last msg to exceed the buffer,
 832	 * and poll() now knows no more data can be sent.
 833	 */
 834	if (rs->rs_snd_bytes < rds_sk_sndbuf(rs)) {
 835		rs->rs_snd_bytes += len;
 836
 837		/* let recv side know we are close to send space exhaustion.
 838		 * This is probably not the optimal way to do it, as this
 839		 * means we set the flag on *all* messages as soon as our
 840		 * throughput hits a certain threshold.
 841		 */
 842		if (rs->rs_snd_bytes >= rds_sk_sndbuf(rs) / 2)
 843			__set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);
 844
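     		/* One reference pins the message on the socket send queue;
     		 * a second, taken below, pins it on the connection send
     		 * queue.
     		 */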
 845		list_add_tail(&rm->m_sock_item, &rs->rs_send_queue);
 846		set_bit(RDS_MSG_ON_SOCK, &rm->m_flags);
 847		rds_message_addref(rm);
 848		rm->m_rs = rs;
 849
 850		/* The code ordering is a little weird, but we're
 851		   trying to minimize the time we hold c_lock */
 852		rds_message_populate_header(&rm->m_inc.i_hdr, sport, dport, 0);
 853		rm->m_inc.i_conn = conn;
 854		rds_message_addref(rm);
 855
 856		spin_lock(&conn->c_lock);
 857		rm->m_inc.i_hdr.h_sequence = cpu_to_be64(conn->c_next_tx_seq++);
 858		list_add_tail(&rm->m_conn_item, &conn->c_send_queue);
 859		set_bit(RDS_MSG_ON_CONN, &rm->m_flags);
 860		spin_unlock(&conn->c_lock);
 861
 862		rdsdebug("queued msg %p len %d, rs %p bytes %d seq %llu\n",
 863			 rm, len, rs, rs->rs_snd_bytes,
 864			 (unsigned long long)be64_to_cpu(rm->m_inc.i_hdr.h_sequence));
 865
 866		*queued = 1;
 867	}
 868
 869	spin_unlock_irqrestore(&rs->rs_lock, flags);
 870out:
 871	return *queued;
 872}
 873
 874/*
 875 * rds_message is getting to be quite complicated, and we'd like to allocate
 876 * it all in one go. This figures out how big it needs to be up front.
 877 */
 878static int rds_rm_size(struct msghdr *msg, int data_len)
 879{
 880	struct cmsghdr *cmsg;
 881	int size = 0;
 882	int cmsg_groups = 0;
 883	int retval;
 884
 885	for_each_cmsghdr(cmsg, msg) {
 886		if (!CMSG_OK(msg, cmsg))
 887			return -EINVAL;
 888
 889		if (cmsg->cmsg_level != SOL_RDS)
 890			continue;
 891
 892		switch (cmsg->cmsg_type) {
 893		case RDS_CMSG_RDMA_ARGS:
 894			cmsg_groups |= 1;
 895			retval = rds_rdma_extra_size(CMSG_DATA(cmsg));
 896			if (retval < 0)
 897				return retval;
 898			size += retval;
 899
 900			break;
 901
 902		case RDS_CMSG_RDMA_DEST:
 903		case RDS_CMSG_RDMA_MAP:
 904			cmsg_groups |= 2;
  905			/* these are valid but do not add any size */
 906			break;
 907
 908		case RDS_CMSG_ATOMIC_CSWP:
 909		case RDS_CMSG_ATOMIC_FADD:
 910		case RDS_CMSG_MASKED_ATOMIC_CSWP:
 911		case RDS_CMSG_MASKED_ATOMIC_FADD:
 912			cmsg_groups |= 1;
 913			size += sizeof(struct scatterlist);
 914			break;
 915
 916		default:
 917			return -EINVAL;
 918		}
 919
 920	}
 921
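     	/* One scatterlist entry is needed per page of user payload. */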
 922	size += ceil(data_len, PAGE_SIZE) * sizeof(struct scatterlist);
 923
 924	/* Ensure (DEST, MAP) are never used with (ARGS, ATOMIC) */
 925	if (cmsg_groups == 3)
 926		return -EINVAL;
 927
 928	return size;
 929}
 930
 931static int rds_cmsg_send(struct rds_sock *rs, struct rds_message *rm,
 932			 struct msghdr *msg, int *allocated_mr)
 933{
 934	struct cmsghdr *cmsg;
 935	int ret = 0;
 936
 937	for_each_cmsghdr(cmsg, msg) {
 938		if (!CMSG_OK(msg, cmsg))
 939			return -EINVAL;
 940
 941		if (cmsg->cmsg_level != SOL_RDS)
 942			continue;
 943
 944		/* As a side effect, RDMA_DEST and RDMA_MAP will set
 945		 * rm->rdma.m_rdma_cookie and rm->rdma.m_rdma_mr.
 946		 */
 947		switch (cmsg->cmsg_type) {
 948		case RDS_CMSG_RDMA_ARGS:
 949			ret = rds_cmsg_rdma_args(rs, rm, cmsg);
 950			break;
 951
 952		case RDS_CMSG_RDMA_DEST:
 953			ret = rds_cmsg_rdma_dest(rs, rm, cmsg);
 954			break;
 955
 956		case RDS_CMSG_RDMA_MAP:
 957			ret = rds_cmsg_rdma_map(rs, rm, cmsg);
 958			if (!ret)
 959				*allocated_mr = 1;
 960			break;
 961		case RDS_CMSG_ATOMIC_CSWP:
 962		case RDS_CMSG_ATOMIC_FADD:
 963		case RDS_CMSG_MASKED_ATOMIC_CSWP:
 964		case RDS_CMSG_MASKED_ATOMIC_FADD:
 965			ret = rds_cmsg_atomic(rs, rm, cmsg);
 966			break;
 967
 968		default:
 969			return -EINVAL;
 970		}
 971
 972		if (ret)
 973			break;
 974	}
 975
 976	return ret;
 977}
 978
 979int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
 980{
 981	struct sock *sk = sock->sk;
 982	struct rds_sock *rs = rds_sk_to_rs(sk);
 983	DECLARE_SOCKADDR(struct sockaddr_in *, usin, msg->msg_name);
 984	__be32 daddr;
 985	__be16 dport;
 986	struct rds_message *rm = NULL;
 987	struct rds_connection *conn;
 988	int ret = 0;
 989	int queued = 0, allocated_mr = 0;
 990	int nonblock = msg->msg_flags & MSG_DONTWAIT;
 991	long timeo = sock_sndtimeo(sk, nonblock);
 992
  993	/* Mirror Linux UDP's mirroring of BSD error message compatibility */
 994	/* XXX: Perhaps MSG_MORE someday */
 995	if (msg->msg_flags & ~(MSG_DONTWAIT | MSG_CMSG_COMPAT)) {
 996		ret = -EOPNOTSUPP;
 997		goto out;
 998	}
 999
1000	if (msg->msg_namelen) {
1001		/* XXX fail non-unicast destination IPs? */
1002		if (msg->msg_namelen < sizeof(*usin) || usin->sin_family != AF_INET) {
1003			ret = -EINVAL;
1004			goto out;
1005		}
1006		daddr = usin->sin_addr.s_addr;
1007		dport = usin->sin_port;
1008	} else {
1009		/* We only care about consistency with ->connect() */
1010		lock_sock(sk);
1011		daddr = rs->rs_conn_addr;
1012		dport = rs->rs_conn_port;
1013		release_sock(sk);
1014	}
1015
1016	lock_sock(sk);
1017	if (daddr == 0 || rs->rs_bound_addr == 0) {
1018		release_sock(sk);
1019		ret = -ENOTCONN; /* XXX not a great errno */
1020		goto out;
1021	}
1022	release_sock(sk);
1023
1024	if (payload_len > rds_sk_sndbuf(rs)) {
1025		ret = -EMSGSIZE;
1026		goto out;
1027	}
1028
1029	/* size of rm including all sgs */
1030	ret = rds_rm_size(msg, payload_len);
1031	if (ret < 0)
1032		goto out;
1033
1034	rm = rds_message_alloc(ret, GFP_KERNEL);
1035	if (!rm) {
1036		ret = -ENOMEM;
1037		goto out;
1038	}
1039
1040	/* Attach data to the rm */
1041	if (payload_len) {
1042		rm->data.op_sg = rds_message_alloc_sgs(rm, ceil(payload_len, PAGE_SIZE));
1043		if (!rm->data.op_sg) {
1044			ret = -ENOMEM;
1045			goto out;
1046		}
1047		ret = rds_message_copy_from_user(rm, &msg->msg_iter);
1048		if (ret)
1049			goto out;
1050	}
1051	rm->data.op_active = 1;
1052
1053	rm->m_daddr = daddr;
1054
1055	/* rds_conn_create has a spinlock that runs with IRQ off.
1056	 * Caching the conn in the socket helps a lot. */
1057	if (rs->rs_conn && rs->rs_conn->c_faddr == daddr)
1058		conn = rs->rs_conn;
1059	else {
1060		conn = rds_conn_create_outgoing(sock_net(sock->sk),
1061						rs->rs_bound_addr, daddr,
1062					rs->rs_transport,
1063					sock->sk->sk_allocation);
1064		if (IS_ERR(conn)) {
1065			ret = PTR_ERR(conn);
1066			goto out;
1067		}
1068		rs->rs_conn = conn;
1069	}
1070
1071	/* Parse any control messages the user may have included. */
1072	ret = rds_cmsg_send(rs, rm, msg, &allocated_mr);
1073	if (ret)
1074		goto out;
1075
1076	if (rm->rdma.op_active && !conn->c_trans->xmit_rdma) {
1077		printk_ratelimited(KERN_NOTICE "rdma_op %p conn xmit_rdma %p\n",
1078			       &rm->rdma, conn->c_trans->xmit_rdma);
1079		ret = -EOPNOTSUPP;
1080		goto out;
1081	}
1082
1083	if (rm->atomic.op_active && !conn->c_trans->xmit_atomic) {
1084		printk_ratelimited(KERN_NOTICE "atomic_op %p conn xmit_atomic %p\n",
1085			       &rm->atomic, conn->c_trans->xmit_atomic);
1086		ret = -EOPNOTSUPP;
1087		goto out;
1088	}
1089
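     	/* If the connection is down this kicks off reconnection work;
     	 * the handshake itself completes asynchronously.
     	 */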
1090	rds_conn_connect_if_down(conn);
1091
1092	ret = rds_cong_wait(conn->c_fcong, dport, nonblock, rs);
1093	if (ret) {
1094		rs->rs_seen_congestion = 1;
1095		goto out;
1096	}
1097
1098	while (!rds_send_queue_rm(rs, conn, rm, rs->rs_bound_port,
1099				  dport, &queued)) {
1100		rds_stats_inc(s_send_queue_full);
1101
1102		if (nonblock) {
1103			ret = -EAGAIN;
1104			goto out;
1105		}
1106
1107		timeo = wait_event_interruptible_timeout(*sk_sleep(sk),
1108					rds_send_queue_rm(rs, conn, rm,
1109							  rs->rs_bound_port,
1110							  dport,
1111							  &queued),
1112					timeo);
1113		rdsdebug("sendmsg woke queued %d timeo %ld\n", queued, timeo);
1114		if (timeo > 0 || timeo == MAX_SCHEDULE_TIMEOUT)
1115			continue;
1116
1117		ret = timeo;
1118		if (ret == 0)
1119			ret = -ETIMEDOUT;
1120		goto out;
1121	}
1122
1123	/*
1124	 * By now we've committed to the send.  We reuse rds_send_worker()
1125	 * to retry sends in the rds thread if the transport asks us to.
1126	 */
1127	rds_stats_inc(s_send_queued);
1128
1129	ret = rds_send_xmit(conn);
1130	if (ret == -ENOMEM || ret == -EAGAIN)
1131		queue_delayed_work(rds_wq, &conn->c_send_w, 1);
1132
1133	rds_message_put(rm);
1134	return payload_len;
1135
1136out:
 1137	/* If the user included an RDMA_MAP cmsg, we allocated an MR on the fly.
1138	 * If the sendmsg goes through, we keep the MR. If it fails with EAGAIN
1139	 * or in any other way, we need to destroy the MR again */
1140	if (allocated_mr)
1141		rds_rdma_unuse(rs, rds_rdma_cookie_key(rm->m_rdma_cookie), 1);
1142
1143	if (rm)
1144		rds_message_put(rm);
1145	return ret;
1146}
1147
1148/*
1149 * Reply to a ping packet.
1150 */
1151int
1152rds_send_pong(struct rds_connection *conn, __be16 dport)
1153{
1154	struct rds_message *rm;
1155	unsigned long flags;
1156	int ret = 0;
1157
1158	rm = rds_message_alloc(0, GFP_ATOMIC);
1159	if (!rm) {
1160		ret = -ENOMEM;
1161		goto out;
1162	}
1163
1164	rm->m_daddr = conn->c_faddr;
1165	rm->data.op_active = 1;
1166
1167	rds_conn_connect_if_down(conn);
1168
1169	ret = rds_cong_wait(conn->c_fcong, dport, 1, NULL);
1170	if (ret)
1171		goto out;
1172
1173	spin_lock_irqsave(&conn->c_lock, flags);
1174	list_add_tail(&rm->m_conn_item, &conn->c_send_queue);
1175	set_bit(RDS_MSG_ON_CONN, &rm->m_flags);
1176	rds_message_addref(rm);
1177	rm->m_inc.i_conn = conn;
1178
1179	rds_message_populate_header(&rm->m_inc.i_hdr, 0, dport,
1180				    conn->c_next_tx_seq);
1181	conn->c_next_tx_seq++;
1182	spin_unlock_irqrestore(&conn->c_lock, flags);
1183
1184	rds_stats_inc(s_send_queued);
1185	rds_stats_inc(s_send_pong);
1186
1187	/* schedule the send work on rds_wq */
1188	queue_delayed_work(rds_wq, &conn->c_send_w, 1);
1189
1190	rds_message_put(rm);
1191	return 0;
1192
1193out:
1194	if (rm)
1195		rds_message_put(rm);
1196	return ret;
1197}
v3.1
   1/*
   2 * Copyright (c) 2006 Oracle.  All rights reserved.
   3 *
   4 * This software is available to you under a choice of one of two
   5 * licenses.  You may choose to be licensed under the terms of the GNU
   6 * General Public License (GPL) Version 2, available from the file
   7 * COPYING in the main directory of this source tree, or the
   8 * OpenIB.org BSD license below:
   9 *
  10 *     Redistribution and use in source and binary forms, with or
  11 *     without modification, are permitted provided that the following
  12 *     conditions are met:
  13 *
  14 *      - Redistributions of source code must retain the above
  15 *        copyright notice, this list of conditions and the following
  16 *        disclaimer.
  17 *
  18 *      - Redistributions in binary form must reproduce the above
  19 *        copyright notice, this list of conditions and the following
  20 *        disclaimer in the documentation and/or other materials
  21 *        provided with the distribution.
  22 *
  23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  30 * SOFTWARE.
  31 *
  32 */
  33#include <linux/kernel.h>
 
  34#include <linux/gfp.h>
  35#include <net/sock.h>
  36#include <linux/in.h>
  37#include <linux/list.h>
  38#include <linux/ratelimit.h>
 
 
  39
  40#include "rds.h"
  41
  42/* When transmitting messages in rds_send_xmit, we need to emerge from
  43 * time to time and briefly release the CPU. Otherwise the softlock watchdog
  44 * will kick our shin.
  45 * Also, it seems fairer to not let one busy connection stall all the
  46 * others.
  47 *
  48 * send_batch_count is the number of times we'll loop in send_xmit. Setting
  49 * it to 0 will restore the old behavior (where we looped until we had
  50 * drained the queue).
  51 */
  52static int send_batch_count = 64;
  53module_param(send_batch_count, int, 0444);
  54MODULE_PARM_DESC(send_batch_count, " batch factor when working the send queue");
  55
  56static void rds_send_remove_from_sock(struct list_head *messages, int status);
  57
  58/*
  59 * Reset the send state.  Callers must ensure that this doesn't race with
  60 * rds_send_xmit().
  61 */
  62void rds_send_reset(struct rds_connection *conn)
  63{
  64	struct rds_message *rm, *tmp;
  65	unsigned long flags;
  66
  67	if (conn->c_xmit_rm) {
  68		rm = conn->c_xmit_rm;
  69		conn->c_xmit_rm = NULL;
  70		/* Tell the user the RDMA op is no longer mapped by the
  71		 * transport. This isn't entirely true (it's flushed out
  72		 * independently) but as the connection is down, there's
  73		 * no ongoing RDMA to/from that memory */
  74		rds_message_unmapped(rm);
  75		rds_message_put(rm);
  76	}
  77
  78	conn->c_xmit_sg = 0;
  79	conn->c_xmit_hdr_off = 0;
  80	conn->c_xmit_data_off = 0;
  81	conn->c_xmit_atomic_sent = 0;
  82	conn->c_xmit_rdma_sent = 0;
  83	conn->c_xmit_data_sent = 0;
  84
  85	conn->c_map_queued = 0;
  86
  87	conn->c_unacked_packets = rds_sysctl_max_unacked_packets;
  88	conn->c_unacked_bytes = rds_sysctl_max_unacked_bytes;
  89
  90	/* Mark messages as retransmissions, and move them to the send q */
  91	spin_lock_irqsave(&conn->c_lock, flags);
  92	list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
  93		set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);
  94		set_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags);
  95	}
  96	list_splice_init(&conn->c_retrans, &conn->c_send_queue);
  97	spin_unlock_irqrestore(&conn->c_lock, flags);
  98}
  99
 100static int acquire_in_xmit(struct rds_connection *conn)
 101{
 102	return test_and_set_bit(RDS_IN_XMIT, &conn->c_flags) == 0;
 103}
 104
 105static void release_in_xmit(struct rds_connection *conn)
 106{
 107	clear_bit(RDS_IN_XMIT, &conn->c_flags);
 108	smp_mb__after_clear_bit();
 109	/*
 110	 * We don't use wait_on_bit()/wake_up_bit() because our waking is in a
 111	 * hot path and finding waiters is very rare.  We don't want to walk
 112	 * the system-wide hashed waitqueue buckets in the fast path only to
 113	 * almost never find waiters.
 114	 */
 115	if (waitqueue_active(&conn->c_waitq))
 116		wake_up_all(&conn->c_waitq);
 117}
 118
 119/*
 120 * We're making the conscious trade-off here to only send one message
 121 * down the connection at a time.
 122 *   Pro:
 123 *      - tx queueing is a simple fifo list
 124 *   	- reassembly is optional and easily done by transports per conn
 125 *      - no per flow rx lookup at all, straight to the socket
 126 *   	- less per-frag memory and wire overhead
 127 *   Con:
 128 *      - queued acks can be delayed behind large messages
 129 *   Depends:
 130 *      - small message latency is higher behind queued large messages
 131 *      - large message latency isn't starved by intervening small sends
 132 */
 133int rds_send_xmit(struct rds_connection *conn)
 134{
 135	struct rds_message *rm;
 136	unsigned long flags;
 137	unsigned int tmp;
 138	struct scatterlist *sg;
 139	int ret = 0;
 140	LIST_HEAD(to_be_dropped);
 
 
 141
 142restart:
 
 143
 144	/*
 145	 * sendmsg calls here after having queued its message on the send
 146	 * queue.  We only have one task feeding the connection at a time.  If
 147	 * another thread is already feeding the queue then we back off.  This
 148	 * avoids blocking the caller and trading per-connection data between
 149	 * caches per message.
 150	 */
 151	if (!acquire_in_xmit(conn)) {
 152		rds_stats_inc(s_send_lock_contention);
 153		ret = -ENOMEM;
 154		goto out;
 155	}
 156
 157	/*
 
 
 
 
 
 
 
 
 
 
 
 158	 * rds_conn_shutdown() sets the conn state and then tests RDS_IN_XMIT,
 159	 * we do the opposite to avoid races.
 160	 */
 161	if (!rds_conn_up(conn)) {
 162		release_in_xmit(conn);
 163		ret = 0;
 164		goto out;
 165	}
 166
 167	if (conn->c_trans->xmit_prepare)
 168		conn->c_trans->xmit_prepare(conn);
 169
 170	/*
 171	 * spin trying to push headers and data down the connection until
 172	 * the connection doesn't make forward progress.
 173	 */
 174	while (1) {
 175
 176		rm = conn->c_xmit_rm;
 177
 178		/*
 179		 * If between sending messages, we can send a pending congestion
 180		 * map update.
 181		 */
 182		if (!rm && test_and_clear_bit(0, &conn->c_map_queued)) {
 183			rm = rds_cong_update_alloc(conn);
 184			if (IS_ERR(rm)) {
 185				ret = PTR_ERR(rm);
 186				break;
 187			}
 188			rm->data.op_active = 1;
 189
 190			conn->c_xmit_rm = rm;
 191		}
 192
 193		/*
 194		 * If not already working on one, grab the next message.
 195		 *
 196		 * c_xmit_rm holds a ref while we're sending this message down
 197		 * the connction.  We can use this ref while holding the
 198		 * send_sem.. rds_send_reset() is serialized with it.
 199		 */
 200		if (!rm) {
 201			unsigned int len;
 202
 
 
 
 
 
 
 
 
 
 
 203			spin_lock_irqsave(&conn->c_lock, flags);
 204
 205			if (!list_empty(&conn->c_send_queue)) {
 206				rm = list_entry(conn->c_send_queue.next,
 207						struct rds_message,
 208						m_conn_item);
 209				rds_message_addref(rm);
 210
 211				/*
 212				 * Move the message from the send queue to the retransmit
 213				 * list right away.
 214				 */
 215				list_move_tail(&rm->m_conn_item, &conn->c_retrans);
 216			}
 217
 218			spin_unlock_irqrestore(&conn->c_lock, flags);
 219
 220			if (!rm)
 221				break;
 222
 223			/* Unfortunately, the way Infiniband deals with
 224			 * RDMA to a bad MR key is by moving the entire
 225			 * queue pair to error state. We cold possibly
 226			 * recover from that, but right now we drop the
 227			 * connection.
 228			 * Therefore, we never retransmit messages with RDMA ops.
 229			 */
 230			if (rm->rdma.op_active &&
 231			    test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags)) {
 232				spin_lock_irqsave(&conn->c_lock, flags);
 233				if (test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags))
 234					list_move(&rm->m_conn_item, &to_be_dropped);
 235				spin_unlock_irqrestore(&conn->c_lock, flags);
 236				continue;
 237			}
 238
 239			/* Require an ACK every once in a while */
 240			len = ntohl(rm->m_inc.i_hdr.h_len);
 241			if (conn->c_unacked_packets == 0 ||
 242			    conn->c_unacked_bytes < len) {
 243				__set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);
 244
 245				conn->c_unacked_packets = rds_sysctl_max_unacked_packets;
 246				conn->c_unacked_bytes = rds_sysctl_max_unacked_bytes;
 247				rds_stats_inc(s_send_ack_required);
 248			} else {
 249				conn->c_unacked_bytes -= len;
 250				conn->c_unacked_packets--;
 251			}
 252
 253			conn->c_xmit_rm = rm;
 254		}
 255
 256		/* The transport either sends the whole rdma or none of it */
 257		if (rm->rdma.op_active && !conn->c_xmit_rdma_sent) {
 258			rm->m_final_op = &rm->rdma;
 
 
 
 
 259			ret = conn->c_trans->xmit_rdma(conn, &rm->rdma);
 260			if (ret)
 
 
 261				break;
 
 262			conn->c_xmit_rdma_sent = 1;
 263
 264			/* The transport owns the mapped memory for now.
 265			 * You can't unmap it while it's on the send queue */
 266			set_bit(RDS_MSG_MAPPED, &rm->m_flags);
 267		}
 268
 269		if (rm->atomic.op_active && !conn->c_xmit_atomic_sent) {
 270			rm->m_final_op = &rm->atomic;
 
 
 
 
 271			ret = conn->c_trans->xmit_atomic(conn, &rm->atomic);
 272			if (ret)
 
 
 273				break;
 
 274			conn->c_xmit_atomic_sent = 1;
 275
 276			/* The transport owns the mapped memory for now.
 277			 * You can't unmap it while it's on the send queue */
 278			set_bit(RDS_MSG_MAPPED, &rm->m_flags);
 279		}
 280
 281		/*
 282		 * A number of cases require an RDS header to be sent
 283		 * even if there is no data.
 284		 * We permit 0-byte sends; rds-ping depends on this.
 285		 * However, if there are exclusively attached silent ops,
 286		 * we skip the hdr/data send, to enable silent operation.
 287		 */
 288		if (rm->data.op_nents == 0) {
 289			int ops_present;
 290			int all_ops_are_silent = 1;
 291
 292			ops_present = (rm->atomic.op_active || rm->rdma.op_active);
 293			if (rm->atomic.op_active && !rm->atomic.op_silent)
 294				all_ops_are_silent = 0;
 295			if (rm->rdma.op_active && !rm->rdma.op_silent)
 296				all_ops_are_silent = 0;
 297
 298			if (ops_present && all_ops_are_silent
 299			    && !rm->m_rdma_cookie)
 300				rm->data.op_active = 0;
 301		}
 302
 303		if (rm->data.op_active && !conn->c_xmit_data_sent) {
 304			rm->m_final_op = &rm->data;
 305			ret = conn->c_trans->xmit(conn, rm,
 306						  conn->c_xmit_hdr_off,
 307						  conn->c_xmit_sg,
 308						  conn->c_xmit_data_off);
 309			if (ret <= 0)
 310				break;
 311
 312			if (conn->c_xmit_hdr_off < sizeof(struct rds_header)) {
 313				tmp = min_t(int, ret,
 314					    sizeof(struct rds_header) -
 315					    conn->c_xmit_hdr_off);
 316				conn->c_xmit_hdr_off += tmp;
 317				ret -= tmp;
 318			}
 319
 320			sg = &rm->data.op_sg[conn->c_xmit_sg];
 321			while (ret) {
 322				tmp = min_t(int, ret, sg->length -
 323						      conn->c_xmit_data_off);
 324				conn->c_xmit_data_off += tmp;
 325				ret -= tmp;
 326				if (conn->c_xmit_data_off == sg->length) {
 327					conn->c_xmit_data_off = 0;
 328					sg++;
 329					conn->c_xmit_sg++;
 330					BUG_ON(ret != 0 &&
 331					       conn->c_xmit_sg == rm->data.op_nents);
 332				}
 333			}
 334
 335			if (conn->c_xmit_hdr_off == sizeof(struct rds_header) &&
 336			    (conn->c_xmit_sg == rm->data.op_nents))
 337				conn->c_xmit_data_sent = 1;
 338		}
 339
 340		/*
 341		 * A rm will only take multiple times through this loop
 342		 * if there is a data op. Thus, if the data is sent (or there was
 343		 * none), then we're done with the rm.
 344		 */
 345		if (!rm->data.op_active || conn->c_xmit_data_sent) {
 346			conn->c_xmit_rm = NULL;
 347			conn->c_xmit_sg = 0;
 348			conn->c_xmit_hdr_off = 0;
 349			conn->c_xmit_data_off = 0;
 350			conn->c_xmit_rdma_sent = 0;
 351			conn->c_xmit_atomic_sent = 0;
 352			conn->c_xmit_data_sent = 0;
 353
 354			rds_message_put(rm);
 355		}
 356	}
 357
 
 358	if (conn->c_trans->xmit_complete)
 359		conn->c_trans->xmit_complete(conn);
 360
 361	release_in_xmit(conn);
 362
 363	/* Nuke any messages we decided not to retransmit. */
 364	if (!list_empty(&to_be_dropped)) {
 365		/* irqs on here, so we can put(), unlike above */
 366		list_for_each_entry(rm, &to_be_dropped, m_conn_item)
 367			rds_message_put(rm);
 368		rds_send_remove_from_sock(&to_be_dropped, RDS_RDMA_DROPPED);
 369	}
 370
 371	/*
 372	 * Other senders can queue a message after we last test the send queue
 373	 * but before we clear RDS_IN_XMIT.  In that case they'd back off and
 374	 * not try and send their newly queued message.  We need to check the
 375	 * send queue after having cleared RDS_IN_XMIT so that their message
 376	 * doesn't get stuck on the send queue.
 377	 *
 378	 * If the transport cannot continue (i.e ret != 0), then it must
 379	 * call us when more room is available, such as from the tx
 380	 * completion handler.
 
 
 
 
 381	 */
 382	if (ret == 0) {
 383		smp_mb();
 384		if (!list_empty(&conn->c_send_queue)) {
 
 
 385			rds_stats_inc(s_send_lock_queue_raced);
 386			goto restart;
 
 
 387		}
 388	}
 389out:
 390	return ret;
 391}
 
 392
 393static void rds_send_sndbuf_remove(struct rds_sock *rs, struct rds_message *rm)
 394{
 395	u32 len = be32_to_cpu(rm->m_inc.i_hdr.h_len);
 396
 397	assert_spin_locked(&rs->rs_lock);
 398
 399	BUG_ON(rs->rs_snd_bytes < len);
 400	rs->rs_snd_bytes -= len;
 401
 402	if (rs->rs_snd_bytes == 0)
 403		rds_stats_inc(s_send_queue_empty);
 404}
 405
 406static inline int rds_send_is_acked(struct rds_message *rm, u64 ack,
 407				    is_acked_func is_acked)
 408{
 409	if (is_acked)
 410		return is_acked(rm, ack);
 411	return be64_to_cpu(rm->m_inc.i_hdr.h_sequence) <= ack;
 412}
 413
 414/*
 415 * This is pretty similar to what happens below in the ACK
 416 * handling code - except that we call here as soon as we get
 417 * the IB send completion on the RDMA op and the accompanying
 418 * message.
 419 */
 420void rds_rdma_send_complete(struct rds_message *rm, int status)
 421{
 422	struct rds_sock *rs = NULL;
 423	struct rm_rdma_op *ro;
 424	struct rds_notifier *notifier;
 425	unsigned long flags;
 426
 427	spin_lock_irqsave(&rm->m_rs_lock, flags);
 428
 429	ro = &rm->rdma;
 430	if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags) &&
 431	    ro->op_active && ro->op_notify && ro->op_notifier) {
 432		notifier = ro->op_notifier;
 433		rs = rm->m_rs;
 434		sock_hold(rds_rs_to_sk(rs));
 435
 436		notifier->n_status = status;
 437		spin_lock(&rs->rs_lock);
 438		list_add_tail(&notifier->n_list, &rs->rs_notify_queue);
 439		spin_unlock(&rs->rs_lock);
 440
 441		ro->op_notifier = NULL;
 442	}
 443
 444	spin_unlock_irqrestore(&rm->m_rs_lock, flags);
 445
 446	if (rs) {
 447		rds_wake_sk_sleep(rs);
 448		sock_put(rds_rs_to_sk(rs));
 449	}
 450}
 451EXPORT_SYMBOL_GPL(rds_rdma_send_complete);
 452
 453/*
 454 * Just like above, except looks at atomic op
 455 */
 456void rds_atomic_send_complete(struct rds_message *rm, int status)
 457{
 458	struct rds_sock *rs = NULL;
 459	struct rm_atomic_op *ao;
 460	struct rds_notifier *notifier;
 461	unsigned long flags;
 462
 463	spin_lock_irqsave(&rm->m_rs_lock, flags);
 464
 465	ao = &rm->atomic;
 466	if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags)
 467	    && ao->op_active && ao->op_notify && ao->op_notifier) {
 468		notifier = ao->op_notifier;
 469		rs = rm->m_rs;
 470		sock_hold(rds_rs_to_sk(rs));
 471
 472		notifier->n_status = status;
 473		spin_lock(&rs->rs_lock);
 474		list_add_tail(&notifier->n_list, &rs->rs_notify_queue);
 475		spin_unlock(&rs->rs_lock);
 476
 477		ao->op_notifier = NULL;
 478	}
 479
 480	spin_unlock_irqrestore(&rm->m_rs_lock, flags);
 481
 482	if (rs) {
 483		rds_wake_sk_sleep(rs);
 484		sock_put(rds_rs_to_sk(rs));
 485	}
 486}
 487EXPORT_SYMBOL_GPL(rds_atomic_send_complete);
 488
 489/*
 490 * This is the same as rds_rdma_send_complete except we
 491 * don't do any locking - we have all the ingredients (message,
 492 * socket, socket lock) and can just move the notifier.
 493 */
 494static inline void
 495__rds_send_complete(struct rds_sock *rs, struct rds_message *rm, int status)
 496{
 497	struct rm_rdma_op *ro;
 498	struct rm_atomic_op *ao;
 499
 500	ro = &rm->rdma;
 501	if (ro->op_active && ro->op_notify && ro->op_notifier) {
 502		ro->op_notifier->n_status = status;
 503		list_add_tail(&ro->op_notifier->n_list, &rs->rs_notify_queue);
 504		ro->op_notifier = NULL;
 505	}
 506
 507	ao = &rm->atomic;
 508	if (ao->op_active && ao->op_notify && ao->op_notifier) {
 509		ao->op_notifier->n_status = status;
 510		list_add_tail(&ao->op_notifier->n_list, &rs->rs_notify_queue);
 511		ao->op_notifier = NULL;
 512	}
 513
 514	/* No need to wake the app - caller does this */
 515}
 516
 517/*
 518 * This is called from the IB send completion when we detect
 519 * a RDMA operation that failed with remote access error.
 520 * So speed is not an issue here.
 521 */
 522struct rds_message *rds_send_get_message(struct rds_connection *conn,
 523					 struct rm_rdma_op *op)
 524{
 525	struct rds_message *rm, *tmp, *found = NULL;
 526	unsigned long flags;
 527
 528	spin_lock_irqsave(&conn->c_lock, flags);
 529
 530	list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
 531		if (&rm->rdma == op) {
 532			atomic_inc(&rm->m_refcount);
 533			found = rm;
 534			goto out;
 535		}
 536	}
 537
 538	list_for_each_entry_safe(rm, tmp, &conn->c_send_queue, m_conn_item) {
 539		if (&rm->rdma == op) {
 540			atomic_inc(&rm->m_refcount);
 541			found = rm;
 542			break;
 543		}
 544	}
 545
 546out:
 547	spin_unlock_irqrestore(&conn->c_lock, flags);
 548
 549	return found;
 550}
 551EXPORT_SYMBOL_GPL(rds_send_get_message);
 552
 553/*
 554 * This removes messages from the socket's list if they're on it.  The list
 555 * argument must be private to the caller, we must be able to modify it
 556 * without locks.  The messages must have a reference held for their
 557 * position on the list.  This function will drop that reference after
 558 * removing the messages from the 'messages' list regardless of if it found
 559 * the messages on the socket list or not.
 560 */
 561static void rds_send_remove_from_sock(struct list_head *messages, int status)
 562{
 563	unsigned long flags;
 564	struct rds_sock *rs = NULL;
 565	struct rds_message *rm;
 566
 567	while (!list_empty(messages)) {
 568		int was_on_sock = 0;
 569
 570		rm = list_entry(messages->next, struct rds_message,
 571				m_conn_item);
 572		list_del_init(&rm->m_conn_item);
 573
 574		/*
 575		 * If we see this flag cleared then we're *sure* that someone
 576		 * else beat us to removing it from the sock.  If we race
 577		 * with their flag update we'll get the lock and then really
 578		 * see that the flag has been cleared.
 579		 *
 580		 * The message spinlock makes sure nobody clears rm->m_rs
 581		 * while we're messing with it. It does not prevent the
 582		 * message from being removed from the socket, though.
 583		 */
 584		spin_lock_irqsave(&rm->m_rs_lock, flags);
 585		if (!test_bit(RDS_MSG_ON_SOCK, &rm->m_flags))
 586			goto unlock_and_drop;
 587
 588		if (rs != rm->m_rs) {
 589			if (rs) {
 590				rds_wake_sk_sleep(rs);
 591				sock_put(rds_rs_to_sk(rs));
 592			}
 593			rs = rm->m_rs;
 594			sock_hold(rds_rs_to_sk(rs));
 
 595		}
 
 
 596		spin_lock(&rs->rs_lock);
 597
 598		if (test_and_clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags)) {
 599			struct rm_rdma_op *ro = &rm->rdma;
 600			struct rds_notifier *notifier;
 601
 602			list_del_init(&rm->m_sock_item);
 603			rds_send_sndbuf_remove(rs, rm);
 604
 605			if (ro->op_active && ro->op_notifier &&
 606			       (ro->op_notify || (ro->op_recverr && status))) {
 607				notifier = ro->op_notifier;
 608				list_add_tail(&notifier->n_list,
 609						&rs->rs_notify_queue);
 610				if (!notifier->n_status)
 611					notifier->n_status = status;
 612				rm->rdma.op_notifier = NULL;
 613			}
 614			was_on_sock = 1;
 615			rm->m_rs = NULL;
 616		}
 617		spin_unlock(&rs->rs_lock);
 618
 619unlock_and_drop:
 620		spin_unlock_irqrestore(&rm->m_rs_lock, flags);
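     		/* The first put drops the reference the caller's 'messages'
     		 * list held; if we just took the message off the socket,
     		 * drop the socket list's reference as well.
     		 */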
 621		rds_message_put(rm);
 622		if (was_on_sock)
 623			rds_message_put(rm);
 624	}
 625
 626	if (rs) {
 627		rds_wake_sk_sleep(rs);
 628		sock_put(rds_rs_to_sk(rs));
 629	}
 630}
 631
 632/*
 633 * Transports call here when they've determined that the receiver queued
 634 * messages up to, and including, the given sequence number.  Messages are
 635 * moved to the retrans queue when rds_send_xmit picks them off the send
 636 * queue. This means that in the TCP case, the message may not have been
 637 * assigned the m_ack_seq yet - but that's fine as long as tcp_is_acked
 638 * checks the RDS_MSG_HAS_ACK_SEQ bit.
 639 *
 640 * XXX It's not clear to me how this is safely serialized with socket
 641 * destruction.  Maybe it should bail if it sees SOCK_DEAD.
 642 */
 643void rds_send_drop_acked(struct rds_connection *conn, u64 ack,
 644			 is_acked_func is_acked)
 645{
 646	struct rds_message *rm, *tmp;
 647	unsigned long flags;
 648	LIST_HEAD(list);
 649
 650	spin_lock_irqsave(&conn->c_lock, flags);
 651
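     	/* The retrans queue is kept in transmit order, so we can stop at
     	 * the first message the transport has not acked yet.
     	 */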
 652	list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
 653		if (!rds_send_is_acked(rm, ack, is_acked))
 654			break;
 655
 656		list_move(&rm->m_conn_item, &list);
 657		clear_bit(RDS_MSG_ON_CONN, &rm->m_flags);
 658	}
 659
 660	/* order flag updates with spin locks */
 661	if (!list_empty(&list))
 662		smp_mb__after_clear_bit();
 663
 664	spin_unlock_irqrestore(&conn->c_lock, flags);
 665
 666	/* now remove the messages from the sock list as needed */
 667	rds_send_remove_from_sock(&list, RDS_RDMA_SUCCESS);
 668}
 669EXPORT_SYMBOL_GPL(rds_send_drop_acked);
 670
 671void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest)
 672{
 673	struct rds_message *rm, *tmp;
 674	struct rds_connection *conn;
 675	unsigned long flags;
 676	LIST_HEAD(list);
 677
 678	/* get all the messages we're dropping under the rs lock */
 679	spin_lock_irqsave(&rs->rs_lock, flags);
 680
 681	list_for_each_entry_safe(rm, tmp, &rs->rs_send_queue, m_sock_item) {
 682		if (dest && (dest->sin_addr.s_addr != rm->m_daddr ||
 683			     dest->sin_port != rm->m_inc.i_hdr.h_dport))
 684			continue;
 685
 686		list_move(&rm->m_sock_item, &list);
 687		rds_send_sndbuf_remove(rs, rm);
 688		clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags);
 689	}
 690
 691	/* order flag updates with the rs lock */
 692	smp_mb__after_clear_bit();
 693
 694	spin_unlock_irqrestore(&rs->rs_lock, flags);
 695
 696	if (list_empty(&list))
 697		return;
 698
 699	/* Remove the messages from the conn */
 700	list_for_each_entry(rm, &list, m_sock_item) {
 701
 702		conn = rm->m_inc.i_conn;
 703
 704		spin_lock_irqsave(&conn->c_lock, flags);
 705		/*
 706		 * Maybe someone else beat us to removing rm from the conn.
 707		 * If we race with their flag update we'll get the lock and
 708		 * then really see that the flag has been cleared.
 709		 */
 710		if (!test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags)) {
 711			spin_unlock_irqrestore(&conn->c_lock, flags);
 712			continue;
 713		}
 714		list_del_init(&rm->m_conn_item);
 715		spin_unlock_irqrestore(&conn->c_lock, flags);
 716
 717		/*
 718		 * Couldn't grab m_rs_lock in top loop (lock ordering),
 719		 * but we can now.
 720		 */
 721		spin_lock_irqsave(&rm->m_rs_lock, flags);
 722
 723		spin_lock(&rs->rs_lock);
 724		__rds_send_complete(rs, rm, RDS_RDMA_CANCELED);
 725		spin_unlock(&rs->rs_lock);
 726
 727		rm->m_rs = NULL;
 728		spin_unlock_irqrestore(&rm->m_rs_lock, flags);
 729
 730		rds_message_put(rm);
 731	}
 732
 733	rds_wake_sk_sleep(rs);
 734
 735	while (!list_empty(&list)) {
 736		rm = list_entry(list.next, struct rds_message, m_sock_item);
 737		list_del_init(&rm->m_sock_item);
 738
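     		/* Let the transport finish with (unmap) the message before
     		 * we drop what should be the final reference.
     		 */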
 739		rds_message_wait(rm);
 740		rds_message_put(rm);
 741	}
 742}
 743
 744/*
  745 * We only want this to fire once, so we use the caller's 'queued'.  It's
 746 * possible that another thread can race with us and remove the
 747 * message from the flow with RDS_CANCEL_SENT_TO.
 748 */
 749static int rds_send_queue_rm(struct rds_sock *rs, struct rds_connection *conn,
 750			     struct rds_message *rm, __be16 sport,
 751			     __be16 dport, int *queued)
 752{
 753	unsigned long flags;
 754	u32 len;
 755
 756	if (*queued)
 757		goto out;
 758
 759	len = be32_to_cpu(rm->m_inc.i_hdr.h_len);
 760
 761	/* this is the only place which holds both the socket's rs_lock
 762	 * and the connection's c_lock */
 763	spin_lock_irqsave(&rs->rs_lock, flags);
 764
 765	/*
 766	 * If there is a little space in sndbuf, we don't queue anything,
 767	 * and userspace gets -EAGAIN. But poll() indicates there's send
 768	 * room. This can lead to bad behavior (spinning) if snd_bytes isn't
 769	 * freed up by incoming acks. So we check the *old* value of
 770	 * rs_snd_bytes here to allow the last msg to exceed the buffer,
 771	 * and poll() now knows no more data can be sent.
 772	 */
 773	if (rs->rs_snd_bytes < rds_sk_sndbuf(rs)) {
 774		rs->rs_snd_bytes += len;
 775
 776		/* let recv side know we are close to send space exhaustion.
 777		 * This is probably not the optimal way to do it, as this
 778		 * means we set the flag on *all* messages as soon as our
 779		 * throughput hits a certain threshold.
 780		 */
 781		if (rs->rs_snd_bytes >= rds_sk_sndbuf(rs) / 2)
 782			__set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);
 783
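     		/* One reference for the message's place on the socket's send
     		 * queue; rds_message_addref() below takes another for its
     		 * place on the connection's send queue.
     		 */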
 784		list_add_tail(&rm->m_sock_item, &rs->rs_send_queue);
 785		set_bit(RDS_MSG_ON_SOCK, &rm->m_flags);
 786		rds_message_addref(rm);
 787		rm->m_rs = rs;
 788
 789		/* The code ordering is a little weird, but we're
 790		   trying to minimize the time we hold c_lock */
 791		rds_message_populate_header(&rm->m_inc.i_hdr, sport, dport, 0);
 792		rm->m_inc.i_conn = conn;
 793		rds_message_addref(rm);
 794
 795		spin_lock(&conn->c_lock);
 796		rm->m_inc.i_hdr.h_sequence = cpu_to_be64(conn->c_next_tx_seq++);
 797		list_add_tail(&rm->m_conn_item, &conn->c_send_queue);
 798		set_bit(RDS_MSG_ON_CONN, &rm->m_flags);
 799		spin_unlock(&conn->c_lock);
 800
 801		rdsdebug("queued msg %p len %d, rs %p bytes %d seq %llu\n",
 802			 rm, len, rs, rs->rs_snd_bytes,
 803			 (unsigned long long)be64_to_cpu(rm->m_inc.i_hdr.h_sequence));
 804
 805		*queued = 1;
 806	}
 807
 808	spin_unlock_irqrestore(&rs->rs_lock, flags);
 809out:
 810	return *queued;
 811}
 812
 813/*
 814 * rds_message is getting to be quite complicated, and we'd like to allocate
 815 * it all in one go. This figures out how big it needs to be up front.
 816 */
 817static int rds_rm_size(struct msghdr *msg, int data_len)
 818{
 819	struct cmsghdr *cmsg;
 820	int size = 0;
 821	int cmsg_groups = 0;
 822	int retval;
 823
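     	/* cmsg_groups records which kinds of cmsgs were seen: bit 0 for
     	 * RDMA args and atomics, bit 1 for MR cmsgs (DEST/MAP).  Mixing
     	 * the two groups is rejected below.
     	 */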
 824	for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
 825		if (!CMSG_OK(msg, cmsg))
 826			return -EINVAL;
 827
 828		if (cmsg->cmsg_level != SOL_RDS)
 829			continue;
 830
 831		switch (cmsg->cmsg_type) {
 832		case RDS_CMSG_RDMA_ARGS:
 833			cmsg_groups |= 1;
 834			retval = rds_rdma_extra_size(CMSG_DATA(cmsg));
 835			if (retval < 0)
 836				return retval;
 837			size += retval;
 838
 839			break;
 840
 841		case RDS_CMSG_RDMA_DEST:
 842		case RDS_CMSG_RDMA_MAP:
 843			cmsg_groups |= 2;
  844			/* these are valid but do not add any size */
 845			break;
 846
 847		case RDS_CMSG_ATOMIC_CSWP:
 848		case RDS_CMSG_ATOMIC_FADD:
 849		case RDS_CMSG_MASKED_ATOMIC_CSWP:
 850		case RDS_CMSG_MASKED_ATOMIC_FADD:
 851			cmsg_groups |= 1;
 852			size += sizeof(struct scatterlist);
 853			break;
 854
 855		default:
 856			return -EINVAL;
 857		}
 858
 859	}
 860
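     	/* One scatterlist entry per page of immediate data. */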
 861	size += ceil(data_len, PAGE_SIZE) * sizeof(struct scatterlist);
 862
 863	/* Ensure (DEST, MAP) are never used with (ARGS, ATOMIC) */
 864	if (cmsg_groups == 3)
 865		return -EINVAL;
 866
 867	return size;
 868}
 869
 870static int rds_cmsg_send(struct rds_sock *rs, struct rds_message *rm,
 871			 struct msghdr *msg, int *allocated_mr)
 872{
 873	struct cmsghdr *cmsg;
 874	int ret = 0;
 875
 876	for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
 877		if (!CMSG_OK(msg, cmsg))
 878			return -EINVAL;
 879
 880		if (cmsg->cmsg_level != SOL_RDS)
 881			continue;
 882
 883		/* As a side effect, RDMA_DEST and RDMA_MAP will set
 884		 * rm->rdma.m_rdma_cookie and rm->rdma.m_rdma_mr.
 885		 */
 886		switch (cmsg->cmsg_type) {
 887		case RDS_CMSG_RDMA_ARGS:
 888			ret = rds_cmsg_rdma_args(rs, rm, cmsg);
 889			break;
 890
 891		case RDS_CMSG_RDMA_DEST:
 892			ret = rds_cmsg_rdma_dest(rs, rm, cmsg);
 893			break;
 894
 895		case RDS_CMSG_RDMA_MAP:
 896			ret = rds_cmsg_rdma_map(rs, rm, cmsg);
 897			if (!ret)
 898				*allocated_mr = 1;
 899			break;
 900		case RDS_CMSG_ATOMIC_CSWP:
 901		case RDS_CMSG_ATOMIC_FADD:
 902		case RDS_CMSG_MASKED_ATOMIC_CSWP:
 903		case RDS_CMSG_MASKED_ATOMIC_FADD:
 904			ret = rds_cmsg_atomic(rs, rm, cmsg);
 905			break;
 906
 907		default:
 908			return -EINVAL;
 909		}
 910
 911		if (ret)
 912			break;
 913	}
 914
 915	return ret;
 916}
 917
 918int rds_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
 919		size_t payload_len)
 920{
 921	struct sock *sk = sock->sk;
 922	struct rds_sock *rs = rds_sk_to_rs(sk);
 923	struct sockaddr_in *usin = (struct sockaddr_in *)msg->msg_name;
 924	__be32 daddr;
 925	__be16 dport;
 926	struct rds_message *rm = NULL;
 927	struct rds_connection *conn;
 928	int ret = 0;
 929	int queued = 0, allocated_mr = 0;
 930	int nonblock = msg->msg_flags & MSG_DONTWAIT;
 931	long timeo = sock_sndtimeo(sk, nonblock);
 932
  933	/* Mirror Linux UDP's mirroring of BSD error message compatibility */
 934	/* XXX: Perhaps MSG_MORE someday */
 935	if (msg->msg_flags & ~(MSG_DONTWAIT | MSG_CMSG_COMPAT)) {
 936		printk(KERN_INFO "msg_flags 0x%08X\n", msg->msg_flags);
 937		ret = -EOPNOTSUPP;
 938		goto out;
 939	}
 940
 941	if (msg->msg_namelen) {
 942		/* XXX fail non-unicast destination IPs? */
 943		if (msg->msg_namelen < sizeof(*usin) || usin->sin_family != AF_INET) {
 944			ret = -EINVAL;
 945			goto out;
 946		}
 947		daddr = usin->sin_addr.s_addr;
 948		dport = usin->sin_port;
 949	} else {
 950		/* We only care about consistency with ->connect() */
 951		lock_sock(sk);
 952		daddr = rs->rs_conn_addr;
 953		dport = rs->rs_conn_port;
 954		release_sock(sk);
 955	}
 956
 957	/* racing with another thread binding seems ok here */
 958	if (daddr == 0 || rs->rs_bound_addr == 0) {
 959		ret = -ENOTCONN; /* XXX not a great errno */
 960		goto out;
 961	}
 962
 963	/* size of rm including all sgs */
 964	ret = rds_rm_size(msg, payload_len);
 965	if (ret < 0)
 966		goto out;
 967
 968	rm = rds_message_alloc(ret, GFP_KERNEL);
 969	if (!rm) {
 970		ret = -ENOMEM;
 971		goto out;
 972	}
 973
 974	/* Attach data to the rm */
 975	if (payload_len) {
 976		rm->data.op_sg = rds_message_alloc_sgs(rm, ceil(payload_len, PAGE_SIZE));
 977		if (!rm->data.op_sg) {
 978			ret = -ENOMEM;
 979			goto out;
 980		}
 981		ret = rds_message_copy_from_user(rm, msg->msg_iov, payload_len);
 982		if (ret)
 983			goto out;
 984	}
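     	/* The data op is marked active even when payload_len is 0, so a
     	 * zero-length send still produces a (header-only) message. */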
 985	rm->data.op_active = 1;
 986
 987	rm->m_daddr = daddr;
 988
 989	/* rds_conn_create has a spinlock that runs with IRQ off.
 990	 * Caching the conn in the socket helps a lot. */
 991	if (rs->rs_conn && rs->rs_conn->c_faddr == daddr)
 992		conn = rs->rs_conn;
 993	else {
 994		conn = rds_conn_create_outgoing(rs->rs_bound_addr, daddr,
 995					rs->rs_transport,
 996					sock->sk->sk_allocation);
 997		if (IS_ERR(conn)) {
 998			ret = PTR_ERR(conn);
 999			goto out;
1000		}
1001		rs->rs_conn = conn;
1002	}
1003
1004	/* Parse any control messages the user may have included. */
1005	ret = rds_cmsg_send(rs, rm, msg, &allocated_mr);
1006	if (ret)
1007		goto out;
1008
1009	if (rm->rdma.op_active && !conn->c_trans->xmit_rdma) {
1010		printk_ratelimited(KERN_NOTICE "rdma_op %p conn xmit_rdma %p\n",
1011			       &rm->rdma, conn->c_trans->xmit_rdma);
1012		ret = -EOPNOTSUPP;
1013		goto out;
1014	}
1015
1016	if (rm->atomic.op_active && !conn->c_trans->xmit_atomic) {
1017		printk_ratelimited(KERN_NOTICE "atomic_op %p conn xmit_atomic %p\n",
1018			       &rm->atomic, conn->c_trans->xmit_atomic);
1019		ret = -EOPNOTSUPP;
1020		goto out;
1021	}
1022
1023	rds_conn_connect_if_down(conn);
1024
1025	ret = rds_cong_wait(conn->c_fcong, dport, nonblock, rs);
1026	if (ret) {
1027		rs->rs_seen_congestion = 1;
1028		goto out;
1029	}
1030
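     	/* Queue the message on the socket and connection.  If the sndbuf
     	 * is full, fail with -EAGAIN when nonblocking, otherwise wait for
     	 * space (or a timeout).
     	 */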
1031	while (!rds_send_queue_rm(rs, conn, rm, rs->rs_bound_port,
1032				  dport, &queued)) {
1033		rds_stats_inc(s_send_queue_full);
1034		/* XXX make sure this is reasonable */
1035		if (payload_len > rds_sk_sndbuf(rs)) {
1036			ret = -EMSGSIZE;
1037			goto out;
1038		}
1039		if (nonblock) {
1040			ret = -EAGAIN;
1041			goto out;
1042		}
1043
1044		timeo = wait_event_interruptible_timeout(*sk_sleep(sk),
1045					rds_send_queue_rm(rs, conn, rm,
1046							  rs->rs_bound_port,
1047							  dport,
1048							  &queued),
1049					timeo);
1050		rdsdebug("sendmsg woke queued %d timeo %ld\n", queued, timeo);
1051		if (timeo > 0 || timeo == MAX_SCHEDULE_TIMEOUT)
1052			continue;
1053
1054		ret = timeo;
1055		if (ret == 0)
1056			ret = -ETIMEDOUT;
1057		goto out;
1058	}
1059
1060	/*
1061	 * By now we've committed to the send.  We reuse rds_send_worker()
1062	 * to retry sends in the rds thread if the transport asks us to.
1063	 */
1064	rds_stats_inc(s_send_queued);
1065
1066	if (!test_bit(RDS_LL_SEND_FULL, &conn->c_flags))
1067		rds_send_xmit(conn);
1068
1069	rds_message_put(rm);
1070	return payload_len;
1071
1072out:
 1073	/* If the user included a RDMA_MAP cmsg, we allocated an MR on the fly.
1074	 * If the sendmsg goes through, we keep the MR. If it fails with EAGAIN
1075	 * or in any other way, we need to destroy the MR again */
1076	if (allocated_mr)
1077		rds_rdma_unuse(rs, rds_rdma_cookie_key(rm->m_rdma_cookie), 1);
1078
1079	if (rm)
1080		rds_message_put(rm);
1081	return ret;
1082}
1083
1084/*
1085 * Reply to a ping packet.
1086 */
1087int
1088rds_send_pong(struct rds_connection *conn, __be16 dport)
1089{
1090	struct rds_message *rm;
1091	unsigned long flags;
1092	int ret = 0;
1093
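     	/* A pong carries no payload and is queued directly on the
     	 * connection's send queue; no socket is involved.
     	 */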
1094	rm = rds_message_alloc(0, GFP_ATOMIC);
1095	if (!rm) {
1096		ret = -ENOMEM;
1097		goto out;
1098	}
1099
1100	rm->m_daddr = conn->c_faddr;
1101	rm->data.op_active = 1;
1102
1103	rds_conn_connect_if_down(conn);
1104
1105	ret = rds_cong_wait(conn->c_fcong, dport, 1, NULL);
1106	if (ret)
1107		goto out;
1108
1109	spin_lock_irqsave(&conn->c_lock, flags);
1110	list_add_tail(&rm->m_conn_item, &conn->c_send_queue);
1111	set_bit(RDS_MSG_ON_CONN, &rm->m_flags);
1112	rds_message_addref(rm);
1113	rm->m_inc.i_conn = conn;
1114
1115	rds_message_populate_header(&rm->m_inc.i_hdr, 0, dport,
1116				    conn->c_next_tx_seq);
1117	conn->c_next_tx_seq++;
1118	spin_unlock_irqrestore(&conn->c_lock, flags);
1119
1120	rds_stats_inc(s_send_queued);
1121	rds_stats_inc(s_send_pong);
1122
1123	if (!test_bit(RDS_LL_SEND_FULL, &conn->c_flags))
1124		rds_send_xmit(conn);
1125
1126	rds_message_put(rm);
1127	return 0;
1128
1129out:
1130	if (rm)
1131		rds_message_put(rm);
1132	return ret;
1133}