v5.9
   1/*
   2 * Copyright (c) 2006, 2018 Oracle and/or its affiliates. All rights reserved.
   3 *
   4 * This software is available to you under a choice of one of two
   5 * licenses.  You may choose to be licensed under the terms of the GNU
   6 * General Public License (GPL) Version 2, available from the file
   7 * COPYING in the main directory of this source tree, or the
   8 * OpenIB.org BSD license below:
   9 *
  10 *     Redistribution and use in source and binary forms, with or
  11 *     without modification, are permitted provided that the following
  12 *     conditions are met:
  13 *
  14 *      - Redistributions of source code must retain the above
  15 *        copyright notice, this list of conditions and the following
  16 *        disclaimer.
  17 *
  18 *      - Redistributions in binary form must reproduce the above
  19 *        copyright notice, this list of conditions and the following
  20 *        disclaimer in the documentation and/or other materials
  21 *        provided with the distribution.
  22 *
  23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  30 * SOFTWARE.
  31 *
  32 */
  33#include <linux/kernel.h>
  34#include <linux/moduleparam.h>
  35#include <linux/gfp.h>
  36#include <net/sock.h>
  37#include <linux/in.h>
  38#include <linux/list.h>
  39#include <linux/ratelimit.h>
  40#include <linux/export.h>
  41#include <linux/sizes.h>
  42
  43#include "rds.h"
  44
  45/* When transmitting messages in rds_send_xmit, we need to emerge from
  46 * time to time and briefly release the CPU. Otherwise the soft-lockup watchdog
  47 * will kick our shin.
  48 * Also, it seems fairer to not let one busy connection stall all the
  49 * others.
  50 *
  51 * send_batch_count is the number of times we'll loop in send_xmit. Setting
  52 * it to 0 will restore the old behavior (where we looped until we had
  53 * drained the queue).
  54 */
  55static int send_batch_count = SZ_1K;
  56module_param(send_batch_count, int, 0444);
  57MODULE_PARM_DESC(send_batch_count, " batch factor when working the send queue");
  58
  59static void rds_send_remove_from_sock(struct list_head *messages, int status);
  60
  61/*
  62 * Reset the send state.  Callers must ensure that this doesn't race with
  63 * rds_send_xmit().
  64 */
  65void rds_send_path_reset(struct rds_conn_path *cp)
  66{
  67	struct rds_message *rm, *tmp;
  68	unsigned long flags;
  69
  70	if (cp->cp_xmit_rm) {
  71		rm = cp->cp_xmit_rm;
  72		cp->cp_xmit_rm = NULL;
  73		/* Tell the user the RDMA op is no longer mapped by the
  74		 * transport. This isn't entirely true (it's flushed out
  75		 * independently) but as the connection is down, there's
  76		 * no ongoing RDMA to/from that memory */
  77		rds_message_unmapped(rm);
  78		rds_message_put(rm);
  79	}
  80
  81	cp->cp_xmit_sg = 0;
  82	cp->cp_xmit_hdr_off = 0;
  83	cp->cp_xmit_data_off = 0;
  84	cp->cp_xmit_atomic_sent = 0;
  85	cp->cp_xmit_rdma_sent = 0;
  86	cp->cp_xmit_data_sent = 0;
  87
  88	cp->cp_conn->c_map_queued = 0;
  89
  90	cp->cp_unacked_packets = rds_sysctl_max_unacked_packets;
  91	cp->cp_unacked_bytes = rds_sysctl_max_unacked_bytes;
  92
  93	/* Mark messages as retransmissions, and move them to the send q */
  94	spin_lock_irqsave(&cp->cp_lock, flags);
  95	list_for_each_entry_safe(rm, tmp, &cp->cp_retrans, m_conn_item) {
  96		set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);
  97		set_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags);
  98	}
  99	list_splice_init(&cp->cp_retrans, &cp->cp_send_queue);
 100	spin_unlock_irqrestore(&cp->cp_lock, flags);
 101}
 102EXPORT_SYMBOL_GPL(rds_send_path_reset);
 103
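    /* Try to become the (single) transmitter for this path: returns nonzero
     * if we newly set RDS_IN_XMIT, zero if another task already holds it.
     */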
 104static int acquire_in_xmit(struct rds_conn_path *cp)
 105{
 106	return test_and_set_bit(RDS_IN_XMIT, &cp->cp_flags) == 0;
 107}
 108
 109static void release_in_xmit(struct rds_conn_path *cp)
 110{
 111	clear_bit(RDS_IN_XMIT, &cp->cp_flags);
 112	smp_mb__after_atomic();
 113	/*
 114	 * We don't use wait_on_bit()/wake_up_bit() because our waking is in a
 115	 * hot path and finding waiters is very rare.  We don't want to walk
 116	 * the system-wide hashed waitqueue buckets in the fast path only to
 117	 * almost never find waiters.
 118	 */
 119	if (waitqueue_active(&cp->cp_waitq))
 120		wake_up_all(&cp->cp_waitq);
 121}
 122
 123/*
 124 * We're making the conscious trade-off here to only send one message
 125 * down the connection at a time.
 126 *   Pro:
 127 *      - tx queueing is a simple fifo list
 128 *   	- reassembly is optional and easily done by transports per conn
 129 *      - no per flow rx lookup at all, straight to the socket
 130 *   	- less per-frag memory and wire overhead
 131 *   Con:
 132 *      - queued acks can be delayed behind large messages
 133 *   Depends:
 134 *      - small message latency is higher behind queued large messages
 135 *      - large message latency isn't starved by intervening small sends
 136 */
 137int rds_send_xmit(struct rds_conn_path *cp)
 138{
 139	struct rds_connection *conn = cp->cp_conn;
 140	struct rds_message *rm;
 141	unsigned long flags;
 142	unsigned int tmp;
 143	struct scatterlist *sg;
 144	int ret = 0;
 145	LIST_HEAD(to_be_dropped);
 146	int batch_count;
 147	unsigned long send_gen = 0;
 148	int same_rm = 0;
 149
 150restart:
 151	batch_count = 0;
 152
 153	/*
 154	 * sendmsg calls here after having queued its message on the send
 155	 * queue.  We only have one task feeding the connection at a time.  If
 156	 * another thread is already feeding the queue then we back off.  This
 157	 * avoids blocking the caller and trading per-connection data between
 158	 * caches per message.
 159	 */
 160	if (!acquire_in_xmit(cp)) {
 161		rds_stats_inc(s_send_lock_contention);
 162		ret = -ENOMEM;
 163		goto out;
 164	}
 165
 166	if (rds_destroy_pending(cp->cp_conn)) {
 167		release_in_xmit(cp);
 168		ret = -ENETUNREACH; /* don't requeue send work */
 169		goto out;
 170	}
 171
 172	/*
 173	 * we record the send generation after doing the xmit acquire.
 174	 * if someone else manages to jump in and do some work, we'll use
 175	 * this to avoid a goto restart farther down.
 176	 *
 177	 * The acquire_in_xmit() check above ensures that only one
 178	 * caller can increment c_send_gen at any time.
 179	 */
 180	send_gen = READ_ONCE(cp->cp_send_gen) + 1;
 181	WRITE_ONCE(cp->cp_send_gen, send_gen);
 182
 183	/*
 184	 * rds_conn_shutdown() sets the conn state and then tests RDS_IN_XMIT,
 185	 * we do the opposite to avoid races.
 186	 */
 187	if (!rds_conn_path_up(cp)) {
 188		release_in_xmit(cp);
 189		ret = 0;
 190		goto out;
 191	}
 192
 193	if (conn->c_trans->xmit_path_prepare)
 194		conn->c_trans->xmit_path_prepare(cp);
 195
 196	/*
 197	 * spin trying to push headers and data down the connection until
 198	 * the connection doesn't make forward progress.
 199	 */
 200	while (1) {
 201
 202		rm = cp->cp_xmit_rm;
 203
 204		if (!rm) {
 205			same_rm = 0;
 206		} else {
 207			same_rm++;
 208			if (same_rm >= 4096) {
 209				rds_stats_inc(s_send_stuck_rm);
 210				ret = -EAGAIN;
 211				break;
 212			}
 213		}
 214
 215		/*
 216		 * If between sending messages, we can send a pending congestion
 217		 * map update.
 218		 */
 219		if (!rm && test_and_clear_bit(0, &conn->c_map_queued)) {
 220			rm = rds_cong_update_alloc(conn);
 221			if (IS_ERR(rm)) {
 222				ret = PTR_ERR(rm);
 223				break;
 224			}
 225			rm->data.op_active = 1;
 226			rm->m_inc.i_conn_path = cp;
 227			rm->m_inc.i_conn = cp->cp_conn;
 228
 229			cp->cp_xmit_rm = rm;
 230		}
 231
 232		/*
 233		 * If not already working on one, grab the next message.
 234		 *
 235		 * cp_xmit_rm holds a ref while we're sending this message down
 236		 * the connection.  We can use this ref while holding the
 237		 * send_sem; rds_send_reset() is serialized with it.
 238		 */
 239		if (!rm) {
 240			unsigned int len;
 241
 242			batch_count++;
 243
 244			/* we want to process as big a batch as we can, but
 245			 * we also want to avoid softlockups.  If we've been
 246			 * through a lot of messages, let's back off and see
 247			 * if anyone else jumps in.
 248			 */
 249			if (batch_count >= send_batch_count)
 250				goto over_batch;
 251
 252			spin_lock_irqsave(&cp->cp_lock, flags);
 253
 254			if (!list_empty(&cp->cp_send_queue)) {
 255				rm = list_entry(cp->cp_send_queue.next,
 256						struct rds_message,
 257						m_conn_item);
 258				rds_message_addref(rm);
 259
 260				/*
 261				 * Move the message from the send queue to the retransmit
 262				 * list right away.
 263				 */
 264				list_move_tail(&rm->m_conn_item,
 265					       &cp->cp_retrans);
 266			}
 267
 268			spin_unlock_irqrestore(&cp->cp_lock, flags);
 269
 270			if (!rm)
 271				break;
 272
 273			/* Unfortunately, the way InfiniBand deals with
 274			 * RDMA to a bad MR key is by moving the entire
 275			 * queue pair to error state. We could possibly
 276			 * recover from that, but right now we drop the
 277			 * connection.
 278			 * Therefore, we never retransmit messages with RDMA ops.
 279			 */
 280			if (test_bit(RDS_MSG_FLUSH, &rm->m_flags) ||
 281			    (rm->rdma.op_active &&
 282			    test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags))) {
 283				spin_lock_irqsave(&cp->cp_lock, flags);
 284				if (test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags))
 285					list_move(&rm->m_conn_item, &to_be_dropped);
 286				spin_unlock_irqrestore(&cp->cp_lock, flags);
 287				continue;
 288			}
 289
 290			/* Require an ACK every once in a while */
 291			len = ntohl(rm->m_inc.i_hdr.h_len);
 292			if (cp->cp_unacked_packets == 0 ||
 293			    cp->cp_unacked_bytes < len) {
 294				set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);
 295
 296				cp->cp_unacked_packets =
 297					rds_sysctl_max_unacked_packets;
 298				cp->cp_unacked_bytes =
 299					rds_sysctl_max_unacked_bytes;
 300				rds_stats_inc(s_send_ack_required);
 301			} else {
 302				cp->cp_unacked_bytes -= len;
 303				cp->cp_unacked_packets--;
 304			}
 305
 306			cp->cp_xmit_rm = rm;
 307		}
 308
 309		/* The transport either sends the whole rdma or none of it */
 310		if (rm->rdma.op_active && !cp->cp_xmit_rdma_sent) {
 311			rm->m_final_op = &rm->rdma;
 312			/* The transport owns the mapped memory for now.
 313			 * You can't unmap it while it's on the send queue
 314			 */
 315			set_bit(RDS_MSG_MAPPED, &rm->m_flags);
 316			ret = conn->c_trans->xmit_rdma(conn, &rm->rdma);
 317			if (ret) {
 318				clear_bit(RDS_MSG_MAPPED, &rm->m_flags);
 319				wake_up_interruptible(&rm->m_flush_wait);
 320				break;
 321			}
 322			cp->cp_xmit_rdma_sent = 1;
 323
 324		}
 325
 326		if (rm->atomic.op_active && !cp->cp_xmit_atomic_sent) {
 327			rm->m_final_op = &rm->atomic;
 328			/* The transport owns the mapped memory for now.
 329			 * You can't unmap it while it's on the send queue
 330			 */
 331			set_bit(RDS_MSG_MAPPED, &rm->m_flags);
 332			ret = conn->c_trans->xmit_atomic(conn, &rm->atomic);
 333			if (ret) {
 334				clear_bit(RDS_MSG_MAPPED, &rm->m_flags);
 335				wake_up_interruptible(&rm->m_flush_wait);
 336				break;
 337			}
 338			cp->cp_xmit_atomic_sent = 1;
 339
 340		}
 341
 342		/*
 343		 * A number of cases require an RDS header to be sent
 344		 * even if there is no data.
 345		 * We permit 0-byte sends; rds-ping depends on this.
 346		 * However, if there are exclusively attached silent ops,
 347		 * we skip the hdr/data send, to enable silent operation.
 348		 */
 349		if (rm->data.op_nents == 0) {
 350			int ops_present;
 351			int all_ops_are_silent = 1;
 352
 353			ops_present = (rm->atomic.op_active || rm->rdma.op_active);
 354			if (rm->atomic.op_active && !rm->atomic.op_silent)
 355				all_ops_are_silent = 0;
 356			if (rm->rdma.op_active && !rm->rdma.op_silent)
 357				all_ops_are_silent = 0;
 358
 359			if (ops_present && all_ops_are_silent
 360			    && !rm->m_rdma_cookie)
 361				rm->data.op_active = 0;
 362		}
 363
 364		if (rm->data.op_active && !cp->cp_xmit_data_sent) {
 365			rm->m_final_op = &rm->data;
 366
 367			ret = conn->c_trans->xmit(conn, rm,
 368						  cp->cp_xmit_hdr_off,
 369						  cp->cp_xmit_sg,
 370						  cp->cp_xmit_data_off);
 371			if (ret <= 0)
 372				break;
 373
 374			if (cp->cp_xmit_hdr_off < sizeof(struct rds_header)) {
 375				tmp = min_t(int, ret,
 376					    sizeof(struct rds_header) -
 377					    cp->cp_xmit_hdr_off);
 378				cp->cp_xmit_hdr_off += tmp;
 379				ret -= tmp;
 380			}
 381
 382			sg = &rm->data.op_sg[cp->cp_xmit_sg];
 383			while (ret) {
 384				tmp = min_t(int, ret, sg->length -
 385						      cp->cp_xmit_data_off);
 386				cp->cp_xmit_data_off += tmp;
 387				ret -= tmp;
 388				if (cp->cp_xmit_data_off == sg->length) {
 389					cp->cp_xmit_data_off = 0;
 390					sg++;
 391					cp->cp_xmit_sg++;
 392					BUG_ON(ret != 0 && cp->cp_xmit_sg ==
 393					       rm->data.op_nents);
 394				}
 395			}
 396
 397			if (cp->cp_xmit_hdr_off == sizeof(struct rds_header) &&
 398			    (cp->cp_xmit_sg == rm->data.op_nents))
 399				cp->cp_xmit_data_sent = 1;
 400		}
 401
 402		/*
 403		 * An rm will only make multiple passes through this loop
 404		 * if there is a data op. Thus, if the data is sent (or there was
 405		 * none), then we're done with the rm.
 406		 */
 407		if (!rm->data.op_active || cp->cp_xmit_data_sent) {
 408			cp->cp_xmit_rm = NULL;
 409			cp->cp_xmit_sg = 0;
 410			cp->cp_xmit_hdr_off = 0;
 411			cp->cp_xmit_data_off = 0;
 412			cp->cp_xmit_rdma_sent = 0;
 413			cp->cp_xmit_atomic_sent = 0;
 414			cp->cp_xmit_data_sent = 0;
 415
 416			rds_message_put(rm);
 417		}
 418	}
 419
 420over_batch:
 421	if (conn->c_trans->xmit_path_complete)
 422		conn->c_trans->xmit_path_complete(cp);
 423	release_in_xmit(cp);
 424
 425	/* Nuke any messages we decided not to retransmit. */
 426	if (!list_empty(&to_be_dropped)) {
 427		/* irqs on here, so we can put(), unlike above */
 428		list_for_each_entry(rm, &to_be_dropped, m_conn_item)
 429			rds_message_put(rm);
 430		rds_send_remove_from_sock(&to_be_dropped, RDS_RDMA_DROPPED);
 431	}
 432
 433	/*
 434	 * Other senders can queue a message after we last test the send queue
 435	 * but before we clear RDS_IN_XMIT.  In that case they'd back off and
 436	 * not try and send their newly queued message.  We need to check the
 437	 * send queue after having cleared RDS_IN_XMIT so that their message
 438	 * doesn't get stuck on the send queue.
 439	 *
 440	 * If the transport cannot continue (i.e. ret != 0), then it must
 441	 * call us when more room is available, such as from the tx
 442	 * completion handler.
 443	 *
 444	 * We have an extra generation check here so that if someone manages
 445	 * to jump in after our release_in_xmit, we'll see that they have done
 446	 * some work and we will skip our goto restart.
 447	 */
 448	if (ret == 0) {
 449		bool raced;
 450
 451		smp_mb();
 452		raced = send_gen != READ_ONCE(cp->cp_send_gen);
 453
 454		if ((test_bit(0, &conn->c_map_queued) ||
 455		    !list_empty(&cp->cp_send_queue)) && !raced) {
 456			if (batch_count < send_batch_count)
 457				goto restart;
 458			rcu_read_lock();
 459			if (rds_destroy_pending(cp->cp_conn))
 460				ret = -ENETUNREACH;
 461			else
 462				queue_delayed_work(rds_wq, &cp->cp_send_w, 1);
 463			rcu_read_unlock();
 464		} else if (raced) {
 465			rds_stats_inc(s_send_lock_queue_raced);
 466		}
 467	}
 468out:
 469	return ret;
 470}
 471EXPORT_SYMBOL_GPL(rds_send_xmit);
 472
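    /* Drop a message's payload length from the socket's send-buffer
     * accounting; the caller must hold rs_lock (asserted below).
     */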
 473static void rds_send_sndbuf_remove(struct rds_sock *rs, struct rds_message *rm)
 474{
 475	u32 len = be32_to_cpu(rm->m_inc.i_hdr.h_len);
 476
 477	assert_spin_locked(&rs->rs_lock);
 478
 479	BUG_ON(rs->rs_snd_bytes < len);
 480	rs->rs_snd_bytes -= len;
 481
 482	if (rs->rs_snd_bytes == 0)
 483		rds_stats_inc(s_send_queue_empty);
 484}
 485
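    /* A message counts as acked when the transport's is_acked callback says
     * so, or, by default, when its header sequence number is <= the acked
     * sequence number.
     */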
 486static inline int rds_send_is_acked(struct rds_message *rm, u64 ack,
 487				    is_acked_func is_acked)
 488{
 489	if (is_acked)
 490		return is_acked(rm, ack);
 491	return be64_to_cpu(rm->m_inc.i_hdr.h_sequence) <= ack;
 492}
 493
 494/*
 495 * This is pretty similar to what happens below in the ACK
 496 * handling code - except that we call here as soon as we get
 497 * the IB send completion on the RDMA op and the accompanying
 498 * message.
 499 */
 500void rds_rdma_send_complete(struct rds_message *rm, int status)
 501{
 502	struct rds_sock *rs = NULL;
 503	struct rm_rdma_op *ro;
 504	struct rds_notifier *notifier;
 505	unsigned long flags;
 506
 507	spin_lock_irqsave(&rm->m_rs_lock, flags);
 508
 509	ro = &rm->rdma;
 510	if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags) &&
 511	    ro->op_active && ro->op_notify && ro->op_notifier) {
 512		notifier = ro->op_notifier;
 513		rs = rm->m_rs;
 514		sock_hold(rds_rs_to_sk(rs));
 515
 516		notifier->n_status = status;
 517		spin_lock(&rs->rs_lock);
 518		list_add_tail(&notifier->n_list, &rs->rs_notify_queue);
 519		spin_unlock(&rs->rs_lock);
 520
 521		ro->op_notifier = NULL;
 522	}
 523
 524	spin_unlock_irqrestore(&rm->m_rs_lock, flags);
 525
 526	if (rs) {
 527		rds_wake_sk_sleep(rs);
 528		sock_put(rds_rs_to_sk(rs));
 529	}
 530}
 531EXPORT_SYMBOL_GPL(rds_rdma_send_complete);
 532
 533/*
 534 * Just like above, except looks at atomic op
 535 */
 536void rds_atomic_send_complete(struct rds_message *rm, int status)
 537{
 538	struct rds_sock *rs = NULL;
 539	struct rm_atomic_op *ao;
 540	struct rds_notifier *notifier;
 541	unsigned long flags;
 542
 543	spin_lock_irqsave(&rm->m_rs_lock, flags);
 544
 545	ao = &rm->atomic;
 546	if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags)
 547	    && ao->op_active && ao->op_notify && ao->op_notifier) {
 548		notifier = ao->op_notifier;
 549		rs = rm->m_rs;
 550		sock_hold(rds_rs_to_sk(rs));
 551
 552		notifier->n_status = status;
 553		spin_lock(&rs->rs_lock);
 554		list_add_tail(&notifier->n_list, &rs->rs_notify_queue);
 555		spin_unlock(&rs->rs_lock);
 556
 557		ao->op_notifier = NULL;
 558	}
 559
 560	spin_unlock_irqrestore(&rm->m_rs_lock, flags);
 561
 562	if (rs) {
 563		rds_wake_sk_sleep(rs);
 564		sock_put(rds_rs_to_sk(rs));
 565	}
 566}
 567EXPORT_SYMBOL_GPL(rds_atomic_send_complete);
 568
 569/*
 570 * This is the same as rds_rdma_send_complete except we
 571 * don't do any locking - we have all the ingredients (message,
 572 * socket, socket lock) and can just move the notifier.
 573 */
 574static inline void
 575__rds_send_complete(struct rds_sock *rs, struct rds_message *rm, int status)
 576{
 577	struct rm_rdma_op *ro;
 578	struct rm_atomic_op *ao;
 579
 580	ro = &rm->rdma;
 581	if (ro->op_active && ro->op_notify && ro->op_notifier) {
 582		ro->op_notifier->n_status = status;
 583		list_add_tail(&ro->op_notifier->n_list, &rs->rs_notify_queue);
 584		ro->op_notifier = NULL;
 585	}
 586
 587	ao = &rm->atomic;
 588	if (ao->op_active && ao->op_notify && ao->op_notifier) {
 589		ao->op_notifier->n_status = status;
 590		list_add_tail(&ao->op_notifier->n_list, &rs->rs_notify_queue);
 591		ao->op_notifier = NULL;
 592	}
 593
 594	/* No need to wake the app - caller does this */
 595}
 596
 597/*
 598 * This removes messages from the socket's list if they're on it.  The list
 599 * argument must be private to the caller, we must be able to modify it
 600 * without locks.  The messages must have a reference held for their
 601 * position on the list.  This function will drop that reference after
 602 * removing the messages from the 'messages' list regardless of whether it found
 603 * the messages on the socket list or not.
 604 */
 605static void rds_send_remove_from_sock(struct list_head *messages, int status)
 606{
 607	unsigned long flags;
 608	struct rds_sock *rs = NULL;
 609	struct rds_message *rm;
 610
 611	while (!list_empty(messages)) {
 612		int was_on_sock = 0;
 613
 614		rm = list_entry(messages->next, struct rds_message,
 615				m_conn_item);
 616		list_del_init(&rm->m_conn_item);
 617
 618		/*
 619		 * If we see this flag cleared then we're *sure* that someone
 620		 * else beat us to removing it from the sock.  If we race
 621		 * with their flag update we'll get the lock and then really
 622		 * see that the flag has been cleared.
 623		 *
 624		 * The message spinlock makes sure nobody clears rm->m_rs
 625		 * while we're messing with it. It does not prevent the
 626		 * message from being removed from the socket, though.
 627		 */
 628		spin_lock_irqsave(&rm->m_rs_lock, flags);
 629		if (!test_bit(RDS_MSG_ON_SOCK, &rm->m_flags))
 630			goto unlock_and_drop;
 631
 632		if (rs != rm->m_rs) {
 633			if (rs) {
 634				rds_wake_sk_sleep(rs);
 635				sock_put(rds_rs_to_sk(rs));
 636			}
 637			rs = rm->m_rs;
 638			if (rs)
 639				sock_hold(rds_rs_to_sk(rs));
 640		}
 641		if (!rs)
 642			goto unlock_and_drop;
 643		spin_lock(&rs->rs_lock);
 644
 645		if (test_and_clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags)) {
 646			struct rm_rdma_op *ro = &rm->rdma;
 647			struct rds_notifier *notifier;
 648
 649			list_del_init(&rm->m_sock_item);
 650			rds_send_sndbuf_remove(rs, rm);
 651
 652			if (ro->op_active && ro->op_notifier &&
 653			       (ro->op_notify || (ro->op_recverr && status))) {
 654				notifier = ro->op_notifier;
 655				list_add_tail(&notifier->n_list,
 656						&rs->rs_notify_queue);
 657				if (!notifier->n_status)
 658					notifier->n_status = status;
 659				rm->rdma.op_notifier = NULL;
 660			}
 661			was_on_sock = 1;
 662		}
 663		spin_unlock(&rs->rs_lock);
 664
 665unlock_and_drop:
 666		spin_unlock_irqrestore(&rm->m_rs_lock, flags);
 667		rds_message_put(rm);
 668		if (was_on_sock)
 669			rds_message_put(rm);
 670	}
 671
 672	if (rs) {
 673		rds_wake_sk_sleep(rs);
 674		sock_put(rds_rs_to_sk(rs));
 675	}
 676}
 677
 678/*
 679 * Transports call here when they've determined that the receiver queued
 680 * messages up to, and including, the given sequence number.  Messages are
 681 * moved to the retrans queue when rds_send_xmit picks them off the send
 682 * queue. This means that in the TCP case, the message may not have been
 683 * assigned the m_ack_seq yet - but that's fine as long as tcp_is_acked
 684 * checks the RDS_MSG_HAS_ACK_SEQ bit.
 685 */
 686void rds_send_path_drop_acked(struct rds_conn_path *cp, u64 ack,
 687			      is_acked_func is_acked)
 688{
 689	struct rds_message *rm, *tmp;
 690	unsigned long flags;
 691	LIST_HEAD(list);
 692
 693	spin_lock_irqsave(&cp->cp_lock, flags);
 694
 695	list_for_each_entry_safe(rm, tmp, &cp->cp_retrans, m_conn_item) {
 696		if (!rds_send_is_acked(rm, ack, is_acked))
 697			break;
 698
 699		list_move(&rm->m_conn_item, &list);
 700		clear_bit(RDS_MSG_ON_CONN, &rm->m_flags);
 701	}
 702
 703	/* order flag updates with spin locks */
 704	if (!list_empty(&list))
 705		smp_mb__after_atomic();
 706
 707	spin_unlock_irqrestore(&cp->cp_lock, flags);
 708
 709	/* now remove the messages from the sock list as needed */
 710	rds_send_remove_from_sock(&list, RDS_RDMA_SUCCESS);
 711}
 712EXPORT_SYMBOL_GPL(rds_send_path_drop_acked);
 713
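    /* Single-path convenience wrapper for transports that are not
     * multipath capable; always operates on c_path[0].
     */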
 714void rds_send_drop_acked(struct rds_connection *conn, u64 ack,
 715			 is_acked_func is_acked)
 716{
 717	WARN_ON(conn->c_trans->t_mp_capable);
 718	rds_send_path_drop_acked(&conn->c_path[0], ack, is_acked);
 719}
 720EXPORT_SYMBOL_GPL(rds_send_drop_acked);
 721
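    /* Drop every queued message on this socket addressed to 'dest' (or all
     * messages when dest is NULL): unlink them from the socket and their
     * connection, complete any notifiers with RDS_RDMA_CANCELED, and wait
     * for the transport to finish with each message before the final put.
     */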
 722void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in6 *dest)
 723{
 724	struct rds_message *rm, *tmp;
 725	struct rds_connection *conn;
 726	struct rds_conn_path *cp;
 727	unsigned long flags;
 728	LIST_HEAD(list);
 729
 730	/* get all the messages we're dropping under the rs lock */
 731	spin_lock_irqsave(&rs->rs_lock, flags);
 732
 733	list_for_each_entry_safe(rm, tmp, &rs->rs_send_queue, m_sock_item) {
 734		if (dest &&
 735		    (!ipv6_addr_equal(&dest->sin6_addr, &rm->m_daddr) ||
 736		     dest->sin6_port != rm->m_inc.i_hdr.h_dport))
 737			continue;
 738
 739		list_move(&rm->m_sock_item, &list);
 740		rds_send_sndbuf_remove(rs, rm);
 741		clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags);
 742	}
 743
 744	/* order flag updates with the rs lock */
 745	smp_mb__after_atomic();
 746
 747	spin_unlock_irqrestore(&rs->rs_lock, flags);
 748
 749	if (list_empty(&list))
 750		return;
 751
 752	/* Remove the messages from the conn */
 753	list_for_each_entry(rm, &list, m_sock_item) {
 754
 755		conn = rm->m_inc.i_conn;
 756		if (conn->c_trans->t_mp_capable)
 757			cp = rm->m_inc.i_conn_path;
 758		else
 759			cp = &conn->c_path[0];
 760
 761		spin_lock_irqsave(&cp->cp_lock, flags);
 762		/*
 763		 * Maybe someone else beat us to removing rm from the conn.
 764		 * If we race with their flag update we'll get the lock and
 765		 * then really see that the flag has been cleared.
 766		 */
 767		if (!test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags)) {
 768			spin_unlock_irqrestore(&cp->cp_lock, flags);
 769			continue;
 770		}
 771		list_del_init(&rm->m_conn_item);
 772		spin_unlock_irqrestore(&cp->cp_lock, flags);
 773
 774		/*
 775		 * Couldn't grab m_rs_lock in top loop (lock ordering),
 776		 * but we can now.
 777		 */
 778		spin_lock_irqsave(&rm->m_rs_lock, flags);
 779
 780		spin_lock(&rs->rs_lock);
 781		__rds_send_complete(rs, rm, RDS_RDMA_CANCELED);
 782		spin_unlock(&rs->rs_lock);
 783
 784		spin_unlock_irqrestore(&rm->m_rs_lock, flags);
 785
 786		rds_message_put(rm);
 787	}
 788
 789	rds_wake_sk_sleep(rs);
 790
 791	while (!list_empty(&list)) {
 792		rm = list_entry(list.next, struct rds_message, m_sock_item);
 793		list_del_init(&rm->m_sock_item);
 794		rds_message_wait(rm);
 795
 796		/* just in case the code above skipped this message
 797		 * because RDS_MSG_ON_CONN wasn't set, run it again here.
 798		 * Taking m_rs_lock is the only thing that keeps us
 799		 * from racing with ack processing.
 800		 */
 801		spin_lock_irqsave(&rm->m_rs_lock, flags);
 802
 803		spin_lock(&rs->rs_lock);
 804		__rds_send_complete(rs, rm, RDS_RDMA_CANCELED);
 805		spin_unlock(&rs->rs_lock);
 806
 807		spin_unlock_irqrestore(&rm->m_rs_lock, flags);
 808
 809		rds_message_put(rm);
 810	}
 811}
 812
 813/*
 814 * we only want this to fire once so we use the caller's 'queued'.  It's
 815 * possible that another thread can race with us and remove the
 816 * message from the flow with RDS_CANCEL_SENT_TO.
 817 */
 818static int rds_send_queue_rm(struct rds_sock *rs, struct rds_connection *conn,
 819			     struct rds_conn_path *cp,
 820			     struct rds_message *rm, __be16 sport,
 821			     __be16 dport, int *queued)
 822{
 823	unsigned long flags;
 824	u32 len;
 825
 826	if (*queued)
 827		goto out;
 828
 829	len = be32_to_cpu(rm->m_inc.i_hdr.h_len);
 830
 831	/* this is the only place which holds both the socket's rs_lock
 832	 * and the connection's c_lock */
 833	spin_lock_irqsave(&rs->rs_lock, flags);
 834
 835	/*
 836	 * If we refused to queue anything whenever only a little sndbuf space
 837	 * is left, userspace would get -EAGAIN while poll() still reports send
 838	 * room, which can lead to bad behavior (spinning) if snd_bytes isn't
 839	 * freed up by incoming acks. So we check the *old* value of
 840	 * rs_snd_bytes here: the last msg may exceed the buffer, after which
 841	 * poll() knows no more data can be sent.
 842	 */
 843	if (rs->rs_snd_bytes < rds_sk_sndbuf(rs)) {
 844		rs->rs_snd_bytes += len;
 845
 846		/* let recv side know we are close to send space exhaustion.
 847		 * This is probably not the optimal way to do it, as this
 848		 * means we set the flag on *all* messages as soon as our
 849		 * throughput hits a certain threshold.
 850		 */
 851		if (rs->rs_snd_bytes >= rds_sk_sndbuf(rs) / 2)
 852			set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);
 853
 854		list_add_tail(&rm->m_sock_item, &rs->rs_send_queue);
 855		set_bit(RDS_MSG_ON_SOCK, &rm->m_flags);
 856		rds_message_addref(rm);
 857		sock_hold(rds_rs_to_sk(rs));
 858		rm->m_rs = rs;
 859
 860		/* The code ordering is a little weird, but we're
 861		   trying to minimize the time we hold c_lock */
 862		rds_message_populate_header(&rm->m_inc.i_hdr, sport, dport, 0);
 863		rm->m_inc.i_conn = conn;
 864		rm->m_inc.i_conn_path = cp;
 865		rds_message_addref(rm);
 866
 867		spin_lock(&cp->cp_lock);
 868		rm->m_inc.i_hdr.h_sequence = cpu_to_be64(cp->cp_next_tx_seq++);
 869		list_add_tail(&rm->m_conn_item, &cp->cp_send_queue);
 870		set_bit(RDS_MSG_ON_CONN, &rm->m_flags);
 871		spin_unlock(&cp->cp_lock);
 872
 873		rdsdebug("queued msg %p len %d, rs %p bytes %d seq %llu\n",
 874			 rm, len, rs, rs->rs_snd_bytes,
 875			 (unsigned long long)be64_to_cpu(rm->m_inc.i_hdr.h_sequence));
 876
 877		*queued = 1;
 878	}
 879
 880	spin_unlock_irqrestore(&rs->rs_lock, flags);
 881out:
 882	return *queued;
 883}
 884
 885/*
 886 * rds_message is getting to be quite complicated, and we'd like to allocate
 887 * it all in one go. This figures out how big it needs to be up front.
 888 */
 889static int rds_rm_size(struct msghdr *msg, int num_sgs,
 890		       struct rds_iov_vector_arr *vct)
 891{
 892	struct cmsghdr *cmsg;
 893	int size = 0;
 894	int cmsg_groups = 0;
 895	int retval;
 896	bool zcopy_cookie = false;
 897	struct rds_iov_vector *iov, *tmp_iov;
 898
 899	if (num_sgs < 0)
 900		return -EINVAL;
 901
 902	for_each_cmsghdr(cmsg, msg) {
 903		if (!CMSG_OK(msg, cmsg))
 904			return -EINVAL;
 905
 906		if (cmsg->cmsg_level != SOL_RDS)
 907			continue;
 908
 909		switch (cmsg->cmsg_type) {
 910		case RDS_CMSG_RDMA_ARGS:
 911			if (vct->indx >= vct->len) {
 912				vct->len += vct->incr;
 913				tmp_iov =
 914					krealloc(vct->vec,
 915						 vct->len *
 916						 sizeof(struct rds_iov_vector),
 917						 GFP_KERNEL);
 918				if (!tmp_iov) {
 919					vct->len -= vct->incr;
 920					return -ENOMEM;
 921				}
 922				vct->vec = tmp_iov;
 923			}
 924			iov = &vct->vec[vct->indx];
 925			memset(iov, 0, sizeof(struct rds_iov_vector));
 926			vct->indx++;
 927			cmsg_groups |= 1;
 928			retval = rds_rdma_extra_size(CMSG_DATA(cmsg), iov);
 929			if (retval < 0)
 930				return retval;
 931			size += retval;
 932
 933			break;
 934
 935		case RDS_CMSG_ZCOPY_COOKIE:
 936			zcopy_cookie = true;
 937			fallthrough;
 938
 939		case RDS_CMSG_RDMA_DEST:
 940		case RDS_CMSG_RDMA_MAP:
 941			cmsg_groups |= 2;
 942			/* these are valid but do not add any size */
 943			break;
 944
 945		case RDS_CMSG_ATOMIC_CSWP:
 946		case RDS_CMSG_ATOMIC_FADD:
 947		case RDS_CMSG_MASKED_ATOMIC_CSWP:
 948		case RDS_CMSG_MASKED_ATOMIC_FADD:
 949			cmsg_groups |= 1;
 950			size += sizeof(struct scatterlist);
 951			break;
 952
 953		default:
 954			return -EINVAL;
 955		}
 956
 957	}
 958
 959	if ((msg->msg_flags & MSG_ZEROCOPY) && !zcopy_cookie)
 960		return -EINVAL;
 961
 962	size += num_sgs * sizeof(struct scatterlist);
 963
 964	/* Ensure (DEST, MAP) are never used with (ARGS, ATOMIC) */
 965	if (cmsg_groups == 3)
 966		return -EINVAL;
 967
 968	return size;
 969}
 970
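    /* Stash the user's zerocopy cookie from the cmsg in the message's
     * zcopy notifier so it can be reported back on completion.
     */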
 971static int rds_cmsg_zcopy(struct rds_sock *rs, struct rds_message *rm,
 972			  struct cmsghdr *cmsg)
 973{
 974	u32 *cookie;
 975
 976	if (cmsg->cmsg_len < CMSG_LEN(sizeof(*cookie)) ||
 977	    !rm->data.op_mmp_znotifier)
 978		return -EINVAL;
 979	cookie = CMSG_DATA(cmsg);
 980	rm->data.op_mmp_znotifier->z_cookie = *cookie;
 981	return 0;
 982}
 983
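    /* Walk the SOL_RDS control messages attached to this sendmsg() call and
     * apply them to the rds_message: RDMA args/dest/map, atomic ops and
     * zerocopy cookies.  Returns 0 or the first error from a handler.
     */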
 984static int rds_cmsg_send(struct rds_sock *rs, struct rds_message *rm,
 985			 struct msghdr *msg, int *allocated_mr,
 986			 struct rds_iov_vector_arr *vct)
 987{
 988	struct cmsghdr *cmsg;
 989	int ret = 0, ind = 0;
 990
 991	for_each_cmsghdr(cmsg, msg) {
 992		if (!CMSG_OK(msg, cmsg))
 993			return -EINVAL;
 994
 995		if (cmsg->cmsg_level != SOL_RDS)
 996			continue;
 997
 998		/* As a side effect, RDMA_DEST and RDMA_MAP will set
 999		 * rm->rdma.m_rdma_cookie and rm->rdma.m_rdma_mr.
1000		 */
1001		switch (cmsg->cmsg_type) {
1002		case RDS_CMSG_RDMA_ARGS:
1003			if (ind >= vct->indx)
1004				return -ENOMEM;
1005			ret = rds_cmsg_rdma_args(rs, rm, cmsg, &vct->vec[ind]);
1006			ind++;
1007			break;
1008
1009		case RDS_CMSG_RDMA_DEST:
1010			ret = rds_cmsg_rdma_dest(rs, rm, cmsg);
1011			break;
1012
1013		case RDS_CMSG_RDMA_MAP:
1014			ret = rds_cmsg_rdma_map(rs, rm, cmsg);
1015			if (!ret)
1016				*allocated_mr = 1;
1017			else if (ret == -ENODEV)
1018				/* Accommodate the get_mr() case which can fail
1019				 * if connection isn't established yet.
1020				 */
1021				ret = -EAGAIN;
1022			break;
1023		case RDS_CMSG_ATOMIC_CSWP:
1024		case RDS_CMSG_ATOMIC_FADD:
1025		case RDS_CMSG_MASKED_ATOMIC_CSWP:
1026		case RDS_CMSG_MASKED_ATOMIC_FADD:
1027			ret = rds_cmsg_atomic(rs, rm, cmsg);
1028			break;
1029
1030		case RDS_CMSG_ZCOPY_COOKIE:
1031			ret = rds_cmsg_zcopy(rs, rm, cmsg);
1032			break;
1033
1034		default:
1035			return -EINVAL;
1036		}
1037
1038		if (ret)
1039			break;
1040	}
1041
1042	return ret;
1043}
1044
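    /* Pick the multipath lane for this socket.  Until the handshake tells us
     * how many paths the peer supports (c_npaths == 0), hash over the maximum,
     * kick off a probe ping, and either wait for the handshake or fall back
     * to path 0 for non-blocking sends.
     */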
1045static int rds_send_mprds_hash(struct rds_sock *rs,
1046			       struct rds_connection *conn, int nonblock)
1047{
1048	int hash;
1049
1050	if (conn->c_npaths == 0)
1051		hash = RDS_MPATH_HASH(rs, RDS_MPATH_WORKERS);
1052	else
1053		hash = RDS_MPATH_HASH(rs, conn->c_npaths);
1054	if (conn->c_npaths == 0 && hash != 0) {
1055		rds_send_ping(conn, 0);
1056
1057		/* The underlying connection is not up yet.  Need to wait
1058		 * until it is up to be sure that the non-zero c_path can be
1059		 * used.  But if we are interrupted, we have to use the zero
1060		 * c_path in case the connection ends up being non-MP capable.
1061		 */
1062		if (conn->c_npaths == 0) {
1063			/* Cannot wait for the connection to be made, so just use
1064			 * the base c_path.
1065			 */
1066			if (nonblock)
1067				return 0;
1068			if (wait_event_interruptible(conn->c_hs_waitq,
1069						     conn->c_npaths != 0))
1070				hash = 0;
1071		}
1072		if (conn->c_npaths == 1)
1073			hash = 0;
1074	}
1075	return hash;
1076}
1077
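    /* Sum the remote byte counts of all RDS_CMSG_RDMA_ARGS control messages
     * so the caller can enforce RDS_MAX_MSG_SIZE on the RDMA part of a send.
     */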
1078static int rds_rdma_bytes(struct msghdr *msg, size_t *rdma_bytes)
1079{
1080	struct rds_rdma_args *args;
1081	struct cmsghdr *cmsg;
1082
1083	for_each_cmsghdr(cmsg, msg) {
1084		if (!CMSG_OK(msg, cmsg))
1085			return -EINVAL;
1086
1087		if (cmsg->cmsg_level != SOL_RDS)
1088			continue;
1089
1090		if (cmsg->cmsg_type == RDS_CMSG_RDMA_ARGS) {
1091			if (cmsg->cmsg_len <
1092			    CMSG_LEN(sizeof(struct rds_rdma_args)))
1093				return -EINVAL;
1094			args = CMSG_DATA(cmsg);
1095			*rdma_bytes += args->remote_vec.bytes;
1096		}
1097	}
1098	return 0;
1099}
1100
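    /* Top-level sendmsg() handler: validate the destination and flags, size
     * and allocate the rds_message, apply control messages, pick (or create)
     * the connection and path, queue the message and kick the transmit path,
     * blocking for send-queue space unless MSG_DONTWAIT was passed.
     */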
1101int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
1102{
1103	struct sock *sk = sock->sk;
1104	struct rds_sock *rs = rds_sk_to_rs(sk);
1105	DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
1106	DECLARE_SOCKADDR(struct sockaddr_in *, usin, msg->msg_name);
1107	__be16 dport;
1108	struct rds_message *rm = NULL;
1109	struct rds_connection *conn;
1110	int ret = 0;
1111	int queued = 0, allocated_mr = 0;
1112	int nonblock = msg->msg_flags & MSG_DONTWAIT;
1113	long timeo = sock_sndtimeo(sk, nonblock);
1114	struct rds_conn_path *cpath;
1115	struct in6_addr daddr;
1116	__u32 scope_id = 0;
1117	size_t total_payload_len = payload_len, rdma_payload_len = 0;
1118	bool zcopy = ((msg->msg_flags & MSG_ZEROCOPY) &&
1119		      sock_flag(rds_rs_to_sk(rs), SOCK_ZEROCOPY));
1120	int num_sgs = DIV_ROUND_UP(payload_len, PAGE_SIZE);
1121	int namelen;
1122	struct rds_iov_vector_arr vct;
1123	int ind;
1124
1125	memset(&vct, 0, sizeof(vct));
1126
1127	/* expect 1 RDMA CMSG per rds_sendmsg. can still grow if more needed. */
1128	vct.incr = 1;
1129
1130	/* Mirror Linux UDP's handling of BSD error message compatibility */
1131	/* XXX: Perhaps MSG_MORE someday */
1132	if (msg->msg_flags & ~(MSG_DONTWAIT | MSG_CMSG_COMPAT | MSG_ZEROCOPY)) {
1133		ret = -EOPNOTSUPP;
1134		goto out;
1135	}
1136
1137	namelen = msg->msg_namelen;
1138	if (namelen != 0) {
1139		if (namelen < sizeof(*usin)) {
1140			ret = -EINVAL;
1141			goto out;
1142		}
1143		switch (usin->sin_family) {
1144		case AF_INET:
1145			if (usin->sin_addr.s_addr == htonl(INADDR_ANY) ||
1146			    usin->sin_addr.s_addr == htonl(INADDR_BROADCAST) ||
1147			    ipv4_is_multicast(usin->sin_addr.s_addr)) {
1148				ret = -EINVAL;
1149				goto out;
1150			}
1151			ipv6_addr_set_v4mapped(usin->sin_addr.s_addr, &daddr);
1152			dport = usin->sin_port;
1153			break;
1154
1155#if IS_ENABLED(CONFIG_IPV6)
1156		case AF_INET6: {
1157			int addr_type;
1158
1159			if (namelen < sizeof(*sin6)) {
1160				ret = -EINVAL;
1161				goto out;
1162			}
1163			addr_type = ipv6_addr_type(&sin6->sin6_addr);
1164			if (!(addr_type & IPV6_ADDR_UNICAST)) {
1165				__be32 addr4;
1166
1167				if (!(addr_type & IPV6_ADDR_MAPPED)) {
1168					ret = -EINVAL;
1169					goto out;
1170				}
1171
1172				/* It is a mapped address.  Need to do some
1173				 * sanity checks.
1174				 */
1175				addr4 = sin6->sin6_addr.s6_addr32[3];
1176				if (addr4 == htonl(INADDR_ANY) ||
1177				    addr4 == htonl(INADDR_BROADCAST) ||
1178				    ipv4_is_multicast(addr4)) {
1179					ret = -EINVAL;
1180					goto out;
1181				}
1182			}
1183			if (addr_type & IPV6_ADDR_LINKLOCAL) {
1184				if (sin6->sin6_scope_id == 0) {
1185					ret = -EINVAL;
1186					goto out;
1187				}
1188				scope_id = sin6->sin6_scope_id;
1189			}
1190
1191			daddr = sin6->sin6_addr;
1192			dport = sin6->sin6_port;
1193			break;
1194		}
1195#endif
1196
1197		default:
1198			ret = -EINVAL;
1199			goto out;
1200		}
1201	} else {
1202		/* We only care about consistency with ->connect() */
1203		lock_sock(sk);
1204		daddr = rs->rs_conn_addr;
1205		dport = rs->rs_conn_port;
1206		scope_id = rs->rs_bound_scope_id;
1207		release_sock(sk);
1208	}
1209
1210	lock_sock(sk);
1211	if (ipv6_addr_any(&rs->rs_bound_addr) || ipv6_addr_any(&daddr)) {
1212		release_sock(sk);
1213		ret = -ENOTCONN;
1214		goto out;
1215	} else if (namelen != 0) {
1216		/* Cannot send to an IPv4 address using an IPv6 source
1217		 * address and cannot send to an IPv6 address using an
1218		 * IPv4 source address.
1219		 */
1220		if (ipv6_addr_v4mapped(&daddr) ^
1221		    ipv6_addr_v4mapped(&rs->rs_bound_addr)) {
1222			release_sock(sk);
1223			ret = -EOPNOTSUPP;
1224			goto out;
1225		}
1226		/* If the socket is already bound to a link-local address,
1227		 * it can only send to peers on the same link.  But allow
1228		 * communicating between link-local and non-link-local addresses.
1229		 */
1230		if (scope_id != rs->rs_bound_scope_id) {
1231			if (!scope_id) {
1232				scope_id = rs->rs_bound_scope_id;
1233			} else if (rs->rs_bound_scope_id) {
1234				release_sock(sk);
1235				ret = -EINVAL;
1236				goto out;
1237			}
1238		}
1239	}
1240	release_sock(sk);
1241
1242	ret = rds_rdma_bytes(msg, &rdma_payload_len);
1243	if (ret)
1244		goto out;
1245
1246	total_payload_len += rdma_payload_len;
1247	if (max_t(size_t, payload_len, rdma_payload_len) > RDS_MAX_MSG_SIZE) {
1248		ret = -EMSGSIZE;
1249		goto out;
1250	}
1251
1252	if (payload_len > rds_sk_sndbuf(rs)) {
1253		ret = -EMSGSIZE;
1254		goto out;
1255	}
1256
1257	if (zcopy) {
1258		if (rs->rs_transport->t_type != RDS_TRANS_TCP) {
1259			ret = -EOPNOTSUPP;
1260			goto out;
1261		}
1262		num_sgs = iov_iter_npages(&msg->msg_iter, INT_MAX);
1263	}
1264	/* size of rm including all sgs */
1265	ret = rds_rm_size(msg, num_sgs, &vct);
1266	if (ret < 0)
1267		goto out;
1268
1269	rm = rds_message_alloc(ret, GFP_KERNEL);
1270	if (!rm) {
1271		ret = -ENOMEM;
1272		goto out;
1273	}
1274
1275	/* Attach data to the rm */
1276	if (payload_len) {
1277		rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs);
1278		if (IS_ERR(rm->data.op_sg)) {
1279			ret = PTR_ERR(rm->data.op_sg);
1280			goto out;
1281		}
1282		ret = rds_message_copy_from_user(rm, &msg->msg_iter, zcopy);
1283		if (ret)
1284			goto out;
1285	}
1286	rm->data.op_active = 1;
1287
1288	rm->m_daddr = daddr;
1289
1290	/* rds_conn_create has a spinlock that runs with IRQ off.
1291	 * Caching the conn in the socket helps a lot. */
1292	if (rs->rs_conn && ipv6_addr_equal(&rs->rs_conn->c_faddr, &daddr) &&
1293	    rs->rs_tos == rs->rs_conn->c_tos) {
1294		conn = rs->rs_conn;
1295	} else {
1296		conn = rds_conn_create_outgoing(sock_net(sock->sk),
1297						&rs->rs_bound_addr, &daddr,
1298						rs->rs_transport, rs->rs_tos,
1299						sock->sk->sk_allocation,
1300						scope_id);
1301		if (IS_ERR(conn)) {
1302			ret = PTR_ERR(conn);
1303			goto out;
1304		}
1305		rs->rs_conn = conn;
1306	}
1307
1308	if (conn->c_trans->t_mp_capable)
1309		cpath = &conn->c_path[rds_send_mprds_hash(rs, conn, nonblock)];
1310	else
1311		cpath = &conn->c_path[0];
1312
1313	rm->m_conn_path = cpath;
1314
1315	/* Parse any control messages the user may have included. */
1316	ret = rds_cmsg_send(rs, rm, msg, &allocated_mr, &vct);
1317	if (ret) {
1318		/* Trigger connection so that it's ready for the next retry */
1319		if (ret ==  -EAGAIN)
1320			rds_conn_connect_if_down(conn);
1321		goto out;
1322	}
1323
1324	if (rm->rdma.op_active && !conn->c_trans->xmit_rdma) {
1325		printk_ratelimited(KERN_NOTICE "rdma_op %p conn xmit_rdma %p\n",
1326			       &rm->rdma, conn->c_trans->xmit_rdma);
1327		ret = -EOPNOTSUPP;
1328		goto out;
1329	}
1330
1331	if (rm->atomic.op_active && !conn->c_trans->xmit_atomic) {
1332		printk_ratelimited(KERN_NOTICE "atomic_op %p conn xmit_atomic %p\n",
1333			       &rm->atomic, conn->c_trans->xmit_atomic);
1334		ret = -EOPNOTSUPP;
1335		goto out;
1336	}
1337
1338	if (rds_destroy_pending(conn)) {
1339		ret = -EAGAIN;
1340		goto out;
1341	}
1342
1343	if (rds_conn_path_down(cpath))
1344		rds_check_all_paths(conn);
1345
1346	ret = rds_cong_wait(conn->c_fcong, dport, nonblock, rs);
1347	if (ret) {
1348		rs->rs_seen_congestion = 1;
1349		goto out;
1350	}
1351	while (!rds_send_queue_rm(rs, conn, cpath, rm, rs->rs_bound_port,
1352				  dport, &queued)) {
1353		rds_stats_inc(s_send_queue_full);
1354
1355		if (nonblock) {
1356			ret = -EAGAIN;
1357			goto out;
1358		}
1359
1360		timeo = wait_event_interruptible_timeout(*sk_sleep(sk),
1361					rds_send_queue_rm(rs, conn, cpath, rm,
1362							  rs->rs_bound_port,
1363							  dport,
1364							  &queued),
1365					timeo);
1366		rdsdebug("sendmsg woke queued %d timeo %ld\n", queued, timeo);
1367		if (timeo > 0 || timeo == MAX_SCHEDULE_TIMEOUT)
1368			continue;
1369
1370		ret = timeo;
1371		if (ret == 0)
1372			ret = -ETIMEDOUT;
1373		goto out;
1374	}
1375
1376	/*
1377	 * By now we've committed to the send.  We reuse rds_send_worker()
1378	 * to retry sends in the rds thread if the transport asks us to.
1379	 */
1380	rds_stats_inc(s_send_queued);
1381
1382	ret = rds_send_xmit(cpath);
1383	if (ret == -ENOMEM || ret == -EAGAIN) {
1384		ret = 0;
1385		rcu_read_lock();
1386		if (rds_destroy_pending(cpath->cp_conn))
1387			ret = -ENETUNREACH;
1388		else
1389			queue_delayed_work(rds_wq, &cpath->cp_send_w, 1);
1390		rcu_read_unlock();
1391	}
1392	if (ret)
1393		goto out;
1394	rds_message_put(rm);
1395
1396	for (ind = 0; ind < vct.indx; ind++)
1397		kfree(vct.vec[ind].iov);
1398	kfree(vct.vec);
1399
1400	return payload_len;
1401
1402out:
1403	for (ind = 0; ind < vct.indx; ind++)
1404		kfree(vct.vec[ind].iov);
1405	kfree(vct.vec);
1406
1407	/* If the user included an RDMA_MAP cmsg, we allocated an MR on the fly.
1408	 * If the sendmsg goes through, we keep the MR. If it fails with EAGAIN
1409	 * or in any other way, we need to destroy the MR again. */
1410	if (allocated_mr)
1411		rds_rdma_unuse(rs, rds_rdma_cookie_key(rm->m_rdma_cookie), 1);
1412
1413	if (rm)
1414		rds_message_put(rm);
1415	return ret;
1416}
1417
1418/*
1419 * send out a probe. Can be shared by rds_send_ping,
1420 * rds_send_pong, rds_send_hb.
1421 * rds_send_hb should use h_flags
1422 *   RDS_FLAG_HB_PING|RDS_FLAG_ACK_REQUIRED
1423 * or
1424 *   RDS_FLAG_HB_PONG|RDS_FLAG_ACK_REQUIRED
1425 */
1426static int
1427rds_send_probe(struct rds_conn_path *cp, __be16 sport,
1428	       __be16 dport, u8 h_flags)
1429{
1430	struct rds_message *rm;
1431	unsigned long flags;
1432	int ret = 0;
1433
1434	rm = rds_message_alloc(0, GFP_ATOMIC);
1435	if (!rm) {
1436		ret = -ENOMEM;
1437		goto out;
1438	}
1439
1440	rm->m_daddr = cp->cp_conn->c_faddr;
1441	rm->data.op_active = 1;
1442
1443	rds_conn_path_connect_if_down(cp);
1444
1445	ret = rds_cong_wait(cp->cp_conn->c_fcong, dport, 1, NULL);
1446	if (ret)
1447		goto out;
1448
1449	spin_lock_irqsave(&cp->cp_lock, flags);
1450	list_add_tail(&rm->m_conn_item, &cp->cp_send_queue);
1451	set_bit(RDS_MSG_ON_CONN, &rm->m_flags);
1452	rds_message_addref(rm);
1453	rm->m_inc.i_conn = cp->cp_conn;
1454	rm->m_inc.i_conn_path = cp;
1455
1456	rds_message_populate_header(&rm->m_inc.i_hdr, sport, dport,
1457				    cp->cp_next_tx_seq);
1458	rm->m_inc.i_hdr.h_flags |= h_flags;
1459	cp->cp_next_tx_seq++;
1460
1461	if (RDS_HS_PROBE(be16_to_cpu(sport), be16_to_cpu(dport)) &&
1462	    cp->cp_conn->c_trans->t_mp_capable) {
1463		u16 npaths = cpu_to_be16(RDS_MPATH_WORKERS);
1464		u32 my_gen_num = cpu_to_be32(cp->cp_conn->c_my_gen_num);
1465
1466		rds_message_add_extension(&rm->m_inc.i_hdr,
1467					  RDS_EXTHDR_NPATHS, &npaths,
1468					  sizeof(npaths));
1469		rds_message_add_extension(&rm->m_inc.i_hdr,
1470					  RDS_EXTHDR_GEN_NUM,
1471					  &my_gen_num,
1472					  sizeof(u32));
1473	}
1474	spin_unlock_irqrestore(&cp->cp_lock, flags);
1475
1476	rds_stats_inc(s_send_queued);
1477	rds_stats_inc(s_send_pong);
1478
1479	/* schedule the send work on rds_wq */
1480	rcu_read_lock();
1481	if (!rds_destroy_pending(cp->cp_conn))
1482		queue_delayed_work(rds_wq, &cp->cp_send_w, 1);
1483	rcu_read_unlock();
1484
1485	rds_message_put(rm);
1486	return 0;
1487
1488out:
1489	if (rm)
1490		rds_message_put(rm);
1491	return ret;
1492}
1493
1494int
1495rds_send_pong(struct rds_conn_path *cp, __be16 dport)
1496{
1497	return rds_send_probe(cp, 0, dport, 0);
1498}
1499
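    /* Send a single multipath handshake probe from RDS_FLAG_PROBE_PORT on
     * the given path; c_ping_triggered ensures the probe is sent only once
     * per connection.
     */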
1500void
1501rds_send_ping(struct rds_connection *conn, int cp_index)
1502{
1503	unsigned long flags;
1504	struct rds_conn_path *cp = &conn->c_path[cp_index];
1505
1506	spin_lock_irqsave(&cp->cp_lock, flags);
1507	if (conn->c_ping_triggered) {
1508		spin_unlock_irqrestore(&cp->cp_lock, flags);
1509		return;
1510	}
1511	conn->c_ping_triggered = 1;
1512	spin_unlock_irqrestore(&cp->cp_lock, flags);
1513	rds_send_probe(cp, cpu_to_be16(RDS_FLAG_PROBE_PORT), 0, 0);
1514}
1515EXPORT_SYMBOL_GPL(rds_send_ping);
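
For orientation, here is a minimal userspace sender that ends up in rds_sendmsg() above. This is only an illustrative sketch, not part of send.c: it assumes the usual RDS socket API (AF_RDS, SOCK_SEQPACKET) with placeholder addresses and ports, and does no real error handling.

#include <unistd.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>

#ifndef AF_RDS
#define AF_RDS 21	/* from <linux/socket.h>, in case libc headers lack it */
#endif

int main(void)
{
	int fd = socket(AF_RDS, SOCK_SEQPACKET, 0);
	struct sockaddr_in laddr = { 0 }, daddr = { 0 };
	const char msg[] = "hello over RDS";

	if (fd < 0)
		return 1;

	/* RDS requires a bound local address before sending (see the
	 * -ENOTCONN check in rds_sendmsg()). */
	laddr.sin_family = AF_INET;
	laddr.sin_addr.s_addr = inet_addr("192.0.2.1");	/* placeholder */
	laddr.sin_port = htons(4000);
	if (bind(fd, (struct sockaddr *)&laddr, sizeof(laddr)) < 0)
		return 1;

	daddr.sin_family = AF_INET;
	daddr.sin_addr.s_addr = inet_addr("192.0.2.2");	/* placeholder */
	daddr.sin_port = htons(4001);

	/* sendto() reaches rds_sendmsg(), which queues the message with
	 * rds_send_queue_rm() and then kicks rds_send_xmit() on the
	 * chosen connection path. */
	if (sendto(fd, msg, sizeof(msg), 0,
		   (struct sockaddr *)&daddr, sizeof(daddr)) < 0)
		return 1;

	close(fd);
	return 0;
}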
v3.5.6
   1/*
   2 * Copyright (c) 2006 Oracle.  All rights reserved.
   3 *
   4 * This software is available to you under a choice of one of two
   5 * licenses.  You may choose to be licensed under the terms of the GNU
   6 * General Public License (GPL) Version 2, available from the file
   7 * COPYING in the main directory of this source tree, or the
   8 * OpenIB.org BSD license below:
   9 *
  10 *     Redistribution and use in source and binary forms, with or
  11 *     without modification, are permitted provided that the following
  12 *     conditions are met:
  13 *
  14 *      - Redistributions of source code must retain the above
  15 *        copyright notice, this list of conditions and the following
  16 *        disclaimer.
  17 *
  18 *      - Redistributions in binary form must reproduce the above
  19 *        copyright notice, this list of conditions and the following
  20 *        disclaimer in the documentation and/or other materials
  21 *        provided with the distribution.
  22 *
  23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  30 * SOFTWARE.
  31 *
  32 */
  33#include <linux/kernel.h>
  34#include <linux/moduleparam.h>
  35#include <linux/gfp.h>
  36#include <net/sock.h>
  37#include <linux/in.h>
  38#include <linux/list.h>
  39#include <linux/ratelimit.h>
  40#include <linux/export.h>
 
  41
  42#include "rds.h"
  43
  44/* When transmitting messages in rds_send_xmit, we need to emerge from
  45 * time to time and briefly release the CPU. Otherwise the softlock watchdog
  46 * will kick our shin.
  47 * Also, it seems fairer to not let one busy connection stall all the
  48 * others.
  49 *
  50 * send_batch_count is the number of times we'll loop in send_xmit. Setting
  51 * it to 0 will restore the old behavior (where we looped until we had
  52 * drained the queue).
  53 */
  54static int send_batch_count = 64;
  55module_param(send_batch_count, int, 0444);
  56MODULE_PARM_DESC(send_batch_count, " batch factor when working the send queue");
  57
  58static void rds_send_remove_from_sock(struct list_head *messages, int status);
  59
  60/*
  61 * Reset the send state.  Callers must ensure that this doesn't race with
  62 * rds_send_xmit().
  63 */
  64void rds_send_reset(struct rds_connection *conn)
  65{
  66	struct rds_message *rm, *tmp;
  67	unsigned long flags;
  68
  69	if (conn->c_xmit_rm) {
  70		rm = conn->c_xmit_rm;
  71		conn->c_xmit_rm = NULL;
  72		/* Tell the user the RDMA op is no longer mapped by the
  73		 * transport. This isn't entirely true (it's flushed out
  74		 * independently) but as the connection is down, there's
  75		 * no ongoing RDMA to/from that memory */
  76		rds_message_unmapped(rm);
  77		rds_message_put(rm);
  78	}
  79
  80	conn->c_xmit_sg = 0;
  81	conn->c_xmit_hdr_off = 0;
  82	conn->c_xmit_data_off = 0;
  83	conn->c_xmit_atomic_sent = 0;
  84	conn->c_xmit_rdma_sent = 0;
  85	conn->c_xmit_data_sent = 0;
  86
  87	conn->c_map_queued = 0;
  88
  89	conn->c_unacked_packets = rds_sysctl_max_unacked_packets;
  90	conn->c_unacked_bytes = rds_sysctl_max_unacked_bytes;
  91
  92	/* Mark messages as retransmissions, and move them to the send q */
  93	spin_lock_irqsave(&conn->c_lock, flags);
  94	list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
  95		set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);
  96		set_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags);
  97	}
  98	list_splice_init(&conn->c_retrans, &conn->c_send_queue);
  99	spin_unlock_irqrestore(&conn->c_lock, flags);
 100}
 
 101
 102static int acquire_in_xmit(struct rds_connection *conn)
 103{
 104	return test_and_set_bit(RDS_IN_XMIT, &conn->c_flags) == 0;
 105}
 106
 107static void release_in_xmit(struct rds_connection *conn)
 108{
 109	clear_bit(RDS_IN_XMIT, &conn->c_flags);
 110	smp_mb__after_clear_bit();
 111	/*
 112	 * We don't use wait_on_bit()/wake_up_bit() because our waking is in a
 113	 * hot path and finding waiters is very rare.  We don't want to walk
 114	 * the system-wide hashed waitqueue buckets in the fast path only to
 115	 * almost never find waiters.
 116	 */
 117	if (waitqueue_active(&conn->c_waitq))
 118		wake_up_all(&conn->c_waitq);
 119}
 120
 121/*
 122 * We're making the conscious trade-off here to only send one message
 123 * down the connection at a time.
 124 *   Pro:
 125 *      - tx queueing is a simple fifo list
 126 *   	- reassembly is optional and easily done by transports per conn
 127 *      - no per flow rx lookup at all, straight to the socket
 128 *   	- less per-frag memory and wire overhead
 129 *   Con:
 130 *      - queued acks can be delayed behind large messages
 131 *   Depends:
 132 *      - small message latency is higher behind queued large messages
 133 *      - large message latency isn't starved by intervening small sends
 134 */
 135int rds_send_xmit(struct rds_connection *conn)
 136{
 
 137	struct rds_message *rm;
 138	unsigned long flags;
 139	unsigned int tmp;
 140	struct scatterlist *sg;
 141	int ret = 0;
 142	LIST_HEAD(to_be_dropped);
 
 
 
 143
 144restart:
 
 145
 146	/*
 147	 * sendmsg calls here after having queued its message on the send
 148	 * queue.  We only have one task feeding the connection at a time.  If
 149	 * another thread is already feeding the queue then we back off.  This
 150	 * avoids blocking the caller and trading per-connection data between
 151	 * caches per message.
 152	 */
 153	if (!acquire_in_xmit(conn)) {
 154		rds_stats_inc(s_send_lock_contention);
 155		ret = -ENOMEM;
 156		goto out;
 157	}
 158
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 159	/*
 160	 * rds_conn_shutdown() sets the conn state and then tests RDS_IN_XMIT,
 161	 * we do the opposite to avoid races.
 162	 */
 163	if (!rds_conn_up(conn)) {
 164		release_in_xmit(conn);
 165		ret = 0;
 166		goto out;
 167	}
 168
 169	if (conn->c_trans->xmit_prepare)
 170		conn->c_trans->xmit_prepare(conn);
 171
 172	/*
 173	 * spin trying to push headers and data down the connection until
 174	 * the connection doesn't make forward progress.
 175	 */
 176	while (1) {
 177
 178		rm = conn->c_xmit_rm;
 
 
 
 
 
 
 
 
 
 
 
 179
 180		/*
 181		 * If between sending messages, we can send a pending congestion
 182		 * map update.
 183		 */
 184		if (!rm && test_and_clear_bit(0, &conn->c_map_queued)) {
 185			rm = rds_cong_update_alloc(conn);
 186			if (IS_ERR(rm)) {
 187				ret = PTR_ERR(rm);
 188				break;
 189			}
 190			rm->data.op_active = 1;
 
 
 191
 192			conn->c_xmit_rm = rm;
 193		}
 194
 195		/*
 196		 * If not already working on one, grab the next message.
 197		 *
 198		 * c_xmit_rm holds a ref while we're sending this message down
 199		 * the connction.  We can use this ref while holding the
 200		 * send_sem.. rds_send_reset() is serialized with it.
 201		 */
 202		if (!rm) {
 203			unsigned int len;
 204
 205			spin_lock_irqsave(&conn->c_lock, flags);
 206
 207			if (!list_empty(&conn->c_send_queue)) {
 208				rm = list_entry(conn->c_send_queue.next,
 
 
 
 
 
 
 
 
 
 
 209						struct rds_message,
 210						m_conn_item);
 211				rds_message_addref(rm);
 212
 213				/*
 214				 * Move the message from the send queue to the retransmit
 215				 * list right away.
 216				 */
 217				list_move_tail(&rm->m_conn_item, &conn->c_retrans);
 
 218			}
 219
 220			spin_unlock_irqrestore(&conn->c_lock, flags);
 221
 222			if (!rm)
 223				break;
 224
 225			/* Unfortunately, the way Infiniband deals with
 226			 * RDMA to a bad MR key is by moving the entire
 227			 * queue pair to error state. We cold possibly
 228			 * recover from that, but right now we drop the
 229			 * connection.
 230			 * Therefore, we never retransmit messages with RDMA ops.
 231			 */
 232			if (rm->rdma.op_active &&
 233			    test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags)) {
 234				spin_lock_irqsave(&conn->c_lock, flags);
 
 235				if (test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags))
 236					list_move(&rm->m_conn_item, &to_be_dropped);
 237				spin_unlock_irqrestore(&conn->c_lock, flags);
 238				continue;
 239			}
 240
 241			/* Require an ACK every once in a while */
 242			len = ntohl(rm->m_inc.i_hdr.h_len);
 243			if (conn->c_unacked_packets == 0 ||
 244			    conn->c_unacked_bytes < len) {
 245				__set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);
 246
 247				conn->c_unacked_packets = rds_sysctl_max_unacked_packets;
 248				conn->c_unacked_bytes = rds_sysctl_max_unacked_bytes;
 
 
 249				rds_stats_inc(s_send_ack_required);
 250			} else {
 251				conn->c_unacked_bytes -= len;
 252				conn->c_unacked_packets--;
 253			}
 254
 255			conn->c_xmit_rm = rm;
 256		}
 257
 258		/* The transport either sends the whole rdma or none of it */
 259		if (rm->rdma.op_active && !conn->c_xmit_rdma_sent) {
 260			rm->m_final_op = &rm->rdma;
 
 
 
 
 261			ret = conn->c_trans->xmit_rdma(conn, &rm->rdma);
 262			if (ret)
 
 
 263				break;
 264			conn->c_xmit_rdma_sent = 1;
 
 265
 266			/* The transport owns the mapped memory for now.
 267			 * You can't unmap it while it's on the send queue */
 268			set_bit(RDS_MSG_MAPPED, &rm->m_flags);
 269		}
 270
 271		if (rm->atomic.op_active && !conn->c_xmit_atomic_sent) {
 272			rm->m_final_op = &rm->atomic;
 
 
 
 
 273			ret = conn->c_trans->xmit_atomic(conn, &rm->atomic);
 274			if (ret)
 
 
 275				break;
 276			conn->c_xmit_atomic_sent = 1;
 
 277
 278			/* The transport owns the mapped memory for now.
 279			 * You can't unmap it while it's on the send queue */
 280			set_bit(RDS_MSG_MAPPED, &rm->m_flags);
 281		}
 282
 283		/*
 284		 * A number of cases require an RDS header to be sent
 285		 * even if there is no data.
 286		 * We permit 0-byte sends; rds-ping depends on this.
 287		 * However, if there are exclusively attached silent ops,
 288		 * we skip the hdr/data send, to enable silent operation.
 289		 */
 290		if (rm->data.op_nents == 0) {
 291			int ops_present;
 292			int all_ops_are_silent = 1;
 293
 294			ops_present = (rm->atomic.op_active || rm->rdma.op_active);
 295			if (rm->atomic.op_active && !rm->atomic.op_silent)
 296				all_ops_are_silent = 0;
 297			if (rm->rdma.op_active && !rm->rdma.op_silent)
 298				all_ops_are_silent = 0;
 299
 300			if (ops_present && all_ops_are_silent
 301			    && !rm->m_rdma_cookie)
 302				rm->data.op_active = 0;
 303		}
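		/*
		 * Decision sketch for the block above when data.op_nents == 0
		 * (whether the hdr/data transmit in the next step still runs):
		 *
		 *	attached ops	all silent	m_rdma_cookie	hdr sent?
		 *	none		-		-		yes (rds-ping)
		 *	rdma/atomic	no		-		yes
		 *	rdma/atomic	yes		set		yes
		 *	rdma/atomic	yes		0		no (op_active cleared)
		 */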
 304
 305		if (rm->data.op_active && !conn->c_xmit_data_sent) {
 306			ret = conn->c_trans->xmit(conn, rm,
 307			ret = conn->c_trans->xmit(conn, rm,
 308						  conn->c_xmit_hdr_off,
 309						  conn->c_xmit_sg,
 310						  conn->c_xmit_data_off);
 311			if (ret <= 0)
 312				break;
 313
 314			if (conn->c_xmit_hdr_off < sizeof(struct rds_header)) {
 315				tmp = min_t(int, ret,
 316					    sizeof(struct rds_header) -
 317					    conn->c_xmit_hdr_off);
 318				conn->c_xmit_hdr_off += tmp;
 319				ret -= tmp;
 320			}
 321
 322			sg = &rm->data.op_sg[conn->c_xmit_sg];
 323			while (ret) {
 324				tmp = min_t(int, ret, sg->length -
 325						      conn->c_xmit_data_off);
 326				conn->c_xmit_data_off += tmp;
 327				ret -= tmp;
 328				if (conn->c_xmit_data_off == sg->length) {
 329					conn->c_xmit_data_off = 0;
 330					sg++;
 331					conn->c_xmit_sg++;
 332					BUG_ON(ret != 0 &&
 333					       conn->c_xmit_sg == rm->data.op_nents);
 334				}
 335			}
 336
 337			if (conn->c_xmit_hdr_off == sizeof(struct rds_header) &&
 338			    (conn->c_xmit_sg == rm->data.op_nents))
 339				conn->c_xmit_data_sent = 1;
 340		}
 341
 342		/*
 343	 * An rm will only take multiple trips through this loop
 344	 * if there is a data op. Thus, if the data is sent (or there was
 345		 * none), then we're done with the rm.
 346		 */
 347		if (!rm->data.op_active || conn->c_xmit_data_sent) {
 348			conn->c_xmit_rm = NULL;
 349			conn->c_xmit_sg = 0;
 350			conn->c_xmit_hdr_off = 0;
 351			conn->c_xmit_data_off = 0;
 352			conn->c_xmit_rdma_sent = 0;
 353			conn->c_xmit_atomic_sent = 0;
 354			conn->c_xmit_data_sent = 0;
 355
 356			rds_message_put(rm);
 357		}
 358	}
 359
 360	if (conn->c_trans->xmit_complete)
 361		conn->c_trans->xmit_complete(conn);
 362
 363	release_in_xmit(conn);
 364
 365	/* Nuke any messages we decided not to retransmit. */
 366	if (!list_empty(&to_be_dropped)) {
 367		/* irqs on here, so we can put(), unlike above */
 368		list_for_each_entry(rm, &to_be_dropped, m_conn_item)
 369			rds_message_put(rm);
 370		rds_send_remove_from_sock(&to_be_dropped, RDS_RDMA_DROPPED);
 371	}
 372
 373	/*
 374	 * Other senders can queue a message after we last test the send queue
 375	 * but before we clear RDS_IN_XMIT.  In that case they'd back off and
 376	 * not try and send their newly queued message.  We need to check the
 377	 * send queue after having cleared RDS_IN_XMIT so that their message
 378	 * doesn't get stuck on the send queue.
 379	 *
 380	 * If the transport cannot continue (i.e. ret != 0), then it must
 381	 * call us when more room is available, such as from the tx
 382	 * completion handler.
 383	 */
 384	if (ret == 0) {
 385		smp_mb();
 386		if (!list_empty(&conn->c_send_queue)) {
 387			rds_stats_inc(s_send_lock_queue_raced);
 388			goto restart;
 389		}
 390	}
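	/*
	 * Sketch of the interleaving the re-check above closes, with this CPU
	 * as T0 and a concurrent sender as T1:
	 *
	 *	T0: sees c_send_queue empty, leaves the send loop
	 *	T1: queues a new message on c_send_queue
	 *	T1: finds RDS_IN_XMIT still set and backs off
	 *	T0: release_in_xmit(), smp_mb(), re-checks the queue
	 *	T0: sees T1's message, bumps s_send_lock_queue_raced, restarts
	 */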
 391out:
 392	return ret;
 393}
 394
 395static void rds_send_sndbuf_remove(struct rds_sock *rs, struct rds_message *rm)
 396{
 397	u32 len = be32_to_cpu(rm->m_inc.i_hdr.h_len);
 398
 399	assert_spin_locked(&rs->rs_lock);
 400
 401	BUG_ON(rs->rs_snd_bytes < len);
 402	rs->rs_snd_bytes -= len;
 403
 404	if (rs->rs_snd_bytes == 0)
 405		rds_stats_inc(s_send_queue_empty);
 406}
 407
 408static inline int rds_send_is_acked(struct rds_message *rm, u64 ack,
 409				    is_acked_func is_acked)
 410{
 411	if (is_acked)
 412		return is_acked(rm, ack);
 413	return be64_to_cpu(rm->m_inc.i_hdr.h_sequence) <= ack;
 414}
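/*
 * A transport may pass its own predicate as is_acked when header sequence
 * numbers alone aren't enough.  A minimal sketch of such a callback (the
 * name is made up; a byte-stream transport like TCP only trusts m_ack_seq
 * once RDS_MSG_HAS_ACK_SEQ is set):
 *
 *	static int example_is_acked(struct rds_message *rm, uint64_t ack)
 *	{
 *		if (!test_bit(RDS_MSG_HAS_ACK_SEQ, &rm->m_flags))
 *			return 0;
 *		return rm->m_ack_seq <= ack;
 *	}
 *
 * rds_send_drop_acked(conn, ack, example_is_acked) would then drop every
 * retransmit-queue entry the predicate accepts.
 */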
 415
 416/*
 417 * This is pretty similar to what happens below in the ACK
 418 * handling code - except that we call here as soon as we get
 419 * the IB send completion on the RDMA op and the accompanying
 420 * message.
 421 */
 422void rds_rdma_send_complete(struct rds_message *rm, int status)
 423{
 424	struct rds_sock *rs = NULL;
 425	struct rm_rdma_op *ro;
 426	struct rds_notifier *notifier;
 427	unsigned long flags;
 428
 429	spin_lock_irqsave(&rm->m_rs_lock, flags);
 430
 431	ro = &rm->rdma;
 432	if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags) &&
 433	    ro->op_active && ro->op_notify && ro->op_notifier) {
 434		notifier = ro->op_notifier;
 435		rs = rm->m_rs;
 436		sock_hold(rds_rs_to_sk(rs));
 437
 438		notifier->n_status = status;
 439		spin_lock(&rs->rs_lock);
 440		list_add_tail(&notifier->n_list, &rs->rs_notify_queue);
 441		spin_unlock(&rs->rs_lock);
 442
 443		ro->op_notifier = NULL;
 444	}
 445
 446	spin_unlock_irqrestore(&rm->m_rs_lock, flags);
 447
 448	if (rs) {
 449		rds_wake_sk_sleep(rs);
 450		sock_put(rds_rs_to_sk(rs));
 451	}
 452}
 453EXPORT_SYMBOL_GPL(rds_rdma_send_complete);
 454
 455/*
 456 * Just like above, except looks at atomic op
 457 */
 458void rds_atomic_send_complete(struct rds_message *rm, int status)
 459{
 460	struct rds_sock *rs = NULL;
 461	struct rm_atomic_op *ao;
 462	struct rds_notifier *notifier;
 463	unsigned long flags;
 464
 465	spin_lock_irqsave(&rm->m_rs_lock, flags);
 466
 467	ao = &rm->atomic;
 468	if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags)
 469	    && ao->op_active && ao->op_notify && ao->op_notifier) {
 470		notifier = ao->op_notifier;
 471		rs = rm->m_rs;
 472		sock_hold(rds_rs_to_sk(rs));
 473
 474		notifier->n_status = status;
 475		spin_lock(&rs->rs_lock);
 476		list_add_tail(&notifier->n_list, &rs->rs_notify_queue);
 477		spin_unlock(&rs->rs_lock);
 478
 479		ao->op_notifier = NULL;
 480	}
 481
 482	spin_unlock_irqrestore(&rm->m_rs_lock, flags);
 483
 484	if (rs) {
 485		rds_wake_sk_sleep(rs);
 486		sock_put(rds_rs_to_sk(rs));
 487	}
 488}
 489EXPORT_SYMBOL_GPL(rds_atomic_send_complete);
 490
 491/*
 492 * This is the same as rds_rdma_send_complete except we
 493 * don't do any locking - we have all the ingredients (message,
 494 * socket, socket lock) and can just move the notifier.
 495 */
 496static inline void
 497__rds_send_complete(struct rds_sock *rs, struct rds_message *rm, int status)
 498{
 499	struct rm_rdma_op *ro;
 500	struct rm_atomic_op *ao;
 501
 502	ro = &rm->rdma;
 503	if (ro->op_active && ro->op_notify && ro->op_notifier) {
 504		ro->op_notifier->n_status = status;
 505		list_add_tail(&ro->op_notifier->n_list, &rs->rs_notify_queue);
 506		ro->op_notifier = NULL;
 507	}
 508
 509	ao = &rm->atomic;
 510	if (ao->op_active && ao->op_notify && ao->op_notifier) {
 511		ao->op_notifier->n_status = status;
 512		list_add_tail(&ao->op_notifier->n_list, &rs->rs_notify_queue);
 513		ao->op_notifier = NULL;
 514	}
 515
 516	/* No need to wake the app - caller does this */
 517}
 518
 519/*
 520 * This is called from the IB send completion when we detect
 521 * an RDMA operation that failed with a remote access error.
 522 * So speed is not an issue here.
 523 */
 524struct rds_message *rds_send_get_message(struct rds_connection *conn,
 525					 struct rm_rdma_op *op)
 526{
 527	struct rds_message *rm, *tmp, *found = NULL;
 528	unsigned long flags;
 529
 530	spin_lock_irqsave(&conn->c_lock, flags);
 531
 532	list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
 533		if (&rm->rdma == op) {
 534			atomic_inc(&rm->m_refcount);
 535			found = rm;
 536			goto out;
 537		}
 538	}
 539
 540	list_for_each_entry_safe(rm, tmp, &conn->c_send_queue, m_conn_item) {
 541		if (&rm->rdma == op) {
 542			atomic_inc(&rm->m_refcount);
 543			found = rm;
 544			break;
 545		}
 546	}
 547
 548out:
 549	spin_unlock_irqrestore(&conn->c_lock, flags);
 550
 551	return found;
 552}
 553EXPORT_SYMBOL_GPL(rds_send_get_message);
 554
 555/*
 556 * This removes messages from the socket's list if they're on it.  The list
 557 * argument must be private to the caller, we must be able to modify it
 558 * without locks.  The messages must have a reference held for their
 559 * position on the list.  This function will drop that reference after
 560 * removing the messages from the 'messages' list regardless of whether it
 561 * found the messages on the socket list or not.
 562 */
 563static void rds_send_remove_from_sock(struct list_head *messages, int status)
 564{
 565	unsigned long flags;
 566	struct rds_sock *rs = NULL;
 567	struct rds_message *rm;
 568
 569	while (!list_empty(messages)) {
 570		int was_on_sock = 0;
 571
 572		rm = list_entry(messages->next, struct rds_message,
 573				m_conn_item);
 574		list_del_init(&rm->m_conn_item);
 575
 576		/*
 577		 * If we see this flag cleared then we're *sure* that someone
 578		 * else beat us to removing it from the sock.  If we race
 579		 * with their flag update we'll get the lock and then really
 580		 * see that the flag has been cleared.
 581		 *
 582		 * The message spinlock makes sure nobody clears rm->m_rs
 583		 * while we're messing with it. It does not prevent the
 584		 * message from being removed from the socket, though.
 585		 */
 586		spin_lock_irqsave(&rm->m_rs_lock, flags);
 587		if (!test_bit(RDS_MSG_ON_SOCK, &rm->m_flags))
 588			goto unlock_and_drop;
 589
 590		if (rs != rm->m_rs) {
 591			if (rs) {
 592				rds_wake_sk_sleep(rs);
 593				sock_put(rds_rs_to_sk(rs));
 594			}
 595			rs = rm->m_rs;
 596			sock_hold(rds_rs_to_sk(rs));
 597		}
 598		spin_lock(&rs->rs_lock);
 599
 600		if (test_and_clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags)) {
 601			struct rm_rdma_op *ro = &rm->rdma;
 602			struct rds_notifier *notifier;
 603
 604			list_del_init(&rm->m_sock_item);
 605			rds_send_sndbuf_remove(rs, rm);
 606
 607			if (ro->op_active && ro->op_notifier &&
 608			       (ro->op_notify || (ro->op_recverr && status))) {
 609				notifier = ro->op_notifier;
 610				list_add_tail(&notifier->n_list,
 611						&rs->rs_notify_queue);
 612				if (!notifier->n_status)
 613					notifier->n_status = status;
 614				rm->rdma.op_notifier = NULL;
 615			}
 616			was_on_sock = 1;
 617			rm->m_rs = NULL;
 618		}
 619		spin_unlock(&rs->rs_lock);
 620
 621unlock_and_drop:
 622		spin_unlock_irqrestore(&rm->m_rs_lock, flags);
 623		rds_message_put(rm);
 624		if (was_on_sock)
 625			rds_message_put(rm);
 626	}
 627
 628	if (rs) {
 629		rds_wake_sk_sleep(rs);
 630		sock_put(rds_rs_to_sk(rs));
 631	}
 632}
 633
 634/*
 635 * Transports call here when they've determined that the receiver queued
 636 * messages up to, and including, the given sequence number.  Messages are
 637 * moved to the retrans queue when rds_send_xmit picks them off the send
 638 * queue. This means that in the TCP case, the message may not have been
 639 * assigned the m_ack_seq yet - but that's fine as long as tcp_is_acked
 640 * checks the RDS_MSG_HAS_ACK_SEQ bit.
 641 *
 642 * XXX It's not clear to me how this is safely serialized with socket
 643 * destruction.  Maybe it should bail if it sees SOCK_DEAD.
 644 */
 645void rds_send_drop_acked(struct rds_connection *conn, u64 ack,
 646			 is_acked_func is_acked)
 647{
 648	struct rds_message *rm, *tmp;
 649	unsigned long flags;
 650	LIST_HEAD(list);
 651
 652	spin_lock_irqsave(&conn->c_lock, flags);
 653
 654	list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
 655		if (!rds_send_is_acked(rm, ack, is_acked))
 656			break;
 657
 658		list_move(&rm->m_conn_item, &list);
 659		clear_bit(RDS_MSG_ON_CONN, &rm->m_flags);
 660	}
 661
 662	/* order flag updates with spin locks */
 663	if (!list_empty(&list))
 664		smp_mb__after_clear_bit();
 665
 666	spin_unlock_irqrestore(&conn->c_lock, flags);
 667
 668	/* now remove the messages from the sock list as needed */
 669	rds_send_remove_from_sock(&list, RDS_RDMA_SUCCESS);
 670}
 671EXPORT_SYMBOL_GPL(rds_send_drop_acked);
 672
 673void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest)
 674{
 675	struct rds_message *rm, *tmp;
 676	struct rds_connection *conn;
 677	unsigned long flags;
 678	LIST_HEAD(list);
 679
 680	/* get all the messages we're dropping under the rs lock */
 681	spin_lock_irqsave(&rs->rs_lock, flags);
 682
 683	list_for_each_entry_safe(rm, tmp, &rs->rs_send_queue, m_sock_item) {
 684		if (dest && (dest->sin_addr.s_addr != rm->m_daddr ||
 685			     dest->sin_port != rm->m_inc.i_hdr.h_dport))
 686			continue;
 687
 688		list_move(&rm->m_sock_item, &list);
 689		rds_send_sndbuf_remove(rs, rm);
 690		clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags);
 691	}
 692
 693	/* order flag updates with the rs lock */
 694	smp_mb__after_clear_bit();
 695
 696	spin_unlock_irqrestore(&rs->rs_lock, flags);
 697
 698	if (list_empty(&list))
 699		return;
 700
 701	/* Remove the messages from the conn */
 702	list_for_each_entry(rm, &list, m_sock_item) {
 703
 704		conn = rm->m_inc.i_conn;
 705
 706		spin_lock_irqsave(&conn->c_lock, flags);
 707		/*
 708		 * Maybe someone else beat us to removing rm from the conn.
 709		 * If we race with their flag update we'll get the lock and
 710		 * then really see that the flag has been cleared.
 711		 */
 712		if (!test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags)) {
 713			spin_unlock_irqrestore(&conn->c_lock, flags);
 714			continue;
 715		}
 716		list_del_init(&rm->m_conn_item);
 717		spin_unlock_irqrestore(&conn->c_lock, flags);
 718
 719		/*
 720		 * Couldn't grab m_rs_lock in top loop (lock ordering),
 721		 * but we can now.
 722		 */
 723		spin_lock_irqsave(&rm->m_rs_lock, flags);
 724
 725		spin_lock(&rs->rs_lock);
 726		__rds_send_complete(rs, rm, RDS_RDMA_CANCELED);
 727		spin_unlock(&rs->rs_lock);
 728
 729		rm->m_rs = NULL;
 730		spin_unlock_irqrestore(&rm->m_rs_lock, flags);
 731
 732		rds_message_put(rm);
 733	}
 734
 735	rds_wake_sk_sleep(rs);
 736
 737	while (!list_empty(&list)) {
 738		rm = list_entry(list.next, struct rds_message, m_sock_item);
 739		list_del_init(&rm->m_sock_item);
 740
 741		rds_message_wait(rm);
 742		rds_message_put(rm);
 743	}
 744}
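/*
 * rds_send_drop_to() is reached via the RDS_CANCEL_SENT_TO socket option.
 * A rough userspace sketch (error handling omitted; the address and port are
 * placeholders, "fd" is a bound PF_RDS socket):
 *
 *	struct sockaddr_in sin = {
 *		.sin_family = AF_INET,
 *		.sin_addr.s_addr = inet_addr("192.0.2.1"),
 *		.sin_port = htons(4000),
 *	};
 *
 *	setsockopt(fd, SOL_RDS, RDS_CANCEL_SENT_TO, &sin, sizeof(sin));
 *
 * The NULL 'dest' case above covers socket teardown, where every pending
 * message is dropped regardless of destination.
 */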
 745
 746/*
 747 * we only want this to fire once so we use the caller's 'queued'.  It's
 748 * possible that another thread can race with us and remove the
 749 * message from the flow with RDS_CANCEL_SENT_TO.
 750 */
 751static int rds_send_queue_rm(struct rds_sock *rs, struct rds_connection *conn,
 752			     struct rds_message *rm, __be16 sport,
 753			     __be16 dport, int *queued)
 754{
 755	unsigned long flags;
 756	u32 len;
 757
 758	if (*queued)
 759		goto out;
 760
 761	len = be32_to_cpu(rm->m_inc.i_hdr.h_len);
 762
 763	/* this is the only place which holds both the socket's rs_lock
 764	 * and the connection's c_lock */
 765	spin_lock_irqsave(&rs->rs_lock, flags);
 766
 767	/*
 768	 * If there is a little space in sndbuf, we don't queue anything,
 769	 * and userspace gets -EAGAIN. But poll() indicates there's send
 770	 * room. This can lead to bad behavior (spinning) if snd_bytes isn't
 771	 * freed up by incoming acks. So we check the *old* value of
 772	 * rs_snd_bytes here to allow the last msg to exceed the buffer,
 773	 * and poll() now knows no more data can be sent.
 774	 */
 775	if (rs->rs_snd_bytes < rds_sk_sndbuf(rs)) {
 776		rs->rs_snd_bytes += len;
 777
 778		/* let recv side know we are close to send space exhaustion.
 779		 * This is probably not the optimal way to do it, as this
 780		 * means we set the flag on *all* messages as soon as our
 781		 * throughput hits a certain threshold.
 782		 */
 783		if (rs->rs_snd_bytes >= rds_sk_sndbuf(rs) / 2)
 784			__set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);
 785
 786		list_add_tail(&rm->m_sock_item, &rs->rs_send_queue);
 787		set_bit(RDS_MSG_ON_SOCK, &rm->m_flags);
 788		rds_message_addref(rm);
 789		rm->m_rs = rs;
 790
 791		/* The code ordering is a little weird, but we're
 792		   trying to minimize the time we hold c_lock */
 793		rds_message_populate_header(&rm->m_inc.i_hdr, sport, dport, 0);
 794		rm->m_inc.i_conn = conn;
 795		rds_message_addref(rm);
 796
 797		spin_lock(&conn->c_lock);
 798		rm->m_inc.i_hdr.h_sequence = cpu_to_be64(conn->c_next_tx_seq++);
 799		list_add_tail(&rm->m_conn_item, &conn->c_send_queue);
 800		set_bit(RDS_MSG_ON_CONN, &rm->m_flags);
 801		spin_unlock(&conn->c_lock);
 802
 803		rdsdebug("queued msg %p len %d, rs %p bytes %d seq %llu\n",
 804			 rm, len, rs, rs->rs_snd_bytes,
 805			 (unsigned long long)be64_to_cpu(rm->m_inc.i_hdr.h_sequence));
 806
 807		*queued = 1;
 808	}
 809
 810	spin_unlock_irqrestore(&rs->rs_lock, flags);
 811out:
 812	return *queued;
 813}
 814
 815/*
 816 * rds_message is getting to be quite complicated, and we'd like to allocate
 817 * it all in one go. This figures out how big it needs to be up front.
 818 */
 819static int rds_rm_size(struct msghdr *msg, int data_len)
 820{
 821	struct cmsghdr *cmsg;
 822	int size = 0;
 823	int cmsg_groups = 0;
 824	int retval;
 825
 826	for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
 827		if (!CMSG_OK(msg, cmsg))
 828			return -EINVAL;
 829
 830		if (cmsg->cmsg_level != SOL_RDS)
 831			continue;
 832
 833		switch (cmsg->cmsg_type) {
 834		case RDS_CMSG_RDMA_ARGS:
 835			cmsg_groups |= 1;
 836			retval = rds_rdma_extra_size(CMSG_DATA(cmsg));
 837			if (retval < 0)
 838				return retval;
 839			size += retval;
 840
 841			break;
 842
 843		case RDS_CMSG_RDMA_DEST:
 844		case RDS_CMSG_RDMA_MAP:
 845			cmsg_groups |= 2;
 846			/* these are valid but do not add any size */
 847			break;
 848
 849		case RDS_CMSG_ATOMIC_CSWP:
 850		case RDS_CMSG_ATOMIC_FADD:
 851		case RDS_CMSG_MASKED_ATOMIC_CSWP:
 852		case RDS_CMSG_MASKED_ATOMIC_FADD:
 853			cmsg_groups |= 1;
 854			size += sizeof(struct scatterlist);
 855			break;
 856
 857		default:
 858			return -EINVAL;
 859		}
 860
 861	}
 862
 863	size += ceil(data_len, PAGE_SIZE) * sizeof(struct scatterlist);
 864
 865	/* Ensure (DEST, MAP) are never used with (ARGS, ATOMIC) */
 866	if (cmsg_groups == 3)
 867		return -EINVAL;
 868
 869	return size;
 870}
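/*
 * Worked example of the sizing above (numbers are only illustrative): a
 * sendmsg() with three pages of immediate data plus one RDS_CMSG_RDMA_ARGS
 * control message sizes out as
 *
 *	size = rds_rdma_extra_size(args)
 *	     + ceil(data_len, PAGE_SIZE) * sizeof(struct scatterlist)
 *	     = rds_rdma_extra_size(args) + 3 * sizeof(struct scatterlist)
 *
 * and leaves cmsg_groups at 1.  Adding RDS_CMSG_RDMA_DEST or
 * RDS_CMSG_RDMA_MAP to the same call would make cmsg_groups 3, so the
 * whole send fails with -EINVAL.
 */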
 871
 872static int rds_cmsg_send(struct rds_sock *rs, struct rds_message *rm,
 873			 struct msghdr *msg, int *allocated_mr)
 874{
 875	struct cmsghdr *cmsg;
 876	int ret = 0;
 877
 878	for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
 879		if (!CMSG_OK(msg, cmsg))
 880			return -EINVAL;
 881
 882		if (cmsg->cmsg_level != SOL_RDS)
 883			continue;
 884
 885		/* As a side effect, RDMA_DEST and RDMA_MAP will set
 886		 * rm->rdma.m_rdma_cookie and rm->rdma.m_rdma_mr.
 887		 */
 888		switch (cmsg->cmsg_type) {
 889		case RDS_CMSG_RDMA_ARGS:
 890			ret = rds_cmsg_rdma_args(rs, rm, cmsg);
 891			break;
 892
 893		case RDS_CMSG_RDMA_DEST:
 894			ret = rds_cmsg_rdma_dest(rs, rm, cmsg);
 895			break;
 896
 897		case RDS_CMSG_RDMA_MAP:
 898			ret = rds_cmsg_rdma_map(rs, rm, cmsg);
 899			if (!ret)
 900				*allocated_mr = 1;
 901			break;
 902		case RDS_CMSG_ATOMIC_CSWP:
 903		case RDS_CMSG_ATOMIC_FADD:
 904		case RDS_CMSG_MASKED_ATOMIC_CSWP:
 905		case RDS_CMSG_MASKED_ATOMIC_FADD:
 906			ret = rds_cmsg_atomic(rs, rm, cmsg);
 907			break;
 908
 909		default:
 910			return -EINVAL;
 911		}
 912
 913		if (ret)
 914			break;
 915	}
 916
 917	return ret;
 918}
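/*
 * The control messages parsed above are built with the normal CMSG macros
 * against SOL_RDS.  A rough userspace sketch for attaching RDS_CMSG_RDMA_ARGS
 * to a send (the struct contents and destination are placeholders; see
 * linux/rds.h for the real rds_rdma_args layout):
 *
 *	char cbuf[CMSG_SPACE(sizeof(struct rds_rdma_args))];
 *	struct msghdr msg = {
 *		.msg_name	= &dest_sin,
 *		.msg_namelen	= sizeof(dest_sin),
 *		.msg_iov	= &data_iov,
 *		.msg_iovlen	= 1,
 *		.msg_control	= cbuf,
 *		.msg_controllen	= sizeof(cbuf),
 *	};
 *	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
 *
 *	cmsg->cmsg_level = SOL_RDS;
 *	cmsg->cmsg_type  = RDS_CMSG_RDMA_ARGS;
 *	cmsg->cmsg_len   = CMSG_LEN(sizeof(struct rds_rdma_args));
 *	memcpy(CMSG_DATA(cmsg), &rdma_args, sizeof(rdma_args));
 *	sendmsg(fd, &msg, 0);
 */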
 919
 920int rds_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
 921		size_t payload_len)
 922{
 923	struct sock *sk = sock->sk;
 924	struct rds_sock *rs = rds_sk_to_rs(sk);
 925	struct sockaddr_in *usin = (struct sockaddr_in *)msg->msg_name;
 926	__be32 daddr;
 927	__be16 dport;
 928	struct rds_message *rm = NULL;
 929	struct rds_connection *conn;
 930	int ret = 0;
 931	int queued = 0, allocated_mr = 0;
 932	int nonblock = msg->msg_flags & MSG_DONTWAIT;
 933	long timeo = sock_sndtimeo(sk, nonblock);
 934
 935	/* Mirror what Linux UDP does for BSD error message compatibility */
 936	/* XXX: Perhaps MSG_MORE someday */
 937	if (msg->msg_flags & ~(MSG_DONTWAIT | MSG_CMSG_COMPAT)) {
 938		ret = -EOPNOTSUPP;
 939		goto out;
 940	}
 941
 942	if (msg->msg_namelen) {
 943		/* XXX fail non-unicast destination IPs? */
 944		if (msg->msg_namelen < sizeof(*usin) || usin->sin_family != AF_INET) {
 945			ret = -EINVAL;
 946			goto out;
 947		}
 948		daddr = usin->sin_addr.s_addr;
 949		dport = usin->sin_port;
 950	} else {
 951		/* We only care about consistency with ->connect() */
 952		lock_sock(sk);
 953		daddr = rs->rs_conn_addr;
 954		dport = rs->rs_conn_port;
 955		release_sock(sk);
 956	}
 957
 958	/* racing with another thread binding seems ok here */
 959	if (daddr == 0 || rs->rs_bound_addr == 0) {
 960		ret = -ENOTCONN; /* XXX not a great errno */
 961		goto out;
 962	}
 963
 964	/* size of rm including all sgs */
 965	ret = rds_rm_size(msg, payload_len);
 966	if (ret < 0)
 967		goto out;
 968
 969	rm = rds_message_alloc(ret, GFP_KERNEL);
 970	if (!rm) {
 971		ret = -ENOMEM;
 972		goto out;
 973	}
 974
 975	/* Attach data to the rm */
 976	if (payload_len) {
 977		rm->data.op_sg = rds_message_alloc_sgs(rm, ceil(payload_len, PAGE_SIZE));
 978		if (!rm->data.op_sg) {
 979			ret = -ENOMEM;
 980			goto out;
 981		}
 982		ret = rds_message_copy_from_user(rm, msg->msg_iov, payload_len);
 983		if (ret)
 984			goto out;
 985	}
 986	rm->data.op_active = 1;
 987
 988	rm->m_daddr = daddr;
 989
 990	/* rds_conn_create has a spinlock that runs with IRQ off.
 991	 * Caching the conn in the socket helps a lot. */
 992	if (rs->rs_conn && rs->rs_conn->c_faddr == daddr)
 993		conn = rs->rs_conn;
 994	else {
 995		conn = rds_conn_create_outgoing(rs->rs_bound_addr, daddr,
 996					rs->rs_transport,
 997					sock->sk->sk_allocation);
 998		if (IS_ERR(conn)) {
 999			ret = PTR_ERR(conn);
1000			goto out;
1001		}
1002		rs->rs_conn = conn;
1003	}
1004
1005	/* Parse any control messages the user may have included. */
1006	ret = rds_cmsg_send(rs, rm, msg, &allocated_mr);
1007	if (ret)
1008		goto out;
1009
1010	if (rm->rdma.op_active && !conn->c_trans->xmit_rdma) {
1011		printk_ratelimited(KERN_NOTICE "rdma_op %p conn xmit_rdma %p\n",
1012			       &rm->rdma, conn->c_trans->xmit_rdma);
1013		ret = -EOPNOTSUPP;
1014		goto out;
1015	}
1016
1017	if (rm->atomic.op_active && !conn->c_trans->xmit_atomic) {
1018		printk_ratelimited(KERN_NOTICE "atomic_op %p conn xmit_atomic %p\n",
1019			       &rm->atomic, conn->c_trans->xmit_atomic);
1020		ret = -EOPNOTSUPP;
1021		goto out;
1022	}
1023
1024	rds_conn_connect_if_down(conn);
1025
1026	ret = rds_cong_wait(conn->c_fcong, dport, nonblock, rs);
1027	if (ret) {
1028		rs->rs_seen_congestion = 1;
1029		goto out;
1030	}
1031
1032	while (!rds_send_queue_rm(rs, conn, rm, rs->rs_bound_port,
1033				  dport, &queued)) {
1034		rds_stats_inc(s_send_queue_full);
1035		/* XXX make sure this is reasonable */
1036		if (payload_len > rds_sk_sndbuf(rs)) {
1037			ret = -EMSGSIZE;
1038			goto out;
1039		}
1040		if (nonblock) {
1041			ret = -EAGAIN;
1042			goto out;
1043		}
1044
1045		timeo = wait_event_interruptible_timeout(*sk_sleep(sk),
1046					rds_send_queue_rm(rs, conn, rm,
1047							  rs->rs_bound_port,
1048							  dport,
1049							  &queued),
1050					timeo);
1051		rdsdebug("sendmsg woke queued %d timeo %ld\n", queued, timeo);
1052		if (timeo > 0 || timeo == MAX_SCHEDULE_TIMEOUT)
1053			continue;
1054
1055		ret = timeo;
1056		if (ret == 0)
1057			ret = -ETIMEDOUT;
1058		goto out;
1059	}
1060
1061	/*
1062	 * By now we've committed to the send.  We reuse rds_send_worker()
1063	 * to retry sends in the rds thread if the transport asks us to.
1064	 */
1065	rds_stats_inc(s_send_queued);
1066
1067	if (!test_bit(RDS_LL_SEND_FULL, &conn->c_flags))
1068		rds_send_xmit(conn);
1069
1070	rds_message_put(rm);
1071	return payload_len;
1072
1073out:
1074	/* If the user included a RDMA_MAP cmsg, we allocated an MR on the fly.
1075	 * If the sendmsg goes through, we keep the MR. If it fails with EAGAIN
1076	 * or in any other way, we need to destroy the MR again */
1077	if (allocated_mr)
1078		rds_rdma_unuse(rs, rds_rdma_cookie_key(rm->m_rdma_cookie), 1);
1079
1080	if (rm)
1081		rds_message_put(rm);
1082	return ret;
1083}
1084
1085/*
1086 * Reply to a ping packet.
1087 */
1088int
1089rds_send_pong(struct rds_connection *conn, __be16 dport)
1090{
1091	struct rds_message *rm;
1092	unsigned long flags;
1093	int ret = 0;
1094
1095	rm = rds_message_alloc(0, GFP_ATOMIC);
1096	if (!rm) {
1097		ret = -ENOMEM;
1098		goto out;
1099	}
1100
1101	rm->m_daddr = conn->c_faddr;
1102	rm->data.op_active = 1;
1103
1104	rds_conn_connect_if_down(conn);
1105
1106	ret = rds_cong_wait(conn->c_fcong, dport, 1, NULL);
1107	if (ret)
1108		goto out;
1109
1110	spin_lock_irqsave(&conn->c_lock, flags);
1111	list_add_tail(&rm->m_conn_item, &conn->c_send_queue);
1112	set_bit(RDS_MSG_ON_CONN, &rm->m_flags);
1113	rds_message_addref(rm);
1114	rm->m_inc.i_conn = conn;
1115
1116	rds_message_populate_header(&rm->m_inc.i_hdr, 0, dport,
1117				    conn->c_next_tx_seq);
1118	conn->c_next_tx_seq++;
1119	spin_unlock_irqrestore(&conn->c_lock, flags);
1120
1121	rds_stats_inc(s_send_queued);
1122	rds_stats_inc(s_send_pong);
1123
1124	if (!test_bit(RDS_LL_SEND_FULL, &conn->c_flags))
1125		rds_send_xmit(conn);
1126
1127	rds_message_put(rm);
1128	return 0;
1129
1130out:
1131	if (rm)
1132		rds_message_put(rm);
1133	return ret;
1134}