v4.6
   1/*
   2 * Copyright (c) 2006 Oracle.  All rights reserved.
   3 *
   4 * This software is available to you under a choice of one of two
   5 * licenses.  You may choose to be licensed under the terms of the GNU
   6 * General Public License (GPL) Version 2, available from the file
   7 * COPYING in the main directory of this source tree, or the
   8 * OpenIB.org BSD license below:
   9 *
  10 *     Redistribution and use in source and binary forms, with or
  11 *     without modification, are permitted provided that the following
  12 *     conditions are met:
  13 *
  14 *      - Redistributions of source code must retain the above
  15 *        copyright notice, this list of conditions and the following
  16 *        disclaimer.
  17 *
  18 *      - Redistributions in binary form must reproduce the above
  19 *        copyright notice, this list of conditions and the following
  20 *        disclaimer in the documentation and/or other materials
  21 *        provided with the distribution.
  22 *
  23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  30 * SOFTWARE.
  31 *
  32 */
  33#include <linux/kernel.h>
  34#include <linux/moduleparam.h>
  35#include <linux/gfp.h>
  36#include <net/sock.h>
  37#include <linux/in.h>
  38#include <linux/list.h>
  39#include <linux/ratelimit.h>
  40#include <linux/export.h>
  41#include <linux/sizes.h>
  42
  43#include "rds.h"
  44
  45/* When transmitting messages in rds_send_xmit, we need to emerge from
   46 * time to time and briefly release the CPU. Otherwise the soft lockup watchdog
  47 * will kick our shin.
  48 * Also, it seems fairer to not let one busy connection stall all the
  49 * others.
  50 *
  51 * send_batch_count is the number of times we'll loop in send_xmit. Setting
  52 * it to 0 will restore the old behavior (where we looped until we had
  53 * drained the queue).
  54 */
  55static int send_batch_count = SZ_1K;
  56module_param(send_batch_count, int, 0444);
  57MODULE_PARM_DESC(send_batch_count, " batch factor when working the send queue");
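     /* Note: with 0444 permissions the value is exported read-only (typically
      * under /sys/module/rds/parameters/send_batch_count), so it can only be
      * set when the module is loaded, e.g. "modprobe rds send_batch_count=256".
      */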
  58
  59static void rds_send_remove_from_sock(struct list_head *messages, int status);
  60
  61/*
  62 * Reset the send state.  Callers must ensure that this doesn't race with
  63 * rds_send_xmit().
  64 */
  65void rds_send_reset(struct rds_connection *conn)
  66{
  67	struct rds_message *rm, *tmp;
  68	unsigned long flags;
  69
  70	if (conn->c_xmit_rm) {
  71		rm = conn->c_xmit_rm;
  72		conn->c_xmit_rm = NULL;
  73		/* Tell the user the RDMA op is no longer mapped by the
  74		 * transport. This isn't entirely true (it's flushed out
  75		 * independently) but as the connection is down, there's
  76		 * no ongoing RDMA to/from that memory */
  77		rds_message_unmapped(rm);
  78		rds_message_put(rm);
  79	}
  80
  81	conn->c_xmit_sg = 0;
  82	conn->c_xmit_hdr_off = 0;
  83	conn->c_xmit_data_off = 0;
  84	conn->c_xmit_atomic_sent = 0;
  85	conn->c_xmit_rdma_sent = 0;
  86	conn->c_xmit_data_sent = 0;
  87
  88	conn->c_map_queued = 0;
  89
  90	conn->c_unacked_packets = rds_sysctl_max_unacked_packets;
  91	conn->c_unacked_bytes = rds_sysctl_max_unacked_bytes;
  92
  93	/* Mark messages as retransmissions, and move them to the send q */
  94	spin_lock_irqsave(&conn->c_lock, flags);
  95	list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
  96		set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);
  97		set_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags);
  98	}
  99	list_splice_init(&conn->c_retrans, &conn->c_send_queue);
 100	spin_unlock_irqrestore(&conn->c_lock, flags);
 101}
 
 102
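     /* Claim the single-transmitter role for this connection.
      * test_and_set_bit() returns the previous value of RDS_IN_XMIT, so only
      * the caller that flips the bit from 0 to 1 sees a zero return and wins;
      * everyone else backs off.
      */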
 103static int acquire_in_xmit(struct rds_connection *conn)
 104{
 105	return test_and_set_bit(RDS_IN_XMIT, &conn->c_flags) == 0;
 106}
 107
 108static void release_in_xmit(struct rds_connection *conn)
 109{
 110	clear_bit(RDS_IN_XMIT, &conn->c_flags);
 111	smp_mb__after_atomic();
 112	/*
 113	 * We don't use wait_on_bit()/wake_up_bit() because our waking is in a
 114	 * hot path and finding waiters is very rare.  We don't want to walk
 115	 * the system-wide hashed waitqueue buckets in the fast path only to
 116	 * almost never find waiters.
 117	 */
 118	if (waitqueue_active(&conn->c_waitq))
 119		wake_up_all(&conn->c_waitq);
 120}
 121
 122/*
 123 * We're making the conscious trade-off here to only send one message
 124 * down the connection at a time.
 125 *   Pro:
 126 *      - tx queueing is a simple fifo list
 127 *   	- reassembly is optional and easily done by transports per conn
 128 *      - no per flow rx lookup at all, straight to the socket
 129 *   	- less per-frag memory and wire overhead
 130 *   Con:
 131 *      - queued acks can be delayed behind large messages
 132 *   Depends:
 133 *      - small message latency is higher behind queued large messages
 134 *      - large message latency isn't starved by intervening small sends
 135 */
 136int rds_send_xmit(struct rds_connection *conn)
 137{
 
 138	struct rds_message *rm;
 139	unsigned long flags;
 140	unsigned int tmp;
 141	struct scatterlist *sg;
 142	int ret = 0;
 143	LIST_HEAD(to_be_dropped);
 144	int batch_count;
 145	unsigned long send_gen = 0;
 146
 147restart:
 148	batch_count = 0;
 149
 150	/*
 151	 * sendmsg calls here after having queued its message on the send
 152	 * queue.  We only have one task feeding the connection at a time.  If
 153	 * another thread is already feeding the queue then we back off.  This
 154	 * avoids blocking the caller and trading per-connection data between
 155	 * caches per message.
 156	 */
 157	if (!acquire_in_xmit(conn)) {
 158		rds_stats_inc(s_send_lock_contention);
 159		ret = -ENOMEM;
 160		goto out;
 161	}
 162
 163	/*
 164	 * we record the send generation after doing the xmit acquire.
 165	 * if someone else manages to jump in and do some work, we'll use
 166	 * this to avoid a goto restart farther down.
 167	 *
 168	 * The acquire_in_xmit() check above ensures that only one
 169	 * caller can increment c_send_gen at any time.
 170	 */
 171	conn->c_send_gen++;
 172	send_gen = conn->c_send_gen;
 173
 174	/*
 175	 * rds_conn_shutdown() sets the conn state and then tests RDS_IN_XMIT,
 176	 * we do the opposite to avoid races.
 177	 */
 178	if (!rds_conn_up(conn)) {
 179		release_in_xmit(conn);
 180		ret = 0;
 181		goto out;
 182	}
 183
 184	if (conn->c_trans->xmit_prepare)
 185		conn->c_trans->xmit_prepare(conn);
 186
 187	/*
 188	 * spin trying to push headers and data down the connection until
 189	 * the connection doesn't make forward progress.
 190	 */
 191	while (1) {
 192
 193		rm = conn->c_xmit_rm;
 194
 195		/*
 196		 * If between sending messages, we can send a pending congestion
 197		 * map update.
 198		 */
 199		if (!rm && test_and_clear_bit(0, &conn->c_map_queued)) {
 200			rm = rds_cong_update_alloc(conn);
 201			if (IS_ERR(rm)) {
 202				ret = PTR_ERR(rm);
 203				break;
 204			}
 205			rm->data.op_active = 1;
 206
 207			conn->c_xmit_rm = rm;
 208		}
 209
 210		/*
 211		 * If not already working on one, grab the next message.
 212		 *
 213		 * c_xmit_rm holds a ref while we're sending this message down
  214		 * the connection.  We can use this ref while holding the
  215		 * send_sem; rds_send_reset() is serialized with it.
 216		 */
 217		if (!rm) {
 218			unsigned int len;
 219
 220			batch_count++;
 221
 222			/* we want to process as big a batch as we can, but
 223			 * we also want to avoid softlockups.  If we've been
  224			 * through a lot of messages, let's back off and see
  225			 * if anyone else jumps in.
 226			 */
 227			if (batch_count >= send_batch_count)
 228				goto over_batch;
 229
 230			spin_lock_irqsave(&conn->c_lock, flags);
 231
 232			if (!list_empty(&conn->c_send_queue)) {
 233				rm = list_entry(conn->c_send_queue.next,
 234						struct rds_message,
 235						m_conn_item);
 236				rds_message_addref(rm);
 237
 238				/*
 239				 * Move the message from the send queue to the retransmit
 240				 * list right away.
 241				 */
 242				list_move_tail(&rm->m_conn_item, &conn->c_retrans);
 
 243			}
 244
 245			spin_unlock_irqrestore(&conn->c_lock, flags);
 246
 247			if (!rm)
 248				break;
 249
  250			/* Unfortunately, the way InfiniBand deals with
  251			 * RDMA to a bad MR key is by moving the entire
  252			 * queue pair to error state. We could possibly
 253			 * recover from that, but right now we drop the
 254			 * connection.
 255			 * Therefore, we never retransmit messages with RDMA ops.
 256			 */
 257			if (rm->rdma.op_active &&
 258			    test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags)) {
 259				spin_lock_irqsave(&conn->c_lock, flags);
 
 260				if (test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags))
 261					list_move(&rm->m_conn_item, &to_be_dropped);
 262				spin_unlock_irqrestore(&conn->c_lock, flags);
 263				continue;
 264			}
 265
 266			/* Require an ACK every once in a while */
 267			len = ntohl(rm->m_inc.i_hdr.h_len);
 268			if (conn->c_unacked_packets == 0 ||
 269			    conn->c_unacked_bytes < len) {
 270				__set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);
 271
 272				conn->c_unacked_packets = rds_sysctl_max_unacked_packets;
 273				conn->c_unacked_bytes = rds_sysctl_max_unacked_bytes;
 274				rds_stats_inc(s_send_ack_required);
 275			} else {
 276				conn->c_unacked_bytes -= len;
 277				conn->c_unacked_packets--;
 278			}
 279
 280			conn->c_xmit_rm = rm;
 281		}
 282
 283		/* The transport either sends the whole rdma or none of it */
 284		if (rm->rdma.op_active && !conn->c_xmit_rdma_sent) {
 285			rm->m_final_op = &rm->rdma;
 286			/* The transport owns the mapped memory for now.
 287			 * You can't unmap it while it's on the send queue
 288			 */
 289			set_bit(RDS_MSG_MAPPED, &rm->m_flags);
 290			ret = conn->c_trans->xmit_rdma(conn, &rm->rdma);
 291			if (ret) {
 292				clear_bit(RDS_MSG_MAPPED, &rm->m_flags);
 293				wake_up_interruptible(&rm->m_flush_wait);
 294				break;
 295			}
 296			conn->c_xmit_rdma_sent = 1;
 297
 298		}
 299
 300		if (rm->atomic.op_active && !conn->c_xmit_atomic_sent) {
 301			rm->m_final_op = &rm->atomic;
 302			/* The transport owns the mapped memory for now.
 303			 * You can't unmap it while it's on the send queue
 304			 */
 305			set_bit(RDS_MSG_MAPPED, &rm->m_flags);
 306			ret = conn->c_trans->xmit_atomic(conn, &rm->atomic);
 307			if (ret) {
 308				clear_bit(RDS_MSG_MAPPED, &rm->m_flags);
 309				wake_up_interruptible(&rm->m_flush_wait);
 310				break;
 311			}
 312			conn->c_xmit_atomic_sent = 1;
 313
 314		}
 315
 316		/*
 317		 * A number of cases require an RDS header to be sent
 318		 * even if there is no data.
 319		 * We permit 0-byte sends; rds-ping depends on this.
 320		 * However, if there are exclusively attached silent ops,
 321		 * we skip the hdr/data send, to enable silent operation.
 322		 */
 323		if (rm->data.op_nents == 0) {
 324			int ops_present;
 325			int all_ops_are_silent = 1;
 326
 327			ops_present = (rm->atomic.op_active || rm->rdma.op_active);
 328			if (rm->atomic.op_active && !rm->atomic.op_silent)
 329				all_ops_are_silent = 0;
 330			if (rm->rdma.op_active && !rm->rdma.op_silent)
 331				all_ops_are_silent = 0;
 332
 333			if (ops_present && all_ops_are_silent
 334			    && !rm->m_rdma_cookie)
 335				rm->data.op_active = 0;
 336		}
 337
 338		if (rm->data.op_active && !conn->c_xmit_data_sent) {
 339			rm->m_final_op = &rm->data;
 
 340			ret = conn->c_trans->xmit(conn, rm,
 341						  conn->c_xmit_hdr_off,
 342						  conn->c_xmit_sg,
 343						  conn->c_xmit_data_off);
 344			if (ret <= 0)
 345				break;
 346
 347			if (conn->c_xmit_hdr_off < sizeof(struct rds_header)) {
 348				tmp = min_t(int, ret,
 349					    sizeof(struct rds_header) -
 350					    conn->c_xmit_hdr_off);
 351				conn->c_xmit_hdr_off += tmp;
 352				ret -= tmp;
 353			}
 354
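			/* Whatever is left of the transport's byte count after
			 * the header has been charged is walked across the data
			 * scatterlist; c_xmit_sg and c_xmit_data_off remember
			 * how far we got so a partial send resumes here next
			 * time.
			 */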
 355			sg = &rm->data.op_sg[conn->c_xmit_sg];
 356			while (ret) {
 357				tmp = min_t(int, ret, sg->length -
 358						      conn->c_xmit_data_off);
 359				conn->c_xmit_data_off += tmp;
 360				ret -= tmp;
 361				if (conn->c_xmit_data_off == sg->length) {
 362					conn->c_xmit_data_off = 0;
 363					sg++;
 364					conn->c_xmit_sg++;
 365					BUG_ON(ret != 0 &&
 366					       conn->c_xmit_sg == rm->data.op_nents);
 367				}
 368			}
 369
 370			if (conn->c_xmit_hdr_off == sizeof(struct rds_header) &&
 371			    (conn->c_xmit_sg == rm->data.op_nents))
 372				conn->c_xmit_data_sent = 1;
 373		}
 374
 375		/*
 376		 * A rm will only take multiple times through this loop
 377		 * if there is a data op. Thus, if the data is sent (or there was
 378		 * none), then we're done with the rm.
 379		 */
 380		if (!rm->data.op_active || conn->c_xmit_data_sent) {
 381			conn->c_xmit_rm = NULL;
 382			conn->c_xmit_sg = 0;
 383			conn->c_xmit_hdr_off = 0;
 384			conn->c_xmit_data_off = 0;
 385			conn->c_xmit_rdma_sent = 0;
 386			conn->c_xmit_atomic_sent = 0;
 387			conn->c_xmit_data_sent = 0;
 388
 389			rds_message_put(rm);
 390		}
 391	}
 392
 393over_batch:
 394	if (conn->c_trans->xmit_complete)
 395		conn->c_trans->xmit_complete(conn);
 396	release_in_xmit(conn);
 397
 398	/* Nuke any messages we decided not to retransmit. */
 399	if (!list_empty(&to_be_dropped)) {
 400		/* irqs on here, so we can put(), unlike above */
 401		list_for_each_entry(rm, &to_be_dropped, m_conn_item)
 402			rds_message_put(rm);
 403		rds_send_remove_from_sock(&to_be_dropped, RDS_RDMA_DROPPED);
 404	}
 405
 406	/*
 407	 * Other senders can queue a message after we last test the send queue
 408	 * but before we clear RDS_IN_XMIT.  In that case they'd back off and
 409	 * not try and send their newly queued message.  We need to check the
 410	 * send queue after having cleared RDS_IN_XMIT so that their message
 411	 * doesn't get stuck on the send queue.
 412	 *
  413	 * If the transport cannot continue (i.e. ret != 0), then it must
 414	 * call us when more room is available, such as from the tx
 415	 * completion handler.
 416	 *
 417	 * We have an extra generation check here so that if someone manages
 418	 * to jump in after our release_in_xmit, we'll see that they have done
 419	 * some work and we will skip our goto
 420	 */
 421	if (ret == 0) {
 422		smp_mb();
 423		if ((test_bit(0, &conn->c_map_queued) ||
 424		     !list_empty(&conn->c_send_queue)) &&
 425		    send_gen == conn->c_send_gen) {
 426			rds_stats_inc(s_send_lock_queue_raced);
 427			if (batch_count < send_batch_count)
 428				goto restart;
 429			queue_delayed_work(rds_wq, &conn->c_send_w, 1);
 430		}
 431	}
 432out:
 433	return ret;
 434}
 435EXPORT_SYMBOL_GPL(rds_send_xmit);
 436
 437static void rds_send_sndbuf_remove(struct rds_sock *rs, struct rds_message *rm)
 438{
 439	u32 len = be32_to_cpu(rm->m_inc.i_hdr.h_len);
 440
 441	assert_spin_locked(&rs->rs_lock);
 442
 443	BUG_ON(rs->rs_snd_bytes < len);
 444	rs->rs_snd_bytes -= len;
 445
 446	if (rs->rs_snd_bytes == 0)
 447		rds_stats_inc(s_send_queue_empty);
 448}
 449
 450static inline int rds_send_is_acked(struct rds_message *rm, u64 ack,
 451				    is_acked_func is_acked)
 452{
 453	if (is_acked)
 454		return is_acked(rm, ack);
 455	return be64_to_cpu(rm->m_inc.i_hdr.h_sequence) <= ack;
 456}
 457
 458/*
 459 * This is pretty similar to what happens below in the ACK
 460 * handling code - except that we call here as soon as we get
 461 * the IB send completion on the RDMA op and the accompanying
 462 * message.
 463 */
 464void rds_rdma_send_complete(struct rds_message *rm, int status)
 465{
 466	struct rds_sock *rs = NULL;
 467	struct rm_rdma_op *ro;
 468	struct rds_notifier *notifier;
 469	unsigned long flags;
 
 470
 471	spin_lock_irqsave(&rm->m_rs_lock, flags);
 472
 
 473	ro = &rm->rdma;
 474	if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags) &&
 475	    ro->op_active && ro->op_notify && ro->op_notifier) {
 476		notifier = ro->op_notifier;
 477		rs = rm->m_rs;
 478		sock_hold(rds_rs_to_sk(rs));
 479
 480		notifier->n_status = status;
 481		spin_lock(&rs->rs_lock);
 482		list_add_tail(&notifier->n_list, &rs->rs_notify_queue);
 483		spin_unlock(&rs->rs_lock);
 484
 485		ro->op_notifier = NULL;
 486	}
 487
 488	spin_unlock_irqrestore(&rm->m_rs_lock, flags);
 489
 490	if (rs) {
 491		rds_wake_sk_sleep(rs);
 492		sock_put(rds_rs_to_sk(rs));
 493	}
 494}
 495EXPORT_SYMBOL_GPL(rds_rdma_send_complete);
 496
 497/*
 498 * Just like above, except looks at atomic op
 499 */
 500void rds_atomic_send_complete(struct rds_message *rm, int status)
 501{
 502	struct rds_sock *rs = NULL;
 503	struct rm_atomic_op *ao;
 504	struct rds_notifier *notifier;
 505	unsigned long flags;
 506
 507	spin_lock_irqsave(&rm->m_rs_lock, flags);
 508
 509	ao = &rm->atomic;
 510	if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags)
 511	    && ao->op_active && ao->op_notify && ao->op_notifier) {
 512		notifier = ao->op_notifier;
 513		rs = rm->m_rs;
 514		sock_hold(rds_rs_to_sk(rs));
 515
 516		notifier->n_status = status;
 517		spin_lock(&rs->rs_lock);
 518		list_add_tail(&notifier->n_list, &rs->rs_notify_queue);
 519		spin_unlock(&rs->rs_lock);
 520
 521		ao->op_notifier = NULL;
 522	}
 523
 524	spin_unlock_irqrestore(&rm->m_rs_lock, flags);
 525
 526	if (rs) {
 527		rds_wake_sk_sleep(rs);
 528		sock_put(rds_rs_to_sk(rs));
 529	}
 530}
 531EXPORT_SYMBOL_GPL(rds_atomic_send_complete);
 532
 533/*
 534 * This is the same as rds_rdma_send_complete except we
 535 * don't do any locking - we have all the ingredients (message,
 536 * socket, socket lock) and can just move the notifier.
 537 */
 538static inline void
 539__rds_send_complete(struct rds_sock *rs, struct rds_message *rm, int status)
 540{
 541	struct rm_rdma_op *ro;
 542	struct rm_atomic_op *ao;
 543
 544	ro = &rm->rdma;
 545	if (ro->op_active && ro->op_notify && ro->op_notifier) {
 546		ro->op_notifier->n_status = status;
 547		list_add_tail(&ro->op_notifier->n_list, &rs->rs_notify_queue);
 548		ro->op_notifier = NULL;
 549	}
 550
 551	ao = &rm->atomic;
 552	if (ao->op_active && ao->op_notify && ao->op_notifier) {
 553		ao->op_notifier->n_status = status;
 554		list_add_tail(&ao->op_notifier->n_list, &rs->rs_notify_queue);
 555		ao->op_notifier = NULL;
 556	}
 557
 558	/* No need to wake the app - caller does this */
 559}
 560
 561/*
 562 * This is called from the IB send completion when we detect
  563 * an RDMA operation that failed with a remote access error.
 564 * So speed is not an issue here.
 565 */
 566struct rds_message *rds_send_get_message(struct rds_connection *conn,
 567					 struct rm_rdma_op *op)
 568{
 569	struct rds_message *rm, *tmp, *found = NULL;
 570	unsigned long flags;
 571
 572	spin_lock_irqsave(&conn->c_lock, flags);
 573
 574	list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
 575		if (&rm->rdma == op) {
 576			atomic_inc(&rm->m_refcount);
 577			found = rm;
 578			goto out;
 579		}
 580	}
 581
 582	list_for_each_entry_safe(rm, tmp, &conn->c_send_queue, m_conn_item) {
 583		if (&rm->rdma == op) {
 584			atomic_inc(&rm->m_refcount);
 585			found = rm;
 586			break;
 587		}
 588	}
 589
 590out:
 591	spin_unlock_irqrestore(&conn->c_lock, flags);
 592
 593	return found;
 594}
 595EXPORT_SYMBOL_GPL(rds_send_get_message);
 596
 597/*
 598 * This removes messages from the socket's list if they're on it.  The list
 599 * argument must be private to the caller, we must be able to modify it
 600 * without locks.  The messages must have a reference held for their
 601 * position on the list.  This function will drop that reference after
  602 * removing the messages from the 'messages' list regardless of whether it found
 603 * the messages on the socket list or not.
 604 */
 605static void rds_send_remove_from_sock(struct list_head *messages, int status)
 606{
 607	unsigned long flags;
 608	struct rds_sock *rs = NULL;
 609	struct rds_message *rm;
 610
 611	while (!list_empty(messages)) {
 612		int was_on_sock = 0;
 613
 614		rm = list_entry(messages->next, struct rds_message,
 615				m_conn_item);
 616		list_del_init(&rm->m_conn_item);
 617
 618		/*
 619		 * If we see this flag cleared then we're *sure* that someone
 620		 * else beat us to removing it from the sock.  If we race
 621		 * with their flag update we'll get the lock and then really
 622		 * see that the flag has been cleared.
 623		 *
 624		 * The message spinlock makes sure nobody clears rm->m_rs
 625		 * while we're messing with it. It does not prevent the
 626		 * message from being removed from the socket, though.
 627		 */
 628		spin_lock_irqsave(&rm->m_rs_lock, flags);
 629		if (!test_bit(RDS_MSG_ON_SOCK, &rm->m_flags))
 630			goto unlock_and_drop;
 631
 632		if (rs != rm->m_rs) {
 633			if (rs) {
 634				rds_wake_sk_sleep(rs);
 635				sock_put(rds_rs_to_sk(rs));
 636			}
 637			rs = rm->m_rs;
 638			if (rs)
 639				sock_hold(rds_rs_to_sk(rs));
 640		}
 641		if (!rs)
 642			goto unlock_and_drop;
 643		spin_lock(&rs->rs_lock);
 644
 645		if (test_and_clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags)) {
 646			struct rm_rdma_op *ro = &rm->rdma;
 647			struct rds_notifier *notifier;
 648
 649			list_del_init(&rm->m_sock_item);
 650			rds_send_sndbuf_remove(rs, rm);
 651
 652			if (ro->op_active && ro->op_notifier &&
 653			       (ro->op_notify || (ro->op_recverr && status))) {
 654				notifier = ro->op_notifier;
 655				list_add_tail(&notifier->n_list,
 656						&rs->rs_notify_queue);
 657				if (!notifier->n_status)
 658					notifier->n_status = status;
 659				rm->rdma.op_notifier = NULL;
 660			}
 661			was_on_sock = 1;
 662			rm->m_rs = NULL;
 663		}
 664		spin_unlock(&rs->rs_lock);
 665
 666unlock_and_drop:
 667		spin_unlock_irqrestore(&rm->m_rs_lock, flags);
 668		rds_message_put(rm);
 669		if (was_on_sock)
 670			rds_message_put(rm);
 671	}
 672
 673	if (rs) {
 674		rds_wake_sk_sleep(rs);
 675		sock_put(rds_rs_to_sk(rs));
 676	}
 677}
 678
 679/*
 680 * Transports call here when they've determined that the receiver queued
 681 * messages up to, and including, the given sequence number.  Messages are
 682 * moved to the retrans queue when rds_send_xmit picks them off the send
 683 * queue. This means that in the TCP case, the message may not have been
 684 * assigned the m_ack_seq yet - but that's fine as long as tcp_is_acked
 685 * checks the RDS_MSG_HAS_ACK_SEQ bit.
 686 */
 687void rds_send_drop_acked(struct rds_connection *conn, u64 ack,
 688			 is_acked_func is_acked)
 689{
 690	struct rds_message *rm, *tmp;
 691	unsigned long flags;
 692	LIST_HEAD(list);
 693
 694	spin_lock_irqsave(&conn->c_lock, flags);
 695
 696	list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
 697		if (!rds_send_is_acked(rm, ack, is_acked))
 698			break;
 699
 700		list_move(&rm->m_conn_item, &list);
 701		clear_bit(RDS_MSG_ON_CONN, &rm->m_flags);
 702	}
 703
 704	/* order flag updates with spin locks */
 705	if (!list_empty(&list))
 706		smp_mb__after_atomic();
 707
 708	spin_unlock_irqrestore(&conn->c_lock, flags);
 709
 710	/* now remove the messages from the sock list as needed */
 711	rds_send_remove_from_sock(&list, RDS_RDMA_SUCCESS);
 712}
 713EXPORT_SYMBOL_GPL(rds_send_drop_acked);
 714
 715void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest)
 716{
 717	struct rds_message *rm, *tmp;
 718	struct rds_connection *conn;
 
 719	unsigned long flags;
 720	LIST_HEAD(list);
 721
 722	/* get all the messages we're dropping under the rs lock */
 723	spin_lock_irqsave(&rs->rs_lock, flags);
 724
 725	list_for_each_entry_safe(rm, tmp, &rs->rs_send_queue, m_sock_item) {
 726		if (dest && (dest->sin_addr.s_addr != rm->m_daddr ||
 727			     dest->sin_port != rm->m_inc.i_hdr.h_dport))
 728			continue;
 729
 730		list_move(&rm->m_sock_item, &list);
 731		rds_send_sndbuf_remove(rs, rm);
 732		clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags);
 733	}
 734
 735	/* order flag updates with the rs lock */
 736	smp_mb__after_atomic();
 737
 738	spin_unlock_irqrestore(&rs->rs_lock, flags);
 739
 740	if (list_empty(&list))
 741		return;
 742
 743	/* Remove the messages from the conn */
 744	list_for_each_entry(rm, &list, m_sock_item) {
 745
 746		conn = rm->m_inc.i_conn;
 747
 748		spin_lock_irqsave(&conn->c_lock, flags);
 749		/*
 750		 * Maybe someone else beat us to removing rm from the conn.
 751		 * If we race with their flag update we'll get the lock and
 752		 * then really see that the flag has been cleared.
 753		 */
 754		if (!test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags)) {
 755			spin_unlock_irqrestore(&conn->c_lock, flags);
 756			spin_lock_irqsave(&rm->m_rs_lock, flags);
 757			rm->m_rs = NULL;
 758			spin_unlock_irqrestore(&rm->m_rs_lock, flags);
 759			continue;
 760		}
 761		list_del_init(&rm->m_conn_item);
 762		spin_unlock_irqrestore(&conn->c_lock, flags);
 763
 764		/*
 765		 * Couldn't grab m_rs_lock in top loop (lock ordering),
 766		 * but we can now.
 767		 */
 768		spin_lock_irqsave(&rm->m_rs_lock, flags);
 769
 770		spin_lock(&rs->rs_lock);
 771		__rds_send_complete(rs, rm, RDS_RDMA_CANCELED);
 772		spin_unlock(&rs->rs_lock);
 773
 774		rm->m_rs = NULL;
 775		spin_unlock_irqrestore(&rm->m_rs_lock, flags);
 776
 777		rds_message_put(rm);
 778	}
 779
 780	rds_wake_sk_sleep(rs);
 781
 782	while (!list_empty(&list)) {
 783		rm = list_entry(list.next, struct rds_message, m_sock_item);
 784		list_del_init(&rm->m_sock_item);
 785		rds_message_wait(rm);
 786
 787		/* just in case the code above skipped this message
  788			 * because RDS_MSG_ON_CONN wasn't set, run it again here.
  789			 * Taking m_rs_lock is the only thing that keeps us
 790		 * from racing with ack processing.
 791		 */
 792		spin_lock_irqsave(&rm->m_rs_lock, flags);
 793
 794		spin_lock(&rs->rs_lock);
 795		__rds_send_complete(rs, rm, RDS_RDMA_CANCELED);
 796		spin_unlock(&rs->rs_lock);
 797
 798		rm->m_rs = NULL;
 799		spin_unlock_irqrestore(&rm->m_rs_lock, flags);
 800
 801		rds_message_put(rm);
 802	}
 803}
 804
 805/*
  806 * We only want this to fire once so we use the caller's 'queued'.  It's
 807 * possible that another thread can race with us and remove the
 808 * message from the flow with RDS_CANCEL_SENT_TO.
 809 */
 810static int rds_send_queue_rm(struct rds_sock *rs, struct rds_connection *conn,
 
 811			     struct rds_message *rm, __be16 sport,
 812			     __be16 dport, int *queued)
 813{
 814	unsigned long flags;
 815	u32 len;
 816
 817	if (*queued)
 818		goto out;
 819
 820	len = be32_to_cpu(rm->m_inc.i_hdr.h_len);
 821
 822	/* this is the only place which holds both the socket's rs_lock
 823	 * and the connection's c_lock */
 824	spin_lock_irqsave(&rs->rs_lock, flags);
 825
 826	/*
  827	 * If we refused to queue once only a little space was left in sndbuf,
  828	 * userspace would get -EAGAIN while poll() still indicated send
  829	 * room, which can lead to bad behavior (spinning) if snd_bytes isn't
  830	 * freed up by incoming acks. So we check the *old* value of
  831	 * rs_snd_bytes here to allow the last msg to exceed the buffer,
  832	 * and poll() then knows no more data can be sent.
 833	 */
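     	/* Example: with a 64KB sndbuf and 60KB already queued, one more
     	 * message is still accepted (rs_snd_bytes may then exceed the limit);
     	 * after that the check below fails and, as noted above, poll() stops
     	 * reporting send room until acks free up space.
     	 */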
 834	if (rs->rs_snd_bytes < rds_sk_sndbuf(rs)) {
 835		rs->rs_snd_bytes += len;
 836
 837		/* let recv side know we are close to send space exhaustion.
 838		 * This is probably not the optimal way to do it, as this
 839		 * means we set the flag on *all* messages as soon as our
 840		 * throughput hits a certain threshold.
 841		 */
 842		if (rs->rs_snd_bytes >= rds_sk_sndbuf(rs) / 2)
 843			__set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);
 844
 845		list_add_tail(&rm->m_sock_item, &rs->rs_send_queue);
 846		set_bit(RDS_MSG_ON_SOCK, &rm->m_flags);
 847		rds_message_addref(rm);
 
 848		rm->m_rs = rs;
 849
 850		/* The code ordering is a little weird, but we're
 851		   trying to minimize the time we hold c_lock */
 852		rds_message_populate_header(&rm->m_inc.i_hdr, sport, dport, 0);
 853		rm->m_inc.i_conn = conn;
 
 854		rds_message_addref(rm);
 855
 856		spin_lock(&conn->c_lock);
 857		rm->m_inc.i_hdr.h_sequence = cpu_to_be64(conn->c_next_tx_seq++);
 858		list_add_tail(&rm->m_conn_item, &conn->c_send_queue);
 859		set_bit(RDS_MSG_ON_CONN, &rm->m_flags);
 860		spin_unlock(&conn->c_lock);
 861
 862		rdsdebug("queued msg %p len %d, rs %p bytes %d seq %llu\n",
 863			 rm, len, rs, rs->rs_snd_bytes,
 864			 (unsigned long long)be64_to_cpu(rm->m_inc.i_hdr.h_sequence));
 865
 866		*queued = 1;
 867	}
 868
 869	spin_unlock_irqrestore(&rs->rs_lock, flags);
 870out:
 871	return *queued;
 872}
 873
 874/*
 875 * rds_message is getting to be quite complicated, and we'd like to allocate
 876 * it all in one go. This figures out how big it needs to be up front.
 877 */
 878static int rds_rm_size(struct msghdr *msg, int data_len)
 879{
 880	struct cmsghdr *cmsg;
 881	int size = 0;
 882	int cmsg_groups = 0;
 883	int retval;
 
 884
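     	/* cmsg_groups is a two-bit mask: bit 0 is set by RDMA_ARGS and the
     	 * atomic cmsgs, bit 1 by RDMA_DEST/RDMA_MAP.  A value of 3 (both
     	 * groups in one message) is rejected at the end of this function.
     	 */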
 885	for_each_cmsghdr(cmsg, msg) {
 886		if (!CMSG_OK(msg, cmsg))
 887			return -EINVAL;
 888
 889		if (cmsg->cmsg_level != SOL_RDS)
 890			continue;
 891
 892		switch (cmsg->cmsg_type) {
 893		case RDS_CMSG_RDMA_ARGS:
 894			cmsg_groups |= 1;
 895			retval = rds_rdma_extra_size(CMSG_DATA(cmsg));
 896			if (retval < 0)
 897				return retval;
 898			size += retval;
 899
 900			break;
 901
 902		case RDS_CMSG_RDMA_DEST:
 903		case RDS_CMSG_RDMA_MAP:
 904			cmsg_groups |= 2;
  905			/* these are valid but do not add any size */
 906			break;
 907
 908		case RDS_CMSG_ATOMIC_CSWP:
 909		case RDS_CMSG_ATOMIC_FADD:
 910		case RDS_CMSG_MASKED_ATOMIC_CSWP:
 911		case RDS_CMSG_MASKED_ATOMIC_FADD:
 912			cmsg_groups |= 1;
 913			size += sizeof(struct scatterlist);
 914			break;
 915
 916		default:
 917			return -EINVAL;
 918		}
 919
 920	}
 921
 922	size += ceil(data_len, PAGE_SIZE) * sizeof(struct scatterlist);
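     	/* ceil() rounds up to whole pages, e.g. a 9000 byte payload with 4KB
     	 * pages needs ceil(9000, 4096) = 3 scatterlist entries.
     	 */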
 923
 924	/* Ensure (DEST, MAP) are never used with (ARGS, ATOMIC) */
 925	if (cmsg_groups == 3)
 926		return -EINVAL;
 927
 928	return size;
 929}
 930
 931static int rds_cmsg_send(struct rds_sock *rs, struct rds_message *rm,
 932			 struct msghdr *msg, int *allocated_mr)
 933{
 934	struct cmsghdr *cmsg;
 935	int ret = 0;
 936
 937	for_each_cmsghdr(cmsg, msg) {
 938		if (!CMSG_OK(msg, cmsg))
 939			return -EINVAL;
 940
 941		if (cmsg->cmsg_level != SOL_RDS)
 942			continue;
 943
 944		/* As a side effect, RDMA_DEST and RDMA_MAP will set
 945		 * rm->rdma.m_rdma_cookie and rm->rdma.m_rdma_mr.
 946		 */
 947		switch (cmsg->cmsg_type) {
 948		case RDS_CMSG_RDMA_ARGS:
 949			ret = rds_cmsg_rdma_args(rs, rm, cmsg);
 950			break;
 951
 952		case RDS_CMSG_RDMA_DEST:
 953			ret = rds_cmsg_rdma_dest(rs, rm, cmsg);
 954			break;
 955
 956		case RDS_CMSG_RDMA_MAP:
 957			ret = rds_cmsg_rdma_map(rs, rm, cmsg);
 958			if (!ret)
 959				*allocated_mr = 1;
 960			break;
 961		case RDS_CMSG_ATOMIC_CSWP:
 962		case RDS_CMSG_ATOMIC_FADD:
 963		case RDS_CMSG_MASKED_ATOMIC_CSWP:
 964		case RDS_CMSG_MASKED_ATOMIC_FADD:
 965			ret = rds_cmsg_atomic(rs, rm, cmsg);
 966			break;
 967
 968		default:
 969			return -EINVAL;
 970		}
 971
 972		if (ret)
 973			break;
 974	}
 975
 976	return ret;
 977}
 978
 979int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
 980{
 981	struct sock *sk = sock->sk;
 982	struct rds_sock *rs = rds_sk_to_rs(sk);
 983	DECLARE_SOCKADDR(struct sockaddr_in *, usin, msg->msg_name);
 984	__be32 daddr;
 985	__be16 dport;
 986	struct rds_message *rm = NULL;
 987	struct rds_connection *conn;
 988	int ret = 0;
 989	int queued = 0, allocated_mr = 0;
 990	int nonblock = msg->msg_flags & MSG_DONTWAIT;
 991	long timeo = sock_sndtimeo(sk, nonblock);
 992
  993	/* Mirror Linux UDP's mirroring of BSD error message compatibility */
 994	/* XXX: Perhaps MSG_MORE someday */
 995	if (msg->msg_flags & ~(MSG_DONTWAIT | MSG_CMSG_COMPAT)) {
 996		ret = -EOPNOTSUPP;
 997		goto out;
 998	}
 999
1000	if (msg->msg_namelen) {
1001		/* XXX fail non-unicast destination IPs? */
1002		if (msg->msg_namelen < sizeof(*usin) || usin->sin_family != AF_INET) {
1003			ret = -EINVAL;
1004			goto out;
1005		}
1006		daddr = usin->sin_addr.s_addr;
1007		dport = usin->sin_port;
1008	} else {
1009		/* We only care about consistency with ->connect() */
1010		lock_sock(sk);
1011		daddr = rs->rs_conn_addr;
1012		dport = rs->rs_conn_port;
1013		release_sock(sk);
1014	}
1015
1016	lock_sock(sk);
1017	if (daddr == 0 || rs->rs_bound_addr == 0) {
1018		release_sock(sk);
1019		ret = -ENOTCONN; /* XXX not a great errno */
1020		goto out;
1021	}
1022	release_sock(sk);
1023
1024	if (payload_len > rds_sk_sndbuf(rs)) {
1025		ret = -EMSGSIZE;
1026		goto out;
1027	}
1028
1029	/* size of rm including all sgs */
1030	ret = rds_rm_size(msg, payload_len);
1031	if (ret < 0)
1032		goto out;
1033
1034	rm = rds_message_alloc(ret, GFP_KERNEL);
1035	if (!rm) {
1036		ret = -ENOMEM;
1037		goto out;
1038	}
1039
1040	/* Attach data to the rm */
1041	if (payload_len) {
1042		rm->data.op_sg = rds_message_alloc_sgs(rm, ceil(payload_len, PAGE_SIZE));
1043		if (!rm->data.op_sg) {
1044			ret = -ENOMEM;
1045			goto out;
1046		}
1047		ret = rds_message_copy_from_user(rm, &msg->msg_iter);
1048		if (ret)
1049			goto out;
1050	}
1051	rm->data.op_active = 1;
1052
1053	rm->m_daddr = daddr;
1054
1055	/* rds_conn_create has a spinlock that runs with IRQ off.
1056	 * Caching the conn in the socket helps a lot. */
1057	if (rs->rs_conn && rs->rs_conn->c_faddr == daddr)
1058		conn = rs->rs_conn;
1059	else {
1060		conn = rds_conn_create_outgoing(sock_net(sock->sk),
1061						rs->rs_bound_addr, daddr,
1062					rs->rs_transport,
1063					sock->sk->sk_allocation);
1064		if (IS_ERR(conn)) {
1065			ret = PTR_ERR(conn);
1066			goto out;
1067		}
1068		rs->rs_conn = conn;
1069	}
1070
1071	/* Parse any control messages the user may have included. */
1072	ret = rds_cmsg_send(rs, rm, msg, &allocated_mr);
1073	if (ret)
1074		goto out;
 
1075
1076	if (rm->rdma.op_active && !conn->c_trans->xmit_rdma) {
1077		printk_ratelimited(KERN_NOTICE "rdma_op %p conn xmit_rdma %p\n",
1078			       &rm->rdma, conn->c_trans->xmit_rdma);
1079		ret = -EOPNOTSUPP;
1080		goto out;
1081	}
1082
1083	if (rm->atomic.op_active && !conn->c_trans->xmit_atomic) {
1084		printk_ratelimited(KERN_NOTICE "atomic_op %p conn xmit_atomic %p\n",
1085			       &rm->atomic, conn->c_trans->xmit_atomic);
1086		ret = -EOPNOTSUPP;
1087		goto out;
1088	}
1089
1090	rds_conn_connect_if_down(conn);
1091
1092	ret = rds_cong_wait(conn->c_fcong, dport, nonblock, rs);
1093	if (ret) {
1094		rs->rs_seen_congestion = 1;
1095		goto out;
1096	}
1097
1098	while (!rds_send_queue_rm(rs, conn, rm, rs->rs_bound_port,
1099				  dport, &queued)) {
1100		rds_stats_inc(s_send_queue_full);
1101
1102		if (nonblock) {
1103			ret = -EAGAIN;
1104			goto out;
1105		}
1106
1107		timeo = wait_event_interruptible_timeout(*sk_sleep(sk),
1108					rds_send_queue_rm(rs, conn, rm,
1109							  rs->rs_bound_port,
1110							  dport,
1111							  &queued),
1112					timeo);
1113		rdsdebug("sendmsg woke queued %d timeo %ld\n", queued, timeo);
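     		/* wait_event_interruptible_timeout() returns the time left (or
     		 * MAX_SCHEDULE_TIMEOUT for an unbounded wait) once the message
     		 * is queued, 0 if the timeout expired, and a negative value if
     		 * a signal arrived; the latter two become -ETIMEDOUT or the
     		 * signal error below.
     		 */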
1114		if (timeo > 0 || timeo == MAX_SCHEDULE_TIMEOUT)
1115			continue;
1116
1117		ret = timeo;
1118		if (ret == 0)
1119			ret = -ETIMEDOUT;
1120		goto out;
1121	}
1122
1123	/*
1124	 * By now we've committed to the send.  We reuse rds_send_worker()
1125	 * to retry sends in the rds thread if the transport asks us to.
1126	 */
1127	rds_stats_inc(s_send_queued);
1128
1129	ret = rds_send_xmit(conn);
1130	if (ret == -ENOMEM || ret == -EAGAIN)
1131		queue_delayed_work(rds_wq, &conn->c_send_w, 1);
1132
1133	rds_message_put(rm);
1134	return payload_len;
1135
1136out:
1137	/* If the user included a RDMA_MAP cmsg, we allocated a MR on the fly.
1138	 * If the sendmsg goes through, we keep the MR. If it fails with EAGAIN
1139	 * or in any other way, we need to destroy the MR again */
1140	if (allocated_mr)
1141		rds_rdma_unuse(rs, rds_rdma_cookie_key(rm->m_rdma_cookie), 1);
1142
1143	if (rm)
1144		rds_message_put(rm);
1145	return ret;
1146}
1147
1148/*
1149 * Reply to a ping packet.
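      * A pong is a zero-length data message sent from source port 0 back to
      * the ping's source port; it is queued directly on the connection, with
      * no socket involved.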
1150 */
1151int
1152rds_send_pong(struct rds_connection *conn, __be16 dport)
 
1153{
1154	struct rds_message *rm;
1155	unsigned long flags;
1156	int ret = 0;
1157
1158	rm = rds_message_alloc(0, GFP_ATOMIC);
1159	if (!rm) {
1160		ret = -ENOMEM;
1161		goto out;
1162	}
1163
1164	rm->m_daddr = conn->c_faddr;
1165	rm->data.op_active = 1;
1166
1167	rds_conn_connect_if_down(conn);
1168
1169	ret = rds_cong_wait(conn->c_fcong, dport, 1, NULL);
1170	if (ret)
1171		goto out;
1172
1173	spin_lock_irqsave(&conn->c_lock, flags);
1174	list_add_tail(&rm->m_conn_item, &conn->c_send_queue);
1175	set_bit(RDS_MSG_ON_CONN, &rm->m_flags);
1176	rds_message_addref(rm);
1177	rm->m_inc.i_conn = conn;
 
1178
1179	rds_message_populate_header(&rm->m_inc.i_hdr, 0, dport,
1180				    conn->c_next_tx_seq);
1181	conn->c_next_tx_seq++;
1182	spin_unlock_irqrestore(&conn->c_lock, flags);
1183
1184	rds_stats_inc(s_send_queued);
1185	rds_stats_inc(s_send_pong);
1186
1187	/* schedule the send work on rds_wq */
1188	queue_delayed_work(rds_wq, &conn->c_send_w, 1);
1189
1190	rds_message_put(rm);
1191	return 0;
1192
1193out:
1194	if (rm)
1195		rds_message_put(rm);
1196	return ret;
1197}
v4.17
   1/*
   2 * Copyright (c) 2006, 2018 Oracle and/or its affiliates. All rights reserved.
   3 *
   4 * This software is available to you under a choice of one of two
   5 * licenses.  You may choose to be licensed under the terms of the GNU
   6 * General Public License (GPL) Version 2, available from the file
   7 * COPYING in the main directory of this source tree, or the
   8 * OpenIB.org BSD license below:
   9 *
  10 *     Redistribution and use in source and binary forms, with or
  11 *     without modification, are permitted provided that the following
  12 *     conditions are met:
  13 *
  14 *      - Redistributions of source code must retain the above
  15 *        copyright notice, this list of conditions and the following
  16 *        disclaimer.
  17 *
  18 *      - Redistributions in binary form must reproduce the above
  19 *        copyright notice, this list of conditions and the following
  20 *        disclaimer in the documentation and/or other materials
  21 *        provided with the distribution.
  22 *
  23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  30 * SOFTWARE.
  31 *
  32 */
  33#include <linux/kernel.h>
  34#include <linux/moduleparam.h>
  35#include <linux/gfp.h>
  36#include <net/sock.h>
  37#include <linux/in.h>
  38#include <linux/list.h>
  39#include <linux/ratelimit.h>
  40#include <linux/export.h>
  41#include <linux/sizes.h>
  42
  43#include "rds.h"
  44
  45/* When transmitting messages in rds_send_xmit, we need to emerge from
   46 * time to time and briefly release the CPU. Otherwise the soft lockup watchdog
  47 * will kick our shin.
  48 * Also, it seems fairer to not let one busy connection stall all the
  49 * others.
  50 *
  51 * send_batch_count is the number of times we'll loop in send_xmit. Setting
  52 * it to 0 will restore the old behavior (where we looped until we had
  53 * drained the queue).
  54 */
  55static int send_batch_count = SZ_1K;
  56module_param(send_batch_count, int, 0444);
  57MODULE_PARM_DESC(send_batch_count, " batch factor when working the send queue");
  58
  59static void rds_send_remove_from_sock(struct list_head *messages, int status);
  60
  61/*
  62 * Reset the send state.  Callers must ensure that this doesn't race with
  63 * rds_send_xmit().
  64 */
  65void rds_send_path_reset(struct rds_conn_path *cp)
  66{
  67	struct rds_message *rm, *tmp;
  68	unsigned long flags;
  69
  70	if (cp->cp_xmit_rm) {
  71		rm = cp->cp_xmit_rm;
  72		cp->cp_xmit_rm = NULL;
  73		/* Tell the user the RDMA op is no longer mapped by the
  74		 * transport. This isn't entirely true (it's flushed out
  75		 * independently) but as the connection is down, there's
  76		 * no ongoing RDMA to/from that memory */
  77		rds_message_unmapped(rm);
  78		rds_message_put(rm);
  79	}
  80
  81	cp->cp_xmit_sg = 0;
  82	cp->cp_xmit_hdr_off = 0;
  83	cp->cp_xmit_data_off = 0;
  84	cp->cp_xmit_atomic_sent = 0;
  85	cp->cp_xmit_rdma_sent = 0;
  86	cp->cp_xmit_data_sent = 0;
  87
  88	cp->cp_conn->c_map_queued = 0;
  89
  90	cp->cp_unacked_packets = rds_sysctl_max_unacked_packets;
  91	cp->cp_unacked_bytes = rds_sysctl_max_unacked_bytes;
  92
  93	/* Mark messages as retransmissions, and move them to the send q */
  94	spin_lock_irqsave(&cp->cp_lock, flags);
  95	list_for_each_entry_safe(rm, tmp, &cp->cp_retrans, m_conn_item) {
  96		set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);
  97		set_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags);
  98	}
  99	list_splice_init(&cp->cp_retrans, &cp->cp_send_queue);
 100	spin_unlock_irqrestore(&cp->cp_lock, flags);
 101}
 102EXPORT_SYMBOL_GPL(rds_send_path_reset);
 103
 104static int acquire_in_xmit(struct rds_conn_path *cp)
 105{
 106	return test_and_set_bit(RDS_IN_XMIT, &cp->cp_flags) == 0;
 107}
 108
 109static void release_in_xmit(struct rds_conn_path *cp)
 110{
 111	clear_bit(RDS_IN_XMIT, &cp->cp_flags);
 112	smp_mb__after_atomic();
 113	/*
 114	 * We don't use wait_on_bit()/wake_up_bit() because our waking is in a
 115	 * hot path and finding waiters is very rare.  We don't want to walk
 116	 * the system-wide hashed waitqueue buckets in the fast path only to
 117	 * almost never find waiters.
 118	 */
 119	if (waitqueue_active(&cp->cp_waitq))
 120		wake_up_all(&cp->cp_waitq);
 121}
 122
 123/*
 124 * We're making the conscious trade-off here to only send one message
 125 * down the connection at a time.
 126 *   Pro:
 127 *      - tx queueing is a simple fifo list
 128 *   	- reassembly is optional and easily done by transports per conn
 129 *      - no per flow rx lookup at all, straight to the socket
 130 *   	- less per-frag memory and wire overhead
 131 *   Con:
 132 *      - queued acks can be delayed behind large messages
 133 *   Depends:
 134 *      - small message latency is higher behind queued large messages
 135 *      - large message latency isn't starved by intervening small sends
 136 */
 137int rds_send_xmit(struct rds_conn_path *cp)
 138{
 139	struct rds_connection *conn = cp->cp_conn;
 140	struct rds_message *rm;
 141	unsigned long flags;
 142	unsigned int tmp;
 143	struct scatterlist *sg;
 144	int ret = 0;
 145	LIST_HEAD(to_be_dropped);
 146	int batch_count;
 147	unsigned long send_gen = 0;
 148
 149restart:
 150	batch_count = 0;
 151
 152	/*
 153	 * sendmsg calls here after having queued its message on the send
 154	 * queue.  We only have one task feeding the connection at a time.  If
 155	 * another thread is already feeding the queue then we back off.  This
 156	 * avoids blocking the caller and trading per-connection data between
 157	 * caches per message.
 158	 */
 159	if (!acquire_in_xmit(cp)) {
 160		rds_stats_inc(s_send_lock_contention);
 161		ret = -ENOMEM;
 162		goto out;
 163	}
 164
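     	/* Bail out early if the connection (or its net namespace) is being
     	 * torn down; -ENETUNREACH tells callers not to requeue the send
     	 * worker against a dying connection.
     	 */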
 165	if (rds_destroy_pending(cp->cp_conn)) {
 166		release_in_xmit(cp);
 167		ret = -ENETUNREACH; /* dont requeue send work */
 168		goto out;
 169	}
 170
 171	/*
 172	 * we record the send generation after doing the xmit acquire.
 173	 * if someone else manages to jump in and do some work, we'll use
 174	 * this to avoid a goto restart farther down.
 175	 *
 176	 * The acquire_in_xmit() check above ensures that only one
 177	 * caller can increment c_send_gen at any time.
 178	 */
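     	/* READ_ONCE/WRITE_ONCE pair with the lockless re-read of cp_send_gen
     	 * after release_in_xmit() below, where a changed generation tells us
     	 * another sender has already done the work for us.
     	 */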
 179	send_gen = READ_ONCE(cp->cp_send_gen) + 1;
 180	WRITE_ONCE(cp->cp_send_gen, send_gen);
 181
 182	/*
 183	 * rds_conn_shutdown() sets the conn state and then tests RDS_IN_XMIT,
 184	 * we do the opposite to avoid races.
 185	 */
 186	if (!rds_conn_path_up(cp)) {
 187		release_in_xmit(cp);
 188		ret = 0;
 189		goto out;
 190	}
 191
 192	if (conn->c_trans->xmit_path_prepare)
 193		conn->c_trans->xmit_path_prepare(cp);
 194
 195	/*
 196	 * spin trying to push headers and data down the connection until
 197	 * the connection doesn't make forward progress.
 198	 */
 199	while (1) {
 200
 201		rm = cp->cp_xmit_rm;
 202
 203		/*
 204		 * If between sending messages, we can send a pending congestion
 205		 * map update.
 206		 */
 207		if (!rm && test_and_clear_bit(0, &conn->c_map_queued)) {
 208			rm = rds_cong_update_alloc(conn);
 209			if (IS_ERR(rm)) {
 210				ret = PTR_ERR(rm);
 211				break;
 212			}
 213			rm->data.op_active = 1;
 214			rm->m_inc.i_conn_path = cp;
 215			rm->m_inc.i_conn = cp->cp_conn;
 216
 217			cp->cp_xmit_rm = rm;
 218		}
 219
 220		/*
 221		 * If not already working on one, grab the next message.
 222		 *
 223		 * cp_xmit_rm holds a ref while we're sending this message down
  224		 * the connection.  We can use this ref while holding the
  225		 * send_sem; rds_send_reset() is serialized with it.
 226		 */
 227		if (!rm) {
 228			unsigned int len;
 229
 230			batch_count++;
 231
 232			/* we want to process as big a batch as we can, but
 233			 * we also want to avoid softlockups.  If we've been
  234			 * through a lot of messages, let's back off and see
  235			 * if anyone else jumps in.
 236			 */
 237			if (batch_count >= send_batch_count)
 238				goto over_batch;
 239
 240			spin_lock_irqsave(&cp->cp_lock, flags);
 241
 242			if (!list_empty(&cp->cp_send_queue)) {
 243				rm = list_entry(cp->cp_send_queue.next,
 244						struct rds_message,
 245						m_conn_item);
 246				rds_message_addref(rm);
 247
 248				/*
 249				 * Move the message from the send queue to the retransmit
 250				 * list right away.
 251				 */
 252				list_move_tail(&rm->m_conn_item,
 253					       &cp->cp_retrans);
 254			}
 255
 256			spin_unlock_irqrestore(&cp->cp_lock, flags);
 257
 258			if (!rm)
 259				break;
 260
  261			/* Unfortunately, the way InfiniBand deals with
  262			 * RDMA to a bad MR key is by moving the entire
  263			 * queue pair to error state. We could possibly
 264			 * recover from that, but right now we drop the
 265			 * connection.
 266			 * Therefore, we never retransmit messages with RDMA ops.
 267			 */
 268			if (test_bit(RDS_MSG_FLUSH, &rm->m_flags) ||
 269			    (rm->rdma.op_active &&
 270			    test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags))) {
 271				spin_lock_irqsave(&cp->cp_lock, flags);
 272				if (test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags))
 273					list_move(&rm->m_conn_item, &to_be_dropped);
 274				spin_unlock_irqrestore(&cp->cp_lock, flags);
 275				continue;
 276			}
 277
 278			/* Require an ACK every once in a while */
 279			len = ntohl(rm->m_inc.i_hdr.h_len);
 280			if (cp->cp_unacked_packets == 0 ||
 281			    cp->cp_unacked_bytes < len) {
 282				set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);
 283
 284				cp->cp_unacked_packets =
 285					rds_sysctl_max_unacked_packets;
 286				cp->cp_unacked_bytes =
 287					rds_sysctl_max_unacked_bytes;
 288				rds_stats_inc(s_send_ack_required);
 289			} else {
 290				cp->cp_unacked_bytes -= len;
 291				cp->cp_unacked_packets--;
 292			}
 293
 294			cp->cp_xmit_rm = rm;
 295		}
 296
 297		/* The transport either sends the whole rdma or none of it */
 298		if (rm->rdma.op_active && !cp->cp_xmit_rdma_sent) {
 299			rm->m_final_op = &rm->rdma;
 300			/* The transport owns the mapped memory for now.
 301			 * You can't unmap it while it's on the send queue
 302			 */
 303			set_bit(RDS_MSG_MAPPED, &rm->m_flags);
 304			ret = conn->c_trans->xmit_rdma(conn, &rm->rdma);
 305			if (ret) {
 306				clear_bit(RDS_MSG_MAPPED, &rm->m_flags);
 307				wake_up_interruptible(&rm->m_flush_wait);
 308				break;
 309			}
 310			cp->cp_xmit_rdma_sent = 1;
 311
 312		}
 313
 314		if (rm->atomic.op_active && !cp->cp_xmit_atomic_sent) {
 315			rm->m_final_op = &rm->atomic;
 316			/* The transport owns the mapped memory for now.
 317			 * You can't unmap it while it's on the send queue
 318			 */
 319			set_bit(RDS_MSG_MAPPED, &rm->m_flags);
 320			ret = conn->c_trans->xmit_atomic(conn, &rm->atomic);
 321			if (ret) {
 322				clear_bit(RDS_MSG_MAPPED, &rm->m_flags);
 323				wake_up_interruptible(&rm->m_flush_wait);
 324				break;
 325			}
 326			cp->cp_xmit_atomic_sent = 1;
 327
 328		}
 329
 330		/*
 331		 * A number of cases require an RDS header to be sent
 332		 * even if there is no data.
 333		 * We permit 0-byte sends; rds-ping depends on this.
 334		 * However, if there are exclusively attached silent ops,
 335		 * we skip the hdr/data send, to enable silent operation.
 336		 */
 337		if (rm->data.op_nents == 0) {
 338			int ops_present;
 339			int all_ops_are_silent = 1;
 340
 341			ops_present = (rm->atomic.op_active || rm->rdma.op_active);
 342			if (rm->atomic.op_active && !rm->atomic.op_silent)
 343				all_ops_are_silent = 0;
 344			if (rm->rdma.op_active && !rm->rdma.op_silent)
 345				all_ops_are_silent = 0;
 346
 347			if (ops_present && all_ops_are_silent
 348			    && !rm->m_rdma_cookie)
 349				rm->data.op_active = 0;
 350		}
 351
 352		if (rm->data.op_active && !cp->cp_xmit_data_sent) {
 353			rm->m_final_op = &rm->data;
 354
 355			ret = conn->c_trans->xmit(conn, rm,
 356						  cp->cp_xmit_hdr_off,
 357						  cp->cp_xmit_sg,
 358						  cp->cp_xmit_data_off);
 359			if (ret <= 0)
 360				break;
 361
 362			if (cp->cp_xmit_hdr_off < sizeof(struct rds_header)) {
 363				tmp = min_t(int, ret,
 364					    sizeof(struct rds_header) -
 365					    cp->cp_xmit_hdr_off);
 366				cp->cp_xmit_hdr_off += tmp;
 367				ret -= tmp;
 368			}
 369
 370			sg = &rm->data.op_sg[cp->cp_xmit_sg];
 371			while (ret) {
 372				tmp = min_t(int, ret, sg->length -
 373						      cp->cp_xmit_data_off);
 374				cp->cp_xmit_data_off += tmp;
 375				ret -= tmp;
 376				if (cp->cp_xmit_data_off == sg->length) {
 377					cp->cp_xmit_data_off = 0;
 378					sg++;
 379					cp->cp_xmit_sg++;
 380					BUG_ON(ret != 0 && cp->cp_xmit_sg ==
 381					       rm->data.op_nents);
 382				}
 383			}
 384
 385			if (cp->cp_xmit_hdr_off == sizeof(struct rds_header) &&
 386			    (cp->cp_xmit_sg == rm->data.op_nents))
 387				cp->cp_xmit_data_sent = 1;
 388		}
 389
 390		/*
 391		 * A rm will only take multiple times through this loop
 392		 * if there is a data op. Thus, if the data is sent (or there was
 393		 * none), then we're done with the rm.
 394		 */
 395		if (!rm->data.op_active || cp->cp_xmit_data_sent) {
 396			cp->cp_xmit_rm = NULL;
 397			cp->cp_xmit_sg = 0;
 398			cp->cp_xmit_hdr_off = 0;
 399			cp->cp_xmit_data_off = 0;
 400			cp->cp_xmit_rdma_sent = 0;
 401			cp->cp_xmit_atomic_sent = 0;
 402			cp->cp_xmit_data_sent = 0;
 403
 404			rds_message_put(rm);
 405		}
 406	}
 407
 408over_batch:
 409	if (conn->c_trans->xmit_path_complete)
 410		conn->c_trans->xmit_path_complete(cp);
 411	release_in_xmit(cp);
 412
 413	/* Nuke any messages we decided not to retransmit. */
 414	if (!list_empty(&to_be_dropped)) {
 415		/* irqs on here, so we can put(), unlike above */
 416		list_for_each_entry(rm, &to_be_dropped, m_conn_item)
 417			rds_message_put(rm);
 418		rds_send_remove_from_sock(&to_be_dropped, RDS_RDMA_DROPPED);
 419	}
 420
 421	/*
 422	 * Other senders can queue a message after we last test the send queue
 423	 * but before we clear RDS_IN_XMIT.  In that case they'd back off and
 424	 * not try and send their newly queued message.  We need to check the
 425	 * send queue after having cleared RDS_IN_XMIT so that their message
 426	 * doesn't get stuck on the send queue.
 427	 *
  428	 * If the transport cannot continue (i.e. ret != 0), then it must
 429	 * call us when more room is available, such as from the tx
 430	 * completion handler.
 431	 *
 432	 * We have an extra generation check here so that if someone manages
 433	 * to jump in after our release_in_xmit, we'll see that they have done
 434	 * some work and we will skip our goto
 435	 */
 436	if (ret == 0) {
 437		bool raced;
 438
 439		smp_mb();
 440		raced = send_gen != READ_ONCE(cp->cp_send_gen);
 441
 442		if ((test_bit(0, &conn->c_map_queued) ||
 443		    !list_empty(&cp->cp_send_queue)) && !raced) {
 444			if (batch_count < send_batch_count)
 445				goto restart;
 446			rcu_read_lock();
 447			if (rds_destroy_pending(cp->cp_conn))
 448				ret = -ENETUNREACH;
 449			else
 450				queue_delayed_work(rds_wq, &cp->cp_send_w, 1);
 451			rcu_read_unlock();
 452		} else if (raced) {
 453			rds_stats_inc(s_send_lock_queue_raced);
 454		}
 455	}
 456out:
 457	return ret;
 458}
 459EXPORT_SYMBOL_GPL(rds_send_xmit);
 460
 461static void rds_send_sndbuf_remove(struct rds_sock *rs, struct rds_message *rm)
 462{
 463	u32 len = be32_to_cpu(rm->m_inc.i_hdr.h_len);
 464
 465	assert_spin_locked(&rs->rs_lock);
 466
 467	BUG_ON(rs->rs_snd_bytes < len);
 468	rs->rs_snd_bytes -= len;
 469
 470	if (rs->rs_snd_bytes == 0)
 471		rds_stats_inc(s_send_queue_empty);
 472}
 473
 474static inline int rds_send_is_acked(struct rds_message *rm, u64 ack,
 475				    is_acked_func is_acked)
 476{
 477	if (is_acked)
 478		return is_acked(rm, ack);
 479	return be64_to_cpu(rm->m_inc.i_hdr.h_sequence) <= ack;
 480}
 481
 482/*
 483 * This is pretty similar to what happens below in the ACK
 484 * handling code - except that we call here as soon as we get
 485 * the IB send completion on the RDMA op and the accompanying
 486 * message.
 487 */
 488void rds_rdma_send_complete(struct rds_message *rm, int status)
 489{
 490	struct rds_sock *rs = NULL;
 491	struct rm_rdma_op *ro;
 492	struct rds_notifier *notifier;
 493	unsigned long flags;
 494	unsigned int notify = 0;
 495
 496	spin_lock_irqsave(&rm->m_rs_lock, flags);
 497
 498	notify =  rm->rdma.op_notify | rm->data.op_notify;
 499	ro = &rm->rdma;
 500	if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags) &&
 501	    ro->op_active && notify && ro->op_notifier) {
 502		notifier = ro->op_notifier;
 503		rs = rm->m_rs;
 504		sock_hold(rds_rs_to_sk(rs));
 505
 506		notifier->n_status = status;
 507		spin_lock(&rs->rs_lock);
 508		list_add_tail(&notifier->n_list, &rs->rs_notify_queue);
 509		spin_unlock(&rs->rs_lock);
 510
 511		ro->op_notifier = NULL;
 512	}
 513
 514	spin_unlock_irqrestore(&rm->m_rs_lock, flags);
 515
 516	if (rs) {
 517		rds_wake_sk_sleep(rs);
 518		sock_put(rds_rs_to_sk(rs));
 519	}
 520}
 521EXPORT_SYMBOL_GPL(rds_rdma_send_complete);
 522
 523/*
 524 * Just like above, except looks at atomic op
 525 */
 526void rds_atomic_send_complete(struct rds_message *rm, int status)
 527{
 528	struct rds_sock *rs = NULL;
 529	struct rm_atomic_op *ao;
 530	struct rds_notifier *notifier;
 531	unsigned long flags;
 532
 533	spin_lock_irqsave(&rm->m_rs_lock, flags);
 534
 535	ao = &rm->atomic;
 536	if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags)
 537	    && ao->op_active && ao->op_notify && ao->op_notifier) {
 538		notifier = ao->op_notifier;
 539		rs = rm->m_rs;
 540		sock_hold(rds_rs_to_sk(rs));
 541
 542		notifier->n_status = status;
 543		spin_lock(&rs->rs_lock);
 544		list_add_tail(&notifier->n_list, &rs->rs_notify_queue);
 545		spin_unlock(&rs->rs_lock);
 546
 547		ao->op_notifier = NULL;
 548	}
 549
 550	spin_unlock_irqrestore(&rm->m_rs_lock, flags);
 551
 552	if (rs) {
 553		rds_wake_sk_sleep(rs);
 554		sock_put(rds_rs_to_sk(rs));
 555	}
 556}
 557EXPORT_SYMBOL_GPL(rds_atomic_send_complete);
 558
 559/*
 560 * This is the same as rds_rdma_send_complete except we
 561 * don't do any locking - we have all the ingredients (message,
 562 * socket, socket lock) and can just move the notifier.
 563 */
 564static inline void
 565__rds_send_complete(struct rds_sock *rs, struct rds_message *rm, int status)
 566{
 567	struct rm_rdma_op *ro;
 568	struct rm_atomic_op *ao;
 569
 570	ro = &rm->rdma;
 571	if (ro->op_active && ro->op_notify && ro->op_notifier) {
 572		ro->op_notifier->n_status = status;
 573		list_add_tail(&ro->op_notifier->n_list, &rs->rs_notify_queue);
 574		ro->op_notifier = NULL;
 575	}
 576
 577	ao = &rm->atomic;
 578	if (ao->op_active && ao->op_notify && ao->op_notifier) {
 579		ao->op_notifier->n_status = status;
 580		list_add_tail(&ao->op_notifier->n_list, &rs->rs_notify_queue);
 581		ao->op_notifier = NULL;
 582	}
 583
 584	/* No need to wake the app - caller does this */
 585}
 586
 587/*
 588 * This removes messages from the socket's list if they're on it.  The list
 589 * argument must be private to the caller, as we must be able to modify it
 590 * without locks.  The messages must have a reference held for their
 591 * position on the list.  This function will drop that reference after
 592 * removing the messages from the 'messages' list regardless of whether it found
 593 * the messages on the socket list or not.
 594 */
 595static void rds_send_remove_from_sock(struct list_head *messages, int status)
 596{
 597	unsigned long flags;
 598	struct rds_sock *rs = NULL;
 599	struct rds_message *rm;
 600
 601	while (!list_empty(messages)) {
 602		int was_on_sock = 0;
 603
 604		rm = list_entry(messages->next, struct rds_message,
 605				m_conn_item);
 606		list_del_init(&rm->m_conn_item);
 607
 608		/*
 609		 * If we see this flag cleared then we're *sure* that someone
 610		 * else beat us to removing it from the sock.  If we race
 611		 * with their flag update we'll get the lock and then really
 612		 * see that the flag has been cleared.
 613		 *
 614		 * The message spinlock makes sure nobody clears rm->m_rs
 615		 * while we're messing with it. It does not prevent the
 616		 * message from being removed from the socket, though.
 617		 */
 618		spin_lock_irqsave(&rm->m_rs_lock, flags);
 619		if (!test_bit(RDS_MSG_ON_SOCK, &rm->m_flags))
 620			goto unlock_and_drop;
 621
 622		if (rs != rm->m_rs) {
 623			if (rs) {
 624				rds_wake_sk_sleep(rs);
 625				sock_put(rds_rs_to_sk(rs));
 626			}
 627			rs = rm->m_rs;
 628			if (rs)
 629				sock_hold(rds_rs_to_sk(rs));
 630		}
 631		if (!rs)
 632			goto unlock_and_drop;
 633		spin_lock(&rs->rs_lock);
 634
 635		if (test_and_clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags)) {
 636			struct rm_rdma_op *ro = &rm->rdma;
 637			struct rds_notifier *notifier;
 638
 639			list_del_init(&rm->m_sock_item);
 640			rds_send_sndbuf_remove(rs, rm);
 641
 642			if (ro->op_active && ro->op_notifier &&
 643			       (ro->op_notify || (ro->op_recverr && status))) {
 644				notifier = ro->op_notifier;
 645				list_add_tail(&notifier->n_list,
 646						&rs->rs_notify_queue);
 647				if (!notifier->n_status)
 648					notifier->n_status = status;
 649				rm->rdma.op_notifier = NULL;
 650			}
 651			was_on_sock = 1;
 652		}
 653		spin_unlock(&rs->rs_lock);
 654
 655unlock_and_drop:
 656		spin_unlock_irqrestore(&rm->m_rs_lock, flags);
 657		rds_message_put(rm);
 658		if (was_on_sock)
 659			rds_message_put(rm);
 660	}
 661
 662	if (rs) {
 663		rds_wake_sk_sleep(rs);
 664		sock_put(rds_rs_to_sk(rs));
 665	}
 666}
 667
 668/*
 669 * Transports call here when they've determined that the receiver queued
 670 * messages up to, and including, the given sequence number.  Messages are
 671 * moved to the retrans queue when rds_send_xmit picks them off the send
 672 * queue. This means that in the TCP case, the message may not have been
 673 * assigned the m_ack_seq yet - but that's fine as long as tcp_is_acked
 674 * checks the RDS_MSG_HAS_ACK_SEQ bit.
 675 */
 676void rds_send_path_drop_acked(struct rds_conn_path *cp, u64 ack,
 677			      is_acked_func is_acked)
 678{
 679	struct rds_message *rm, *tmp;
 680	unsigned long flags;
 681	LIST_HEAD(list);
 682
 683	spin_lock_irqsave(&cp->cp_lock, flags);
 684
 685	list_for_each_entry_safe(rm, tmp, &cp->cp_retrans, m_conn_item) {
 686		if (!rds_send_is_acked(rm, ack, is_acked))
 687			break;
 688
 689		list_move(&rm->m_conn_item, &list);
 690		clear_bit(RDS_MSG_ON_CONN, &rm->m_flags);
 691	}
 692
 693	/* order flag updates with spin locks */
 694	if (!list_empty(&list))
 695		smp_mb__after_atomic();
 696
 697	spin_unlock_irqrestore(&cp->cp_lock, flags);
 698
 699	/* now remove the messages from the sock list as needed */
 700	rds_send_remove_from_sock(&list, RDS_RDMA_SUCCESS);
 701}
 702EXPORT_SYMBOL_GPL(rds_send_path_drop_acked);
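    /*
     * Illustrative sketch (not part of this file): per the comment above, a
     * stream transport's is_acked callback must cope with messages that have
     * not yet been assigned m_ack_seq.  A minimal callback along those
     * lines, plus the call such a transport might make once it learns how
     * far the peer has acknowledged, could look like this; both function
     * names are hypothetical.
     *
     *	static int example_is_acked(struct rds_message *rm, uint64_t ack)
     *	{
     *		if (!test_bit(RDS_MSG_HAS_ACK_SEQ, &rm->m_flags))
     *			return 0;
     *		return rm->m_ack_seq <= ack;
     *	}
     *
     *	static void example_peer_acked(struct rds_conn_path *cp, u64 acked)
     *	{
     *		rds_send_path_drop_acked(cp, acked, example_is_acked);
     *	}
     */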
 703
 704void rds_send_drop_acked(struct rds_connection *conn, u64 ack,
 705			 is_acked_func is_acked)
 706{
 707	WARN_ON(conn->c_trans->t_mp_capable);
 708	rds_send_path_drop_acked(&conn->c_path[0], ack, is_acked);
 709}
 710EXPORT_SYMBOL_GPL(rds_send_drop_acked);
 711
 712void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest)
 713{
 714	struct rds_message *rm, *tmp;
 715	struct rds_connection *conn;
 716	struct rds_conn_path *cp;
 717	unsigned long flags;
 718	LIST_HEAD(list);
 719
 720	/* get all the messages we're dropping under the rs lock */
 721	spin_lock_irqsave(&rs->rs_lock, flags);
 722
 723	list_for_each_entry_safe(rm, tmp, &rs->rs_send_queue, m_sock_item) {
 724		if (dest && (dest->sin_addr.s_addr != rm->m_daddr ||
 725			     dest->sin_port != rm->m_inc.i_hdr.h_dport))
 726			continue;
 727
 728		list_move(&rm->m_sock_item, &list);
 729		rds_send_sndbuf_remove(rs, rm);
 730		clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags);
 731	}
 732
 733	/* order flag updates with the rs lock */
 734	smp_mb__after_atomic();
 735
 736	spin_unlock_irqrestore(&rs->rs_lock, flags);
 737
 738	if (list_empty(&list))
 739		return;
 740
 741	/* Remove the messages from the conn */
 742	list_for_each_entry(rm, &list, m_sock_item) {
 743
 744		conn = rm->m_inc.i_conn;
 745		if (conn->c_trans->t_mp_capable)
 746			cp = rm->m_inc.i_conn_path;
 747		else
 748			cp = &conn->c_path[0];
 749
 750		spin_lock_irqsave(&cp->cp_lock, flags);
 751		/*
 752		 * Maybe someone else beat us to removing rm from the conn.
 753		 * If we race with their flag update we'll get the lock and
 754		 * then really see that the flag has been cleared.
 755		 */
 756		if (!test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags)) {
 757			spin_unlock_irqrestore(&cp->cp_lock, flags);
 758			continue;
 759		}
 760		list_del_init(&rm->m_conn_item);
 761		spin_unlock_irqrestore(&cp->cp_lock, flags);
 762
 763		/*
 764		 * Couldn't grab m_rs_lock in top loop (lock ordering),
 765		 * but we can now.
 766		 */
 767		spin_lock_irqsave(&rm->m_rs_lock, flags);
 768
 769		spin_lock(&rs->rs_lock);
 770		__rds_send_complete(rs, rm, RDS_RDMA_CANCELED);
 771		spin_unlock(&rs->rs_lock);
 772
 773		spin_unlock_irqrestore(&rm->m_rs_lock, flags);
 774
 775		rds_message_put(rm);
 776	}
 777
 778	rds_wake_sk_sleep(rs);
 779
 780	while (!list_empty(&list)) {
 781		rm = list_entry(list.next, struct rds_message, m_sock_item);
 782		list_del_init(&rm->m_sock_item);
 783		rds_message_wait(rm);
 784
 785		/* just in case the code above skipped this message
 786		 * because RDS_MSG_ON_CONN wasn't set, run it again here.
 787		 * Taking m_rs_lock is the only thing that keeps us
 788		 * from racing with ack processing.
 789		 */
 790		spin_lock_irqsave(&rm->m_rs_lock, flags);
 791
 792		spin_lock(&rs->rs_lock);
 793		__rds_send_complete(rs, rm, RDS_RDMA_CANCELED);
 794		spin_unlock(&rs->rs_lock);
 795
 796		spin_unlock_irqrestore(&rm->m_rs_lock, flags);
 797
 798		rds_message_put(rm);
 799	}
 800}
 801
 802/*
 803 * We only want this to fire once so we use the caller's 'queued'.  It's
 804 * possible that another thread can race with us and remove the
 805 * message from the flow with RDS_CANCEL_SENT_TO.
 806 */
 807static int rds_send_queue_rm(struct rds_sock *rs, struct rds_connection *conn,
 808			     struct rds_conn_path *cp,
 809			     struct rds_message *rm, __be16 sport,
 810			     __be16 dport, int *queued)
 811{
 812	unsigned long flags;
 813	u32 len;
 814
 815	if (*queued)
 816		goto out;
 817
 818	len = be32_to_cpu(rm->m_inc.i_hdr.h_len);
 819
 820	/* this is the only place which holds both the socket's rs_lock
 821	 * and the connection's c_lock */
 822	spin_lock_irqsave(&rs->rs_lock, flags);
 823
 824	/*
 825	 * If there is a little space in sndbuf, we don't queue anything,
 826	 * and userspace gets -EAGAIN. But poll() indicates there's send
 827	 * room. This can lead to bad behavior (spinning) if snd_bytes isn't
 828	 * freed up by incoming acks. So we check the *old* value of
 829	 * rs_snd_bytes here to allow the last msg to exceed the buffer,
 830	 * and poll() now knows no more data can be sent.
 831	 */
 832	if (rs->rs_snd_bytes < rds_sk_sndbuf(rs)) {
 833		rs->rs_snd_bytes += len;
 834
 835		/* Let the recv side know we are close to send space exhaustion.
 836		 * This is probably not the optimal way to do it, as this
 837		 * means we set the flag on *all* messages as soon as the
 838		 * send queue is more than half full.
 839		 */
 840		if (rs->rs_snd_bytes >= rds_sk_sndbuf(rs) / 2)
 841			set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);
 842
 843		list_add_tail(&rm->m_sock_item, &rs->rs_send_queue);
 844		set_bit(RDS_MSG_ON_SOCK, &rm->m_flags);
 845		rds_message_addref(rm);
 846		sock_hold(rds_rs_to_sk(rs));
 847		rm->m_rs = rs;
 848
 849		/* The code ordering is a little weird, but we're
 850		   trying to minimize the time we hold c_lock */
 851		rds_message_populate_header(&rm->m_inc.i_hdr, sport, dport, 0);
 852		rm->m_inc.i_conn = conn;
 853		rm->m_inc.i_conn_path = cp;
 854		rds_message_addref(rm);
 855
 856		spin_lock(&cp->cp_lock);
 857		rm->m_inc.i_hdr.h_sequence = cpu_to_be64(cp->cp_next_tx_seq++);
 858		list_add_tail(&rm->m_conn_item, &cp->cp_send_queue);
 859		set_bit(RDS_MSG_ON_CONN, &rm->m_flags);
 860		spin_unlock(&cp->cp_lock);
 861
 862		rdsdebug("queued msg %p len %d, rs %p bytes %d seq %llu\n",
 863			 rm, len, rs, rs->rs_snd_bytes,
 864			 (unsigned long long)be64_to_cpu(rm->m_inc.i_hdr.h_sequence));
 865
 866		*queued = 1;
 867	}
 868
 869	spin_unlock_irqrestore(&rs->rs_lock, flags);
 870out:
 871	return *queued;
 872}
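    /*
     * Worked example of the sndbuf accounting above (illustrative numbers,
     * assuming a 64 KiB sndbuf): with rs_snd_bytes at 60 KiB the *old* value
     * is still below the limit, so a 16 KiB message is queued and
     * rs_snd_bytes grows to 76 KiB.  The next send sees 76 KiB >= 64 KiB and
     * fails with -EAGAIN, but by then poll() has stopped reporting send
     * room, so a well-behaved caller sleeps instead of spinning.  Because
     * rs_snd_bytes has also crossed half the sndbuf (32 KiB here),
     * RDS_MSG_ACK_REQUIRED is set on messages queued from that point on,
     * prodding the peer to ack and free up send space.
     */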
 873
 874/*
 875 * rds_message is getting to be quite complicated, and we'd like to allocate
 876 * it all in one go. This figures out how big it needs to be up front.
 877 */
 878static int rds_rm_size(struct msghdr *msg, int num_sgs)
 879{
 880	struct cmsghdr *cmsg;
 881	int size = 0;
 882	int cmsg_groups = 0;
 883	int retval;
 884	bool zcopy_cookie = false;
 885
 886	for_each_cmsghdr(cmsg, msg) {
 887		if (!CMSG_OK(msg, cmsg))
 888			return -EINVAL;
 889
 890		if (cmsg->cmsg_level != SOL_RDS)
 891			continue;
 892
 893		switch (cmsg->cmsg_type) {
 894		case RDS_CMSG_RDMA_ARGS:
 895			cmsg_groups |= 1;
 896			retval = rds_rdma_extra_size(CMSG_DATA(cmsg));
 897			if (retval < 0)
 898				return retval;
 899			size += retval;
 900
 901			break;
 902
 903		case RDS_CMSG_ZCOPY_COOKIE:
 904			zcopy_cookie = true;
 905			/* fall through */
 906
 907		case RDS_CMSG_RDMA_DEST:
 908		case RDS_CMSG_RDMA_MAP:
 909			cmsg_groups |= 2;
 910			/* these are valid but do not add any size */
 911			break;
 912
 913		case RDS_CMSG_ATOMIC_CSWP:
 914		case RDS_CMSG_ATOMIC_FADD:
 915		case RDS_CMSG_MASKED_ATOMIC_CSWP:
 916		case RDS_CMSG_MASKED_ATOMIC_FADD:
 917			cmsg_groups |= 1;
 918			size += sizeof(struct scatterlist);
 919			break;
 920
 921		default:
 922			return -EINVAL;
 923		}
 924
 925	}
 926
 927	if ((msg->msg_flags & MSG_ZEROCOPY) && !zcopy_cookie)
 928		return -EINVAL;
 929
 930	size += num_sgs * sizeof(struct scatterlist);
 931
 932	/* Ensure (DEST, MAP) are never used with (ARGS, ATOMIC) */
 933	if (cmsg_groups == 3)
 934		return -EINVAL;
 935
 936	return size;
 937}
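    /*
     * Worked example of the sizing above (illustrative, assuming 4 KiB
     * pages): a 10000-byte payload gives num_sgs = ceil(10000 / 4096) = 3
     * data scatterlist entries, and one RDS_CMSG_ATOMIC_FADD cmsg adds one
     * more, so the message is allocated with room for 4 struct scatterlist
     * entries plus whatever rds_rdma_extra_size() would have contributed for
     * an RDS_CMSG_RDMA_ARGS cmsg (none here).  Adding an RDS_CMSG_RDMA_DEST
     * or RDS_CMSG_RDMA_MAP cmsg to that same message would set both group
     * bits (cmsg_groups == 3) and make this function return -EINVAL.
     */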
 938
 939static int rds_cmsg_zcopy(struct rds_sock *rs, struct rds_message *rm,
 940			  struct cmsghdr *cmsg)
 941{
 942	u32 *cookie;
 943
 944	if (cmsg->cmsg_len < CMSG_LEN(sizeof(*cookie)) ||
 945	    !rm->data.op_mmp_znotifier)
 946		return -EINVAL;
 947	cookie = CMSG_DATA(cmsg);
 948	rm->data.op_mmp_znotifier->z_cookie = *cookie;
 949	return 0;
 950}
 951
 952static int rds_cmsg_send(struct rds_sock *rs, struct rds_message *rm,
 953			 struct msghdr *msg, int *allocated_mr)
 954{
 955	struct cmsghdr *cmsg;
 956	int ret = 0;
 957
 958	for_each_cmsghdr(cmsg, msg) {
 959		if (!CMSG_OK(msg, cmsg))
 960			return -EINVAL;
 961
 962		if (cmsg->cmsg_level != SOL_RDS)
 963			continue;
 964
 965		/* As a side effect, RDMA_DEST and RDMA_MAP will set
 966		 * rm->rdma.m_rdma_cookie and rm->rdma.m_rdma_mr.
 967		 */
 968		switch (cmsg->cmsg_type) {
 969		case RDS_CMSG_RDMA_ARGS:
 970			ret = rds_cmsg_rdma_args(rs, rm, cmsg);
 971			break;
 972
 973		case RDS_CMSG_RDMA_DEST:
 974			ret = rds_cmsg_rdma_dest(rs, rm, cmsg);
 975			break;
 976
 977		case RDS_CMSG_RDMA_MAP:
 978			ret = rds_cmsg_rdma_map(rs, rm, cmsg);
 979			if (!ret)
 980				*allocated_mr = 1;
 981			else if (ret == -ENODEV)
 982				/* Accommodate the get_mr() case which can fail
 983				 * if the connection isn't established yet.
 984				 */
 985				ret = -EAGAIN;
 986			break;
 987		case RDS_CMSG_ATOMIC_CSWP:
 988		case RDS_CMSG_ATOMIC_FADD:
 989		case RDS_CMSG_MASKED_ATOMIC_CSWP:
 990		case RDS_CMSG_MASKED_ATOMIC_FADD:
 991			ret = rds_cmsg_atomic(rs, rm, cmsg);
 992			break;
 993
 994		case RDS_CMSG_ZCOPY_COOKIE:
 995			ret = rds_cmsg_zcopy(rs, rm, cmsg);
 996			break;
 997
 998		default:
 999			return -EINVAL;
1000		}
1001
1002		if (ret)
1003			break;
1004	}
1005
1006	return ret;
1007}
1008
1009static int rds_send_mprds_hash(struct rds_sock *rs, struct rds_connection *conn)
1010{
1011	int hash;
1012
1013	if (conn->c_npaths == 0)
1014		hash = RDS_MPATH_HASH(rs, RDS_MPATH_WORKERS);
1015	else
1016		hash = RDS_MPATH_HASH(rs, conn->c_npaths);
1017	if (conn->c_npaths == 0 && hash != 0) {
1018		rds_send_ping(conn, 0);
1019
1020		/* The underlying connection is not up yet.  Need to wait
1021		 * until it is up to be sure that the non-zero c_path can be
1022		 * used.  But if we are interrupted, we have to use the zero
1023		 * c_path in case the connection ends up being non-MP capable.
1024		 */
1025		if (conn->c_npaths == 0)
1026			if (wait_event_interruptible(conn->c_hs_waitq,
1027						     conn->c_npaths != 0))
1028				hash = 0;
1029		if (conn->c_npaths == 1)
1030			hash = 0;
1031	}
1032	return hash;
1033}
1034
1035static int rds_rdma_bytes(struct msghdr *msg, size_t *rdma_bytes)
1036{
1037	struct rds_rdma_args *args;
1038	struct cmsghdr *cmsg;
1039
1040	for_each_cmsghdr(cmsg, msg) {
1041		if (!CMSG_OK(msg, cmsg))
1042			return -EINVAL;
1043
1044		if (cmsg->cmsg_level != SOL_RDS)
1045			continue;
1046
1047		if (cmsg->cmsg_type == RDS_CMSG_RDMA_ARGS) {
1048			if (cmsg->cmsg_len <
1049			    CMSG_LEN(sizeof(struct rds_rdma_args)))
1050				return -EINVAL;
1051			args = CMSG_DATA(cmsg);
1052			*rdma_bytes += args->remote_vec.bytes;
1053		}
1054	}
1055	return 0;
1056}
1057
1058int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
1059{
1060	struct sock *sk = sock->sk;
1061	struct rds_sock *rs = rds_sk_to_rs(sk);
1062	DECLARE_SOCKADDR(struct sockaddr_in *, usin, msg->msg_name);
1063	__be32 daddr;
1064	__be16 dport;
1065	struct rds_message *rm = NULL;
1066	struct rds_connection *conn;
1067	int ret = 0;
1068	int queued = 0, allocated_mr = 0;
1069	int nonblock = msg->msg_flags & MSG_DONTWAIT;
1070	long timeo = sock_sndtimeo(sk, nonblock);
1071	struct rds_conn_path *cpath;
1072	size_t total_payload_len = payload_len, rdma_payload_len = 0;
1073	bool zcopy = ((msg->msg_flags & MSG_ZEROCOPY) &&
1074		      sock_flag(rds_rs_to_sk(rs), SOCK_ZEROCOPY));
1075	int num_sgs = ceil(payload_len, PAGE_SIZE);
1076
1077	/* Mirror how Linux UDP mirrors BSD's error message compatibility */
1078	/* XXX: Perhaps MSG_MORE someday */
1079	if (msg->msg_flags & ~(MSG_DONTWAIT | MSG_CMSG_COMPAT | MSG_ZEROCOPY)) {
1080		ret = -EOPNOTSUPP;
1081		goto out;
1082	}
1083
1084	if (msg->msg_namelen) {
1085		/* XXX fail non-unicast destination IPs? */
1086		if (msg->msg_namelen < sizeof(*usin) || usin->sin_family != AF_INET) {
1087			ret = -EINVAL;
1088			goto out;
1089		}
1090		daddr = usin->sin_addr.s_addr;
1091		dport = usin->sin_port;
1092	} else {
1093		/* We only care about consistency with ->connect() */
1094		lock_sock(sk);
1095		daddr = rs->rs_conn_addr;
1096		dport = rs->rs_conn_port;
1097		release_sock(sk);
1098	}
1099
1100	lock_sock(sk);
1101	if (daddr == 0 || rs->rs_bound_addr == 0) {
1102		release_sock(sk);
1103		ret = -ENOTCONN; /* XXX not a great errno */
1104		goto out;
1105	}
1106	release_sock(sk);
1107
1108	ret = rds_rdma_bytes(msg, &rdma_payload_len);
1109	if (ret)
1110		goto out;
1111
1112	total_payload_len += rdma_payload_len;
1113	if (max_t(size_t, payload_len, rdma_payload_len) > RDS_MAX_MSG_SIZE) {
1114		ret = -EMSGSIZE;
1115		goto out;
1116	}
1117
1118	if (payload_len > rds_sk_sndbuf(rs)) {
1119		ret = -EMSGSIZE;
1120		goto out;
1121	}
1122
1123	if (zcopy) {
1124		if (rs->rs_transport->t_type != RDS_TRANS_TCP) {
1125			ret = -EOPNOTSUPP;
1126			goto out;
1127		}
1128		num_sgs = iov_iter_npages(&msg->msg_iter, INT_MAX);
1129	}
1130	/* size of rm including all sgs */
1131	ret = rds_rm_size(msg, num_sgs);
1132	if (ret < 0)
1133		goto out;
1134
1135	rm = rds_message_alloc(ret, GFP_KERNEL);
1136	if (!rm) {
1137		ret = -ENOMEM;
1138		goto out;
1139	}
1140
1141	/* Attach data to the rm */
1142	if (payload_len) {
1143		rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs);
1144		if (!rm->data.op_sg) {
1145			ret = -ENOMEM;
1146			goto out;
1147		}
1148		ret = rds_message_copy_from_user(rm, &msg->msg_iter, zcopy);
1149		if (ret)
1150			goto out;
1151	}
1152	rm->data.op_active = 1;
1153
1154	rm->m_daddr = daddr;
1155
1156	/* rds_conn_create has a spinlock that runs with IRQ off.
1157	 * Caching the conn in the socket helps a lot. */
1158	if (rs->rs_conn && rs->rs_conn->c_faddr == daddr)
1159		conn = rs->rs_conn;
1160	else {
1161		conn = rds_conn_create_outgoing(sock_net(sock->sk),
1162						rs->rs_bound_addr, daddr,
1163					rs->rs_transport,
1164					sock->sk->sk_allocation);
1165		if (IS_ERR(conn)) {
1166			ret = PTR_ERR(conn);
1167			goto out;
1168		}
1169		rs->rs_conn = conn;
1170	}
1171
1172	/* Parse any control messages the user may have included. */
1173	ret = rds_cmsg_send(rs, rm, msg, &allocated_mr);
1174	if (ret) {
1175		/* Trigger connection so that it's ready for the next retry */
1176		if (ret == -EAGAIN)
1177			rds_conn_connect_if_down(conn);
1178		goto out;
1179	}
1180
1181	if (rm->rdma.op_active && !conn->c_trans->xmit_rdma) {
1182		printk_ratelimited(KERN_NOTICE "rdma_op %p conn xmit_rdma %p\n",
1183			       &rm->rdma, conn->c_trans->xmit_rdma);
1184		ret = -EOPNOTSUPP;
1185		goto out;
1186	}
1187
1188	if (rm->atomic.op_active && !conn->c_trans->xmit_atomic) {
1189		printk_ratelimited(KERN_NOTICE "atomic_op %p conn xmit_atomic %p\n",
1190			       &rm->atomic, conn->c_trans->xmit_atomic);
1191		ret = -EOPNOTSUPP;
1192		goto out;
1193	}
1194
1195	if (conn->c_trans->t_mp_capable)
1196		cpath = &conn->c_path[rds_send_mprds_hash(rs, conn)];
1197	else
1198		cpath = &conn->c_path[0];
1199
1200	if (rds_destroy_pending(conn)) {
1201		ret = -EAGAIN;
1202		goto out;
1203	}
1204
1205	rds_conn_path_connect_if_down(cpath);
1206
1207	ret = rds_cong_wait(conn->c_fcong, dport, nonblock, rs);
1208	if (ret) {
1209		rs->rs_seen_congestion = 1;
1210		goto out;
1211	}
1212	while (!rds_send_queue_rm(rs, conn, cpath, rm, rs->rs_bound_port,
1213				  dport, &queued)) {
1214		rds_stats_inc(s_send_queue_full);
1215
1216		if (nonblock) {
1217			ret = -EAGAIN;
1218			goto out;
1219		}
1220
1221		timeo = wait_event_interruptible_timeout(*sk_sleep(sk),
1222					rds_send_queue_rm(rs, conn, cpath, rm,
1223							  rs->rs_bound_port,
1224							  dport,
1225							  &queued),
1226					timeo);
1227		rdsdebug("sendmsg woke queued %d timeo %ld\n", queued, timeo);
1228		if (timeo > 0 || timeo == MAX_SCHEDULE_TIMEOUT)
1229			continue;
1230
1231		ret = timeo;
1232		if (ret == 0)
1233			ret = -ETIMEDOUT;
1234		goto out;
1235	}
1236
1237	/*
1238	 * By now we've committed to the send.  We reuse rds_send_worker()
1239	 * to retry sends in the rds thread if the transport asks us to.
1240	 */
1241	rds_stats_inc(s_send_queued);
1242
1243	ret = rds_send_xmit(cpath);
1244	if (ret == -ENOMEM || ret == -EAGAIN) {
1245		ret = 0;
1246		rcu_read_lock();
1247		if (rds_destroy_pending(cpath->cp_conn))
1248			ret = -ENETUNREACH;
1249		else
1250			queue_delayed_work(rds_wq, &cpath->cp_send_w, 1);
1251		rcu_read_unlock();
1252	}
1253	if (ret)
1254		goto out;
1255	rds_message_put(rm);
1256	return payload_len;
1257
1258out:
1259	/* If the user included an RDMA_MAP cmsg, we allocated an MR on the fly.
1260	 * If the sendmsg goes through, we keep the MR. If it fails with EAGAIN
1261	 * or in any other way, we need to destroy the MR again */
1262	if (allocated_mr)
1263		rds_rdma_unuse(rs, rds_rdma_cookie_key(rm->m_rdma_cookie), 1);
1264
1265	if (rm)
1266		rds_message_put(rm);
1267	return ret;
1268}
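    /*
     * Illustrative userspace sketch (not part of this file; assumes
     * <sys/socket.h>, <netinet/in.h>, <arpa/inet.h> and a configured RDS
     * transport): the simplest caller binds an RDS socket to a local
     * address/port and then sends datagrams with sendto()/sendmsg() and a
     * sockaddr_in destination; SOL_RDS control messages (RDS_CMSG_RDMA_ARGS
     * and friends) are only needed for RDMA and atomic operations.  Without
     * MSG_DONTWAIT the call sleeps while the send queue is full; with it, a
     * full queue returns -EAGAIN, matching the loop above.  Addresses and
     * ports below are made up.
     *
     *	int fd = socket(PF_RDS, SOCK_SEQPACKET, 0);
     *	struct sockaddr_in laddr = {
     *		.sin_family = AF_INET,
     *		.sin_addr.s_addr = inet_addr("192.0.2.1"),
     *		.sin_port = htons(4000),
     *	};
     *	struct sockaddr_in daddr = {
     *		.sin_family = AF_INET,
     *		.sin_addr.s_addr = inet_addr("192.0.2.2"),
     *		.sin_port = htons(4001),
     *	};
     *	char payload[] = "hello";
     *
     *	bind(fd, (struct sockaddr *)&laddr, sizeof(laddr));
     *	if (sendto(fd, payload, sizeof(payload), 0,
     *		   (struct sockaddr *)&daddr, sizeof(daddr)) < 0)
     *		perror("sendto");
     */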
1269
1270/*
1271 * send out a probe. Can be shared by rds_send_ping,
1272 * rds_send_pong, rds_send_hb.
1273 * rds_send_hb should use h_flags
1274 *   RDS_FLAG_HB_PING|RDS_FLAG_ACK_REQUIRED
1275 * or
1276 *   RDS_FLAG_HB_PONG|RDS_FLAG_ACK_REQUIRED
1277 */
1278static int
1279rds_send_probe(struct rds_conn_path *cp, __be16 sport,
1280	       __be16 dport, u8 h_flags)
1281{
1282	struct rds_message *rm;
1283	unsigned long flags;
1284	int ret = 0;
1285
1286	rm = rds_message_alloc(0, GFP_ATOMIC);
1287	if (!rm) {
1288		ret = -ENOMEM;
1289		goto out;
1290	}
1291
1292	rm->m_daddr = cp->cp_conn->c_faddr;
1293	rm->data.op_active = 1;
1294
1295	rds_conn_path_connect_if_down(cp);
1296
1297	ret = rds_cong_wait(cp->cp_conn->c_fcong, dport, 1, NULL);
1298	if (ret)
1299		goto out;
1300
1301	spin_lock_irqsave(&cp->cp_lock, flags);
1302	list_add_tail(&rm->m_conn_item, &cp->cp_send_queue);
1303	set_bit(RDS_MSG_ON_CONN, &rm->m_flags);
1304	rds_message_addref(rm);
1305	rm->m_inc.i_conn = cp->cp_conn;
1306	rm->m_inc.i_conn_path = cp;
1307
1308	rds_message_populate_header(&rm->m_inc.i_hdr, sport, dport,
1309				    cp->cp_next_tx_seq);
1310	rm->m_inc.i_hdr.h_flags |= h_flags;
1311	cp->cp_next_tx_seq++;
1312
1313	if (RDS_HS_PROBE(be16_to_cpu(sport), be16_to_cpu(dport)) &&
1314	    cp->cp_conn->c_trans->t_mp_capable) {
1315		__be16 npaths = cpu_to_be16(RDS_MPATH_WORKERS);
1316		__be32 my_gen_num = cpu_to_be32(cp->cp_conn->c_my_gen_num);
1317
1318		rds_message_add_extension(&rm->m_inc.i_hdr,
1319					  RDS_EXTHDR_NPATHS, &npaths,
1320					  sizeof(npaths));
1321		rds_message_add_extension(&rm->m_inc.i_hdr,
1322					  RDS_EXTHDR_GEN_NUM,
1323					  &my_gen_num,
1324					  sizeof(u32));
1325	}
1326	spin_unlock_irqrestore(&cp->cp_lock, flags);
1327
1328	rds_stats_inc(s_send_queued);
1329	rds_stats_inc(s_send_pong);
1330
1331	/* schedule the send work on rds_wq */
1332	rcu_read_lock();
1333	if (!rds_destroy_pending(cp->cp_conn))
1334		queue_delayed_work(rds_wq, &cp->cp_send_w, 1);
1335	rcu_read_unlock();
1336
1337	rds_message_put(rm);
1338	return 0;
1339
1340out:
1341	if (rm)
1342		rds_message_put(rm);
1343	return ret;
1344}
1345
1346int
1347rds_send_pong(struct rds_conn_path *cp, __be16 dport)
1348{
1349	return rds_send_probe(cp, 0, dport, 0);
1350}
1351
1352void
1353rds_send_ping(struct rds_connection *conn, int cp_index)
1354{
1355	unsigned long flags;
1356	struct rds_conn_path *cp = &conn->c_path[cp_index];
1357
1358	spin_lock_irqsave(&cp->cp_lock, flags);
1359	if (conn->c_ping_triggered) {
1360		spin_unlock_irqrestore(&cp->cp_lock, flags);
1361		return;
1362	}
1363	conn->c_ping_triggered = 1;
1364	spin_unlock_irqrestore(&cp->cp_lock, flags);
1365	rds_send_probe(cp, cpu_to_be16(RDS_FLAG_PROBE_PORT), 0, 0);
1366}
1367EXPORT_SYMBOL_GPL(rds_send_ping);