1/*
2 * Copyright (c) 2006, 2018 Oracle and/or its affiliates. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 */
33#include <linux/kernel.h>
34#include <linux/moduleparam.h>
35#include <linux/gfp.h>
36#include <net/sock.h>
37#include <linux/in.h>
38#include <linux/list.h>
39#include <linux/ratelimit.h>
40#include <linux/export.h>
41#include <linux/sizes.h>
42
43#include "rds.h"
44
45/* When transmitting messages in rds_send_xmit, we need to emerge from
46 * time to time and briefly release the CPU. Otherwise the softlockup watchdog
47 * will kick our shin.
48 * Also, it seems fairer to not let one busy connection stall all the
49 * others.
50 *
51 * send_batch_count is the number of times we'll loop in send_xmit. Setting
52 * it to 0 will restore the old behavior (where we looped until we had
53 * drained the queue).
54 */
55static int send_batch_count = SZ_1K;
56module_param(send_batch_count, int, 0444);
57MODULE_PARM_DESC(send_batch_count, " batch factor when working the send queue");
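
/*
 * Illustrative note: with 0444 permissions the value is visible but
 * read-only at runtime via /sys/module/rds/parameters/send_batch_count;
 * it can only be changed at load time, e.g. (assuming RDS is built as a
 * module, rds.ko) "modprobe rds send_batch_count=256".
 */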
58
59static void rds_send_remove_from_sock(struct list_head *messages, int status);
60
61/*
62 * Reset the send state. Callers must ensure that this doesn't race with
63 * rds_send_xmit().
64 */
65void rds_send_path_reset(struct rds_conn_path *cp)
66{
67 struct rds_message *rm, *tmp;
68 unsigned long flags;
69
70 if (cp->cp_xmit_rm) {
71 rm = cp->cp_xmit_rm;
72 cp->cp_xmit_rm = NULL;
73 /* Tell the user the RDMA op is no longer mapped by the
74 * transport. This isn't entirely true (it's flushed out
75 * independently) but as the connection is down, there's
76 * no ongoing RDMA to/from that memory */
77 rds_message_unmapped(rm);
78 rds_message_put(rm);
79 }
80
81 cp->cp_xmit_sg = 0;
82 cp->cp_xmit_hdr_off = 0;
83 cp->cp_xmit_data_off = 0;
84 cp->cp_xmit_atomic_sent = 0;
85 cp->cp_xmit_rdma_sent = 0;
86 cp->cp_xmit_data_sent = 0;
87
88 cp->cp_conn->c_map_queued = 0;
89
90 cp->cp_unacked_packets = rds_sysctl_max_unacked_packets;
91 cp->cp_unacked_bytes = rds_sysctl_max_unacked_bytes;
92
93 /* Mark messages as retransmissions, and move them to the send q */
94 spin_lock_irqsave(&cp->cp_lock, flags);
95 list_for_each_entry_safe(rm, tmp, &cp->cp_retrans, m_conn_item) {
96 set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);
97 set_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags);
98 }
99 list_splice_init(&cp->cp_retrans, &cp->cp_send_queue);
100 spin_unlock_irqrestore(&cp->cp_lock, flags);
101}
102EXPORT_SYMBOL_GPL(rds_send_path_reset);
103
104static int acquire_in_xmit(struct rds_conn_path *cp)
105{
106 return test_and_set_bit(RDS_IN_XMIT, &cp->cp_flags) == 0;
107}
108
109static void release_in_xmit(struct rds_conn_path *cp)
110{
111 clear_bit(RDS_IN_XMIT, &cp->cp_flags);
112 smp_mb__after_atomic();
113 /*
114 * We don't use wait_on_bit()/wake_up_bit() because our waking is in a
115 * hot path and finding waiters is very rare. We don't want to walk
116 * the system-wide hashed waitqueue buckets in the fast path only to
117 * almost never find waiters.
118 */
119 if (waitqueue_active(&cp->cp_waitq))
120 wake_up_all(&cp->cp_waitq);
121}
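
/*
 * RDS_IN_XMIT acts as a per-path transmit lock. A sketch of the calling
 * pattern used by rds_send_xmit() below:
 *
 *	if (!acquire_in_xmit(cp))
 *		return -ENOMEM;		someone else is draining the queue
 *	... push messages down the transport ...
 *	release_in_xmit(cp);		wakes anyone sleeping on cp_waitq,
 *					typically the shutdown path waiting
 *					for this bit to clear
 */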
122
123/*
124 * We're making the conscious trade-off here to only send one message
125 * down the connection at a time.
126 * Pro:
127 * - tx queueing is a simple fifo list
128 * - reassembly is optional and easily done by transports per conn
129 * - no per flow rx lookup at all, straight to the socket
130 * - less per-frag memory and wire overhead
131 * Con:
132 * - queued acks can be delayed behind large messages
133 * Depends:
134 * - small message latency is higher behind queued large messages
135 * - large message latency isn't starved by intervening small sends
136 */
137int rds_send_xmit(struct rds_conn_path *cp)
138{
139 struct rds_connection *conn = cp->cp_conn;
140 struct rds_message *rm;
141 unsigned long flags;
142 unsigned int tmp;
143 struct scatterlist *sg;
144 int ret = 0;
145 LIST_HEAD(to_be_dropped);
146 int batch_count;
147 unsigned long send_gen = 0;
148
149restart:
150 batch_count = 0;
151
152 /*
153 * sendmsg calls here after having queued its message on the send
154 * queue. We only have one task feeding the connection at a time. If
155 * another thread is already feeding the queue then we back off. This
156 * avoids blocking the caller and trading per-connection data between
157 * caches per message.
158 */
159 if (!acquire_in_xmit(cp)) {
160 rds_stats_inc(s_send_lock_contention);
161 ret = -ENOMEM;
162 goto out;
163 }
164
165 if (rds_destroy_pending(cp->cp_conn)) {
166 release_in_xmit(cp);
167 ret = -ENETUNREACH; /* don't requeue send work */
168 goto out;
169 }
170
171 /*
172 * we record the send generation after doing the xmit acquire.
173 * if someone else manages to jump in and do some work, we'll use
174 * this to avoid a goto restart farther down.
175 *
176 * The acquire_in_xmit() check above ensures that only one
177 * caller can increment c_send_gen at any time.
178 */
179 send_gen = READ_ONCE(cp->cp_send_gen) + 1;
180 WRITE_ONCE(cp->cp_send_gen, send_gen);
181
182 /*
183 * rds_conn_shutdown() sets the conn state and then tests RDS_IN_XMIT,
184 * we do the opposite to avoid races.
185 */
186 if (!rds_conn_path_up(cp)) {
187 release_in_xmit(cp);
188 ret = 0;
189 goto out;
190 }
191
192 if (conn->c_trans->xmit_path_prepare)
193 conn->c_trans->xmit_path_prepare(cp);
194
195 /*
196 * spin trying to push headers and data down the connection until
197 * the connection doesn't make forward progress.
198 */
199 while (1) {
200
201 rm = cp->cp_xmit_rm;
202
203 /*
204 * If between sending messages, we can send a pending congestion
205 * map update.
206 */
207 if (!rm && test_and_clear_bit(0, &conn->c_map_queued)) {
208 rm = rds_cong_update_alloc(conn);
209 if (IS_ERR(rm)) {
210 ret = PTR_ERR(rm);
211 break;
212 }
213 rm->data.op_active = 1;
214 rm->m_inc.i_conn_path = cp;
215 rm->m_inc.i_conn = cp->cp_conn;
216
217 cp->cp_xmit_rm = rm;
218 }
219
220 /*
221 * If not already working on one, grab the next message.
222 *
223 * cp_xmit_rm holds a ref while we're sending this message down
224 * the connection. We can use this ref while holding the
225 * RDS_IN_XMIT bit; rds_send_path_reset() is serialized with it.
226 */
227 if (!rm) {
228 unsigned int len;
229
230 batch_count++;
231
232 /* we want to process as big a batch as we can, but
233 * we also want to avoid softlockups. If we've been
234 * through a lot of messages, let's back off and see
235 * if anyone else jumps in
236 */
237 if (batch_count >= send_batch_count)
238 goto over_batch;
239
240 spin_lock_irqsave(&cp->cp_lock, flags);
241
242 if (!list_empty(&cp->cp_send_queue)) {
243 rm = list_entry(cp->cp_send_queue.next,
244 struct rds_message,
245 m_conn_item);
246 rds_message_addref(rm);
247
248 /*
249 * Move the message from the send queue to the retransmit
250 * list right away.
251 */
252 list_move_tail(&rm->m_conn_item,
253 &cp->cp_retrans);
254 }
255
256 spin_unlock_irqrestore(&cp->cp_lock, flags);
257
258 if (!rm)
259 break;
260
261 /* Unfortunately, the way Infiniband deals with
262 * RDMA to a bad MR key is by moving the entire
263 * queue pair to error state. We could possibly
264 * recover from that, but right now we drop the
265 * connection.
266 * Therefore, we never retransmit messages with RDMA ops.
267 */
268 if (test_bit(RDS_MSG_FLUSH, &rm->m_flags) ||
269 (rm->rdma.op_active &&
270 test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags))) {
271 spin_lock_irqsave(&cp->cp_lock, flags);
272 if (test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags))
273 list_move(&rm->m_conn_item, &to_be_dropped);
274 spin_unlock_irqrestore(&cp->cp_lock, flags);
275 continue;
276 }
277
278 /* Require an ACK every once in a while */
279 len = ntohl(rm->m_inc.i_hdr.h_len);
280 if (cp->cp_unacked_packets == 0 ||
281 cp->cp_unacked_bytes < len) {
282 set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);
283
284 cp->cp_unacked_packets =
285 rds_sysctl_max_unacked_packets;
286 cp->cp_unacked_bytes =
287 rds_sysctl_max_unacked_bytes;
288 rds_stats_inc(s_send_ack_required);
289 } else {
290 cp->cp_unacked_bytes -= len;
291 cp->cp_unacked_packets--;
292 }
293
294 cp->cp_xmit_rm = rm;
295 }
296
297 /* The transport either sends the whole rdma or none of it */
298 if (rm->rdma.op_active && !cp->cp_xmit_rdma_sent) {
299 rm->m_final_op = &rm->rdma;
300 /* The transport owns the mapped memory for now.
301 * You can't unmap it while it's on the send queue
302 */
303 set_bit(RDS_MSG_MAPPED, &rm->m_flags);
304 ret = conn->c_trans->xmit_rdma(conn, &rm->rdma);
305 if (ret) {
306 clear_bit(RDS_MSG_MAPPED, &rm->m_flags);
307 wake_up_interruptible(&rm->m_flush_wait);
308 break;
309 }
310 cp->cp_xmit_rdma_sent = 1;
311
312 }
313
314 if (rm->atomic.op_active && !cp->cp_xmit_atomic_sent) {
315 rm->m_final_op = &rm->atomic;
316 /* The transport owns the mapped memory for now.
317 * You can't unmap it while it's on the send queue
318 */
319 set_bit(RDS_MSG_MAPPED, &rm->m_flags);
320 ret = conn->c_trans->xmit_atomic(conn, &rm->atomic);
321 if (ret) {
322 clear_bit(RDS_MSG_MAPPED, &rm->m_flags);
323 wake_up_interruptible(&rm->m_flush_wait);
324 break;
325 }
326 cp->cp_xmit_atomic_sent = 1;
327
328 }
329
330 /*
331 * A number of cases require an RDS header to be sent
332 * even if there is no data.
333 * We permit 0-byte sends; rds-ping depends on this.
334 * However, if there are exclusively attached silent ops,
335 * we skip the hdr/data send, to enable silent operation.
336 */
337 if (rm->data.op_nents == 0) {
338 int ops_present;
339 int all_ops_are_silent = 1;
340
341 ops_present = (rm->atomic.op_active || rm->rdma.op_active);
342 if (rm->atomic.op_active && !rm->atomic.op_silent)
343 all_ops_are_silent = 0;
344 if (rm->rdma.op_active && !rm->rdma.op_silent)
345 all_ops_are_silent = 0;
346
347 if (ops_present && all_ops_are_silent
348 && !rm->m_rdma_cookie)
349 rm->data.op_active = 0;
350 }
351
352 if (rm->data.op_active && !cp->cp_xmit_data_sent) {
353 rm->m_final_op = &rm->data;
354
355 ret = conn->c_trans->xmit(conn, rm,
356 cp->cp_xmit_hdr_off,
357 cp->cp_xmit_sg,
358 cp->cp_xmit_data_off);
359 if (ret <= 0)
360 break;
361
362 if (cp->cp_xmit_hdr_off < sizeof(struct rds_header)) {
363 tmp = min_t(int, ret,
364 sizeof(struct rds_header) -
365 cp->cp_xmit_hdr_off);
366 cp->cp_xmit_hdr_off += tmp;
367 ret -= tmp;
368 }
369
370 sg = &rm->data.op_sg[cp->cp_xmit_sg];
371 while (ret) {
372 tmp = min_t(int, ret, sg->length -
373 cp->cp_xmit_data_off);
374 cp->cp_xmit_data_off += tmp;
375 ret -= tmp;
376 if (cp->cp_xmit_data_off == sg->length) {
377 cp->cp_xmit_data_off = 0;
378 sg++;
379 cp->cp_xmit_sg++;
380 BUG_ON(ret != 0 && cp->cp_xmit_sg ==
381 rm->data.op_nents);
382 }
383 }
384
385 if (cp->cp_xmit_hdr_off == sizeof(struct rds_header) &&
386 (cp->cp_xmit_sg == rm->data.op_nents))
387 cp->cp_xmit_data_sent = 1;
388 }
389
390 /*
391 * A rm will only make multiple passes through this loop
392 * if there is a data op. Thus, if the data is sent (or there was
393 * none), then we're done with the rm.
394 */
395 if (!rm->data.op_active || cp->cp_xmit_data_sent) {
396 cp->cp_xmit_rm = NULL;
397 cp->cp_xmit_sg = 0;
398 cp->cp_xmit_hdr_off = 0;
399 cp->cp_xmit_data_off = 0;
400 cp->cp_xmit_rdma_sent = 0;
401 cp->cp_xmit_atomic_sent = 0;
402 cp->cp_xmit_data_sent = 0;
403
404 rds_message_put(rm);
405 }
406 }
407
408over_batch:
409 if (conn->c_trans->xmit_path_complete)
410 conn->c_trans->xmit_path_complete(cp);
411 release_in_xmit(cp);
412
413 /* Nuke any messages we decided not to retransmit. */
414 if (!list_empty(&to_be_dropped)) {
415 /* irqs on here, so we can put(), unlike above */
416 list_for_each_entry(rm, &to_be_dropped, m_conn_item)
417 rds_message_put(rm);
418 rds_send_remove_from_sock(&to_be_dropped, RDS_RDMA_DROPPED);
419 }
420
421 /*
422 * Other senders can queue a message after we last test the send queue
423 * but before we clear RDS_IN_XMIT. In that case they'd back off and
424 * not try and send their newly queued message. We need to check the
425 * send queue after having cleared RDS_IN_XMIT so that their message
426 * doesn't get stuck on the send queue.
427 *
428 * If the transport cannot continue (i.e. ret != 0), then it must
429 * call us when more room is available, such as from the tx
430 * completion handler.
431 *
432 * We have an extra generation check here so that if someone manages
433 * to jump in after our release_in_xmit, we'll see that they have done
434 * some work and we will skip our goto
435 */
436 if (ret == 0) {
437 bool raced;
438
439 smp_mb();
440 raced = send_gen != READ_ONCE(cp->cp_send_gen);
441
442 if ((test_bit(0, &conn->c_map_queued) ||
443 !list_empty(&cp->cp_send_queue)) && !raced) {
444 if (batch_count < send_batch_count)
445 goto restart;
446 rcu_read_lock();
447 if (rds_destroy_pending(cp->cp_conn))
448 ret = -ENETUNREACH;
449 else
450 queue_delayed_work(rds_wq, &cp->cp_send_w, 1);
451 rcu_read_unlock();
452 } else if (raced) {
453 rds_stats_inc(s_send_lock_queue_raced);
454 }
455 }
456out:
457 return ret;
458}
459EXPORT_SYMBOL_GPL(rds_send_xmit);
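
/*
 * Return-value convention for callers: -ENOMEM or -EAGAIN means another
 * sender holds RDS_IN_XMIT or the transport is out of room, so callers
 * typically requeue cp_send_w and try again later, while -ENETUNREACH
 * means the connection is being torn down and no further send work
 * should be scheduled. See rds_sendmsg() below for one caller that
 * follows this pattern.
 */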
460
461static void rds_send_sndbuf_remove(struct rds_sock *rs, struct rds_message *rm)
462{
463 u32 len = be32_to_cpu(rm->m_inc.i_hdr.h_len);
464
465 assert_spin_locked(&rs->rs_lock);
466
467 BUG_ON(rs->rs_snd_bytes < len);
468 rs->rs_snd_bytes -= len;
469
470 if (rs->rs_snd_bytes == 0)
471 rds_stats_inc(s_send_queue_empty);
472}
473
474static inline int rds_send_is_acked(struct rds_message *rm, u64 ack,
475 is_acked_func is_acked)
476{
477 if (is_acked)
478 return is_acked(rm, ack);
479 return be64_to_cpu(rm->m_inc.i_hdr.h_sequence) <= ack;
480}
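
/*
 * With no transport-specific is_acked callback, acks are cumulative: an
 * ack of N retires every message whose header sequence number is <= N,
 * which is what lets rds_send_path_drop_acked() below walk the
 * retransmit list in order and stop at the first unacked message.
 */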
481
482/*
483 * This is pretty similar to what happens below in the ACK
484 * handling code - except that we call here as soon as we get
485 * the IB send completion on the RDMA op and the accompanying
486 * message.
487 */
488void rds_rdma_send_complete(struct rds_message *rm, int status)
489{
490 struct rds_sock *rs = NULL;
491 struct rm_rdma_op *ro;
492 struct rds_notifier *notifier;
493 unsigned long flags;
494 unsigned int notify = 0;
495
496 spin_lock_irqsave(&rm->m_rs_lock, flags);
497
498 notify = rm->rdma.op_notify | rm->data.op_notify;
499 ro = &rm->rdma;
500 if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags) &&
501 ro->op_active && notify && ro->op_notifier) {
502 notifier = ro->op_notifier;
503 rs = rm->m_rs;
504 sock_hold(rds_rs_to_sk(rs));
505
506 notifier->n_status = status;
507 spin_lock(&rs->rs_lock);
508 list_add_tail(&notifier->n_list, &rs->rs_notify_queue);
509 spin_unlock(&rs->rs_lock);
510
511 ro->op_notifier = NULL;
512 }
513
514 spin_unlock_irqrestore(&rm->m_rs_lock, flags);
515
516 if (rs) {
517 rds_wake_sk_sleep(rs);
518 sock_put(rds_rs_to_sk(rs));
519 }
520}
521EXPORT_SYMBOL_GPL(rds_rdma_send_complete);
522
523/*
524 * Just like above, except looks at atomic op
525 */
526void rds_atomic_send_complete(struct rds_message *rm, int status)
527{
528 struct rds_sock *rs = NULL;
529 struct rm_atomic_op *ao;
530 struct rds_notifier *notifier;
531 unsigned long flags;
532
533 spin_lock_irqsave(&rm->m_rs_lock, flags);
534
535 ao = &rm->atomic;
536 if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags)
537 && ao->op_active && ao->op_notify && ao->op_notifier) {
538 notifier = ao->op_notifier;
539 rs = rm->m_rs;
540 sock_hold(rds_rs_to_sk(rs));
541
542 notifier->n_status = status;
543 spin_lock(&rs->rs_lock);
544 list_add_tail(&notifier->n_list, &rs->rs_notify_queue);
545 spin_unlock(&rs->rs_lock);
546
547 ao->op_notifier = NULL;
548 }
549
550 spin_unlock_irqrestore(&rm->m_rs_lock, flags);
551
552 if (rs) {
553 rds_wake_sk_sleep(rs);
554 sock_put(rds_rs_to_sk(rs));
555 }
556}
557EXPORT_SYMBOL_GPL(rds_atomic_send_complete);
558
559/*
560 * This is the same as rds_rdma_send_complete except we
561 * don't do any locking - we have all the ingredients (message,
562 * socket, socket lock) and can just move the notifier.
563 */
564static inline void
565__rds_send_complete(struct rds_sock *rs, struct rds_message *rm, int status)
566{
567 struct rm_rdma_op *ro;
568 struct rm_atomic_op *ao;
569
570 ro = &rm->rdma;
571 if (ro->op_active && ro->op_notify && ro->op_notifier) {
572 ro->op_notifier->n_status = status;
573 list_add_tail(&ro->op_notifier->n_list, &rs->rs_notify_queue);
574 ro->op_notifier = NULL;
575 }
576
577 ao = &rm->atomic;
578 if (ao->op_active && ao->op_notify && ao->op_notifier) {
579 ao->op_notifier->n_status = status;
580 list_add_tail(&ao->op_notifier->n_list, &rs->rs_notify_queue);
581 ao->op_notifier = NULL;
582 }
583
584 /* No need to wake the app - caller does this */
585}
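
/*
 * Callers must already hold rs->rs_lock; rds_send_drop_to() below also
 * takes rm->m_rs_lock around the call so the notifier move cannot race
 * with the completion handlers above.
 */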
586
587/*
588 * This removes messages from the socket's list if they're on it. The list
589 * argument must be private to the caller, we must be able to modify it
590 * without locks. The messages must have a reference held for their
591 * position on the list. This function will drop that reference after
592 * removing the messages from the 'messages' list regardless of if it found
593 * the messages on the socket list or not.
594 */
595static void rds_send_remove_from_sock(struct list_head *messages, int status)
596{
597 unsigned long flags;
598 struct rds_sock *rs = NULL;
599 struct rds_message *rm;
600
601 while (!list_empty(messages)) {
602 int was_on_sock = 0;
603
604 rm = list_entry(messages->next, struct rds_message,
605 m_conn_item);
606 list_del_init(&rm->m_conn_item);
607
608 /*
609 * If we see this flag cleared then we're *sure* that someone
610 * else beat us to removing it from the sock. If we race
611 * with their flag update we'll get the lock and then really
612 * see that the flag has been cleared.
613 *
614 * The message spinlock makes sure nobody clears rm->m_rs
615 * while we're messing with it. It does not prevent the
616 * message from being removed from the socket, though.
617 */
618 spin_lock_irqsave(&rm->m_rs_lock, flags);
619 if (!test_bit(RDS_MSG_ON_SOCK, &rm->m_flags))
620 goto unlock_and_drop;
621
622 if (rs != rm->m_rs) {
623 if (rs) {
624 rds_wake_sk_sleep(rs);
625 sock_put(rds_rs_to_sk(rs));
626 }
627 rs = rm->m_rs;
628 if (rs)
629 sock_hold(rds_rs_to_sk(rs));
630 }
631 if (!rs)
632 goto unlock_and_drop;
633 spin_lock(&rs->rs_lock);
634
635 if (test_and_clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags)) {
636 struct rm_rdma_op *ro = &rm->rdma;
637 struct rds_notifier *notifier;
638
639 list_del_init(&rm->m_sock_item);
640 rds_send_sndbuf_remove(rs, rm);
641
642 if (ro->op_active && ro->op_notifier &&
643 (ro->op_notify || (ro->op_recverr && status))) {
644 notifier = ro->op_notifier;
645 list_add_tail(&notifier->n_list,
646 &rs->rs_notify_queue);
647 if (!notifier->n_status)
648 notifier->n_status = status;
649 rm->rdma.op_notifier = NULL;
650 }
651 was_on_sock = 1;
652 }
653 spin_unlock(&rs->rs_lock);
654
655unlock_and_drop:
656 spin_unlock_irqrestore(&rm->m_rs_lock, flags);
657 rds_message_put(rm);
658 if (was_on_sock)
659 rds_message_put(rm);
660 }
661
662 if (rs) {
663 rds_wake_sk_sleep(rs);
664 sock_put(rds_rs_to_sk(rs));
665 }
666}
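
/*
 * Note the two rds_message_put() calls above: the first drops the
 * reference the caller's private list held (the message's reference for
 * its place on the connection's queues), the second drops the socket's
 * reference once RDS_MSG_ON_SOCK has been cleared.
 */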
667
668/*
669 * Transports call here when they've determined that the receiver queued
670 * messages up to, and including, the given sequence number. Messages are
671 * moved to the retrans queue when rds_send_xmit picks them off the send
672 * queue. This means that in the TCP case, the message may not have been
673 * assigned the m_ack_seq yet - but that's fine as long as tcp_is_acked
674 * checks the RDS_MSG_HAS_ACK_SEQ bit.
675 */
676void rds_send_path_drop_acked(struct rds_conn_path *cp, u64 ack,
677 is_acked_func is_acked)
678{
679 struct rds_message *rm, *tmp;
680 unsigned long flags;
681 LIST_HEAD(list);
682
683 spin_lock_irqsave(&cp->cp_lock, flags);
684
685 list_for_each_entry_safe(rm, tmp, &cp->cp_retrans, m_conn_item) {
686 if (!rds_send_is_acked(rm, ack, is_acked))
687 break;
688
689 list_move(&rm->m_conn_item, &list);
690 clear_bit(RDS_MSG_ON_CONN, &rm->m_flags);
691 }
692
693 /* order flag updates with spin locks */
694 if (!list_empty(&list))
695 smp_mb__after_atomic();
696
697 spin_unlock_irqrestore(&cp->cp_lock, flags);
698
699 /* now remove the messages from the sock list as needed */
700 rds_send_remove_from_sock(&list, RDS_RDMA_SUCCESS);
701}
702EXPORT_SYMBOL_GPL(rds_send_path_drop_acked);
703
704void rds_send_drop_acked(struct rds_connection *conn, u64 ack,
705 is_acked_func is_acked)
706{
707 WARN_ON(conn->c_trans->t_mp_capable);
708 rds_send_path_drop_acked(&conn->c_path[0], ack, is_acked);
709}
710EXPORT_SYMBOL_GPL(rds_send_drop_acked);
711
712void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest)
713{
714 struct rds_message *rm, *tmp;
715 struct rds_connection *conn;
716 struct rds_conn_path *cp;
717 unsigned long flags;
718 LIST_HEAD(list);
719
720 /* get all the messages we're dropping under the rs lock */
721 spin_lock_irqsave(&rs->rs_lock, flags);
722
723 list_for_each_entry_safe(rm, tmp, &rs->rs_send_queue, m_sock_item) {
724 if (dest && (dest->sin_addr.s_addr != rm->m_daddr ||
725 dest->sin_port != rm->m_inc.i_hdr.h_dport))
726 continue;
727
728 list_move(&rm->m_sock_item, &list);
729 rds_send_sndbuf_remove(rs, rm);
730 clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags);
731 }
732
733 /* order flag updates with the rs lock */
734 smp_mb__after_atomic();
735
736 spin_unlock_irqrestore(&rs->rs_lock, flags);
737
738 if (list_empty(&list))
739 return;
740
741 /* Remove the messages from the conn */
742 list_for_each_entry(rm, &list, m_sock_item) {
743
744 conn = rm->m_inc.i_conn;
745 if (conn->c_trans->t_mp_capable)
746 cp = rm->m_inc.i_conn_path;
747 else
748 cp = &conn->c_path[0];
749
750 spin_lock_irqsave(&cp->cp_lock, flags);
751 /*
752 * Maybe someone else beat us to removing rm from the conn.
753 * If we race with their flag update we'll get the lock and
754 * then really see that the flag has been cleared.
755 */
756 if (!test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags)) {
757 spin_unlock_irqrestore(&cp->cp_lock, flags);
758 continue;
759 }
760 list_del_init(&rm->m_conn_item);
761 spin_unlock_irqrestore(&cp->cp_lock, flags);
762
763 /*
764 * Couldn't grab m_rs_lock in top loop (lock ordering),
765 * but we can now.
766 */
767 spin_lock_irqsave(&rm->m_rs_lock, flags);
768
769 spin_lock(&rs->rs_lock);
770 __rds_send_complete(rs, rm, RDS_RDMA_CANCELED);
771 spin_unlock(&rs->rs_lock);
772
773 spin_unlock_irqrestore(&rm->m_rs_lock, flags);
774
775 rds_message_put(rm);
776 }
777
778 rds_wake_sk_sleep(rs);
779
780 while (!list_empty(&list)) {
781 rm = list_entry(list.next, struct rds_message, m_sock_item);
782 list_del_init(&rm->m_sock_item);
783 rds_message_wait(rm);
784
785 /* just in case the code above skipped this message
786 * because RDS_MSG_ON_CONN wasn't set, run it again here.
787 * Taking m_rs_lock is the only thing that keeps us
788 * from racing with ack processing.
789 */
790 spin_lock_irqsave(&rm->m_rs_lock, flags);
791
792 spin_lock(&rs->rs_lock);
793 __rds_send_complete(rs, rm, RDS_RDMA_CANCELED);
794 spin_unlock(&rs->rs_lock);
795
796 spin_unlock_irqrestore(&rm->m_rs_lock, flags);
797
798 rds_message_put(rm);
799 }
800}
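
/*
 * The two-pass structure above exists because of lock ordering: the
 * established order is m_rs_lock before rs_lock (see
 * rds_send_remove_from_sock() above), so the first pass, which already
 * holds rs_lock, cannot take m_rs_lock; the completion notifiers are
 * therefore only delivered in the later passes over the private list.
 */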
801
802/*
803 * we only want this to fire once so we use the caller's 'queued'. It's
804 * possible that another thread can race with us and remove the
805 * message from the flow with RDS_CANCEL_SENT_TO.
806 */
807static int rds_send_queue_rm(struct rds_sock *rs, struct rds_connection *conn,
808 struct rds_conn_path *cp,
809 struct rds_message *rm, __be16 sport,
810 __be16 dport, int *queued)
811{
812 unsigned long flags;
813 u32 len;
814
815 if (*queued)
816 goto out;
817
818 len = be32_to_cpu(rm->m_inc.i_hdr.h_len);
819
820 /* this is the only place which holds both the socket's rs_lock
821 * and the connection's c_lock */
822 spin_lock_irqsave(&rs->rs_lock, flags);
823
824 /*
825 * If there is a little space in sndbuf, we don't queue anything,
826 * and userspace gets -EAGAIN. But poll() indicates there's send
827 * room. This can lead to bad behavior (spinning) if snd_bytes isn't
828 * freed up by incoming acks. So we check the *old* value of
829 * rs_snd_bytes here to allow the last msg to exceed the buffer,
830 * and poll() now knows no more data can be sent.
831 */
832 if (rs->rs_snd_bytes < rds_sk_sndbuf(rs)) {
833 rs->rs_snd_bytes += len;
834
835 /* let recv side know we are close to send space exhaustion.
836 * This is probably not the optimal way to do it, as this
837 * means we set the flag on *all* messages as soon as our
838 * throughput hits a certain threshold.
839 */
840 if (rs->rs_snd_bytes >= rds_sk_sndbuf(rs) / 2)
841 set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);
842
843 list_add_tail(&rm->m_sock_item, &rs->rs_send_queue);
844 set_bit(RDS_MSG_ON_SOCK, &rm->m_flags);
845 rds_message_addref(rm);
846 sock_hold(rds_rs_to_sk(rs));
847 rm->m_rs = rs;
848
849 /* The code ordering is a little weird, but we're
850 trying to minimize the time we hold c_lock */
851 rds_message_populate_header(&rm->m_inc.i_hdr, sport, dport, 0);
852 rm->m_inc.i_conn = conn;
853 rm->m_inc.i_conn_path = cp;
854 rds_message_addref(rm);
855
856 spin_lock(&cp->cp_lock);
857 rm->m_inc.i_hdr.h_sequence = cpu_to_be64(cp->cp_next_tx_seq++);
858 list_add_tail(&rm->m_conn_item, &cp->cp_send_queue);
859 set_bit(RDS_MSG_ON_CONN, &rm->m_flags);
860 spin_unlock(&cp->cp_lock);
861
862 rdsdebug("queued msg %p len %d, rs %p bytes %d seq %llu\n",
863 rm, len, rs, rs->rs_snd_bytes,
864 (unsigned long long)be64_to_cpu(rm->m_inc.i_hdr.h_sequence));
865
866 *queued = 1;
867 }
868
869 spin_unlock_irqrestore(&rs->rs_lock, flags);
870out:
871 return *queued;
872}
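
/*
 * On success the message now holds two extra references: one for the
 * socket's rs_send_queue (dropped when RDS_MSG_ON_SOCK is cleared in
 * rds_send_remove_from_sock() or rds_send_drop_to()) and one for the
 * connection path's send/retransmit lists (dropped once the message is
 * acked or dropped from the connection).
 */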
873
874/*
875 * rds_message is getting to be quite complicated, and we'd like to allocate
876 * it all in one go. This figures out how big it needs to be up front.
877 */
878static int rds_rm_size(struct msghdr *msg, int num_sgs)
879{
880 struct cmsghdr *cmsg;
881 int size = 0;
882 int cmsg_groups = 0;
883 int retval;
884 bool zcopy_cookie = false;
885
886 for_each_cmsghdr(cmsg, msg) {
887 if (!CMSG_OK(msg, cmsg))
888 return -EINVAL;
889
890 if (cmsg->cmsg_level != SOL_RDS)
891 continue;
892
893 switch (cmsg->cmsg_type) {
894 case RDS_CMSG_RDMA_ARGS:
895 cmsg_groups |= 1;
896 retval = rds_rdma_extra_size(CMSG_DATA(cmsg));
897 if (retval < 0)
898 return retval;
899 size += retval;
900
901 break;
902
903 case RDS_CMSG_ZCOPY_COOKIE:
904 zcopy_cookie = true;
905 /* fall through */
906
907 case RDS_CMSG_RDMA_DEST:
908 case RDS_CMSG_RDMA_MAP:
909 cmsg_groups |= 2;
910 /* these are valid but do not add any size */
911 break;
912
913 case RDS_CMSG_ATOMIC_CSWP:
914 case RDS_CMSG_ATOMIC_FADD:
915 case RDS_CMSG_MASKED_ATOMIC_CSWP:
916 case RDS_CMSG_MASKED_ATOMIC_FADD:
917 cmsg_groups |= 1;
918 size += sizeof(struct scatterlist);
919 break;
920
921 default:
922 return -EINVAL;
923 }
924
925 }
926
927 if ((msg->msg_flags & MSG_ZEROCOPY) && !zcopy_cookie)
928 return -EINVAL;
929
930 size += num_sgs * sizeof(struct scatterlist);
931
932 /* Ensure (DEST, MAP) are never used with (ARGS, ATOMIC) */
933 if (cmsg_groups == 3)
934 return -EINVAL;
935
936 return size;
937}
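
/*
 * cmsg_groups is used as a 2-bit mask: bit 0 marks cmsgs that carry
 * actual work (RDMA_ARGS and the atomic ops), bit 1 marks cookie-only
 * cmsgs (RDMA_DEST, RDMA_MAP, ZCOPY_COOKIE). A value of 3 means the two
 * kinds were mixed in a single sendmsg() call, which is rejected above.
 */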
938
939static int rds_cmsg_zcopy(struct rds_sock *rs, struct rds_message *rm,
940 struct cmsghdr *cmsg)
941{
942 u32 *cookie;
943
944 if (cmsg->cmsg_len < CMSG_LEN(sizeof(*cookie)) ||
945 !rm->data.op_mmp_znotifier)
946 return -EINVAL;
947 cookie = CMSG_DATA(cmsg);
948 rm->data.op_mmp_znotifier->z_cookie = *cookie;
949 return 0;
950}
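
/*
 * The cookie saved here is handed back to the sender in the zerocopy
 * completion notification once the pinned pages are released, letting
 * the application match completions to individual sendmsg() calls.
 */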
951
952static int rds_cmsg_send(struct rds_sock *rs, struct rds_message *rm,
953 struct msghdr *msg, int *allocated_mr)
954{
955 struct cmsghdr *cmsg;
956 int ret = 0;
957
958 for_each_cmsghdr(cmsg, msg) {
959 if (!CMSG_OK(msg, cmsg))
960 return -EINVAL;
961
962 if (cmsg->cmsg_level != SOL_RDS)
963 continue;
964
965 /* As a side effect, RDMA_DEST and RDMA_MAP will set
966 * rm->rdma.m_rdma_cookie and rm->rdma.m_rdma_mr.
967 */
968 switch (cmsg->cmsg_type) {
969 case RDS_CMSG_RDMA_ARGS:
970 ret = rds_cmsg_rdma_args(rs, rm, cmsg);
971 break;
972
973 case RDS_CMSG_RDMA_DEST:
974 ret = rds_cmsg_rdma_dest(rs, rm, cmsg);
975 break;
976
977 case RDS_CMSG_RDMA_MAP:
978 ret = rds_cmsg_rdma_map(rs, rm, cmsg);
979 if (!ret)
980 *allocated_mr = 1;
981 else if (ret == -ENODEV)
982 /* Accommodate the get_mr() case which can fail
983 * if connection isn't established yet.
984 */
985 ret = -EAGAIN;
986 break;
987 case RDS_CMSG_ATOMIC_CSWP:
988 case RDS_CMSG_ATOMIC_FADD:
989 case RDS_CMSG_MASKED_ATOMIC_CSWP:
990 case RDS_CMSG_MASKED_ATOMIC_FADD:
991 ret = rds_cmsg_atomic(rs, rm, cmsg);
992 break;
993
994 case RDS_CMSG_ZCOPY_COOKIE:
995 ret = rds_cmsg_zcopy(rs, rm, cmsg);
996 break;
997
998 default:
999 return -EINVAL;
1000 }
1001
1002 if (ret)
1003 break;
1004 }
1005
1006 return ret;
1007}
1008
1009static int rds_send_mprds_hash(struct rds_sock *rs, struct rds_connection *conn)
1010{
1011 int hash;
1012
1013 if (conn->c_npaths == 0)
1014 hash = RDS_MPATH_HASH(rs, RDS_MPATH_WORKERS);
1015 else
1016 hash = RDS_MPATH_HASH(rs, conn->c_npaths);
1017 if (conn->c_npaths == 0 && hash != 0) {
1018 rds_send_ping(conn, 0);
1019
1020 /* The underlying connection is not up yet. Need to wait
1021 * until it is up to be sure that the non-zero c_path can be
1022 * used. But if we are interrupted, we have to use the zero
1023 * c_path in case the connection ends up being non-MP capable.
1024 */
1025 if (conn->c_npaths == 0)
1026 if (wait_event_interruptible(conn->c_hs_waitq,
1027 conn->c_npaths != 0))
1028 hash = 0;
1029 if (conn->c_npaths == 1)
1030 hash = 0;
1031 }
1032 return hash;
1033}
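
/*
 * Until the multipath handshake completes (c_npaths == 0), the hash is
 * taken over the full RDS_MPATH_WORKERS range; a non-zero result
 * triggers a probe ping and waits for the peer to advertise its path
 * count, falling back to path 0 if the wait is interrupted or the peer
 * turns out not to be multipath capable (c_npaths == 1).
 */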
1034
1035static int rds_rdma_bytes(struct msghdr *msg, size_t *rdma_bytes)
1036{
1037 struct rds_rdma_args *args;
1038 struct cmsghdr *cmsg;
1039
1040 for_each_cmsghdr(cmsg, msg) {
1041 if (!CMSG_OK(msg, cmsg))
1042 return -EINVAL;
1043
1044 if (cmsg->cmsg_level != SOL_RDS)
1045 continue;
1046
1047 if (cmsg->cmsg_type == RDS_CMSG_RDMA_ARGS) {
1048 if (cmsg->cmsg_len <
1049 CMSG_LEN(sizeof(struct rds_rdma_args)))
1050 return -EINVAL;
1051 args = CMSG_DATA(cmsg);
1052 *rdma_bytes += args->remote_vec.bytes;
1053 }
1054 }
1055 return 0;
1056}
1057
1058int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
1059{
1060 struct sock *sk = sock->sk;
1061 struct rds_sock *rs = rds_sk_to_rs(sk);
1062 DECLARE_SOCKADDR(struct sockaddr_in *, usin, msg->msg_name);
1063 __be32 daddr;
1064 __be16 dport;
1065 struct rds_message *rm = NULL;
1066 struct rds_connection *conn;
1067 int ret = 0;
1068 int queued = 0, allocated_mr = 0;
1069 int nonblock = msg->msg_flags & MSG_DONTWAIT;
1070 long timeo = sock_sndtimeo(sk, nonblock);
1071 struct rds_conn_path *cpath;
1072 size_t total_payload_len = payload_len, rdma_payload_len = 0;
1073 bool zcopy = ((msg->msg_flags & MSG_ZEROCOPY) &&
1074 sock_flag(rds_rs_to_sk(rs), SOCK_ZEROCOPY));
1075 int num_sgs = ceil(payload_len, PAGE_SIZE);
1076
1077 /* Mirror Linux UDP's mirroring of BSD error message compatibility */
1078 /* XXX: Perhaps MSG_MORE someday */
1079 if (msg->msg_flags & ~(MSG_DONTWAIT | MSG_CMSG_COMPAT | MSG_ZEROCOPY)) {
1080 ret = -EOPNOTSUPP;
1081 goto out;
1082 }
1083
1084 if (msg->msg_namelen) {
1085 /* XXX fail non-unicast destination IPs? */
1086 if (msg->msg_namelen < sizeof(*usin) || usin->sin_family != AF_INET) {
1087 ret = -EINVAL;
1088 goto out;
1089 }
1090 daddr = usin->sin_addr.s_addr;
1091 dport = usin->sin_port;
1092 } else {
1093 /* We only care about consistency with ->connect() */
1094 lock_sock(sk);
1095 daddr = rs->rs_conn_addr;
1096 dport = rs->rs_conn_port;
1097 release_sock(sk);
1098 }
1099
1100 lock_sock(sk);
1101 if (daddr == 0 || rs->rs_bound_addr == 0) {
1102 release_sock(sk);
1103 ret = -ENOTCONN; /* XXX not a great errno */
1104 goto out;
1105 }
1106 release_sock(sk);
1107
1108 ret = rds_rdma_bytes(msg, &rdma_payload_len);
1109 if (ret)
1110 goto out;
1111
1112 total_payload_len += rdma_payload_len;
1113 if (max_t(size_t, payload_len, rdma_payload_len) > RDS_MAX_MSG_SIZE) {
1114 ret = -EMSGSIZE;
1115 goto out;
1116 }
1117
1118 if (payload_len > rds_sk_sndbuf(rs)) {
1119 ret = -EMSGSIZE;
1120 goto out;
1121 }
1122
1123 if (zcopy) {
1124 if (rs->rs_transport->t_type != RDS_TRANS_TCP) {
1125 ret = -EOPNOTSUPP;
1126 goto out;
1127 }
1128 num_sgs = iov_iter_npages(&msg->msg_iter, INT_MAX);
1129 }
1130 /* size of rm including all sgs */
1131 ret = rds_rm_size(msg, num_sgs);
1132 if (ret < 0)
1133 goto out;
1134
1135 rm = rds_message_alloc(ret, GFP_KERNEL);
1136 if (!rm) {
1137 ret = -ENOMEM;
1138 goto out;
1139 }
1140
1141 /* Attach data to the rm */
1142 if (payload_len) {
1143 rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs);
1144 if (!rm->data.op_sg) {
1145 ret = -ENOMEM;
1146 goto out;
1147 }
1148 ret = rds_message_copy_from_user(rm, &msg->msg_iter, zcopy);
1149 if (ret)
1150 goto out;
1151 }
1152 rm->data.op_active = 1;
1153
1154 rm->m_daddr = daddr;
1155
1156 /* rds_conn_create has a spinlock that runs with IRQ off.
1157 * Caching the conn in the socket helps a lot. */
1158 if (rs->rs_conn && rs->rs_conn->c_faddr == daddr)
1159 conn = rs->rs_conn;
1160 else {
1161 conn = rds_conn_create_outgoing(sock_net(sock->sk),
1162 rs->rs_bound_addr, daddr,
1163 rs->rs_transport,
1164 sock->sk->sk_allocation);
1165 if (IS_ERR(conn)) {
1166 ret = PTR_ERR(conn);
1167 goto out;
1168 }
1169 rs->rs_conn = conn;
1170 }
1171
1172 /* Parse any control messages the user may have included. */
1173 ret = rds_cmsg_send(rs, rm, msg, &allocated_mr);
1174 if (ret) {
1175 /* Trigger connection so that it's ready for the next retry */
1176 if (ret == -EAGAIN)
1177 rds_conn_connect_if_down(conn);
1178 goto out;
1179 }
1180
1181 if (rm->rdma.op_active && !conn->c_trans->xmit_rdma) {
1182 printk_ratelimited(KERN_NOTICE "rdma_op %p conn xmit_rdma %p\n",
1183 &rm->rdma, conn->c_trans->xmit_rdma);
1184 ret = -EOPNOTSUPP;
1185 goto out;
1186 }
1187
1188 if (rm->atomic.op_active && !conn->c_trans->xmit_atomic) {
1189 printk_ratelimited(KERN_NOTICE "atomic_op %p conn xmit_atomic %p\n",
1190 &rm->atomic, conn->c_trans->xmit_atomic);
1191 ret = -EOPNOTSUPP;
1192 goto out;
1193 }
1194
1195 if (conn->c_trans->t_mp_capable)
1196 cpath = &conn->c_path[rds_send_mprds_hash(rs, conn)];
1197 else
1198 cpath = &conn->c_path[0];
1199
1200 if (rds_destroy_pending(conn)) {
1201 ret = -EAGAIN;
1202 goto out;
1203 }
1204
1205 rds_conn_path_connect_if_down(cpath);
1206
1207 ret = rds_cong_wait(conn->c_fcong, dport, nonblock, rs);
1208 if (ret) {
1209 rs->rs_seen_congestion = 1;
1210 goto out;
1211 }
1212 while (!rds_send_queue_rm(rs, conn, cpath, rm, rs->rs_bound_port,
1213 dport, &queued)) {
1214 rds_stats_inc(s_send_queue_full);
1215
1216 if (nonblock) {
1217 ret = -EAGAIN;
1218 goto out;
1219 }
1220
1221 timeo = wait_event_interruptible_timeout(*sk_sleep(sk),
1222 rds_send_queue_rm(rs, conn, cpath, rm,
1223 rs->rs_bound_port,
1224 dport,
1225 &queued),
1226 timeo);
1227 rdsdebug("sendmsg woke queued %d timeo %ld\n", queued, timeo);
1228 if (timeo > 0 || timeo == MAX_SCHEDULE_TIMEOUT)
1229 continue;
1230
1231 ret = timeo;
1232 if (ret == 0)
1233 ret = -ETIMEDOUT;
1234 goto out;
1235 }
1236
1237 /*
1238 * By now we've committed to the send. We reuse rds_send_worker()
1239 * to retry sends in the rds thread if the transport asks us to.
1240 */
1241 rds_stats_inc(s_send_queued);
1242
1243 ret = rds_send_xmit(cpath);
1244 if (ret == -ENOMEM || ret == -EAGAIN) {
1245 ret = 0;
1246 rcu_read_lock();
1247 if (rds_destroy_pending(cpath->cp_conn))
1248 ret = -ENETUNREACH;
1249 else
1250 queue_delayed_work(rds_wq, &cpath->cp_send_w, 1);
1251 rcu_read_unlock();
1252 }
1253 if (ret)
1254 goto out;
1255 rds_message_put(rm);
1256 return payload_len;
1257
1258out:
1259 /* If the user included a RDMA_MAP cmsg, we allocated a MR on the fly.
1260 * If the sendmsg goes through, we keep the MR. If it fails with EAGAIN
1261 * or in any other way, we need to destroy the MR again */
1262 if (allocated_mr)
1263 rds_rdma_unuse(rs, rds_rdma_cookie_key(rm->m_rdma_cookie), 1);
1264
1265 if (rm)
1266 rds_message_put(rm);
1267 return ret;
1268}
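
/*
 * Userspace sketch (illustrative, not part of this file): a plain RDS
 * send is just sendmsg() on a bound PF_RDS socket with a sockaddr_in
 * destination; SOL_RDS control messages are only needed for RDMA,
 * atomics or zerocopy cookies. "rds_fd", "peer_ip", "buf" and "len"
 * below are placeholders.
 *
 *	struct sockaddr_in dst = {
 *		.sin_family = AF_INET,
 *		.sin_port = htons(4000),
 *		.sin_addr.s_addr = peer_ip,
 *	};
 *	struct iovec iov = { .iov_base = buf, .iov_len = len };
 *	struct msghdr msg = {
 *		.msg_name = &dst, .msg_namelen = sizeof(dst),
 *		.msg_iov = &iov, .msg_iovlen = 1,
 *	};
 *	sendmsg(rds_fd, &msg, 0);
 */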
1269
1270/*
1271 * send out a probe. Can be shared by rds_send_ping,
1272 * rds_send_pong, rds_send_hb.
1273 * rds_send_hb should use h_flags
1274 * RDS_FLAG_HB_PING|RDS_FLAG_ACK_REQUIRED
1275 * or
1276 * RDS_FLAG_HB_PONG|RDS_FLAG_ACK_REQUIRED
1277 */
1278static int
1279rds_send_probe(struct rds_conn_path *cp, __be16 sport,
1280 __be16 dport, u8 h_flags)
1281{
1282 struct rds_message *rm;
1283 unsigned long flags;
1284 int ret = 0;
1285
1286 rm = rds_message_alloc(0, GFP_ATOMIC);
1287 if (!rm) {
1288 ret = -ENOMEM;
1289 goto out;
1290 }
1291
1292 rm->m_daddr = cp->cp_conn->c_faddr;
1293 rm->data.op_active = 1;
1294
1295 rds_conn_path_connect_if_down(cp);
1296
1297 ret = rds_cong_wait(cp->cp_conn->c_fcong, dport, 1, NULL);
1298 if (ret)
1299 goto out;
1300
1301 spin_lock_irqsave(&cp->cp_lock, flags);
1302 list_add_tail(&rm->m_conn_item, &cp->cp_send_queue);
1303 set_bit(RDS_MSG_ON_CONN, &rm->m_flags);
1304 rds_message_addref(rm);
1305 rm->m_inc.i_conn = cp->cp_conn;
1306 rm->m_inc.i_conn_path = cp;
1307
1308 rds_message_populate_header(&rm->m_inc.i_hdr, sport, dport,
1309 cp->cp_next_tx_seq);
1310 rm->m_inc.i_hdr.h_flags |= h_flags;
1311 cp->cp_next_tx_seq++;
1312
1313 if (RDS_HS_PROBE(be16_to_cpu(sport), be16_to_cpu(dport)) &&
1314 cp->cp_conn->c_trans->t_mp_capable) {
1315 __be16 npaths = cpu_to_be16(RDS_MPATH_WORKERS);
1316 __be32 my_gen_num = cpu_to_be32(cp->cp_conn->c_my_gen_num);
1317
1318 rds_message_add_extension(&rm->m_inc.i_hdr,
1319 RDS_EXTHDR_NPATHS, &npaths,
1320 sizeof(npaths));
1321 rds_message_add_extension(&rm->m_inc.i_hdr,
1322 RDS_EXTHDR_GEN_NUM,
1323 &my_gen_num,
1324 sizeof(u32));
1325 }
1326 spin_unlock_irqrestore(&cp->cp_lock, flags);
1327
1328 rds_stats_inc(s_send_queued);
1329 rds_stats_inc(s_send_pong);
1330
1331 /* schedule the send work on rds_wq */
1332 rcu_read_lock();
1333 if (!rds_destroy_pending(cp->cp_conn))
1334 queue_delayed_work(rds_wq, &cp->cp_send_w, 1);
1335 rcu_read_unlock();
1336
1337 rds_message_put(rm);
1338 return 0;
1339
1340out:
1341 if (rm)
1342 rds_message_put(rm);
1343 return ret;
1344}
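
/*
 * For multipath-capable transports the handshake probe above piggybacks
 * two extension headers on the RDS header: RDS_EXTHDR_NPATHS (how many
 * paths this side is willing to use) and RDS_EXTHDR_GEN_NUM (its
 * generation number), which the peer uses to settle on c_npaths for the
 * connection.
 */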
1345
1346int
1347rds_send_pong(struct rds_conn_path *cp, __be16 dport)
1348{
1349 return rds_send_probe(cp, 0, dport, 0);
1350}
1351
1352void
1353rds_send_ping(struct rds_connection *conn, int cp_index)
1354{
1355 unsigned long flags;
1356 struct rds_conn_path *cp = &conn->c_path[cp_index];
1357
1358 spin_lock_irqsave(&cp->cp_lock, flags);
1359 if (conn->c_ping_triggered) {
1360 spin_unlock_irqrestore(&cp->cp_lock, flags);
1361 return;
1362 }
1363 conn->c_ping_triggered = 1;
1364 spin_unlock_irqrestore(&cp->cp_lock, flags);
1365 rds_send_probe(cp, cpu_to_be16(RDS_FLAG_PROBE_PORT), 0, 0);
1366}
1367EXPORT_SYMBOL_GPL(rds_send_ping);
1/*
2 * Copyright (c) 2006 Oracle. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 */
33#include <linux/kernel.h>
34#include <linux/gfp.h>
35#include <net/sock.h>
36#include <linux/in.h>
37#include <linux/list.h>
38#include <linux/ratelimit.h>
39
40#include "rds.h"
41
42/* When transmitting messages in rds_send_xmit, we need to emerge from
43 * time to time and briefly release the CPU. Otherwise the softlock watchdog
44 * will kick our shin.
45 * Also, it seems fairer to not let one busy connection stall all the
46 * others.
47 *
48 * send_batch_count is the number of times we'll loop in send_xmit. Setting
49 * it to 0 will restore the old behavior (where we looped until we had
50 * drained the queue).
51 */
52static int send_batch_count = 64;
53module_param(send_batch_count, int, 0444);
54MODULE_PARM_DESC(send_batch_count, " batch factor when working the send queue");
55
56static void rds_send_remove_from_sock(struct list_head *messages, int status);
57
58/*
59 * Reset the send state. Callers must ensure that this doesn't race with
60 * rds_send_xmit().
61 */
62void rds_send_reset(struct rds_connection *conn)
63{
64 struct rds_message *rm, *tmp;
65 unsigned long flags;
66
67 if (conn->c_xmit_rm) {
68 rm = conn->c_xmit_rm;
69 conn->c_xmit_rm = NULL;
70 /* Tell the user the RDMA op is no longer mapped by the
71 * transport. This isn't entirely true (it's flushed out
72 * independently) but as the connection is down, there's
73 * no ongoing RDMA to/from that memory */
74 rds_message_unmapped(rm);
75 rds_message_put(rm);
76 }
77
78 conn->c_xmit_sg = 0;
79 conn->c_xmit_hdr_off = 0;
80 conn->c_xmit_data_off = 0;
81 conn->c_xmit_atomic_sent = 0;
82 conn->c_xmit_rdma_sent = 0;
83 conn->c_xmit_data_sent = 0;
84
85 conn->c_map_queued = 0;
86
87 conn->c_unacked_packets = rds_sysctl_max_unacked_packets;
88 conn->c_unacked_bytes = rds_sysctl_max_unacked_bytes;
89
90 /* Mark messages as retransmissions, and move them to the send q */
91 spin_lock_irqsave(&conn->c_lock, flags);
92 list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
93 set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);
94 set_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags);
95 }
96 list_splice_init(&conn->c_retrans, &conn->c_send_queue);
97 spin_unlock_irqrestore(&conn->c_lock, flags);
98}
99
100static int acquire_in_xmit(struct rds_connection *conn)
101{
102 return test_and_set_bit(RDS_IN_XMIT, &conn->c_flags) == 0;
103}
104
105static void release_in_xmit(struct rds_connection *conn)
106{
107 clear_bit(RDS_IN_XMIT, &conn->c_flags);
108 smp_mb__after_clear_bit();
109 /*
110 * We don't use wait_on_bit()/wake_up_bit() because our waking is in a
111 * hot path and finding waiters is very rare. We don't want to walk
112 * the system-wide hashed waitqueue buckets in the fast path only to
113 * almost never find waiters.
114 */
115 if (waitqueue_active(&conn->c_waitq))
116 wake_up_all(&conn->c_waitq);
117}
118
119/*
120 * We're making the conscious trade-off here to only send one message
121 * down the connection at a time.
122 * Pro:
123 * - tx queueing is a simple fifo list
124 * - reassembly is optional and easily done by transports per conn
125 * - no per flow rx lookup at all, straight to the socket
126 * - less per-frag memory and wire overhead
127 * Con:
128 * - queued acks can be delayed behind large messages
129 * Depends:
130 * - small message latency is higher behind queued large messages
131 * - large message latency isn't starved by intervening small sends
132 */
133int rds_send_xmit(struct rds_connection *conn)
134{
135 struct rds_message *rm;
136 unsigned long flags;
137 unsigned int tmp;
138 struct scatterlist *sg;
139 int ret = 0;
140 LIST_HEAD(to_be_dropped);
141
142restart:
143
144 /*
145 * sendmsg calls here after having queued its message on the send
146 * queue. We only have one task feeding the connection at a time. If
147 * another thread is already feeding the queue then we back off. This
148 * avoids blocking the caller and trading per-connection data between
149 * caches per message.
150 */
151 if (!acquire_in_xmit(conn)) {
152 rds_stats_inc(s_send_lock_contention);
153 ret = -ENOMEM;
154 goto out;
155 }
156
157 /*
158 * rds_conn_shutdown() sets the conn state and then tests RDS_IN_XMIT,
159 * we do the opposite to avoid races.
160 */
161 if (!rds_conn_up(conn)) {
162 release_in_xmit(conn);
163 ret = 0;
164 goto out;
165 }
166
167 if (conn->c_trans->xmit_prepare)
168 conn->c_trans->xmit_prepare(conn);
169
170 /*
171 * spin trying to push headers and data down the connection until
172 * the connection doesn't make forward progress.
173 */
174 while (1) {
175
176 rm = conn->c_xmit_rm;
177
178 /*
179 * If between sending messages, we can send a pending congestion
180 * map update.
181 */
182 if (!rm && test_and_clear_bit(0, &conn->c_map_queued)) {
183 rm = rds_cong_update_alloc(conn);
184 if (IS_ERR(rm)) {
185 ret = PTR_ERR(rm);
186 break;
187 }
188 rm->data.op_active = 1;
189
190 conn->c_xmit_rm = rm;
191 }
192
193 /*
194 * If not already working on one, grab the next message.
195 *
196 * c_xmit_rm holds a ref while we're sending this message down
197 * the connction. We can use this ref while holding the
198 * send_sem.. rds_send_reset() is serialized with it.
199 */
200 if (!rm) {
201 unsigned int len;
202
203 spin_lock_irqsave(&conn->c_lock, flags);
204
205 if (!list_empty(&conn->c_send_queue)) {
206 rm = list_entry(conn->c_send_queue.next,
207 struct rds_message,
208 m_conn_item);
209 rds_message_addref(rm);
210
211 /*
212 * Move the message from the send queue to the retransmit
213 * list right away.
214 */
215 list_move_tail(&rm->m_conn_item, &conn->c_retrans);
216 }
217
218 spin_unlock_irqrestore(&conn->c_lock, flags);
219
220 if (!rm)
221 break;
222
223 /* Unfortunately, the way Infiniband deals with
224 * RDMA to a bad MR key is by moving the entire
225 * queue pair to error state. We cold possibly
226 * recover from that, but right now we drop the
227 * connection.
228 * Therefore, we never retransmit messages with RDMA ops.
229 */
230 if (rm->rdma.op_active &&
231 test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags)) {
232 spin_lock_irqsave(&conn->c_lock, flags);
233 if (test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags))
234 list_move(&rm->m_conn_item, &to_be_dropped);
235 spin_unlock_irqrestore(&conn->c_lock, flags);
236 continue;
237 }
238
239 /* Require an ACK every once in a while */
240 len = ntohl(rm->m_inc.i_hdr.h_len);
241 if (conn->c_unacked_packets == 0 ||
242 conn->c_unacked_bytes < len) {
243 __set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);
244
245 conn->c_unacked_packets = rds_sysctl_max_unacked_packets;
246 conn->c_unacked_bytes = rds_sysctl_max_unacked_bytes;
247 rds_stats_inc(s_send_ack_required);
248 } else {
249 conn->c_unacked_bytes -= len;
250 conn->c_unacked_packets--;
251 }
252
253 conn->c_xmit_rm = rm;
254 }
255
256 /* The transport either sends the whole rdma or none of it */
257 if (rm->rdma.op_active && !conn->c_xmit_rdma_sent) {
258 rm->m_final_op = &rm->rdma;
259 ret = conn->c_trans->xmit_rdma(conn, &rm->rdma);
260 if (ret)
261 break;
262 conn->c_xmit_rdma_sent = 1;
263
264 /* The transport owns the mapped memory for now.
265 * You can't unmap it while it's on the send queue */
266 set_bit(RDS_MSG_MAPPED, &rm->m_flags);
267 }
268
269 if (rm->atomic.op_active && !conn->c_xmit_atomic_sent) {
270 rm->m_final_op = &rm->atomic;
271 ret = conn->c_trans->xmit_atomic(conn, &rm->atomic);
272 if (ret)
273 break;
274 conn->c_xmit_atomic_sent = 1;
275
276 /* The transport owns the mapped memory for now.
277 * You can't unmap it while it's on the send queue */
278 set_bit(RDS_MSG_MAPPED, &rm->m_flags);
279 }
280
281 /*
282 * A number of cases require an RDS header to be sent
283 * even if there is no data.
284 * We permit 0-byte sends; rds-ping depends on this.
285 * However, if there are exclusively attached silent ops,
286 * we skip the hdr/data send, to enable silent operation.
287 */
288 if (rm->data.op_nents == 0) {
289 int ops_present;
290 int all_ops_are_silent = 1;
291
292 ops_present = (rm->atomic.op_active || rm->rdma.op_active);
293 if (rm->atomic.op_active && !rm->atomic.op_silent)
294 all_ops_are_silent = 0;
295 if (rm->rdma.op_active && !rm->rdma.op_silent)
296 all_ops_are_silent = 0;
297
298 if (ops_present && all_ops_are_silent
299 && !rm->m_rdma_cookie)
300 rm->data.op_active = 0;
301 }
302
303 if (rm->data.op_active && !conn->c_xmit_data_sent) {
304 rm->m_final_op = &rm->data;
305 ret = conn->c_trans->xmit(conn, rm,
306 conn->c_xmit_hdr_off,
307 conn->c_xmit_sg,
308 conn->c_xmit_data_off);
309 if (ret <= 0)
310 break;
311
312 if (conn->c_xmit_hdr_off < sizeof(struct rds_header)) {
313 tmp = min_t(int, ret,
314 sizeof(struct rds_header) -
315 conn->c_xmit_hdr_off);
316 conn->c_xmit_hdr_off += tmp;
317 ret -= tmp;
318 }
319
320 sg = &rm->data.op_sg[conn->c_xmit_sg];
321 while (ret) {
322 tmp = min_t(int, ret, sg->length -
323 conn->c_xmit_data_off);
324 conn->c_xmit_data_off += tmp;
325 ret -= tmp;
326 if (conn->c_xmit_data_off == sg->length) {
327 conn->c_xmit_data_off = 0;
328 sg++;
329 conn->c_xmit_sg++;
330 BUG_ON(ret != 0 &&
331 conn->c_xmit_sg == rm->data.op_nents);
332 }
333 }
334
335 if (conn->c_xmit_hdr_off == sizeof(struct rds_header) &&
336 (conn->c_xmit_sg == rm->data.op_nents))
337 conn->c_xmit_data_sent = 1;
338 }
339
340 /*
341 * A rm will only take multiple times through this loop
342 * if there is a data op. Thus, if the data is sent (or there was
343 * none), then we're done with the rm.
344 */
345 if (!rm->data.op_active || conn->c_xmit_data_sent) {
346 conn->c_xmit_rm = NULL;
347 conn->c_xmit_sg = 0;
348 conn->c_xmit_hdr_off = 0;
349 conn->c_xmit_data_off = 0;
350 conn->c_xmit_rdma_sent = 0;
351 conn->c_xmit_atomic_sent = 0;
352 conn->c_xmit_data_sent = 0;
353
354 rds_message_put(rm);
355 }
356 }
357
358 if (conn->c_trans->xmit_complete)
359 conn->c_trans->xmit_complete(conn);
360
361 release_in_xmit(conn);
362
363 /* Nuke any messages we decided not to retransmit. */
364 if (!list_empty(&to_be_dropped)) {
365 /* irqs on here, so we can put(), unlike above */
366 list_for_each_entry(rm, &to_be_dropped, m_conn_item)
367 rds_message_put(rm);
368 rds_send_remove_from_sock(&to_be_dropped, RDS_RDMA_DROPPED);
369 }
370
371 /*
372 * Other senders can queue a message after we last test the send queue
373 * but before we clear RDS_IN_XMIT. In that case they'd back off and
374 * not try and send their newly queued message. We need to check the
375 * send queue after having cleared RDS_IN_XMIT so that their message
376 * doesn't get stuck on the send queue.
377 *
378 * If the transport cannot continue (i.e ret != 0), then it must
379 * call us when more room is available, such as from the tx
380 * completion handler.
381 */
382 if (ret == 0) {
383 smp_mb();
384 if (!list_empty(&conn->c_send_queue)) {
385 rds_stats_inc(s_send_lock_queue_raced);
386 goto restart;
387 }
388 }
389out:
390 return ret;
391}
392
393static void rds_send_sndbuf_remove(struct rds_sock *rs, struct rds_message *rm)
394{
395 u32 len = be32_to_cpu(rm->m_inc.i_hdr.h_len);
396
397 assert_spin_locked(&rs->rs_lock);
398
399 BUG_ON(rs->rs_snd_bytes < len);
400 rs->rs_snd_bytes -= len;
401
402 if (rs->rs_snd_bytes == 0)
403 rds_stats_inc(s_send_queue_empty);
404}
405
406static inline int rds_send_is_acked(struct rds_message *rm, u64 ack,
407 is_acked_func is_acked)
408{
409 if (is_acked)
410 return is_acked(rm, ack);
411 return be64_to_cpu(rm->m_inc.i_hdr.h_sequence) <= ack;
412}
413
414/*
415 * This is pretty similar to what happens below in the ACK
416 * handling code - except that we call here as soon as we get
417 * the IB send completion on the RDMA op and the accompanying
418 * message.
419 */
420void rds_rdma_send_complete(struct rds_message *rm, int status)
421{
422 struct rds_sock *rs = NULL;
423 struct rm_rdma_op *ro;
424 struct rds_notifier *notifier;
425 unsigned long flags;
426
427 spin_lock_irqsave(&rm->m_rs_lock, flags);
428
429 ro = &rm->rdma;
430 if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags) &&
431 ro->op_active && ro->op_notify && ro->op_notifier) {
432 notifier = ro->op_notifier;
433 rs = rm->m_rs;
434 sock_hold(rds_rs_to_sk(rs));
435
436 notifier->n_status = status;
437 spin_lock(&rs->rs_lock);
438 list_add_tail(¬ifier->n_list, &rs->rs_notify_queue);
439 spin_unlock(&rs->rs_lock);
440
441 ro->op_notifier = NULL;
442 }
443
444 spin_unlock_irqrestore(&rm->m_rs_lock, flags);
445
446 if (rs) {
447 rds_wake_sk_sleep(rs);
448 sock_put(rds_rs_to_sk(rs));
449 }
450}
451EXPORT_SYMBOL_GPL(rds_rdma_send_complete);
452
453/*
454 * Just like above, except looks at atomic op
455 */
456void rds_atomic_send_complete(struct rds_message *rm, int status)
457{
458 struct rds_sock *rs = NULL;
459 struct rm_atomic_op *ao;
460 struct rds_notifier *notifier;
461 unsigned long flags;
462
463 spin_lock_irqsave(&rm->m_rs_lock, flags);
464
465 ao = &rm->atomic;
466 if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags)
467 && ao->op_active && ao->op_notify && ao->op_notifier) {
468 notifier = ao->op_notifier;
469 rs = rm->m_rs;
470 sock_hold(rds_rs_to_sk(rs));
471
472 notifier->n_status = status;
473 spin_lock(&rs->rs_lock);
474 list_add_tail(¬ifier->n_list, &rs->rs_notify_queue);
475 spin_unlock(&rs->rs_lock);
476
477 ao->op_notifier = NULL;
478 }
479
480 spin_unlock_irqrestore(&rm->m_rs_lock, flags);
481
482 if (rs) {
483 rds_wake_sk_sleep(rs);
484 sock_put(rds_rs_to_sk(rs));
485 }
486}
487EXPORT_SYMBOL_GPL(rds_atomic_send_complete);
488
489/*
490 * This is the same as rds_rdma_send_complete except we
491 * don't do any locking - we have all the ingredients (message,
492 * socket, socket lock) and can just move the notifier.
493 */
494static inline void
495__rds_send_complete(struct rds_sock *rs, struct rds_message *rm, int status)
496{
497 struct rm_rdma_op *ro;
498 struct rm_atomic_op *ao;
499
500 ro = &rm->rdma;
501 if (ro->op_active && ro->op_notify && ro->op_notifier) {
502 ro->op_notifier->n_status = status;
503 list_add_tail(&ro->op_notifier->n_list, &rs->rs_notify_queue);
504 ro->op_notifier = NULL;
505 }
506
507 ao = &rm->atomic;
508 if (ao->op_active && ao->op_notify && ao->op_notifier) {
509 ao->op_notifier->n_status = status;
510 list_add_tail(&ao->op_notifier->n_list, &rs->rs_notify_queue);
511 ao->op_notifier = NULL;
512 }
513
514 /* No need to wake the app - caller does this */
515}
516
517/*
518 * This is called from the IB send completion when we detect
519 * a RDMA operation that failed with remote access error.
520 * So speed is not an issue here.
521 */
522struct rds_message *rds_send_get_message(struct rds_connection *conn,
523 struct rm_rdma_op *op)
524{
525 struct rds_message *rm, *tmp, *found = NULL;
526 unsigned long flags;
527
528 spin_lock_irqsave(&conn->c_lock, flags);
529
530 list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
531 if (&rm->rdma == op) {
532 atomic_inc(&rm->m_refcount);
533 found = rm;
534 goto out;
535 }
536 }
537
538 list_for_each_entry_safe(rm, tmp, &conn->c_send_queue, m_conn_item) {
539 if (&rm->rdma == op) {
540 atomic_inc(&rm->m_refcount);
541 found = rm;
542 break;
543 }
544 }
545
546out:
547 spin_unlock_irqrestore(&conn->c_lock, flags);
548
549 return found;
550}
551EXPORT_SYMBOL_GPL(rds_send_get_message);
552
553/*
554 * This removes messages from the socket's list if they're on it. The list
555 * argument must be private to the caller, we must be able to modify it
556 * without locks. The messages must have a reference held for their
557 * position on the list. This function will drop that reference after
558 * removing the messages from the 'messages' list regardless of if it found
559 * the messages on the socket list or not.
560 */
561static void rds_send_remove_from_sock(struct list_head *messages, int status)
562{
563 unsigned long flags;
564 struct rds_sock *rs = NULL;
565 struct rds_message *rm;
566
567 while (!list_empty(messages)) {
568 int was_on_sock = 0;
569
570 rm = list_entry(messages->next, struct rds_message,
571 m_conn_item);
572 list_del_init(&rm->m_conn_item);
573
574 /*
575 * If we see this flag cleared then we're *sure* that someone
576 * else beat us to removing it from the sock. If we race
577 * with their flag update we'll get the lock and then really
578 * see that the flag has been cleared.
579 *
580 * The message spinlock makes sure nobody clears rm->m_rs
581 * while we're messing with it. It does not prevent the
582 * message from being removed from the socket, though.
583 */
584 spin_lock_irqsave(&rm->m_rs_lock, flags);
585 if (!test_bit(RDS_MSG_ON_SOCK, &rm->m_flags))
586 goto unlock_and_drop;
587
588 if (rs != rm->m_rs) {
589 if (rs) {
590 rds_wake_sk_sleep(rs);
591 sock_put(rds_rs_to_sk(rs));
592 }
593 rs = rm->m_rs;
594 sock_hold(rds_rs_to_sk(rs));
595 }
596 spin_lock(&rs->rs_lock);
597
598 if (test_and_clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags)) {
599 struct rm_rdma_op *ro = &rm->rdma;
600 struct rds_notifier *notifier;
601
602 list_del_init(&rm->m_sock_item);
603 rds_send_sndbuf_remove(rs, rm);
604
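			/* Queue the RDMA completion notifier if the app asked
			 * for one explicitly, or if error reporting (recverr)
			 * is enabled and the op finished with an error status. */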
605 if (ro->op_active && ro->op_notifier &&
606 (ro->op_notify || (ro->op_recverr && status))) {
607 notifier = ro->op_notifier;
608 list_add_tail(&notifier->n_list,
609 &rs->rs_notify_queue);
610 if (!notifier->n_status)
611 notifier->n_status = status;
612 rm->rdma.op_notifier = NULL;
613 }
614 was_on_sock = 1;
615 rm->m_rs = NULL;
616 }
617 spin_unlock(&rs->rs_lock);
618
619unlock_and_drop:
620 spin_unlock_irqrestore(&rm->m_rs_lock, flags);
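		/* The unconditional put drops the reference held for the
		 * message's spot on the caller's list; the second put drops
		 * the socket send queue's reference now that the message is
		 * off rs_send_queue. */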
621 rds_message_put(rm);
622 if (was_on_sock)
623 rds_message_put(rm);
624 }
625
626 if (rs) {
627 rds_wake_sk_sleep(rs);
628 sock_put(rds_rs_to_sk(rs));
629 }
630}
631
632/*
633 * Transports call here when they've determined that the receiver queued
634 * messages up to, and including, the given sequence number. Messages are
635 * moved to the retrans queue when rds_send_xmit picks them off the send
636 * queue. This means that in the TCP case, the message may not have been
637 * assigned the m_ack_seq yet - but that's fine as long as tcp_is_acked
638 * checks the RDS_MSG_HAS_ACK_SEQ bit.
639 *
640 * XXX It's not clear to me how this is safely serialized with socket
641 * destruction. Maybe it should bail if it sees SOCK_DEAD.
642 */
643void rds_send_drop_acked(struct rds_connection *conn, u64 ack,
644 is_acked_func is_acked)
645{
646 struct rds_message *rm, *tmp;
647 unsigned long flags;
648 LIST_HEAD(list);
649
650 spin_lock_irqsave(&conn->c_lock, flags);
651
652 list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
653 if (!rds_send_is_acked(rm, ack, is_acked))
654 break;
655
656 list_move(&rm->m_conn_item, &list);
657 clear_bit(RDS_MSG_ON_CONN, &rm->m_flags);
658 }
659
660 /* order flag updates with spin locks */
661 if (!list_empty(&list))
662 smp_mb__after_clear_bit();
663
664 spin_unlock_irqrestore(&conn->c_lock, flags);
665
666 /* now remove the messages from the sock list as needed */
667 rds_send_remove_from_sock(&list, RDS_RDMA_SUCCESS);
668}
669EXPORT_SYMBOL_GPL(rds_send_drop_acked);
670
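/*
 * Drop messages queued on this socket, either all of them (dest == NULL)
 * or only those addressed to the given destination.  Any pending RDMA or
 * atomic notifiers are completed with RDS_RDMA_CANCELED.
 */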
671void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest)
672{
673 struct rds_message *rm, *tmp;
674 struct rds_connection *conn;
675 unsigned long flags;
676 LIST_HEAD(list);
677
678 /* get all the messages we're dropping under the rs lock */
679 spin_lock_irqsave(&rs->rs_lock, flags);
680
681 list_for_each_entry_safe(rm, tmp, &rs->rs_send_queue, m_sock_item) {
682 if (dest && (dest->sin_addr.s_addr != rm->m_daddr ||
683 dest->sin_port != rm->m_inc.i_hdr.h_dport))
684 continue;
685
686 list_move(&rm->m_sock_item, &list);
687 rds_send_sndbuf_remove(rs, rm);
688 clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags);
689 }
690
691 /* order flag updates with the rs lock */
692 smp_mb__after_clear_bit();
693
694 spin_unlock_irqrestore(&rs->rs_lock, flags);
695
696 if (list_empty(&list))
697 return;
698
699 /* Remove the messages from the conn */
700 list_for_each_entry(rm, &list, m_sock_item) {
701
702 conn = rm->m_inc.i_conn;
703
704 spin_lock_irqsave(&conn->c_lock, flags);
705 /*
706 * Maybe someone else beat us to removing rm from the conn.
707 * If we race with their flag update we'll get the lock and
708 * then really see that the flag has been cleared.
709 */
710 if (!test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags)) {
711 spin_unlock_irqrestore(&conn->c_lock, flags);
712 continue;
713 }
714 list_del_init(&rm->m_conn_item);
715 spin_unlock_irqrestore(&conn->c_lock, flags);
716
717 /*
718 * Couldn't grab m_rs_lock in top loop (lock ordering),
719 * but we can now.
720 */
721 spin_lock_irqsave(&rm->m_rs_lock, flags);
722
723 spin_lock(&rs->rs_lock);
724 __rds_send_complete(rs, rm, RDS_RDMA_CANCELED);
725 spin_unlock(&rs->rs_lock);
726
727 rm->m_rs = NULL;
728 spin_unlock_irqrestore(&rm->m_rs_lock, flags);
729
730 rds_message_put(rm);
731 }
732
733 rds_wake_sk_sleep(rs);
734
735 while (!list_empty(&list)) {
736 rm = list_entry(list.next, struct rds_message, m_sock_item);
737 list_del_init(&rm->m_sock_item);
738
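		/* Wait for the transport to unmap the message before
		 * dropping what may be the last reference. */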
739 rds_message_wait(rm);
740 rds_message_put(rm);
741 }
742}
743
744/*
745 * we only want this to fire once so we use the caller's 'queued'. It's
746 * possible that another thread can race with us and remove the
747 * message from the flow with RDS_CANCEL_SENT_TO.
748 */
749static int rds_send_queue_rm(struct rds_sock *rs, struct rds_connection *conn,
750 struct rds_message *rm, __be16 sport,
751 __be16 dport, int *queued)
752{
753 unsigned long flags;
754 u32 len;
755
756 if (*queued)
757 goto out;
758
759 len = be32_to_cpu(rm->m_inc.i_hdr.h_len);
760
761 /* this is the only place which holds both the socket's rs_lock
762 * and the connection's c_lock */
763 spin_lock_irqsave(&rs->rs_lock, flags);
764
765 /*
766 * If we required the whole message to fit, then with only a little
767 * space left in sndbuf we wouldn't queue anything and userspace would
768 * get -EAGAIN, while poll() still indicated send room. That can cause
769 * bad behavior (spinning) if snd_bytes isn't freed up by incoming acks.
770 * So we check the *old* value of rs_snd_bytes here, allowing the last
771 * msg to exceed the buffer, and poll() then knows no more data can be sent.
772 */
773 if (rs->rs_snd_bytes < rds_sk_sndbuf(rs)) {
774 rs->rs_snd_bytes += len;
775
776 /* let recv side know we are close to send space exhaustion.
777 * This is probably not the optimal way to do it, as this
778 * means we set the flag on *all* messages once the queued
779 * bytes cross half of the socket's send buffer.
780 */
781 if (rs->rs_snd_bytes >= rds_sk_sndbuf(rs) / 2)
782 __set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);
783
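		/* Take one reference for the socket send queue here and a
		 * second below for the connection send queue; each queue
		 * drops its reference when the message is removed from it. */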
784 list_add_tail(&rm->m_sock_item, &rs->rs_send_queue);
785 set_bit(RDS_MSG_ON_SOCK, &rm->m_flags);
786 rds_message_addref(rm);
787 rm->m_rs = rs;
788
789 /* The code ordering is a little weird, but we're
790 trying to minimize the time we hold c_lock */
791 rds_message_populate_header(&rm->m_inc.i_hdr, sport, dport, 0);
792 rm->m_inc.i_conn = conn;
793 rds_message_addref(rm);
794
795 spin_lock(&conn->c_lock);
796 rm->m_inc.i_hdr.h_sequence = cpu_to_be64(conn->c_next_tx_seq++);
797 list_add_tail(&rm->m_conn_item, &conn->c_send_queue);
798 set_bit(RDS_MSG_ON_CONN, &rm->m_flags);
799 spin_unlock(&conn->c_lock);
800
801 rdsdebug("queued msg %p len %d, rs %p bytes %d seq %llu\n",
802 rm, len, rs, rs->rs_snd_bytes,
803 (unsigned long long)be64_to_cpu(rm->m_inc.i_hdr.h_sequence));
804
805 *queued = 1;
806 }
807
808 spin_unlock_irqrestore(&rs->rs_lock, flags);
809out:
810 return *queued;
811}
812
813/*
814 * rds_message is getting to be quite complicated, and we'd like to allocate
815 * it all in one go. This figures out how big it needs to be up front.
816 */
817static int rds_rm_size(struct msghdr *msg, int data_len)
818{
819 struct cmsghdr *cmsg;
820 int size = 0;
821 int cmsg_groups = 0;
822 int retval;
823
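	/* cmsg_groups records which kinds of cmsgs were seen: bit 0 for
	 * RDMA args and atomic ops, bit 1 for RDMA_DEST/RDMA_MAP.  Seeing
	 * both groups (== 3) is rejected below. */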
824 for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
825 if (!CMSG_OK(msg, cmsg))
826 return -EINVAL;
827
828 if (cmsg->cmsg_level != SOL_RDS)
829 continue;
830
831 switch (cmsg->cmsg_type) {
832 case RDS_CMSG_RDMA_ARGS:
833 cmsg_groups |= 1;
834 retval = rds_rdma_extra_size(CMSG_DATA(cmsg));
835 if (retval < 0)
836 return retval;
837 size += retval;
838
839 break;
840
841 case RDS_CMSG_RDMA_DEST:
842 case RDS_CMSG_RDMA_MAP:
843 cmsg_groups |= 2;
844 /* these are valid but do not add any size */
845 break;
846
847 case RDS_CMSG_ATOMIC_CSWP:
848 case RDS_CMSG_ATOMIC_FADD:
849 case RDS_CMSG_MASKED_ATOMIC_CSWP:
850 case RDS_CMSG_MASKED_ATOMIC_FADD:
851 cmsg_groups |= 1;
852 size += sizeof(struct scatterlist);
853 break;
854
855 default:
856 return -EINVAL;
857 }
858
859 }
860
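	/* one scatterlist entry per page of payload data */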
861 size += ceil(data_len, PAGE_SIZE) * sizeof(struct scatterlist);
862
863 /* Ensure (DEST, MAP) are never used with (ARGS, ATOMIC) */
864 if (cmsg_groups == 3)
865 return -EINVAL;
866
867 return size;
868}
869
870static int rds_cmsg_send(struct rds_sock *rs, struct rds_message *rm,
871 struct msghdr *msg, int *allocated_mr)
872{
873 struct cmsghdr *cmsg;
874 int ret = 0;
875
876 for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
877 if (!CMSG_OK(msg, cmsg))
878 return -EINVAL;
879
880 if (cmsg->cmsg_level != SOL_RDS)
881 continue;
882
883 /* As a side effect, RDMA_DEST and RDMA_MAP will set
884 * rm->rdma.m_rdma_cookie and rm->rdma.m_rdma_mr.
885 */
886 switch (cmsg->cmsg_type) {
887 case RDS_CMSG_RDMA_ARGS:
888 ret = rds_cmsg_rdma_args(rs, rm, cmsg);
889 break;
890
891 case RDS_CMSG_RDMA_DEST:
892 ret = rds_cmsg_rdma_dest(rs, rm, cmsg);
893 break;
894
895 case RDS_CMSG_RDMA_MAP:
896 ret = rds_cmsg_rdma_map(rs, rm, cmsg);
897 if (!ret)
898 *allocated_mr = 1;
899 break;
900 case RDS_CMSG_ATOMIC_CSWP:
901 case RDS_CMSG_ATOMIC_FADD:
902 case RDS_CMSG_MASKED_ATOMIC_CSWP:
903 case RDS_CMSG_MASKED_ATOMIC_FADD:
904 ret = rds_cmsg_atomic(rs, rm, cmsg);
905 break;
906
907 default:
908 return -EINVAL;
909 }
910
911 if (ret)
912 break;
913 }
914
915 return ret;
916}
917
918int rds_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
919 size_t payload_len)
920{
921 struct sock *sk = sock->sk;
922 struct rds_sock *rs = rds_sk_to_rs(sk);
923 struct sockaddr_in *usin = (struct sockaddr_in *)msg->msg_name;
924 __be32 daddr;
925 __be16 dport;
926 struct rds_message *rm = NULL;
927 struct rds_connection *conn;
928 int ret = 0;
929 int queued = 0, allocated_mr = 0;
930 int nonblock = msg->msg_flags & MSG_DONTWAIT;
931 long timeo = sock_sndtimeo(sk, nonblock);
932
933 /* Mirror Linux UDP's mirroring of BSD error message compatibility */
934 /* XXX: Perhaps MSG_MORE someday */
935 if (msg->msg_flags & ~(MSG_DONTWAIT | MSG_CMSG_COMPAT)) {
936 printk(KERN_INFO "msg_flags 0x%08X\n", msg->msg_flags);
937 ret = -EOPNOTSUPP;
938 goto out;
939 }
940
941 if (msg->msg_namelen) {
942 /* XXX fail non-unicast destination IPs? */
943 if (msg->msg_namelen < sizeof(*usin) || usin->sin_family != AF_INET) {
944 ret = -EINVAL;
945 goto out;
946 }
947 daddr = usin->sin_addr.s_addr;
948 dport = usin->sin_port;
949 } else {
950 /* We only care about consistency with ->connect() */
951 lock_sock(sk);
952 daddr = rs->rs_conn_addr;
953 dport = rs->rs_conn_port;
954 release_sock(sk);
955 }
956
957 /* racing with another thread binding seems ok here */
958 if (daddr == 0 || rs->rs_bound_addr == 0) {
959 ret = -ENOTCONN; /* XXX not a great errno */
960 goto out;
961 }
962
963 /* size of rm including all sgs */
964 ret = rds_rm_size(msg, payload_len);
965 if (ret < 0)
966 goto out;
967
968 rm = rds_message_alloc(ret, GFP_KERNEL);
969 if (!rm) {
970 ret = -ENOMEM;
971 goto out;
972 }
973
974 /* Attach data to the rm */
975 if (payload_len) {
976 rm->data.op_sg = rds_message_alloc_sgs(rm, ceil(payload_len, PAGE_SIZE));
977 if (!rm->data.op_sg) {
978 ret = -ENOMEM;
979 goto out;
980 }
981 ret = rds_message_copy_from_user(rm, msg->msg_iov, payload_len);
982 if (ret)
983 goto out;
984 }
985 rm->data.op_active = 1;
986
987 rm->m_daddr = daddr;
988
989 /* rds_conn_create has a spinlock that runs with IRQ off.
990 * Caching the conn in the socket helps a lot. */
991 if (rs->rs_conn && rs->rs_conn->c_faddr == daddr)
992 conn = rs->rs_conn;
993 else {
994 conn = rds_conn_create_outgoing(rs->rs_bound_addr, daddr,
995 rs->rs_transport,
996 sock->sk->sk_allocation);
997 if (IS_ERR(conn)) {
998 ret = PTR_ERR(conn);
999 goto out;
1000 }
1001 rs->rs_conn = conn;
1002 }
1003
1004 /* Parse any control messages the user may have included. */
1005 ret = rds_cmsg_send(rs, rm, msg, &allocated_mr);
1006 if (ret)
1007 goto out;
1008
1009 if (rm->rdma.op_active && !conn->c_trans->xmit_rdma) {
1010 printk_ratelimited(KERN_NOTICE "rdma_op %p conn xmit_rdma %p\n",
1011 &rm->rdma, conn->c_trans->xmit_rdma);
1012 ret = -EOPNOTSUPP;
1013 goto out;
1014 }
1015
1016 if (rm->atomic.op_active && !conn->c_trans->xmit_atomic) {
1017 printk_ratelimited(KERN_NOTICE "atomic_op %p conn xmit_atomic %p\n",
1018 &rm->atomic, conn->c_trans->xmit_atomic);
1019 ret = -EOPNOTSUPP;
1020 goto out;
1021 }
1022
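	/* Make sure a connection attempt is in flight if the conn is down. */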
1023 rds_conn_connect_if_down(conn);
1024
1025 ret = rds_cong_wait(conn->c_fcong, dport, nonblock, rs);
1026 if (ret) {
1027 rs->rs_seen_congestion = 1;
1028 goto out;
1029 }
1030
1031 while (!rds_send_queue_rm(rs, conn, rm, rs->rs_bound_port,
1032 dport, &queued)) {
1033 rds_stats_inc(s_send_queue_full);
1034 /* XXX make sure this is reasonable */
1035 if (payload_len > rds_sk_sndbuf(rs)) {
1036 ret = -EMSGSIZE;
1037 goto out;
1038 }
1039 if (nonblock) {
1040 ret = -EAGAIN;
1041 goto out;
1042 }
1043
1044 timeo = wait_event_interruptible_timeout(*sk_sleep(sk),
1045 rds_send_queue_rm(rs, conn, rm,
1046 rs->rs_bound_port,
1047 dport,
1048 &queued),
1049 timeo);
1050 rdsdebug("sendmsg woke queued %d timeo %ld\n", queued, timeo);
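		/* A positive return (or MAX_SCHEDULE_TIMEOUT for a blocking
		 * socket with no send timeout) means we were woken up, so
		 * retry queueing.  0 means the timeout expired and is turned
		 * into -ETIMEDOUT below; a negative value is -ERESTARTSYS
		 * from a signal and is returned as-is. */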
1051 if (timeo > 0 || timeo == MAX_SCHEDULE_TIMEOUT)
1052 continue;
1053
1054 ret = timeo;
1055 if (ret == 0)
1056 ret = -ETIMEDOUT;
1057 goto out;
1058 }
1059
1060 /*
1061 * By now we've committed to the send. We reuse rds_send_worker()
1062 * to retry sends in the rds thread if the transport asks us to.
1063 */
1064 rds_stats_inc(s_send_queued);
1065
1066 if (!test_bit(RDS_LL_SEND_FULL, &conn->c_flags))
1067 rds_send_xmit(conn);
1068
1069 rds_message_put(rm);
1070 return payload_len;
1071
1072out:
1073 /* If the user included a RDMA_MAP cmsg, we allocated an MR on the fly.
1074 * If the sendmsg goes through, we keep the MR. If it fails with EAGAIN
1075 * or in any other way, we need to destroy the MR again */
1076 if (allocated_mr)
1077 rds_rdma_unuse(rs, rds_rdma_cookie_key(rm->m_rdma_cookie), 1);
1078
1079 if (rm)
1080 rds_message_put(rm);
1081 return ret;
1082}
1083
1084/*
1085 * Reply to a ping packet.
1086 */
1087int
1088rds_send_pong(struct rds_connection *conn, __be16 dport)
1089{
1090 struct rds_message *rm;
1091 unsigned long flags;
1092 int ret = 0;
1093
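	/* A pong is just a zero-length data message sent from port 0 back
	 * to the port that sent the ping. */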
1094 rm = rds_message_alloc(0, GFP_ATOMIC);
1095 if (!rm) {
1096 ret = -ENOMEM;
1097 goto out;
1098 }
1099
1100 rm->m_daddr = conn->c_faddr;
1101 rm->data.op_active = 1;
1102
1103 rds_conn_connect_if_down(conn);
1104
1105 ret = rds_cong_wait(conn->c_fcong, dport, 1, NULL);
1106 if (ret)
1107 goto out;
1108
1109 spin_lock_irqsave(&conn->c_lock, flags);
1110 list_add_tail(&rm->m_conn_item, &conn->c_send_queue);
1111 set_bit(RDS_MSG_ON_CONN, &rm->m_flags);
1112 rds_message_addref(rm);
1113 rm->m_inc.i_conn = conn;
1114
1115 rds_message_populate_header(&rm->m_inc.i_hdr, 0, dport,
1116 conn->c_next_tx_seq);
1117 conn->c_next_tx_seq++;
1118 spin_unlock_irqrestore(&conn->c_lock, flags);
1119
1120 rds_stats_inc(s_send_queued);
1121 rds_stats_inc(s_send_pong);
1122
1123 if (!test_bit(RDS_LL_SEND_FULL, &conn->c_flags))
1124 rds_send_xmit(conn);
1125
1126 rds_message_put(rm);
1127 return 0;
1128
1129out:
1130 if (rm)
1131 rds_message_put(rm);
1132 return ret;
1133}