v4.6
  1/*
  2 * Copyright (c) 2006 Oracle.  All rights reserved.
  3 *
  4 * This software is available to you under a choice of one of two
  5 * licenses.  You may choose to be licensed under the terms of the GNU
  6 * General Public License (GPL) Version 2, available from the file
  7 * COPYING in the main directory of this source tree, or the
  8 * OpenIB.org BSD license below:
  9 *
 10 *     Redistribution and use in source and binary forms, with or
 11 *     without modification, are permitted provided that the following
 12 *     conditions are met:
 13 *
 14 *      - Redistributions of source code must retain the above
 15 *        copyright notice, this list of conditions and the following
 16 *        disclaimer.
 17 *
 18 *      - Redistributions in binary form must reproduce the above
 19 *        copyright notice, this list of conditions and the following
 20 *        disclaimer in the documentation and/or other materials
 21 *        provided with the distribution.
 22 *
 23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 30 * SOFTWARE.
 31 *
 32 */
 33#include <linux/kernel.h>
 34#include <linux/in.h>
 35#include <linux/device.h>
 36#include <linux/dmapool.h>
 37#include <linux/ratelimit.h>
 38
 39#include "rds.h"
 40#include "ib.h"
 41
 42/*
 43 * Convert IB-specific error message to RDS error message and call core
 44 * completion handler.
 45 */
 46static void rds_ib_send_complete(struct rds_message *rm,
 47				 int wc_status,
 48				 void (*complete)(struct rds_message *rm, int status))
 49{
 50	int notify_status;
 51
 52	switch (wc_status) {
 53	case IB_WC_WR_FLUSH_ERR:
 54		return;
 55
 56	case IB_WC_SUCCESS:
 57		notify_status = RDS_RDMA_SUCCESS;
 58		break;
 59
 60	case IB_WC_REM_ACCESS_ERR:
 61		notify_status = RDS_RDMA_REMOTE_ERROR;
 62		break;
 63
 64	default:
 65		notify_status = RDS_RDMA_OTHER_ERROR;
 66		break;
 67	}
 68	complete(rm, notify_status);
 69}
 70
 71static void rds_ib_send_unmap_data(struct rds_ib_connection *ic,
 72				   struct rm_data_op *op,
 73				   int wc_status)
 74{
 75	if (op->op_nents)
 76		ib_dma_unmap_sg(ic->i_cm_id->device,
 77				op->op_sg, op->op_nents,
 78				DMA_TO_DEVICE);
 79}
 80
 81static void rds_ib_send_unmap_rdma(struct rds_ib_connection *ic,
 82				   struct rm_rdma_op *op,
 83				   int wc_status)
 84{
 85	if (op->op_mapped) {
 86		ib_dma_unmap_sg(ic->i_cm_id->device,
 87				op->op_sg, op->op_nents,
 88				op->op_write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
 89		op->op_mapped = 0;
 90	}
 91
 92	/* If the user asked for a completion notification on this
 93	 * message, we can implement three different semantics:
 94	 *  1.	Notify when we received the ACK on the RDS message
 95	 *	that was queued with the RDMA. This provides reliable
 96	 *	notification of RDMA status at the expense of a one-way
 97	 *	packet delay.
 98	 *  2.	Notify when the IB stack gives us the completion event for
 99	 *	the RDMA operation.
100	 *  3.	Notify when the IB stack gives us the completion event for
101	 *	the accompanying RDS messages.
102	 * Here, we implement approach #3. To implement approach #2,
103	 * we would need to take an event for the rdma WR. To implement #1,
104	 * don't call rds_rdma_send_complete at all, and fall back to the notify
105	 * handling in the ACK processing code.
106	 *
107	 * Note: There's no need to explicitly sync any RDMA buffers using
108	 * ib_dma_sync_sg_for_cpu - the completion for the RDMA
109	 * operation itself unmapped the RDMA buffers, which takes care
110	 * of synching.
111	 */
112	rds_ib_send_complete(container_of(op, struct rds_message, rdma),
113			     wc_status, rds_rdma_send_complete);
114
115	if (op->op_write)
116		rds_stats_add(s_send_rdma_bytes, op->op_bytes);
117	else
118		rds_stats_add(s_recv_rdma_bytes, op->op_bytes);
119}
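
/*
 * A hedged userspace sketch of opting into the notification discussed
 * above: the application sets RDS_RDMA_NOTIFY_ME in the flags of the
 * rds_rdma_args it passes via an RDS_CMSG_RDMA_ARGS control message,
 * and later reads the result back as an RDS_CMSG_RDMA_STATUS cmsg.
 * Constant and structure names are assumed from <linux/rds.h>; error
 * handling is omitted.
 */
#if 0	/* illustration only */
#include <linux/rds.h>
#include <string.h>
#include <sys/socket.h>

static void send_rdma_with_notify(int rds_fd, struct msghdr *msg,
				  struct rds_rdma_args *args)
{
	char cbuf[CMSG_SPACE(sizeof(*args))];
	struct cmsghdr *cmsg;

	args->flags |= RDS_RDMA_NOTIFY_ME;	/* request a status cmsg */

	msg->msg_control = cbuf;
	msg->msg_controllen = sizeof(cbuf);
	cmsg = CMSG_FIRSTHDR(msg);
	cmsg->cmsg_level = SOL_RDS;
	cmsg->cmsg_type = RDS_CMSG_RDMA_ARGS;
	cmsg->cmsg_len = CMSG_LEN(sizeof(*args));
	memcpy(CMSG_DATA(cmsg), args, sizeof(*args));

	sendmsg(rds_fd, msg, 0);
}
#endif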
120
121static void rds_ib_send_unmap_atomic(struct rds_ib_connection *ic,
122				     struct rm_atomic_op *op,
123				     int wc_status)
124{
125	/* unmap atomic recvbuf */
126	if (op->op_mapped) {
127		ib_dma_unmap_sg(ic->i_cm_id->device, op->op_sg, 1,
128				DMA_FROM_DEVICE);
129		op->op_mapped = 0;
130	}
131
132	rds_ib_send_complete(container_of(op, struct rds_message, atomic),
133			     wc_status, rds_atomic_send_complete);
134
135	if (op->op_type == RDS_ATOMIC_TYPE_CSWP)
136		rds_ib_stats_inc(s_ib_atomic_cswp);
137	else
138		rds_ib_stats_inc(s_ib_atomic_fadd);
139}
140
141/*
142 * Unmap the resources associated with a struct send_work.
143 *
 144 * Returns the rm because the caller, the event handler, needs it and
 145 * currently has no way to obtain it other than by switching on
 146 * wr.opcode, as done here.
147 */
148static struct rds_message *rds_ib_send_unmap_op(struct rds_ib_connection *ic,
149						struct rds_ib_send_work *send,
150						int wc_status)
151{
152	struct rds_message *rm = NULL;
153
154	/* In the error case, wc.opcode sometimes contains garbage */
155	switch (send->s_wr.opcode) {
156	case IB_WR_SEND:
157		if (send->s_op) {
158			rm = container_of(send->s_op, struct rds_message, data);
159			rds_ib_send_unmap_data(ic, send->s_op, wc_status);
160		}
161		break;
162	case IB_WR_RDMA_WRITE:
163	case IB_WR_RDMA_READ:
164		if (send->s_op) {
165			rm = container_of(send->s_op, struct rds_message, rdma);
166			rds_ib_send_unmap_rdma(ic, send->s_op, wc_status);
167		}
168		break;
169	case IB_WR_ATOMIC_FETCH_AND_ADD:
170	case IB_WR_ATOMIC_CMP_AND_SWP:
171		if (send->s_op) {
172			rm = container_of(send->s_op, struct rds_message, atomic);
173			rds_ib_send_unmap_atomic(ic, send->s_op, wc_status);
174		}
175		break;
176	default:
177		printk_ratelimited(KERN_NOTICE
178			       "RDS/IB: %s: unexpected opcode 0x%x in WR!\n",
179			       __func__, send->s_wr.opcode);
180		break;
181	}
182
183	send->s_wr.opcode = 0xdead;
184
185	return rm;
186}
187
188void rds_ib_send_init_ring(struct rds_ib_connection *ic)
189{
190	struct rds_ib_send_work *send;
191	u32 i;
192
193	for (i = 0, send = ic->i_sends; i < ic->i_send_ring.w_nr; i++, send++) {
194		struct ib_sge *sge;
195
196		send->s_op = NULL;
197
198		send->s_wr.wr_id = i;
199		send->s_wr.sg_list = send->s_sge;
200		send->s_wr.ex.imm_data = 0;
201
202		sge = &send->s_sge[0];
 203		sge->addr = ic->i_send_hdrs_dma + (i * sizeof(struct rds_header));
204		sge->length = sizeof(struct rds_header);
205		sge->lkey = ic->i_pd->local_dma_lkey;
206
207		send->s_sge[1].lkey = ic->i_pd->local_dma_lkey;
208	}
209}
210
211void rds_ib_send_clear_ring(struct rds_ib_connection *ic)
212{
213	struct rds_ib_send_work *send;
214	u32 i;
215
216	for (i = 0, send = ic->i_sends; i < ic->i_send_ring.w_nr; i++, send++) {
217		if (send->s_op && send->s_wr.opcode != 0xdead)
218			rds_ib_send_unmap_op(ic, send, IB_WC_WR_FLUSH_ERR);
219	}
220}
221
222/*
223 * The only fast path caller always has a non-zero nr, so we don't
224 * bother testing nr before performing the atomic sub.
225 */
226static void rds_ib_sub_signaled(struct rds_ib_connection *ic, int nr)
227{
228	if ((atomic_sub_return(nr, &ic->i_signaled_sends) == 0) &&
229	    waitqueue_active(&rds_ib_ring_empty_wait))
230		wake_up(&rds_ib_ring_empty_wait);
231	BUG_ON(atomic_read(&ic->i_signaled_sends) < 0);
232}
233
234/*
235 * The _oldest/_free ring operations here race cleanly with the alloc/unalloc
236 * operations performed in the send path.  As the sender allocs and potentially
 237 * unallocs the next free entry in the ring, it doesn't alter which entry is
 238 * the next to be freed, which is what this handler is concerned with.
239 */
240void rds_ib_send_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc)
241{
242	struct rds_message *rm = NULL;
243	struct rds_connection *conn = ic->conn;
244	struct rds_ib_send_work *send;
245	u32 completed;
246	u32 oldest;
247	u32 i = 0;
248	int nr_sig = 0;
249
250
251	rdsdebug("wc wr_id 0x%llx status %u (%s) byte_len %u imm_data %u\n",
252		 (unsigned long long)wc->wr_id, wc->status,
253		 ib_wc_status_msg(wc->status), wc->byte_len,
254		 be32_to_cpu(wc->ex.imm_data));
255	rds_ib_stats_inc(s_ib_tx_cq_event);
256
257	if (wc->wr_id == RDS_IB_ACK_WR_ID) {
258		if (time_after(jiffies, ic->i_ack_queued + HZ / 2))
259			rds_ib_stats_inc(s_ib_tx_stalled);
260		rds_ib_ack_send_complete(ic);
261		return;
262	}
263
264	oldest = rds_ib_ring_oldest(&ic->i_send_ring);
265
266	completed = rds_ib_ring_completed(&ic->i_send_ring, wc->wr_id, oldest);
267
268	for (i = 0; i < completed; i++) {
269		send = &ic->i_sends[oldest];
270		if (send->s_wr.send_flags & IB_SEND_SIGNALED)
271			nr_sig++;
272
273		rm = rds_ib_send_unmap_op(ic, send, wc->status);
274
275		if (time_after(jiffies, send->s_queued + HZ / 2))
276			rds_ib_stats_inc(s_ib_tx_stalled);
277
278		if (send->s_op) {
279			if (send->s_op == rm->m_final_op) {
280				/* If anyone waited for this message to get
281				 * flushed out, wake them up now
282				 */
283				rds_message_unmapped(rm);
284			}
285			rds_message_put(rm);
286			send->s_op = NULL;
287		}
288
289		oldest = (oldest + 1) % ic->i_send_ring.w_nr;
290	}
291
292	rds_ib_ring_free(&ic->i_send_ring, completed);
293	rds_ib_sub_signaled(ic, nr_sig);
294	nr_sig = 0;
295
296	if (test_and_clear_bit(RDS_LL_SEND_FULL, &conn->c_flags) ||
297	    test_bit(0, &conn->c_map_queued))
298		queue_delayed_work(rds_wq, &conn->c_send_w, 0);
299
300	/* We expect errors as the qp is drained during shutdown */
301	if (wc->status != IB_WC_SUCCESS && rds_conn_up(conn)) {
302		rds_ib_conn_error(conn, "send completion on %pI4 had status %u (%s), disconnecting and reconnecting\n",
303				  &conn->c_faddr, wc->status,
 304				  ib_wc_status_msg(wc->status));
305	}
306}
307
308/*
309 * This is the main function for allocating credits when sending
310 * messages.
311 *
312 * Conceptually, we have two counters:
313 *  -	send credits: this tells us how many WRs we're allowed
 314 *	to submit without overrunning the receiver's queue. For
315 *	each SEND WR we post, we decrement this by one.
316 *
317 *  -	posted credits: this tells us how many WRs we recently
318 *	posted to the receive queue. This value is transferred
 319 *	to the peer as a "credit update" in an RDS header field.
320 *	Every time we transmit credits to the peer, we subtract
321 *	the amount of transferred credits from this counter.
322 *
323 * It is essential that we avoid situations where both sides have
324 * exhausted their send credits, and are unable to send new credits
325 * to the peer. We achieve this by requiring that we send at least
326 * one credit update to the peer before exhausting our credits.
327 * When new credits arrive, we subtract one credit that is withheld
328 * until we've posted new buffers and are ready to transmit these
329 * credits (see rds_ib_send_add_credits below).
330 *
331 * The RDS send code is essentially single-threaded; rds_send_xmit
332 * sets RDS_IN_XMIT to ensure exclusive access to the send ring.
333 * However, the ACK sending code is independent and can race with
334 * message SENDs.
335 *
336 * In the send path, we need to update the counters for send credits
337 * and the counter of posted buffers atomically - when we use the
338 * last available credit, we cannot allow another thread to race us
339 * and grab the posted credits counter.  Hence, we have to use a
340 * spinlock to protect the credit counter, or use atomics.
341 *
342 * Spinlocks shared between the send and the receive path are bad,
343 * because they create unnecessary delays. An early implementation
344 * using a spinlock showed a 5% degradation in throughput at some
345 * loads.
346 *
347 * This implementation avoids spinlocks completely, putting both
348 * counters into a single atomic, and updating that atomic using
349 * atomic_add (in the receive path, when receiving fresh credits),
350 * and using atomic_cmpxchg when updating the two counters.
351 */
352int rds_ib_send_grab_credits(struct rds_ib_connection *ic,
353			     u32 wanted, u32 *adv_credits, int need_posted, int max_posted)
354{
355	unsigned int avail, posted, got = 0, advertise;
356	long oldval, newval;
357
358	*adv_credits = 0;
359	if (!ic->i_flowctl)
360		return wanted;
361
362try_again:
363	advertise = 0;
364	oldval = newval = atomic_read(&ic->i_credits);
365	posted = IB_GET_POST_CREDITS(oldval);
366	avail = IB_GET_SEND_CREDITS(oldval);
367
368	rdsdebug("wanted=%u credits=%u posted=%u\n",
369			wanted, avail, posted);
370
371	/* The last credit must be used to send a credit update. */
372	if (avail && !posted)
373		avail--;
374
375	if (avail < wanted) {
376		struct rds_connection *conn = ic->i_cm_id->context;
377
378		/* Oops, there aren't that many credits left! */
379		set_bit(RDS_LL_SEND_FULL, &conn->c_flags);
380		got = avail;
381	} else {
382		/* Sometimes you get what you want, lalala. */
383		got = wanted;
384	}
385	newval -= IB_SET_SEND_CREDITS(got);
386
387	/*
 388	 * If need_posted is non-zero, then the caller wants the posted
 389	 * credits returned regardless of whether any send credits are
390	 * available.
391	 */
392	if (posted && (got || need_posted)) {
393		advertise = min_t(unsigned int, posted, max_posted);
394		newval -= IB_SET_POST_CREDITS(advertise);
395	}
396
397	/* Finally bill everything */
398	if (atomic_cmpxchg(&ic->i_credits, oldval, newval) != oldval)
399		goto try_again;
400
401	*adv_credits = advertise;
402	return got;
403}
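
/*
 * Illustration of how both counters share the single atomic_t, assuming
 * the packing used by the IB_SET_xxx/IB_GET_xxx helpers in net/rds/ib.h
 * (posted credits in the high 16 bits, send credits in the low 16 bits):
 */
#if 0	/* illustration only */
static void credit_packing_example(struct rds_ib_connection *ic)
{
	int word = IB_SET_SEND_CREDITS(32) | IB_SET_POST_CREDITS(8);

	atomic_set(&ic->i_credits, word);
	/* IB_GET_SEND_CREDITS(word) == 32, IB_GET_POST_CREDITS(word) == 8.
	 * A receive-path atomic_add(IB_SET_SEND_CREDITS(n), &ic->i_credits)
	 * cannot carry into the posted half while send credits stay below
	 * 65536, which the WARN_ON in rds_ib_send_add_credits guards.
	 */
}
#endif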
404
405void rds_ib_send_add_credits(struct rds_connection *conn, unsigned int credits)
406{
407	struct rds_ib_connection *ic = conn->c_transport_data;
408
409	if (credits == 0)
410		return;
411
412	rdsdebug("credits=%u current=%u%s\n",
413			credits,
414			IB_GET_SEND_CREDITS(atomic_read(&ic->i_credits)),
415			test_bit(RDS_LL_SEND_FULL, &conn->c_flags) ? ", ll_send_full" : "");
416
417	atomic_add(IB_SET_SEND_CREDITS(credits), &ic->i_credits);
418	if (test_and_clear_bit(RDS_LL_SEND_FULL, &conn->c_flags))
419		queue_delayed_work(rds_wq, &conn->c_send_w, 0);
420
421	WARN_ON(IB_GET_SEND_CREDITS(credits) >= 16384);
422
423	rds_ib_stats_inc(s_ib_rx_credit_updates);
424}
425
426void rds_ib_advertise_credits(struct rds_connection *conn, unsigned int posted)
427{
428	struct rds_ib_connection *ic = conn->c_transport_data;
429
430	if (posted == 0)
431		return;
432
433	atomic_add(IB_SET_POST_CREDITS(posted), &ic->i_credits);
434
435	/* Decide whether to send an update to the peer now.
436	 * If we would send a credit update for every single buffer we
437	 * post, we would end up with an ACK storm (ACK arrives,
438	 * consumes buffer, we refill the ring, send ACK to remote
439	 * advertising the newly posted buffer... ad inf)
440	 *
441	 * Performance pretty much depends on how often we send
442	 * credit updates - too frequent updates mean lots of ACKs.
443	 * Too infrequent updates, and the peer will run out of
 444 * credits and have to throttle.
445	 * For the time being, 16 seems to be a good compromise.
446	 */
447	if (IB_GET_POST_CREDITS(atomic_read(&ic->i_credits)) >= 16)
448		set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
449}
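
/*
 * Worked example of the threshold above: only once 16 freshly posted
 * receive buffers have accumulated is IB_ACK_REQUESTED set, so the next
 * outgoing ACK advertises the whole batch as a single credit update
 * rather than one update (and one ACK exchange) per posted buffer.
 */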
450
451static inline int rds_ib_set_wr_signal_state(struct rds_ib_connection *ic,
452					     struct rds_ib_send_work *send,
453					     bool notify)
454{
455	/*
456	 * We want to delay signaling completions just enough to get
457	 * the batching benefits but not so much that we create dead time
458	 * on the wire.
459	 */
460	if (ic->i_unsignaled_wrs-- == 0 || notify) {
461		ic->i_unsignaled_wrs = rds_ib_sysctl_max_unsig_wrs;
462		send->s_wr.send_flags |= IB_SEND_SIGNALED;
463		return 1;
464	}
465	return 0;
466}
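
/*
 * Example of the batching this gives: with rds_ib_sysctl_max_unsig_wrs
 * set to 16, the post-decrement above leaves WRs 1..16 unsignaled and
 * signals the 17th, so a steady stream of sends generates roughly one
 * send completion per 17 work requests unless a caller passes notify.
 */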
467
468/*
469 * This can be called multiple times for a given message.  The first time
470 * we see a message we map its scatterlist into the IB device so that
471 * we can provide that mapped address to the IB scatter gather entries
472 * in the IB work requests.  We translate the scatterlist into a series
473 * of work requests that fragment the message.  These work requests complete
474 * in order so we pass ownership of the message to the completion handler
475 * once we send the final fragment.
476 *
477 * The RDS core uses the c_send_lock to only enter this function once
478 * per connection.  This makes sure that the tx ring alloc/unalloc pairs
479 * don't get out of sync and confuse the ring.
480 */
481int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
482		unsigned int hdr_off, unsigned int sg, unsigned int off)
483{
484	struct rds_ib_connection *ic = conn->c_transport_data;
485	struct ib_device *dev = ic->i_cm_id->device;
486	struct rds_ib_send_work *send = NULL;
487	struct rds_ib_send_work *first;
488	struct rds_ib_send_work *prev;
489	struct ib_send_wr *failed_wr;
490	struct scatterlist *scat;
491	u32 pos;
492	u32 i;
493	u32 work_alloc;
494	u32 credit_alloc = 0;
495	u32 posted;
496	u32 adv_credits = 0;
497	int send_flags = 0;
498	int bytes_sent = 0;
499	int ret;
500	int flow_controlled = 0;
501	int nr_sig = 0;
502
503	BUG_ON(off % RDS_FRAG_SIZE);
504	BUG_ON(hdr_off != 0 && hdr_off != sizeof(struct rds_header));
505
506	/* Do not send cong updates to IB loopback */
507	if (conn->c_loopback
508	    && rm->m_inc.i_hdr.h_flags & RDS_FLAG_CONG_BITMAP) {
509		rds_cong_map_updated(conn->c_fcong, ~(u64) 0);
510		scat = &rm->data.op_sg[sg];
511		ret = max_t(int, RDS_CONG_MAP_BYTES, scat->length);
512		return sizeof(struct rds_header) + ret;
513	}
514
515	/* FIXME we may overallocate here */
516	if (be32_to_cpu(rm->m_inc.i_hdr.h_len) == 0)
517		i = 1;
518	else
519		i = ceil(be32_to_cpu(rm->m_inc.i_hdr.h_len), RDS_FRAG_SIZE);
520
521	work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, i, &pos);
522	if (work_alloc == 0) {
523		set_bit(RDS_LL_SEND_FULL, &conn->c_flags);
524		rds_ib_stats_inc(s_ib_tx_ring_full);
525		ret = -ENOMEM;
526		goto out;
527	}
528
529	if (ic->i_flowctl) {
530		credit_alloc = rds_ib_send_grab_credits(ic, work_alloc, &posted, 0, RDS_MAX_ADV_CREDIT);
531		adv_credits += posted;
532		if (credit_alloc < work_alloc) {
533			rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - credit_alloc);
534			work_alloc = credit_alloc;
535			flow_controlled = 1;
536		}
537		if (work_alloc == 0) {
538			set_bit(RDS_LL_SEND_FULL, &conn->c_flags);
539			rds_ib_stats_inc(s_ib_tx_throttle);
540			ret = -ENOMEM;
541			goto out;
542		}
543	}
544
545	/* map the message the first time we see it */
546	if (!ic->i_data_op) {
547		if (rm->data.op_nents) {
548			rm->data.op_count = ib_dma_map_sg(dev,
549							  rm->data.op_sg,
550							  rm->data.op_nents,
551							  DMA_TO_DEVICE);
552			rdsdebug("ic %p mapping rm %p: %d\n", ic, rm, rm->data.op_count);
553			if (rm->data.op_count == 0) {
554				rds_ib_stats_inc(s_ib_tx_sg_mapping_failure);
555				rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
556				ret = -ENOMEM; /* XXX ? */
557				goto out;
558			}
559		} else {
560			rm->data.op_count = 0;
561		}
562
563		rds_message_addref(rm);
564		rm->data.op_dmasg = 0;
565		rm->data.op_dmaoff = 0;
566		ic->i_data_op = &rm->data;
567
568		/* Finalize the header */
569		if (test_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags))
570			rm->m_inc.i_hdr.h_flags |= RDS_FLAG_ACK_REQUIRED;
571		if (test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags))
572			rm->m_inc.i_hdr.h_flags |= RDS_FLAG_RETRANSMITTED;
573
 574		/* If it has an RDMA op, tell the peer we did it. This is
575		 * used by the peer to release use-once RDMA MRs. */
576		if (rm->rdma.op_active) {
577			struct rds_ext_header_rdma ext_hdr;
578
579			ext_hdr.h_rdma_rkey = cpu_to_be32(rm->rdma.op_rkey);
580			rds_message_add_extension(&rm->m_inc.i_hdr,
581					RDS_EXTHDR_RDMA, &ext_hdr, sizeof(ext_hdr));
582		}
583		if (rm->m_rdma_cookie) {
584			rds_message_add_rdma_dest_extension(&rm->m_inc.i_hdr,
585					rds_rdma_cookie_key(rm->m_rdma_cookie),
586					rds_rdma_cookie_offset(rm->m_rdma_cookie));
587		}
588
589		/* Note - rds_ib_piggyb_ack clears the ACK_REQUIRED bit, so
590		 * we should not do this unless we have a chance of at least
591		 * sticking the header into the send ring. Which is why we
592		 * should call rds_ib_ring_alloc first. */
593		rm->m_inc.i_hdr.h_ack = cpu_to_be64(rds_ib_piggyb_ack(ic));
594		rds_message_make_checksum(&rm->m_inc.i_hdr);
595
596		/*
597		 * Update adv_credits since we reset the ACK_REQUIRED bit.
598		 */
599		if (ic->i_flowctl) {
600			rds_ib_send_grab_credits(ic, 0, &posted, 1, RDS_MAX_ADV_CREDIT - adv_credits);
601			adv_credits += posted;
602			BUG_ON(adv_credits > 255);
603		}
604	}
605
606	/* Sometimes you want to put a fence between an RDMA
607	 * READ and the following SEND.
608	 * We could either do this all the time
609	 * or when requested by the user. Right now, we let
610	 * the application choose.
611	 */
612	if (rm->rdma.op_active && rm->rdma.op_fence)
613		send_flags = IB_SEND_FENCE;
614
615	/* Each frag gets a header. Msgs may be 0 bytes */
616	send = &ic->i_sends[pos];
617	first = send;
618	prev = NULL;
619	scat = &ic->i_data_op->op_sg[rm->data.op_dmasg];
620	i = 0;
621	do {
622		unsigned int len = 0;
623
624		/* Set up the header */
625		send->s_wr.send_flags = send_flags;
626		send->s_wr.opcode = IB_WR_SEND;
627		send->s_wr.num_sge = 1;
628		send->s_wr.next = NULL;
629		send->s_queued = jiffies;
630		send->s_op = NULL;
631
632		send->s_sge[0].addr = ic->i_send_hdrs_dma
633			+ (pos * sizeof(struct rds_header));
 634		send->s_sge[0].length = sizeof(struct rds_header);
635
636		memcpy(&ic->i_send_hdrs[pos], &rm->m_inc.i_hdr, sizeof(struct rds_header));
637
638		/* Set up the data, if present */
639		if (i < work_alloc
640		    && scat != &rm->data.op_sg[rm->data.op_count]) {
641			len = min(RDS_FRAG_SIZE,
642				ib_sg_dma_len(dev, scat) - rm->data.op_dmaoff);
643			send->s_wr.num_sge = 2;
644
645			send->s_sge[1].addr = ib_sg_dma_address(dev, scat);
646			send->s_sge[1].addr += rm->data.op_dmaoff;
 647			send->s_sge[1].length = len;
648
649			bytes_sent += len;
650			rm->data.op_dmaoff += len;
651			if (rm->data.op_dmaoff == ib_sg_dma_len(dev, scat)) {
652				scat++;
653				rm->data.op_dmasg++;
654				rm->data.op_dmaoff = 0;
655			}
656		}
657
658		rds_ib_set_wr_signal_state(ic, send, 0);
659
660		/*
661		 * Always signal the last one if we're stopping due to flow control.
662		 */
663		if (ic->i_flowctl && flow_controlled && i == (work_alloc-1))
 664			send->s_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED;
665
666		if (send->s_wr.send_flags & IB_SEND_SIGNALED)
667			nr_sig++;
668
669		rdsdebug("send %p wr %p num_sge %u next %p\n", send,
670			 &send->s_wr, send->s_wr.num_sge, send->s_wr.next);
671
672		if (ic->i_flowctl && adv_credits) {
673			struct rds_header *hdr = &ic->i_send_hdrs[pos];
674
675			/* add credit and redo the header checksum */
676			hdr->h_credit = adv_credits;
677			rds_message_make_checksum(hdr);
678			adv_credits = 0;
679			rds_ib_stats_inc(s_ib_tx_credit_updates);
 680		}
681
682		if (prev)
683			prev->s_wr.next = &send->s_wr;
684		prev = send;
685
686		pos = (pos + 1) % ic->i_send_ring.w_nr;
687		send = &ic->i_sends[pos];
688		i++;
689
690	} while (i < work_alloc
691		 && scat != &rm->data.op_sg[rm->data.op_count]);
692
693	/* Account the RDS header in the number of bytes we sent, but just once.
694	 * The caller has no concept of fragmentation. */
695	if (hdr_off == 0)
696		bytes_sent += sizeof(struct rds_header);
697
698	/* if we finished the message then send completion owns it */
699	if (scat == &rm->data.op_sg[rm->data.op_count]) {
700		prev->s_op = ic->i_data_op;
701		prev->s_wr.send_flags |= IB_SEND_SOLICITED;
702		if (!(prev->s_wr.send_flags & IB_SEND_SIGNALED)) {
703			ic->i_unsignaled_wrs = rds_ib_sysctl_max_unsig_wrs;
704			prev->s_wr.send_flags |= IB_SEND_SIGNALED;
705			nr_sig++;
706		}
707		ic->i_data_op = NULL;
708	}
709
710	/* Put back wrs & credits we didn't use */
711	if (i < work_alloc) {
712		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - i);
713		work_alloc = i;
714	}
715	if (ic->i_flowctl && i < credit_alloc)
716		rds_ib_send_add_credits(conn, credit_alloc - i);
717
718	if (nr_sig)
719		atomic_add(nr_sig, &ic->i_signaled_sends);
720
721	/* XXX need to worry about failed_wr and partial sends. */
722	failed_wr = &first->s_wr;
723	ret = ib_post_send(ic->i_cm_id->qp, &first->s_wr, &failed_wr);
724	rdsdebug("ic %p first %p (wr %p) ret %d wr %p\n", ic,
725		 first, &first->s_wr, ret, failed_wr);
726	BUG_ON(failed_wr != &first->s_wr);
727	if (ret) {
728		printk(KERN_WARNING "RDS/IB: ib_post_send to %pI4 "
729		       "returned %d\n", &conn->c_faddr, ret);
730		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
731		rds_ib_sub_signaled(ic, nr_sig);
732		if (prev->s_op) {
733			ic->i_data_op = prev->s_op;
734			prev->s_op = NULL;
735		}
736
737		rds_ib_conn_error(ic->conn, "ib_post_send failed\n");
738		goto out;
739	}
740
741	ret = bytes_sent;
742out:
743	BUG_ON(adv_credits);
744	return ret;
745}
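
/*
 * Worked example of the fragmentation above: with RDS_FRAG_SIZE == 4096,
 * a 10000 byte message needs ceil(10000 / 4096) == 3 SEND work requests.
 * Each WR carries the rds_header sge plus at most one data sge of up to
 * RDS_FRAG_SIZE bytes, and only the last WR takes s_op, so completing
 * the final fragment hands the whole message to the completion handler.
 */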
746
747/*
748 * Issue atomic operation.
749 * A simplified version of the rdma case, we always map 1 SG, and
750 * only 8 bytes, for the return value from the atomic operation.
751 */
752int rds_ib_xmit_atomic(struct rds_connection *conn, struct rm_atomic_op *op)
753{
754	struct rds_ib_connection *ic = conn->c_transport_data;
755	struct rds_ib_send_work *send = NULL;
756	struct ib_send_wr *failed_wr;
757	struct rds_ib_device *rds_ibdev;
758	u32 pos;
759	u32 work_alloc;
760	int ret;
761	int nr_sig = 0;
762
763	rds_ibdev = ib_get_client_data(ic->i_cm_id->device, &rds_ib_client);
764
765	work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, 1, &pos);
766	if (work_alloc != 1) {
767		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
768		rds_ib_stats_inc(s_ib_tx_ring_full);
769		ret = -ENOMEM;
770		goto out;
771	}
772
773	/* address of send request in ring */
774	send = &ic->i_sends[pos];
775	send->s_queued = jiffies;
776
777	if (op->op_type == RDS_ATOMIC_TYPE_CSWP) {
778		send->s_atomic_wr.wr.opcode = IB_WR_MASKED_ATOMIC_CMP_AND_SWP;
779		send->s_atomic_wr.compare_add = op->op_m_cswp.compare;
780		send->s_atomic_wr.swap = op->op_m_cswp.swap;
781		send->s_atomic_wr.compare_add_mask = op->op_m_cswp.compare_mask;
782		send->s_atomic_wr.swap_mask = op->op_m_cswp.swap_mask;
783	} else { /* FADD */
784		send->s_atomic_wr.wr.opcode = IB_WR_MASKED_ATOMIC_FETCH_AND_ADD;
785		send->s_atomic_wr.compare_add = op->op_m_fadd.add;
786		send->s_atomic_wr.swap = 0;
787		send->s_atomic_wr.compare_add_mask = op->op_m_fadd.nocarry_mask;
788		send->s_atomic_wr.swap_mask = 0;
 789	}
790	nr_sig = rds_ib_set_wr_signal_state(ic, send, op->op_notify);
791	send->s_atomic_wr.wr.num_sge = 1;
792	send->s_atomic_wr.wr.next = NULL;
793	send->s_atomic_wr.remote_addr = op->op_remote_addr;
794	send->s_atomic_wr.rkey = op->op_rkey;
795	send->s_op = op;
796	rds_message_addref(container_of(send->s_op, struct rds_message, atomic));
797
798	/* map 8 byte retval buffer to the device */
799	ret = ib_dma_map_sg(ic->i_cm_id->device, op->op_sg, 1, DMA_FROM_DEVICE);
800	rdsdebug("ic %p mapping atomic op %p. mapped %d pg\n", ic, op, ret);
801	if (ret != 1) {
802		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
803		rds_ib_stats_inc(s_ib_tx_sg_mapping_failure);
804		ret = -ENOMEM; /* XXX ? */
805		goto out;
806	}
807
808	/* Convert our struct scatterlist to struct ib_sge */
809	send->s_sge[0].addr = ib_sg_dma_address(ic->i_cm_id->device, op->op_sg);
810	send->s_sge[0].length = ib_sg_dma_len(ic->i_cm_id->device, op->op_sg);
811	send->s_sge[0].lkey = ic->i_pd->local_dma_lkey;
812
813	rdsdebug("rva %Lx rpa %Lx len %u\n", op->op_remote_addr,
814		 send->s_sge[0].addr, send->s_sge[0].length);
815
816	if (nr_sig)
817		atomic_add(nr_sig, &ic->i_signaled_sends);
818
819	failed_wr = &send->s_atomic_wr.wr;
820	ret = ib_post_send(ic->i_cm_id->qp, &send->s_atomic_wr.wr, &failed_wr);
821	rdsdebug("ic %p send %p (wr %p) ret %d wr %p\n", ic,
822		 send, &send->s_atomic_wr, ret, failed_wr);
823	BUG_ON(failed_wr != &send->s_atomic_wr.wr);
824	if (ret) {
825		printk(KERN_WARNING "RDS/IB: atomic ib_post_send to %pI4 "
826		       "returned %d\n", &conn->c_faddr, ret);
827		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
828		rds_ib_sub_signaled(ic, nr_sig);
829		goto out;
830	}
831
832	if (unlikely(failed_wr != &send->s_atomic_wr.wr)) {
833		printk(KERN_WARNING "RDS/IB: atomic ib_post_send() rc=%d, but failed_wqe updated!\n", ret);
834		BUG_ON(failed_wr != &send->s_atomic_wr.wr);
835	}
836
837out:
838	return ret;
839}
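
/*
 * A hedged userspace sketch of driving the atomic path above: the 8 byte
 * prior value lands in the buffer named by local_addr, which becomes the
 * single scatterlist entry that rds_ib_xmit_atomic maps DMA_FROM_DEVICE.
 * Structure layout and constant names are assumed from <linux/rds.h>.
 */
#if 0	/* illustration only */
#include <linux/rds.h>
#include <stdint.h>
#include <string.h>
#include <sys/socket.h>

static void queue_cswp(int rds_fd, struct msghdr *msg, uint64_t remote_addr,
		       rds_rdma_cookie_t cookie, uint64_t *prior_val)
{
	struct rds_atomic_args args = {
		.cookie      = cookie,
		.local_addr  = (uint64_t)(unsigned long)prior_val,
		.remote_addr = remote_addr,
		.cswp	     = { .compare = 0, .swap = 1 },
	};
	char cbuf[CMSG_SPACE(sizeof(args))];
	struct cmsghdr *cmsg;

	msg->msg_control = cbuf;
	msg->msg_controllen = sizeof(cbuf);
	cmsg = CMSG_FIRSTHDR(msg);
	cmsg->cmsg_level = SOL_RDS;
	cmsg->cmsg_type = RDS_CMSG_ATOMIC_CSWP;
	cmsg->cmsg_len = CMSG_LEN(sizeof(args));
	memcpy(CMSG_DATA(cmsg), &args, sizeof(args));

	sendmsg(rds_fd, msg, 0);
}
#endif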
840
841int rds_ib_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op)
842{
843	struct rds_ib_connection *ic = conn->c_transport_data;
844	struct rds_ib_send_work *send = NULL;
845	struct rds_ib_send_work *first;
846	struct rds_ib_send_work *prev;
847	struct ib_send_wr *failed_wr;
848	struct scatterlist *scat;
849	unsigned long len;
850	u64 remote_addr = op->op_remote_addr;
851	u32 max_sge = ic->rds_ibdev->max_sge;
852	u32 pos;
853	u32 work_alloc;
854	u32 i;
855	u32 j;
856	int sent;
857	int ret;
858	int num_sge;
 859	int nr_sig = 0;
860
861	/* map the op the first time we see it */
862	if (!op->op_mapped) {
863		op->op_count = ib_dma_map_sg(ic->i_cm_id->device,
864					     op->op_sg, op->op_nents, (op->op_write) ?
865					     DMA_TO_DEVICE : DMA_FROM_DEVICE);
866		rdsdebug("ic %p mapping op %p: %d\n", ic, op, op->op_count);
867		if (op->op_count == 0) {
868			rds_ib_stats_inc(s_ib_tx_sg_mapping_failure);
869			ret = -ENOMEM; /* XXX ? */
 870			goto out;
871		}
872
 873		op->op_mapped = 1;
874	}
875
876	/*
 877	 * Instead of knowing how to return a partial rdma read/write, we insist that there
878	 * be enough work requests to send the entire message.
879	 */
880	i = ceil(op->op_count, max_sge);
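	/* e.g. op_count == 40 mapped entries with max_sge == 16 gives
	 * ceil(40 / 16) == 3 WRs (16 + 16 + 8 sges), all allocated up
	 * front because a partially posted RDMA cannot be resumed.
	 */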
881
882	work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, i, &pos);
883	if (work_alloc != i) {
884		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
885		rds_ib_stats_inc(s_ib_tx_ring_full);
886		ret = -ENOMEM;
887		goto out;
888	}
889
890	send = &ic->i_sends[pos];
891	first = send;
892	prev = NULL;
893	scat = &op->op_sg[0];
894	sent = 0;
895	num_sge = op->op_count;
896
897	for (i = 0; i < work_alloc && scat != &op->op_sg[op->op_count]; i++) {
898		send->s_wr.send_flags = 0;
899		send->s_queued = jiffies;
900		send->s_op = NULL;
901
 902		nr_sig += rds_ib_set_wr_signal_state(ic, send, op->op_notify);
903
904		send->s_wr.opcode = op->op_write ? IB_WR_RDMA_WRITE : IB_WR_RDMA_READ;
905		send->s_rdma_wr.remote_addr = remote_addr;
906		send->s_rdma_wr.rkey = op->op_rkey;
907
908		if (num_sge > max_sge) {
909			send->s_rdma_wr.wr.num_sge = max_sge;
910			num_sge -= max_sge;
911		} else {
912			send->s_rdma_wr.wr.num_sge = num_sge;
913		}
914
915		send->s_rdma_wr.wr.next = NULL;
916
917		if (prev)
918			prev->s_rdma_wr.wr.next = &send->s_rdma_wr.wr;
919
920		for (j = 0; j < send->s_rdma_wr.wr.num_sge &&
921		     scat != &op->op_sg[op->op_count]; j++) {
922			len = ib_sg_dma_len(ic->i_cm_id->device, scat);
923			send->s_sge[j].addr =
 924				 ib_sg_dma_address(ic->i_cm_id->device, scat);
925			send->s_sge[j].length = len;
926			send->s_sge[j].lkey = ic->i_pd->local_dma_lkey;
927
928			sent += len;
929			rdsdebug("ic %p sent %d remote_addr %llu\n", ic, sent, remote_addr);
930
 931			remote_addr += len;
932			scat++;
933		}
934
935		rdsdebug("send %p wr %p num_sge %u next %p\n", send,
936			&send->s_rdma_wr.wr,
937			send->s_rdma_wr.wr.num_sge,
938			send->s_rdma_wr.wr.next);
939
940		prev = send;
941		if (++send == &ic->i_sends[ic->i_send_ring.w_nr])
942			send = ic->i_sends;
943	}
944
945	/* give a reference to the last op */
946	if (scat == &op->op_sg[op->op_count]) {
947		prev->s_op = op;
948		rds_message_addref(container_of(op, struct rds_message, rdma));
949	}
950
951	if (i < work_alloc) {
952		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - i);
953		work_alloc = i;
954	}
955
956	if (nr_sig)
957		atomic_add(nr_sig, &ic->i_signaled_sends);
958
959	failed_wr = &first->s_rdma_wr.wr;
960	ret = ib_post_send(ic->i_cm_id->qp, &first->s_rdma_wr.wr, &failed_wr);
961	rdsdebug("ic %p first %p (wr %p) ret %d wr %p\n", ic,
962		 first, &first->s_rdma_wr.wr, ret, failed_wr);
963	BUG_ON(failed_wr != &first->s_rdma_wr.wr);
964	if (ret) {
965		printk(KERN_WARNING "RDS/IB: rdma ib_post_send to %pI4 "
966		       "returned %d\n", &conn->c_faddr, ret);
967		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
968		rds_ib_sub_signaled(ic, nr_sig);
969		goto out;
970	}
971
972	if (unlikely(failed_wr != &first->s_rdma_wr.wr)) {
973		printk(KERN_WARNING "RDS/IB: ib_post_send() rc=%d, but failed_wqe updated!\n", ret);
974		BUG_ON(failed_wr != &first->s_rdma_wr.wr);
975	}
976
977
978out:
979	return ret;
980}
981
982void rds_ib_xmit_complete(struct rds_connection *conn)
 983{
984	struct rds_ib_connection *ic = conn->c_transport_data;
985
986	/* We may have a pending ACK or window update we were unable
987	 * to send previously (due to flow control). Try again. */
988	rds_ib_attempt_ack(ic);
989}
v6.13.7
   1/*
   2 * Copyright (c) 2006, 2019 Oracle and/or its affiliates. All rights reserved.
   3 *
   4 * This software is available to you under a choice of one of two
   5 * licenses.  You may choose to be licensed under the terms of the GNU
   6 * General Public License (GPL) Version 2, available from the file
   7 * COPYING in the main directory of this source tree, or the
   8 * OpenIB.org BSD license below:
   9 *
  10 *     Redistribution and use in source and binary forms, with or
  11 *     without modification, are permitted provided that the following
  12 *     conditions are met:
  13 *
  14 *      - Redistributions of source code must retain the above
  15 *        copyright notice, this list of conditions and the following
  16 *        disclaimer.
  17 *
  18 *      - Redistributions in binary form must reproduce the above
  19 *        copyright notice, this list of conditions and the following
  20 *        disclaimer in the documentation and/or other materials
  21 *        provided with the distribution.
  22 *
  23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  30 * SOFTWARE.
  31 *
  32 */
  33#include <linux/kernel.h>
  34#include <linux/in.h>
  35#include <linux/device.h>
  36#include <linux/dmapool.h>
  37#include <linux/ratelimit.h>
  38
  39#include "rds_single_path.h"
  40#include "rds.h"
  41#include "ib.h"
  42#include "ib_mr.h"
  43
  44/*
  45 * Convert IB-specific error message to RDS error message and call core
  46 * completion handler.
  47 */
  48static void rds_ib_send_complete(struct rds_message *rm,
  49				 int wc_status,
  50				 void (*complete)(struct rds_message *rm, int status))
  51{
  52	int notify_status;
  53
  54	switch (wc_status) {
  55	case IB_WC_WR_FLUSH_ERR:
  56		return;
  57
  58	case IB_WC_SUCCESS:
  59		notify_status = RDS_RDMA_SUCCESS;
  60		break;
  61
  62	case IB_WC_REM_ACCESS_ERR:
  63		notify_status = RDS_RDMA_REMOTE_ERROR;
  64		break;
  65
  66	default:
  67		notify_status = RDS_RDMA_OTHER_ERROR;
  68		break;
  69	}
  70	complete(rm, notify_status);
  71}
  72
  73static void rds_ib_send_unmap_data(struct rds_ib_connection *ic,
  74				   struct rm_data_op *op,
  75				   int wc_status)
  76{
  77	if (op->op_nents)
  78		ib_dma_unmap_sg(ic->i_cm_id->device,
  79				op->op_sg, op->op_nents,
  80				DMA_TO_DEVICE);
  81}
  82
  83static void rds_ib_send_unmap_rdma(struct rds_ib_connection *ic,
  84				   struct rm_rdma_op *op,
  85				   int wc_status)
  86{
  87	if (op->op_mapped) {
  88		ib_dma_unmap_sg(ic->i_cm_id->device,
  89				op->op_sg, op->op_nents,
  90				op->op_write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
  91		op->op_mapped = 0;
  92	}
  93
  94	/* If the user asked for a completion notification on this
  95	 * message, we can implement three different semantics:
  96	 *  1.	Notify when we received the ACK on the RDS message
  97	 *	that was queued with the RDMA. This provides reliable
  98	 *	notification of RDMA status at the expense of a one-way
  99	 *	packet delay.
 100	 *  2.	Notify when the IB stack gives us the completion event for
 101	 *	the RDMA operation.
 102	 *  3.	Notify when the IB stack gives us the completion event for
 103	 *	the accompanying RDS messages.
 104	 * Here, we implement approach #3. To implement approach #2,
 105	 * we would need to take an event for the rdma WR. To implement #1,
 106	 * don't call rds_rdma_send_complete at all, and fall back to the notify
 107	 * handling in the ACK processing code.
 108	 *
 109	 * Note: There's no need to explicitly sync any RDMA buffers using
 110	 * ib_dma_sync_sg_for_cpu - the completion for the RDMA
 111	 * operation itself unmapped the RDMA buffers, which takes care
 112	 * of synching.
 113	 */
 114	rds_ib_send_complete(container_of(op, struct rds_message, rdma),
 115			     wc_status, rds_rdma_send_complete);
 116
 117	if (op->op_write)
 118		rds_stats_add(s_send_rdma_bytes, op->op_bytes);
 119	else
 120		rds_stats_add(s_recv_rdma_bytes, op->op_bytes);
 121}
 122
 123static void rds_ib_send_unmap_atomic(struct rds_ib_connection *ic,
 124				     struct rm_atomic_op *op,
 125				     int wc_status)
 126{
 127	/* unmap atomic recvbuf */
 128	if (op->op_mapped) {
 129		ib_dma_unmap_sg(ic->i_cm_id->device, op->op_sg, 1,
 130				DMA_FROM_DEVICE);
 131		op->op_mapped = 0;
 132	}
 133
 134	rds_ib_send_complete(container_of(op, struct rds_message, atomic),
 135			     wc_status, rds_atomic_send_complete);
 136
 137	if (op->op_type == RDS_ATOMIC_TYPE_CSWP)
 138		rds_ib_stats_inc(s_ib_atomic_cswp);
 139	else
 140		rds_ib_stats_inc(s_ib_atomic_fadd);
 141}
 142
 143/*
 144 * Unmap the resources associated with a struct send_work.
 145 *
 146 * Returns the rm because the caller, the event handler, needs it and
 147 * currently has no way to obtain it other than by switching on
 148 * wr.opcode, as done here.
 149 */
 150static struct rds_message *rds_ib_send_unmap_op(struct rds_ib_connection *ic,
 151						struct rds_ib_send_work *send,
 152						int wc_status)
 153{
 154	struct rds_message *rm = NULL;
 155
 156	/* In the error case, wc.opcode sometimes contains garbage */
 157	switch (send->s_wr.opcode) {
 158	case IB_WR_SEND:
 159		if (send->s_op) {
 160			rm = container_of(send->s_op, struct rds_message, data);
 161			rds_ib_send_unmap_data(ic, send->s_op, wc_status);
 162		}
 163		break;
 164	case IB_WR_RDMA_WRITE:
 165	case IB_WR_RDMA_READ:
 166		if (send->s_op) {
 167			rm = container_of(send->s_op, struct rds_message, rdma);
 168			rds_ib_send_unmap_rdma(ic, send->s_op, wc_status);
 169		}
 170		break;
 171	case IB_WR_ATOMIC_FETCH_AND_ADD:
 172	case IB_WR_ATOMIC_CMP_AND_SWP:
 173		if (send->s_op) {
 174			rm = container_of(send->s_op, struct rds_message, atomic);
 175			rds_ib_send_unmap_atomic(ic, send->s_op, wc_status);
 176		}
 177		break;
 178	default:
 179		printk_ratelimited(KERN_NOTICE
 180			       "RDS/IB: %s: unexpected opcode 0x%x in WR!\n",
 181			       __func__, send->s_wr.opcode);
 182		break;
 183	}
 184
 185	send->s_wr.opcode = 0xdead;
 186
 187	return rm;
 188}
 189
 190void rds_ib_send_init_ring(struct rds_ib_connection *ic)
 191{
 192	struct rds_ib_send_work *send;
 193	u32 i;
 194
 195	for (i = 0, send = ic->i_sends; i < ic->i_send_ring.w_nr; i++, send++) {
 196		struct ib_sge *sge;
 197
 198		send->s_op = NULL;
 199
 200		send->s_wr.wr_id = i;
 201		send->s_wr.sg_list = send->s_sge;
 202		send->s_wr.ex.imm_data = 0;
 203
 204		sge = &send->s_sge[0];
 205		sge->addr = ic->i_send_hdrs_dma[i];
 206
 207		sge->length = sizeof(struct rds_header);
 208		sge->lkey = ic->i_pd->local_dma_lkey;
 209
 210		send->s_sge[1].lkey = ic->i_pd->local_dma_lkey;
 211	}
 212}
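
/*
 * Note the difference from the v4.6 ring setup above: i_send_hdrs_dma is
 * now indexed per slot (i_send_hdrs_dma[i]) rather than being one DMA
 * region offset by i * sizeof(struct rds_header), so header slots are no
 * longer assumed contiguous; rds_ib_xmit below brackets each header write
 * with ib_dma_sync_single_for_cpu()/ib_dma_sync_single_for_device().
 */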
 213
 214void rds_ib_send_clear_ring(struct rds_ib_connection *ic)
 215{
 216	struct rds_ib_send_work *send;
 217	u32 i;
 218
 219	for (i = 0, send = ic->i_sends; i < ic->i_send_ring.w_nr; i++, send++) {
 220		if (send->s_op && send->s_wr.opcode != 0xdead)
 221			rds_ib_send_unmap_op(ic, send, IB_WC_WR_FLUSH_ERR);
 222	}
 223}
 224
 225/*
 226 * The only fast path caller always has a non-zero nr, so we don't
 227 * bother testing nr before performing the atomic sub.
 228 */
 229static void rds_ib_sub_signaled(struct rds_ib_connection *ic, int nr)
 230{
 231	if ((atomic_sub_return(nr, &ic->i_signaled_sends) == 0) &&
 232	    waitqueue_active(&rds_ib_ring_empty_wait))
 233		wake_up(&rds_ib_ring_empty_wait);
 234	BUG_ON(atomic_read(&ic->i_signaled_sends) < 0);
 235}
 236
 237/*
 238 * The _oldest/_free ring operations here race cleanly with the alloc/unalloc
 239 * operations performed in the send path.  As the sender allocs and potentially
 240 * unallocs the next free entry in the ring, it doesn't alter which entry is
 241 * the next to be freed, which is what this handler is concerned with.
 242 */
 243void rds_ib_send_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc)
 244{
 245	struct rds_message *rm = NULL;
 246	struct rds_connection *conn = ic->conn;
 247	struct rds_ib_send_work *send;
 248	u32 completed;
 249	u32 oldest;
 250	u32 i = 0;
 251	int nr_sig = 0;
 252
 253
 254	rdsdebug("wc wr_id 0x%llx status %u (%s) byte_len %u imm_data %u\n",
 255		 (unsigned long long)wc->wr_id, wc->status,
 256		 ib_wc_status_msg(wc->status), wc->byte_len,
 257		 be32_to_cpu(wc->ex.imm_data));
 258	rds_ib_stats_inc(s_ib_tx_cq_event);
 259
 260	if (wc->wr_id == RDS_IB_ACK_WR_ID) {
 261		if (time_after(jiffies, ic->i_ack_queued + HZ / 2))
 262			rds_ib_stats_inc(s_ib_tx_stalled);
 263		rds_ib_ack_send_complete(ic);
 264		return;
 265	}
 266
 267	oldest = rds_ib_ring_oldest(&ic->i_send_ring);
 268
 269	completed = rds_ib_ring_completed(&ic->i_send_ring, wc->wr_id, oldest);
 270
 271	for (i = 0; i < completed; i++) {
 272		send = &ic->i_sends[oldest];
 273		if (send->s_wr.send_flags & IB_SEND_SIGNALED)
 274			nr_sig++;
 275
 276		rm = rds_ib_send_unmap_op(ic, send, wc->status);
 277
 278		if (time_after(jiffies, send->s_queued + HZ / 2))
 279			rds_ib_stats_inc(s_ib_tx_stalled);
 280
 281		if (send->s_op) {
 282			if (send->s_op == rm->m_final_op) {
 283				/* If anyone waited for this message to get
 284				 * flushed out, wake them up now
 285				 */
 286				rds_message_unmapped(rm);
 287			}
 288			rds_message_put(rm);
 289			send->s_op = NULL;
 290		}
 291
 292		oldest = (oldest + 1) % ic->i_send_ring.w_nr;
 293	}
 294
 295	rds_ib_ring_free(&ic->i_send_ring, completed);
  296	rds_ib_sub_signaled(ic, nr_sig);
 297
 298	if (test_and_clear_bit(RDS_LL_SEND_FULL, &conn->c_flags) ||
 299	    test_bit(0, &conn->c_map_queued))
 300		queue_delayed_work(rds_wq, &conn->c_send_w, 0);
 301
 302	/* We expect errors as the qp is drained during shutdown */
 303	if (wc->status != IB_WC_SUCCESS && rds_conn_up(conn)) {
 304		rds_ib_conn_error(conn, "send completion on <%pI6c,%pI6c,%d> had status %u (%s), vendor err 0x%x, disconnecting and reconnecting\n",
 305				  &conn->c_laddr, &conn->c_faddr,
 306				  conn->c_tos, wc->status,
 307				  ib_wc_status_msg(wc->status), wc->vendor_err);
 308	}
 309}
 310
 311/*
 312 * This is the main function for allocating credits when sending
 313 * messages.
 314 *
 315 * Conceptually, we have two counters:
 316 *  -	send credits: this tells us how many WRs we're allowed
 317 *	to submit without overrunning the receiver's queue. For
 318 *	each SEND WR we post, we decrement this by one.
 319 *
 320 *  -	posted credits: this tells us how many WRs we recently
 321 *	posted to the receive queue. This value is transferred
 322 *	to the peer as a "credit update" in an RDS header field.
 323 *	Every time we transmit credits to the peer, we subtract
 324 *	the amount of transferred credits from this counter.
 325 *
 326 * It is essential that we avoid situations where both sides have
 327 * exhausted their send credits, and are unable to send new credits
 328 * to the peer. We achieve this by requiring that we send at least
 329 * one credit update to the peer before exhausting our credits.
 330 * When new credits arrive, we subtract one credit that is withheld
 331 * until we've posted new buffers and are ready to transmit these
 332 * credits (see rds_ib_send_add_credits below).
 333 *
 334 * The RDS send code is essentially single-threaded; rds_send_xmit
 335 * sets RDS_IN_XMIT to ensure exclusive access to the send ring.
 336 * However, the ACK sending code is independent and can race with
 337 * message SENDs.
 338 *
 339 * In the send path, we need to update the counters for send credits
 340 * and the counter of posted buffers atomically - when we use the
 341 * last available credit, we cannot allow another thread to race us
 342 * and grab the posted credits counter.  Hence, we have to use a
 343 * spinlock to protect the credit counter, or use atomics.
 344 *
 345 * Spinlocks shared between the send and the receive path are bad,
 346 * because they create unnecessary delays. An early implementation
 347 * using a spinlock showed a 5% degradation in throughput at some
 348 * loads.
 349 *
 350 * This implementation avoids spinlocks completely, putting both
 351 * counters into a single atomic, and updating that atomic using
 352 * atomic_add (in the receive path, when receiving fresh credits),
 353 * and using atomic_cmpxchg when updating the two counters.
 354 */
 355int rds_ib_send_grab_credits(struct rds_ib_connection *ic,
 356			     u32 wanted, u32 *adv_credits, int need_posted, int max_posted)
 357{
 358	unsigned int avail, posted, got = 0, advertise;
 359	long oldval, newval;
 360
 361	*adv_credits = 0;
 362	if (!ic->i_flowctl)
 363		return wanted;
 364
 365try_again:
 366	advertise = 0;
 367	oldval = newval = atomic_read(&ic->i_credits);
 368	posted = IB_GET_POST_CREDITS(oldval);
 369	avail = IB_GET_SEND_CREDITS(oldval);
 370
 371	rdsdebug("wanted=%u credits=%u posted=%u\n",
 372			wanted, avail, posted);
 373
 374	/* The last credit must be used to send a credit update. */
 375	if (avail && !posted)
 376		avail--;
 377
 378	if (avail < wanted) {
 379		struct rds_connection *conn = ic->i_cm_id->context;
 380
 381		/* Oops, there aren't that many credits left! */
 382		set_bit(RDS_LL_SEND_FULL, &conn->c_flags);
 383		got = avail;
 384	} else {
 385		/* Sometimes you get what you want, lalala. */
 386		got = wanted;
 387	}
 388	newval -= IB_SET_SEND_CREDITS(got);
 389
 390	/*
 391	 * If need_posted is non-zero, then the caller wants the posted
 392	 * credits returned regardless of whether any send credits are
 393	 * available.
 394	 */
 395	if (posted && (got || need_posted)) {
 396		advertise = min_t(unsigned int, posted, max_posted);
 397		newval -= IB_SET_POST_CREDITS(advertise);
 398	}
 399
 400	/* Finally bill everything */
 401	if (atomic_cmpxchg(&ic->i_credits, oldval, newval) != oldval)
 402		goto try_again;
 403
 404	*adv_credits = advertise;
 405	return got;
 406}
 407
 408void rds_ib_send_add_credits(struct rds_connection *conn, unsigned int credits)
 409{
 410	struct rds_ib_connection *ic = conn->c_transport_data;
 411
 412	if (credits == 0)
 413		return;
 414
 415	rdsdebug("credits=%u current=%u%s\n",
 416			credits,
 417			IB_GET_SEND_CREDITS(atomic_read(&ic->i_credits)),
 418			test_bit(RDS_LL_SEND_FULL, &conn->c_flags) ? ", ll_send_full" : "");
 419
 420	atomic_add(IB_SET_SEND_CREDITS(credits), &ic->i_credits);
 421	if (test_and_clear_bit(RDS_LL_SEND_FULL, &conn->c_flags))
 422		queue_delayed_work(rds_wq, &conn->c_send_w, 0);
 423
 424	WARN_ON(IB_GET_SEND_CREDITS(credits) >= 16384);
 425
 426	rds_ib_stats_inc(s_ib_rx_credit_updates);
 427}
 428
 429void rds_ib_advertise_credits(struct rds_connection *conn, unsigned int posted)
 430{
 431	struct rds_ib_connection *ic = conn->c_transport_data;
 432
 433	if (posted == 0)
 434		return;
 435
 436	atomic_add(IB_SET_POST_CREDITS(posted), &ic->i_credits);
 437
 438	/* Decide whether to send an update to the peer now.
 439	 * If we would send a credit update for every single buffer we
 440	 * post, we would end up with an ACK storm (ACK arrives,
 441	 * consumes buffer, we refill the ring, send ACK to remote
 442	 * advertising the newly posted buffer... ad inf)
 443	 *
 444	 * Performance pretty much depends on how often we send
 445	 * credit updates - too frequent updates mean lots of ACKs.
 446	 * Too infrequent updates, and the peer will run out of
 447 * credits and have to throttle.
 448	 * For the time being, 16 seems to be a good compromise.
 449	 */
 450	if (IB_GET_POST_CREDITS(atomic_read(&ic->i_credits)) >= 16)
 451		set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
 452}
 453
 454static inline int rds_ib_set_wr_signal_state(struct rds_ib_connection *ic,
 455					     struct rds_ib_send_work *send,
 456					     bool notify)
 457{
 458	/*
 459	 * We want to delay signaling completions just enough to get
 460	 * the batching benefits but not so much that we create dead time
 461	 * on the wire.
 462	 */
 463	if (ic->i_unsignaled_wrs-- == 0 || notify) {
 464		ic->i_unsignaled_wrs = rds_ib_sysctl_max_unsig_wrs;
 465		send->s_wr.send_flags |= IB_SEND_SIGNALED;
 466		return 1;
 467	}
 468	return 0;
 469}
 470
 471/*
 472 * This can be called multiple times for a given message.  The first time
 473 * we see a message we map its scatterlist into the IB device so that
 474 * we can provide that mapped address to the IB scatter gather entries
 475 * in the IB work requests.  We translate the scatterlist into a series
 476 * of work requests that fragment the message.  These work requests complete
 477 * in order so we pass ownership of the message to the completion handler
 478 * once we send the final fragment.
 479 *
 480 * The RDS core uses the c_send_lock to only enter this function once
 481 * per connection.  This makes sure that the tx ring alloc/unalloc pairs
 482 * don't get out of sync and confuse the ring.
 483 */
 484int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
 485		unsigned int hdr_off, unsigned int sg, unsigned int off)
 486{
 487	struct rds_ib_connection *ic = conn->c_transport_data;
 488	struct ib_device *dev = ic->i_cm_id->device;
 489	struct rds_ib_send_work *send = NULL;
 490	struct rds_ib_send_work *first;
 491	struct rds_ib_send_work *prev;
 492	const struct ib_send_wr *failed_wr;
 493	struct scatterlist *scat;
 494	u32 pos;
 495	u32 i;
 496	u32 work_alloc;
 497	u32 credit_alloc = 0;
 498	u32 posted;
 499	u32 adv_credits = 0;
 500	int send_flags = 0;
 501	int bytes_sent = 0;
 502	int ret;
 503	int flow_controlled = 0;
 504	int nr_sig = 0;
 505
 506	BUG_ON(off % RDS_FRAG_SIZE);
 507	BUG_ON(hdr_off != 0 && hdr_off != sizeof(struct rds_header));
 508
 509	/* Do not send cong updates to IB loopback */
 510	if (conn->c_loopback
 511	    && rm->m_inc.i_hdr.h_flags & RDS_FLAG_CONG_BITMAP) {
 512		rds_cong_map_updated(conn->c_fcong, ~(u64) 0);
 513		scat = &rm->data.op_sg[sg];
 514		ret = max_t(int, RDS_CONG_MAP_BYTES, scat->length);
 515		return sizeof(struct rds_header) + ret;
 516	}
 517
 518	/* FIXME we may overallocate here */
 519	if (be32_to_cpu(rm->m_inc.i_hdr.h_len) == 0)
 520		i = 1;
 521	else
 522		i = DIV_ROUND_UP(be32_to_cpu(rm->m_inc.i_hdr.h_len), RDS_FRAG_SIZE);
 523
 524	work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, i, &pos);
 525	if (work_alloc == 0) {
 526		set_bit(RDS_LL_SEND_FULL, &conn->c_flags);
 527		rds_ib_stats_inc(s_ib_tx_ring_full);
 528		ret = -ENOMEM;
 529		goto out;
 530	}
 531
 532	if (ic->i_flowctl) {
 533		credit_alloc = rds_ib_send_grab_credits(ic, work_alloc, &posted, 0, RDS_MAX_ADV_CREDIT);
 534		adv_credits += posted;
 535		if (credit_alloc < work_alloc) {
 536			rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - credit_alloc);
 537			work_alloc = credit_alloc;
 538			flow_controlled = 1;
 539		}
 540		if (work_alloc == 0) {
 541			set_bit(RDS_LL_SEND_FULL, &conn->c_flags);
 542			rds_ib_stats_inc(s_ib_tx_throttle);
 543			ret = -ENOMEM;
 544			goto out;
 545		}
 546	}
 547
 548	/* map the message the first time we see it */
 549	if (!ic->i_data_op) {
 550		if (rm->data.op_nents) {
 551			rm->data.op_count = ib_dma_map_sg(dev,
 552							  rm->data.op_sg,
 553							  rm->data.op_nents,
 554							  DMA_TO_DEVICE);
 555			rdsdebug("ic %p mapping rm %p: %d\n", ic, rm, rm->data.op_count);
 556			if (rm->data.op_count == 0) {
 557				rds_ib_stats_inc(s_ib_tx_sg_mapping_failure);
 558				rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
 559				ret = -ENOMEM; /* XXX ? */
 560				goto out;
 561			}
 562		} else {
 563			rm->data.op_count = 0;
 564		}
 565
 566		rds_message_addref(rm);
 567		rm->data.op_dmasg = 0;
 568		rm->data.op_dmaoff = 0;
 569		ic->i_data_op = &rm->data;
 570
 571		/* Finalize the header */
 572		if (test_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags))
 573			rm->m_inc.i_hdr.h_flags |= RDS_FLAG_ACK_REQUIRED;
 574		if (test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags))
 575			rm->m_inc.i_hdr.h_flags |= RDS_FLAG_RETRANSMITTED;
 576
 577		/* If it has an RDMA op, tell the peer we did it. This is
 578		 * used by the peer to release use-once RDMA MRs. */
 579		if (rm->rdma.op_active) {
 580			struct rds_ext_header_rdma ext_hdr;
 581
 582			ext_hdr.h_rdma_rkey = cpu_to_be32(rm->rdma.op_rkey);
 583			rds_message_add_extension(&rm->m_inc.i_hdr,
 584					RDS_EXTHDR_RDMA, &ext_hdr, sizeof(ext_hdr));
 585		}
 586		if (rm->m_rdma_cookie) {
 587			rds_message_add_rdma_dest_extension(&rm->m_inc.i_hdr,
 588					rds_rdma_cookie_key(rm->m_rdma_cookie),
 589					rds_rdma_cookie_offset(rm->m_rdma_cookie));
 590		}
 591
 592		/* Note - rds_ib_piggyb_ack clears the ACK_REQUIRED bit, so
 593		 * we should not do this unless we have a chance of at least
 594		 * sticking the header into the send ring. Which is why we
 595		 * should call rds_ib_ring_alloc first. */
 596		rm->m_inc.i_hdr.h_ack = cpu_to_be64(rds_ib_piggyb_ack(ic));
 597		rds_message_make_checksum(&rm->m_inc.i_hdr);
 598
 599		/*
 600		 * Update adv_credits since we reset the ACK_REQUIRED bit.
 601		 */
 602		if (ic->i_flowctl) {
 603			rds_ib_send_grab_credits(ic, 0, &posted, 1, RDS_MAX_ADV_CREDIT - adv_credits);
 604			adv_credits += posted;
 605			BUG_ON(adv_credits > 255);
 606		}
 607	}
 608
 609	/* Sometimes you want to put a fence between an RDMA
 610	 * READ and the following SEND.
 611	 * We could either do this all the time
 612	 * or when requested by the user. Right now, we let
 613	 * the application choose.
 614	 */
 615	if (rm->rdma.op_active && rm->rdma.op_fence)
 616		send_flags = IB_SEND_FENCE;
 617
 618	/* Each frag gets a header. Msgs may be 0 bytes */
 619	send = &ic->i_sends[pos];
 620	first = send;
 621	prev = NULL;
 622	scat = &ic->i_data_op->op_sg[rm->data.op_dmasg];
 623	i = 0;
 624	do {
 625		unsigned int len = 0;
 626
 627		/* Set up the header */
 628		send->s_wr.send_flags = send_flags;
 629		send->s_wr.opcode = IB_WR_SEND;
 630		send->s_wr.num_sge = 1;
 631		send->s_wr.next = NULL;
 632		send->s_queued = jiffies;
 633		send->s_op = NULL;
 634
 635		send->s_sge[0].addr = ic->i_send_hdrs_dma[pos];
 636
 637		send->s_sge[0].length = sizeof(struct rds_header);
 638		send->s_sge[0].lkey = ic->i_pd->local_dma_lkey;
 639
 640		ib_dma_sync_single_for_cpu(ic->rds_ibdev->dev,
 641					   ic->i_send_hdrs_dma[pos],
 642					   sizeof(struct rds_header),
 643					   DMA_TO_DEVICE);
 644		memcpy(ic->i_send_hdrs[pos], &rm->m_inc.i_hdr,
 645		       sizeof(struct rds_header));
  646
 647
 648		/* Set up the data, if present */
 649		if (i < work_alloc
 650		    && scat != &rm->data.op_sg[rm->data.op_count]) {
 651			len = min(RDS_FRAG_SIZE,
 652				  sg_dma_len(scat) - rm->data.op_dmaoff);
 653			send->s_wr.num_sge = 2;
 654
 655			send->s_sge[1].addr = sg_dma_address(scat);
 656			send->s_sge[1].addr += rm->data.op_dmaoff;
 657			send->s_sge[1].length = len;
 658			send->s_sge[1].lkey = ic->i_pd->local_dma_lkey;
 659
 660			bytes_sent += len;
 661			rm->data.op_dmaoff += len;
 662			if (rm->data.op_dmaoff == sg_dma_len(scat)) {
 663				scat++;
 664				rm->data.op_dmasg++;
 665				rm->data.op_dmaoff = 0;
 666			}
 667		}
 668
 669		rds_ib_set_wr_signal_state(ic, send, false);
 670
 671		/*
 672		 * Always signal the last one if we're stopping due to flow control.
 673		 */
 674		if (ic->i_flowctl && flow_controlled && i == (work_alloc - 1)) {
 675			rds_ib_set_wr_signal_state(ic, send, true);
 676			send->s_wr.send_flags |= IB_SEND_SOLICITED;
 677		}
 678
 679		if (send->s_wr.send_flags & IB_SEND_SIGNALED)
 680			nr_sig++;
 681
 682		rdsdebug("send %p wr %p num_sge %u next %p\n", send,
 683			 &send->s_wr, send->s_wr.num_sge, send->s_wr.next);
 684
 685		if (ic->i_flowctl && adv_credits) {
 686			struct rds_header *hdr = ic->i_send_hdrs[pos];
 687
 688			/* add credit and redo the header checksum */
 689			hdr->h_credit = adv_credits;
 690			rds_message_make_checksum(hdr);
 691			adv_credits = 0;
 692			rds_ib_stats_inc(s_ib_tx_credit_updates);
 693		}
 694		ib_dma_sync_single_for_device(ic->rds_ibdev->dev,
 695					      ic->i_send_hdrs_dma[pos],
 696					      sizeof(struct rds_header),
 697					      DMA_TO_DEVICE);
 698
 699		if (prev)
 700			prev->s_wr.next = &send->s_wr;
 701		prev = send;
 702
 703		pos = (pos + 1) % ic->i_send_ring.w_nr;
 704		send = &ic->i_sends[pos];
 705		i++;
 706
 707	} while (i < work_alloc
 708		 && scat != &rm->data.op_sg[rm->data.op_count]);
 709
  710	/* Account for the RDS header in the number of bytes we sent, but just
  711	 * once; the caller has no concept of fragmentation. */
 712	if (hdr_off == 0)
 713		bytes_sent += sizeof(struct rds_header);
 714
 715	/* if we finished the message then send completion owns it */
 716	if (scat == &rm->data.op_sg[rm->data.op_count]) {
 717		prev->s_op = ic->i_data_op;
 718		prev->s_wr.send_flags |= IB_SEND_SOLICITED;
 719		if (!(prev->s_wr.send_flags & IB_SEND_SIGNALED))
  720			nr_sig += rds_ib_set_wr_signal_state(ic, prev, true);
  721		ic->i_data_op = NULL;
 722	}
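	/* From here on, the send completion path owns the data op: once
	 * the HCA reaps the final work request it unmaps the sg list and
	 * runs the message's completion handling. */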
 723
 724	/* Put back wrs & credits we didn't use */
 725	if (i < work_alloc) {
 726		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - i);
 727		work_alloc = i;
 728	}
 729	if (ic->i_flowctl && i < credit_alloc)
 730		rds_ib_send_add_credits(conn, credit_alloc - i);
 731
 732	if (nr_sig)
 733		atomic_add(nr_sig, &ic->i_signaled_sends);
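	/* i_signaled_sends counts posted work requests that will raise a
	 * completion; shutdown waits for it to drain before tearing down
	 * the queue pair, while unsignaled WRs are only reaped implicitly
	 * when a later signaled WR completes. */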
 734
 735	/* XXX need to worry about failed_wr and partial sends. */
 736	failed_wr = &first->s_wr;
 737	ret = ib_post_send(ic->i_cm_id->qp, &first->s_wr, &failed_wr);
 738	rdsdebug("ic %p first %p (wr %p) ret %d wr %p\n", ic,
 739		 first, &first->s_wr, ret, failed_wr);
 740	BUG_ON(failed_wr != &first->s_wr);
 741	if (ret) {
 742		printk(KERN_WARNING "RDS/IB: ib_post_send to %pI6c "
 743		       "returned %d\n", &conn->c_faddr, ret);
 744		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
 745		rds_ib_sub_signaled(ic, nr_sig);
 746		if (prev->s_op) {
 747			ic->i_data_op = prev->s_op;
 748			prev->s_op = NULL;
 749		}
 750
 751		rds_ib_conn_error(ic->conn, "ib_post_send failed\n");
 752		goto out;
 753	}
 754
 755	ret = bytes_sent;
 756out:
 757	BUG_ON(adv_credits);
 758	return ret;
 759}
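/*
 * For orientation, a rough sketch of how the core send path drives this
 * function (illustrative, not a verbatim quote of rds_send_xmit): the
 * transport's xmit hook is called as the message is worked through, and
 * -ENOMEM is treated as "ring full, retry once completions free slots":
 *
 *	ret = conn->c_trans->xmit(conn, rm, hdr_off, sg, off);
 *	if (ret == -ENOMEM)
 *		requeue rm and wait for send completions;
 */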
 760
  761/*
  762 * Issue an atomic operation.
  763 * A simplified version of the rdma case: we always map one SG entry,
  764 * of only 8 bytes, for the return value from the atomic operation.
  765 */
 766int rds_ib_xmit_atomic(struct rds_connection *conn, struct rm_atomic_op *op)
 767{
 768	struct rds_ib_connection *ic = conn->c_transport_data;
 769	struct rds_ib_send_work *send = NULL;
  770	const struct ib_send_wr *failed_wr;
  771	u32 pos;
 772	u32 work_alloc;
 773	int ret;
 774	int nr_sig = 0;
  775
  776	work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, 1, &pos);
  777	if (work_alloc != 1) {
  778		rds_ib_stats_inc(s_ib_tx_ring_full);
 779		ret = -ENOMEM;
 780		goto out;
 781	}
 782
 783	/* address of send request in ring */
 784	send = &ic->i_sends[pos];
 785	send->s_queued = jiffies;
 786
 787	if (op->op_type == RDS_ATOMIC_TYPE_CSWP) {
 788		send->s_atomic_wr.wr.opcode = IB_WR_MASKED_ATOMIC_CMP_AND_SWP;
 789		send->s_atomic_wr.compare_add = op->op_m_cswp.compare;
 790		send->s_atomic_wr.swap = op->op_m_cswp.swap;
 791		send->s_atomic_wr.compare_add_mask = op->op_m_cswp.compare_mask;
 792		send->s_atomic_wr.swap_mask = op->op_m_cswp.swap_mask;
 793	} else { /* FADD */
 794		send->s_atomic_wr.wr.opcode = IB_WR_MASKED_ATOMIC_FETCH_AND_ADD;
 795		send->s_atomic_wr.compare_add = op->op_m_fadd.add;
 796		send->s_atomic_wr.swap = 0;
 797		send->s_atomic_wr.compare_add_mask = op->op_m_fadd.nocarry_mask;
 798		send->s_atomic_wr.swap_mask = 0;
 799	}
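	/* With the masked atomic verbs, compare_add_mask selects which
	 * bits participate in the compare (or, for fetch-and-add, marks
	 * boundaries across which carries must not propagate) and
	 * swap_mask selects which bits the swap may modify. */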
 800	send->s_wr.send_flags = 0;
 801	nr_sig = rds_ib_set_wr_signal_state(ic, send, op->op_notify);
 802	send->s_atomic_wr.wr.num_sge = 1;
 803	send->s_atomic_wr.wr.next = NULL;
 804	send->s_atomic_wr.remote_addr = op->op_remote_addr;
 805	send->s_atomic_wr.rkey = op->op_rkey;
 806	send->s_op = op;
 807	rds_message_addref(container_of(send->s_op, struct rds_message, atomic));
 808
  809	/* map the 8-byte retval buffer to the device */
 810	ret = ib_dma_map_sg(ic->i_cm_id->device, op->op_sg, 1, DMA_FROM_DEVICE);
 811	rdsdebug("ic %p mapping atomic op %p. mapped %d pg\n", ic, op, ret);
 812	if (ret != 1) {
 813		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
 814		rds_ib_stats_inc(s_ib_tx_sg_mapping_failure);
 815		ret = -ENOMEM; /* XXX ? */
 816		goto out;
 817	}
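	/* DMA_FROM_DEVICE because the HCA writes the return value of the
	 * atomic (the prior contents of the remote address) into this
	 * local 8-byte buffer. */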
 818
 819	/* Convert our struct scatterlist to struct ib_sge */
 820	send->s_sge[0].addr = sg_dma_address(op->op_sg);
 821	send->s_sge[0].length = sg_dma_len(op->op_sg);
 822	send->s_sge[0].lkey = ic->i_pd->local_dma_lkey;
 823
 824	rdsdebug("rva %Lx rpa %Lx len %u\n", op->op_remote_addr,
 825		 send->s_sge[0].addr, send->s_sge[0].length);
 826
 827	if (nr_sig)
 828		atomic_add(nr_sig, &ic->i_signaled_sends);
 829
 830	failed_wr = &send->s_atomic_wr.wr;
 831	ret = ib_post_send(ic->i_cm_id->qp, &send->s_atomic_wr.wr, &failed_wr);
 832	rdsdebug("ic %p send %p (wr %p) ret %d wr %p\n", ic,
 833		 send, &send->s_atomic_wr, ret, failed_wr);
 834	BUG_ON(failed_wr != &send->s_atomic_wr.wr);
 835	if (ret) {
 836		printk(KERN_WARNING "RDS/IB: atomic ib_post_send to %pI6c "
 837		       "returned %d\n", &conn->c_faddr, ret);
 838		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
 839		rds_ib_sub_signaled(ic, nr_sig);
 840		goto out;
 841	}
 842
 843	if (unlikely(failed_wr != &send->s_atomic_wr.wr)) {
  844		printk(KERN_WARNING "RDS/IB: atomic ib_post_send() rc=%d, but failed_wr updated!\n", ret);
 845		BUG_ON(failed_wr != &send->s_atomic_wr.wr);
 846	}
 847
 848out:
 849	return ret;
 850}
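/*
 * The op_type and compare/swap/add/mask values consumed above come from
 * the RDS_CMSG_ATOMIC_* / RDS_CMSG_MASKED_ATOMIC_* control messages
 * (see net/rds/rdma.c); a plain, unmasked operation reaches this point
 * as the masked verb with an all-ones mask.
 */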
 851
 852int rds_ib_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op)
 853{
 854	struct rds_ib_connection *ic = conn->c_transport_data;
 855	struct rds_ib_send_work *send = NULL;
 856	struct rds_ib_send_work *first;
 857	struct rds_ib_send_work *prev;
 858	const struct ib_send_wr *failed_wr;
 859	struct scatterlist *scat;
 860	unsigned long len;
 861	u64 remote_addr = op->op_remote_addr;
 862	u32 max_sge = ic->rds_ibdev->max_sge;
 863	u32 pos;
 864	u32 work_alloc;
 865	u32 i;
 866	u32 j;
 867	int sent;
 868	int ret;
 869	int num_sge;
 870	int nr_sig = 0;
 871	u64 odp_addr = op->op_odp_addr;
 872	u32 odp_lkey = 0;
 873
 874	/* map the op the first time we see it */
 875	if (!op->op_odp_mr) {
 876		if (!op->op_mapped) {
 877			op->op_count =
 878				ib_dma_map_sg(ic->i_cm_id->device, op->op_sg,
 879					      op->op_nents,
 880					      (op->op_write) ? DMA_TO_DEVICE :
 881							       DMA_FROM_DEVICE);
 882			rdsdebug("ic %p mapping op %p: %d\n", ic, op,
 883				 op->op_count);
 884			if (op->op_count == 0) {
 885				rds_ib_stats_inc(s_ib_tx_sg_mapping_failure);
 886				ret = -ENOMEM; /* XXX ? */
 887				goto out;
 888			}
 889			op->op_mapped = 1;
 890		}
 891	} else {
 892		op->op_count = op->op_nents;
 893		odp_lkey = rds_ib_get_lkey(op->op_odp_mr->r_trans_private);
 894	}
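	/* An ODP (on-demand paging) MR is not DMA-mapped here: the HCA
	 * faults pages in as needed, so we only look up the MR's lkey
	 * and post addresses taken from op_odp_addr directly. */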
 895
  896	/*
  897	 * Instead of knowing how to return a partial rdma read/write, we
  898	 * insist that there be enough work requests to send the entire message.
  899	 */
 900	i = DIV_ROUND_UP(op->op_count, max_sge);
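	/* e.g. op_count == 70 sges with max_sge == 32 yields i == 3 */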
 901
 902	work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, i, &pos);
 903	if (work_alloc != i) {
 904		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
 905		rds_ib_stats_inc(s_ib_tx_ring_full);
 906		ret = -ENOMEM;
 907		goto out;
 908	}
 909
 910	send = &ic->i_sends[pos];
 911	first = send;
 912	prev = NULL;
 913	scat = &op->op_sg[0];
 914	sent = 0;
 915	num_sge = op->op_count;
 916
 917	for (i = 0; i < work_alloc && scat != &op->op_sg[op->op_count]; i++) {
 918		send->s_wr.send_flags = 0;
 919		send->s_queued = jiffies;
 920		send->s_op = NULL;
 921
 922		if (!op->op_notify)
 923			nr_sig += rds_ib_set_wr_signal_state(ic, send,
 924							     op->op_notify);
 925
 926		send->s_wr.opcode = op->op_write ? IB_WR_RDMA_WRITE : IB_WR_RDMA_READ;
 927		send->s_rdma_wr.remote_addr = remote_addr;
 928		send->s_rdma_wr.rkey = op->op_rkey;
 929
 930		if (num_sge > max_sge) {
 931			send->s_rdma_wr.wr.num_sge = max_sge;
 932			num_sge -= max_sge;
 933		} else {
 934			send->s_rdma_wr.wr.num_sge = num_sge;
 935		}
 936
 937		send->s_rdma_wr.wr.next = NULL;
 938
 939		if (prev)
 940			prev->s_rdma_wr.wr.next = &send->s_rdma_wr.wr;
 941
 942		for (j = 0; j < send->s_rdma_wr.wr.num_sge &&
 943		     scat != &op->op_sg[op->op_count]; j++) {
 944			len = sg_dma_len(scat);
 945			if (!op->op_odp_mr) {
 946				send->s_sge[j].addr = sg_dma_address(scat);
 947				send->s_sge[j].lkey = ic->i_pd->local_dma_lkey;
 948			} else {
 949				send->s_sge[j].addr = odp_addr;
 950				send->s_sge[j].lkey = odp_lkey;
 951			}
  952			send->s_sge[j].length = len;
  953
 954			sent += len;
 955			rdsdebug("ic %p sent %d remote_addr %llu\n", ic, sent, remote_addr);
 956
 957			remote_addr += len;
 958			odp_addr += len;
 959			scat++;
 960		}
 961
 962		rdsdebug("send %p wr %p num_sge %u next %p\n", send,
 963			&send->s_rdma_wr.wr,
 964			send->s_rdma_wr.wr.num_sge,
 965			send->s_rdma_wr.wr.next);
 966
 967		prev = send;
 968		if (++send == &ic->i_sends[ic->i_send_ring.w_nr])
 969			send = ic->i_sends;
 970	}
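	/* The work requests built above are linked through wr.next, so
	 * the single ib_post_send() below hands the whole chain to the
	 * HCA in one call. */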
 971
  972	/* attach the op to the last wr and take a ref on the message */
 973	if (scat == &op->op_sg[op->op_count]) {
 974		prev->s_op = op;
 975		rds_message_addref(container_of(op, struct rds_message, rdma));
 976	}
 977
 978	if (i < work_alloc) {
 979		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - i);
 980		work_alloc = i;
 981	}
 982
 983	if (nr_sig)
 984		atomic_add(nr_sig, &ic->i_signaled_sends);
 985
 986	failed_wr = &first->s_rdma_wr.wr;
 987	ret = ib_post_send(ic->i_cm_id->qp, &first->s_rdma_wr.wr, &failed_wr);
 988	rdsdebug("ic %p first %p (wr %p) ret %d wr %p\n", ic,
 989		 first, &first->s_rdma_wr.wr, ret, failed_wr);
 990	BUG_ON(failed_wr != &first->s_rdma_wr.wr);
 991	if (ret) {
 992		printk(KERN_WARNING "RDS/IB: rdma ib_post_send to %pI6c "
 993		       "returned %d\n", &conn->c_faddr, ret);
 994		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
 995		rds_ib_sub_signaled(ic, nr_sig);
 996		goto out;
 997	}
 998
 999	if (unlikely(failed_wr != &first->s_rdma_wr.wr)) {
 1000		printk(KERN_WARNING "RDS/IB: ib_post_send() rc=%d, but failed_wr updated!\n", ret);
1001		BUG_ON(failed_wr != &first->s_rdma_wr.wr);
1002	}
1003
1004
1005out:
1006	return ret;
1007}
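/*
 * Completion side, for reference: when the completion queue handler
 * reaps the work request carrying s_op, rds_ib_send_unmap_rdma() unmaps
 * the sg list and RDMA status is reported back to the sender according
 * to the notification semantics discussed at the top of this file.
 */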
1008
1009void rds_ib_xmit_path_complete(struct rds_conn_path *cp)
1010{
1011	struct rds_connection *conn = cp->cp_conn;
1012	struct rds_ib_connection *ic = conn->c_transport_data;
1013
1014	/* We may have a pending ACK or window update we were unable
1015	 * to send previously (due to flow control). Try again. */
1016	rds_ib_attempt_ack(ic);
1017}